hash: 37502e803d17afb4e35f39125a442b13206fa4f2052c7132d8474b373a0d48ac

# Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import sys
from io import StringIO
import numpy as np
import pytest
from astropy import units as u
from astropy.cosmology import core, flrw
from astropy.cosmology.funcs import z_at_value
from astropy.cosmology.funcs.optimize import _z_at_scalar_value
from astropy.cosmology.realizations import (
WMAP1,
WMAP3,
WMAP5,
WMAP7,
WMAP9,
Planck13,
Planck15,
Planck18,
)
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_scalar():
# These are tests of expected values, and hence have less precision
# than the roundtrip tests below (test_z_at_value_roundtrip);
# here we have to worry about the cosmological calculations
# giving slightly different values on different architectures,
    # whereas there we only check internal consistency on the same architecture
# and so can be more demanding
cosmo = Planck13
assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.19812268, rtol=1e-6)
assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr), 0.795198375, rtol=1e-6)
assert allclose(z_at_value(cosmo.distmod, 46 * u.mag), 1.991389168, rtol=1e-6)
assert allclose(
z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc), 1.36857907, rtol=1e-6
)
assert allclose(
z_at_value(cosmo.luminosity_distance, 26.037193804 * u.Gpc, ztol=1e-10),
3,
rtol=1e-9,
)
assert allclose(
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmax=2),
0.681277696,
rtol=1e-6,
)
assert allclose(
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=2.5),
3.7914908,
rtol=1e-6,
)
# test behavior when the solution is outside z limits (should
# raise a CosmologyError)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmax=0.5)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=4.0)
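# Added illustrative sketch, not part of the astropy test suite: the basic
# z_at_value pattern the test above exercises -- numerically invert a
# monotonic redshift method. Requires scipy; the value is approximate.
def _example_basic_z_at_value(cosmo=Planck13):
    # Redshift at which the universe is 2 Gyr old (roughly z ~ 3.2 for Planck13).
    return z_at_value(cosmo.age, 2 * u.Gyr)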
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
class Test_ZatValue:
def setup_class(self):
self.cosmo = Planck13
def test_broadcast_arguments(self):
"""Test broadcast of arguments."""
# broadcasting main argument
assert allclose(
z_at_value(self.cosmo.age, [2, 7] * u.Gyr),
[3.1981206134773115, 0.7562044333305182],
rtol=1e-6,
)
# basic broadcast of secondary arguments
assert allclose(
z_at_value(
self.cosmo.angular_diameter_distance,
1500 * u.Mpc,
zmin=[0, 2.5],
zmax=[2, 4],
),
[0.681277696, 3.7914908],
rtol=1e-6,
)
# more interesting broadcast
assert allclose(
z_at_value(
self.cosmo.angular_diameter_distance,
1500 * u.Mpc,
zmin=[[0, 2.5]],
zmax=[2, 4],
),
[[0.681277696, 3.7914908]],
rtol=1e-6,
)
def test_broadcast_bracket(self):
"""`bracket` has special requirements."""
# start with an easy one
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=None),
3.1981206134773115,
rtol=1e-6,
)
# now actually have a bracket
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4]),
3.1981206134773115,
rtol=1e-6,
)
# now a bad length
with pytest.raises(ValueError, match="sequence"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4, 4, 5])
# now the wrong dtype : an ndarray, but not an object array
with pytest.raises(TypeError, match="dtype"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=np.array([0, 4]))
# now an object array of brackets
bracket = np.array([[0, 4], [0, 3, 4]], dtype=object)
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=bracket),
[3.1981206134773115, 3.1981206134773115],
rtol=1e-6,
)
def test_bad_broadcast(self):
"""Shapes mismatch as expected"""
with pytest.raises(ValueError, match="broadcast"):
z_at_value(
self.cosmo.angular_diameter_distance,
1500 * u.Mpc,
zmin=[0, 2.5, 0.1],
zmax=[2, 4],
)
def test_scalar_input_to_output(self):
"""Test scalar input returns a scalar."""
z = z_at_value(
self.cosmo.angular_diameter_distance, 1500 * u.Mpc, zmin=0, zmax=2
)
assert isinstance(z, u.Quantity)
assert z.dtype == np.float64
assert z.shape == ()
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_numpyvectorize():
"""Test that numpy vectorize fails on Quantities.
If this test starts failing then numpy vectorize can be used instead of
the home-brewed vectorization. Please submit a PR making the change.
"""
z_at_value = np.vectorize(
_z_at_scalar_value, excluded=["func", "method", "verbose"]
)
with pytest.raises(u.UnitConversionError, match="dimensionless quantities"):
z_at_value(Planck15.age, 10 * u.Gyr)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_verbose(monkeypatch):
cosmo = Planck13
# Test the "verbose" flag. Since this uses "print", need to mod stdout
mock_stdout = StringIO()
monkeypatch.setattr(sys, "stdout", mock_stdout)
resx = z_at_value(cosmo.age, 2 * u.Gyr, verbose=True)
assert str(resx.value) in mock_stdout.getvalue() # test "verbose" prints res
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize("method", ["Brent", "Golden", "Bounded"])
def test_z_at_value_bracketed(method):
"""
Test 2 solutions for angular diameter distance by not constraining zmin, zmax,
but setting `bracket` on the appropriate side of the turning point z.
Setting zmin / zmax should override `bracket`.
"""
cosmo = Planck13
if method == "Bounded":
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z = z_at_value(cosmo.angular_diameter_distance, 1500 * u.Mpc, method=method)
if z > 1.6:
z = 3.7914908
bracket = (0.9, 1.5)
else:
z = 0.6812777
bracket = (1.6, 2.0)
with pytest.warns(UserWarning, match=r"Option 'bracket' is ignored"):
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=bracket,
),
z,
rtol=1e-6,
)
else:
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.3, 1.0),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(2.0, 4.0),
),
3.7914908,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.1, 1.5),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.1, 1.0, 2.0),
),
0.6812777,
rtol=1e-6,
)
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.9, 1.5),
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(1.6, 2.0),
),
3.7914908,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(1.6, 2.0),
zmax=1.6,
),
0.6812777,
rtol=1e-6,
)
assert allclose(
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(0.9, 1.5),
zmin=1.5,
),
3.7914908,
rtol=1e-6,
)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r"fval is not bracketed"):
z_at_value(
cosmo.angular_diameter_distance,
1500 * u.Mpc,
method=method,
bracket=(3.9, 5.0),
zmin=4.0,
)
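# Added illustrative sketch, not part of the astropy test suite: the two-solution
# behaviour tested above exists because the angular diameter distance has a
# maximum (near z ~ 1.6 for Planck13). One way to locate that turning point,
# assuming scipy is available:
def _example_ada_turning_point(cosmo=Planck13):
    from scipy.optimize import minimize_scalar

    # Maximize D_A(z) by minimizing its negative over a bounded interval.
    res = minimize_scalar(
        lambda z: -cosmo.angular_diameter_distance(z).value,
        bounds=(0.1, 5.0),
        method="bounded",
    )
    return res.x  # roughly z ~ 1.6 for Planck13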
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize("method", ["Brent", "Golden", "Bounded"])
def test_z_at_value_unconverged(method):
"""
    Test warnings on a non-converged solution when ``maxfun`` is set to too small
    an iteration number; only the 'Bounded' method returns a status value and a
    specific message.
"""
cosmo = Planck18
ztol = {"Brent": [1e-4, 1e-4], "Golden": [1e-3, 1e-2], "Bounded": [1e-3, 1e-1]}
if method == "Bounded":
ctx = pytest.warns(
AstropyUserWarning,
match="Solver returned 1: Maximum number of function calls reached",
)
else:
ctx = pytest.warns(AstropyUserWarning, match="Solver returned None")
with ctx:
z0 = z_at_value(
cosmo.angular_diameter_distance, 1 * u.Gpc, zmax=2, maxfun=13, method=method
)
with ctx:
z1 = z_at_value(
cosmo.angular_diameter_distance, 1 * u.Gpc, zmin=2, maxfun=13, method=method
)
assert allclose(z0, 0.32442, rtol=ztol[method][0])
assert allclose(z1, 8.18551, rtol=ztol[method][1])
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize(
"cosmo",
[
Planck13,
Planck15,
Planck18,
WMAP1,
WMAP3,
WMAP5,
WMAP7,
WMAP9,
flrw.LambdaCDM,
flrw.FlatLambdaCDM,
flrw.wpwaCDM,
flrw.w0wzCDM,
flrw.wCDM,
flrw.FlatwCDM,
flrw.w0waCDM,
flrw.Flatw0waCDM,
],
)
def test_z_at_value_roundtrip(cosmo):
"""
Calculate values from a known redshift, and then check that
z_at_value returns the right answer.
"""
z = 0.5
    # Skip Ok, w, de_density_scale because in the Planck cosmologies
    # they are redshift independent and hence uninvertible.
    # The *_distance_z1z2 methods take multiple arguments, so they require
    # special handling.
    # clone is not a redshift-dependent method.
    # nu_relative_density is not redshift-dependent in the WMAP cosmologies.
skip = (
"Ok",
"Otot",
"angular_diameter_distance_z1z2",
"clone",
"is_equivalent",
"de_density_scale",
"w",
)
if str(cosmo.name).startswith("WMAP"):
skip += ("nu_relative_density",)
methods = inspect.getmembers(cosmo, predicate=inspect.ismethod)
for name, func in methods:
if name.startswith("_") or name in skip:
continue
fval = func(z)
        # we need a bracket here to pick the right solution for
        # angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
got = z_at_value(func, fval, bracket=[0.3, 1.0], ztol=1e-12)
assert allclose(got, z, rtol=2e-11), f"Round-trip testing {name} failed"
# Test distance functions between two redshifts; only for realizations
if isinstance(cosmo.name, str):
z2 = 2.0
func_z1z2 = [
lambda z1: cosmo._comoving_distance_z1z2(z1, z2),
lambda z1: cosmo._comoving_transverse_distance_z1z2(z1, z2),
lambda z1: cosmo.angular_diameter_distance_z1z2(z1, z2),
]
for func in func_z1z2:
fval = func(z)
assert allclose(z, z_at_value(func, fval, zmax=1.5, ztol=1e-12), rtol=2e-11)

hash: b349ccf86b74ca361c0ac7d55fff030fdcbda50b7f7a75173f9773a8a0617b90

# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.__init__.py`."""
##############################################################################
# IMPORTS
import pytest
##############################################################################
# TESTS
##############################################################################
def test_getattr_error_attr_not_found():
"""Test getattr raises error for DNE."""
with pytest.raises(ImportError):
from astropy.cosmology.flrw import this_is_not_a_variable # noqa: F401

hash: 6edca3723c9ae850b110e8b986da59d2543a77e99b1406f8798cd7e38cb913a5

# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.base`."""
##############################################################################
# IMPORTS
# STDLIB
import abc
import copy
# THIRD PARTY
import numpy as np
import pytest
import astropy.constants as const
# LOCAL
import astropy.units as u
from astropy.cosmology import FLRW, FlatLambdaCDM, LambdaCDM, Planck18
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.flrw.base import _a_B_c2, _critdens_const, _H0units_to_invs, quad
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.tests.helper import get_redshift_methods
from astropy.cosmology.tests.test_core import (
CosmologyTest,
FlatCosmologyMixinTest,
ParameterTestMixin,
invalid_zs,
valid_zs,
)
from astropy.utils.compat.optional_deps import HAS_SCIPY
##############################################################################
# SETUP / TEARDOWN
class SubFLRW(FLRW):
def w(self, z):
return super().w(z)
##############################################################################
# TESTS
##############################################################################
@pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed")
def test_optional_deps_functions():
"""Test stand-in functions when optional dependencies not installed."""
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.integrate'"):
quad()
##############################################################################
class ParameterH0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` H0 on a Cosmology.
    H0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_H0(self, cosmo_cls, cosmo):
"""Test Parameter ``H0``."""
unit = u.Unit("km/(s Mpc)")
# on the class
assert isinstance(cosmo_cls.H0, Parameter)
assert "Hubble constant" in cosmo_cls.H0.__doc__
assert cosmo_cls.H0.unit == unit
# validation
assert cosmo_cls.H0.validate(cosmo, 1) == 1 * unit
assert cosmo_cls.H0.validate(cosmo, 10 * unit) == 10 * unit
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls.H0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.H0 is cosmo._H0
assert cosmo.H0 == self._cls_args["H0"]
assert isinstance(cosmo.H0, u.Quantity) and cosmo.H0.unit == unit
def test_init_H0(self, cosmo_cls, ba):
"""Test initialization for values of ``H0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0 == ba.arguments["H0"]
# also without units
ba.arguments["H0"] = ba.arguments["H0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0.value == ba.arguments["H0"]
# fails for non-scalar
ba.arguments["H0"] = u.Quantity([70, 100], u.km / u.s / u.Mpc)
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
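# Added illustrative sketch, not astropy's actual implementation: the kind of
# descriptor behaviour these Parameter mixins exercise. Class access returns the
# descriptor object itself; instance access returns the stored, validated value.
class _ExampleParameterDescriptor:
    def __set_name__(self, owner, name):
        self._attr = "_" + name

    def __get__(self, obj, objcls=None):
        return self if obj is None else getattr(obj, self._attr)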
class ParameterOm0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Om0 on a Cosmology.
    Om0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Om0(self, cosmo_cls, cosmo):
"""Test Parameter ``Om0``."""
# on the class
assert isinstance(cosmo_cls.Om0, Parameter)
assert "Omega matter" in cosmo_cls.Om0.__doc__
# validation
assert cosmo_cls.Om0.validate(cosmo, 1) == 1
assert cosmo_cls.Om0.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Om0 cannot be negative"):
cosmo_cls.Om0.validate(cosmo, -1)
# on the instance
assert cosmo.Om0 is cosmo._Om0
assert cosmo.Om0 == self._cls_args["Om0"]
assert isinstance(cosmo.Om0, float)
def test_init_Om0(self, cosmo_cls, ba):
"""Test initialization for values of ``Om0``."""
# test that it works with units
ba.arguments["Om0"] = ba.arguments["Om0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Om0 == ba.arguments["Om0"]
# also without units
ba.arguments["Om0"] = ba.arguments["Om0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Om0 == ba.arguments["Om0"]
# fails for negative numbers
ba.arguments["Om0"] = -0.27
with pytest.raises(ValueError, match="Om0 cannot be negative."):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterOde0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a Cosmology.
    Ode0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls):
"""Test Parameter ``Ode0`` on the class."""
assert isinstance(cosmo_cls.Ode0, Parameter)
assert "Omega dark energy" in cosmo_cls.Ode0.__doc__
def test_Parameter_Ode0_validation(self, cosmo_cls, cosmo):
"""Test Parameter ``Ode0`` validation."""
assert cosmo_cls.Ode0.validate(cosmo, 1.1) == 1.1
assert cosmo_cls.Ode0.validate(cosmo, 10 * u.one) == 10.0
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls.Ode0.validate(cosmo, 10 * u.km)
def test_Ode0(self, cosmo):
"""Test Parameter ``Ode0`` validation."""
# if Ode0 is a parameter, test its value
assert cosmo.Ode0 is cosmo._Ode0
assert cosmo.Ode0 == self._cls_args["Ode0"]
assert isinstance(cosmo.Ode0, float)
def test_init_Ode0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ode0``."""
# test that it works with units
ba.arguments["Ode0"] = ba.arguments["Ode0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# also without units
ba.arguments["Ode0"] = ba.arguments["Ode0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# Setting param to 0 respects that. Note this test uses ``Ode()``.
ba.arguments["Ode0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert u.allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Ode(1), 0)
# Must be dimensionless or have no units. Errors otherwise.
ba.arguments["Ode0"] = 10 * u.km
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterTcmb0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Tcmb0 on a Cosmology.
    Tcmb0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Tcmb0(self, cosmo_cls, cosmo):
"""Test Parameter ``Tcmb0``."""
# on the class
assert isinstance(cosmo_cls.Tcmb0, Parameter)
assert "Temperature of the CMB" in cosmo_cls.Tcmb0.__doc__
assert cosmo_cls.Tcmb0.unit == u.K
# validation
assert cosmo_cls.Tcmb0.validate(cosmo, 1) == 1 * u.K
assert cosmo_cls.Tcmb0.validate(cosmo, 10 * u.K) == 10 * u.K
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls.Tcmb0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.Tcmb0 is cosmo._Tcmb0
assert cosmo.Tcmb0 == self.cls_kwargs["Tcmb0"]
assert isinstance(cosmo.Tcmb0, u.Quantity) and cosmo.Tcmb0.unit == u.K
def test_init_Tcmb0(self, cosmo_cls, ba):
"""Test initialization for values of ``Tcmb0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0 == ba.arguments["Tcmb0"]
# also without units
ba.arguments["Tcmb0"] = ba.arguments["Tcmb0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0.value == ba.arguments["Tcmb0"]
# must be a scalar
ba.arguments["Tcmb0"] = u.Quantity([0.0, 2], u.K)
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterNeffTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Neff on a Cosmology.
    Neff is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Neff(self, cosmo_cls, cosmo):
"""Test Parameter ``Neff``."""
# on the class
assert isinstance(cosmo_cls.Neff, Parameter)
assert "Number of effective neutrino species" in cosmo_cls.Neff.__doc__
# validation
assert cosmo_cls.Neff.validate(cosmo, 1) == 1
assert cosmo_cls.Neff.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Neff cannot be negative"):
cosmo_cls.Neff.validate(cosmo, -1)
# on the instance
assert cosmo.Neff is cosmo._Neff
assert cosmo.Neff == self.cls_kwargs.get("Neff", 3.04)
assert isinstance(cosmo.Neff, float)
def test_init_Neff(self, cosmo_cls, ba):
"""Test initialization for values of ``Neff``."""
# test that it works with units
ba.arguments["Neff"] = ba.arguments["Neff"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
# also without units
ba.arguments["Neff"] = ba.arguments["Neff"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
ba.arguments["Neff"] = -1
with pytest.raises(ValueError):
cosmo_cls(*ba.args, **ba.kwargs)
class Parameterm_nuTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` m_nu on a Cosmology.
    m_nu is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_m_nu(self, cosmo_cls, cosmo):
"""Test Parameter ``m_nu``."""
# on the class
assert isinstance(cosmo_cls.m_nu, Parameter)
assert "Mass of neutrino species" in cosmo_cls.m_nu.__doc__
assert cosmo_cls.m_nu.unit == u.eV
assert cosmo_cls.m_nu.equivalencies == u.mass_energy()
# on the instance
# assert cosmo.m_nu is cosmo._m_nu
assert u.allclose(cosmo.m_nu, [0.0, 0.0, 0.0] * u.eV)
# set differently depending on the other inputs
if cosmo.Tnu0.value == 0:
assert cosmo.m_nu is None
elif not cosmo._massivenu: # only massless
assert u.allclose(cosmo.m_nu, 0 * u.eV)
elif self._nmasslessnu == 0: # only massive
assert cosmo.m_nu == cosmo._massivenu_mass
else: # a mix -- the most complicated case
assert u.allclose(cosmo.m_nu[: self._nmasslessnu], 0 * u.eV)
assert u.allclose(cosmo.m_nu[self._nmasslessnu], cosmo._massivenu_mass)
def test_init_m_nu(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu``.
Note this requires the class to have a property ``has_massive_nu``.
"""
# Test that it works when m_nu has units.
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert np.all(cosmo.m_nu == ba.arguments["m_nu"]) # (& checks len, unit)
assert not cosmo.has_massive_nu
assert cosmo.m_nu.unit == u.eV # explicitly check unit once.
# And it works when m_nu doesn't have units.
ba.arguments["m_nu"] = ba.arguments["m_nu"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert np.all(cosmo.m_nu.value == ba.arguments["m_nu"])
assert not cosmo.has_massive_nu
# A negative m_nu raises an exception.
tba = copy.copy(ba)
tba.arguments["m_nu"] = u.Quantity([-0.3, 0.2, 0.1], u.eV)
with pytest.raises(ValueError, match="invalid"):
cosmo_cls(*tba.args, **tba.kwargs)
def test_init_m_nu_and_Neff(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu`` and ``Neff``.
Note this test requires ``Neff`` as constructor input, and a property
``has_massive_nu``.
"""
# Mismatch with Neff = wrong number of neutrinos
tba = copy.copy(ba)
tba.arguments["Neff"] = 4.05
tba.arguments["m_nu"] = u.Quantity([0.15, 0.2, 0.1], u.eV)
with pytest.raises(ValueError, match="unexpected number of neutrino"):
cosmo_cls(*tba.args, **tba.kwargs)
# No neutrinos, but Neff
tba.arguments["m_nu"] = 0
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert not cosmo.has_massive_nu
assert len(cosmo.m_nu) == 4
assert cosmo.m_nu.unit == u.eV
assert u.allclose(cosmo.m_nu, 0 * u.eV)
        # TODO! move this test once ``test_nu_relative_density`` is created
assert u.allclose(
cosmo.nu_relative_density(1.0), 0.22710731766 * 4.05, rtol=1e-6
)
# All massive neutrinos case, len from Neff
tba.arguments["m_nu"] = 0.1 * u.eV
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.has_massive_nu
assert len(cosmo.m_nu) == 4
assert cosmo.m_nu.unit == u.eV
assert u.allclose(cosmo.m_nu, [0.1, 0.1, 0.1, 0.1] * u.eV)
def test_init_m_nu_override_by_Tcmb0(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu``.
Note this test requires ``Tcmb0`` as constructor input, and a property
``has_massive_nu``.
"""
# If Neff = 0, m_nu is None.
tba = copy.copy(ba)
tba.arguments["Neff"] = 0
        cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.m_nu is None
assert not cosmo.has_massive_nu
# If Tcmb0 = 0, m_nu is None
tba = copy.copy(ba)
tba.arguments["Tcmb0"] = 0
        cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.m_nu is None
assert not cosmo.has_massive_nu
class ParameterOb0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ob0 on a Cosmology.
    Ob0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Ob0(self, cosmo_cls, cosmo):
"""Test Parameter ``Ob0``."""
# on the class
assert isinstance(cosmo_cls.Ob0, Parameter)
assert "Omega baryon;" in cosmo_cls.Ob0.__doc__
# validation
assert cosmo_cls.Ob0.validate(cosmo, None) is None
assert cosmo_cls.Ob0.validate(cosmo, 0.1) == 0.1
assert cosmo_cls.Ob0.validate(cosmo, 0.1 * u.one) == 0.1
with pytest.raises(ValueError, match="Ob0 cannot be negative"):
cosmo_cls.Ob0.validate(cosmo, -1)
with pytest.raises(ValueError, match="baryonic density can not be larger"):
cosmo_cls.Ob0.validate(cosmo, cosmo.Om0 + 1)
# on the instance
assert cosmo.Ob0 is cosmo._Ob0
assert cosmo.Ob0 == 0.03
def test_init_Ob0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ob0``."""
# test that it works with units
assert isinstance(ba.arguments["Ob0"], u.Quantity)
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == ba.arguments["Ob0"]
# also without units
ba.arguments["Ob0"] = ba.arguments["Ob0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == ba.arguments["Ob0"]
# Setting param to 0 respects that. Note this test uses ``Ob()``.
ba.arguments["Ob0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ob(1), 0)
assert u.allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# Negative Ob0 errors
tba = copy.copy(ba)
tba.arguments["Ob0"] = -0.04
with pytest.raises(ValueError, match="Ob0 cannot be negative"):
cosmo_cls(*tba.args, **tba.kwargs)
# Ob0 > Om0 errors
tba.arguments["Ob0"] = tba.arguments["Om0"] + 0.1
with pytest.raises(ValueError, match="baryonic density can not be larger"):
cosmo_cls(*tba.args, **tba.kwargs)
# No baryons specified means baryon-specific methods fail.
tba = copy.copy(ba)
tba.arguments.pop("Ob0", None)
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
with pytest.raises(ValueError):
cosmo.Ob(1)
# also means DM fraction is undefined
with pytest.raises(ValueError):
cosmo.Odm(1)
# The default value is None
assert cosmo_cls._init_signature.parameters["Ob0"].default is None
class FLRWTest(
CosmologyTest,
ParameterH0TestMixin,
ParameterOm0TestMixin,
ParameterOde0TestMixin,
ParameterTcmb0TestMixin,
ParameterNeffTestMixin,
Parameterm_nuTestMixin,
ParameterOb0TestMixin,
):
abstract_w = False
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
# Default cosmology args and kwargs
self._cls_args = dict(
H0=70 * u.km / u.s / u.Mpc, Om0=0.27 * u.one, Ode0=0.73 * u.one
)
self.cls_kwargs = dict(
Tcmb0=3.0 * u.K,
Ob0=0.03 * u.one,
name=self.__class__.__name__,
meta={"a": "b"},
)
@pytest.fixture(scope="class")
def nonflatcosmo(self):
"""A non-flat cosmology used in equivalence tests."""
return LambdaCDM(70, 0.4, 0.8)
# ===============================================================
# Method & Attribute Tests
def test_init(self, cosmo_cls):
"""Test initialization."""
super().test_init(cosmo_cls)
# TODO! tests for initializing calculated values, e.g. `h`
# TODO! transfer tests for initializing neutrinos
def test_init_Tcmb0_zeroing(self, cosmo_cls, ba):
"""Test if setting Tcmb0 parameter to 0 influences other parameters.
TODO: consider moving this test to ``FLRWTest``
"""
ba.arguments["Tcmb0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ogamma0 == 0.0
assert cosmo.Onu0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
# ---------------------------------------------------------------
# Properties
def test_Odm0(self, cosmo_cls, cosmo):
"""Test property ``Odm0``."""
# on the class
assert isinstance(cosmo_cls.Odm0, property)
assert cosmo_cls.Odm0.fset is None # immutable
# on the instance
assert cosmo.Odm0 is cosmo._Odm0
# Odm0 can be None, if Ob0 is None. Otherwise DM = matter - baryons.
if cosmo.Ob0 is None:
assert cosmo.Odm0 is None
else:
assert np.allclose(cosmo.Odm0, cosmo.Om0 - cosmo.Ob0)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
# on the class
assert isinstance(cosmo_cls.Ok0, property)
assert cosmo_cls.Ok0.fset is None # immutable
# on the instance
assert cosmo.Ok0 is cosmo._Ok0
assert np.allclose(
cosmo.Ok0, 1.0 - (cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0)
)
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# on the class
assert isinstance(cosmo_cls.is_flat, property)
assert cosmo_cls.is_flat.fset is None # immutable
# on the instance
assert isinstance(cosmo.is_flat, bool)
assert cosmo.is_flat is bool((cosmo.Ok0 == 0.0) and (cosmo.Otot0 == 1.0))
def test_Tnu0(self, cosmo_cls, cosmo):
"""Test property ``Tnu0``."""
# on the class
assert isinstance(cosmo_cls.Tnu0, property)
assert cosmo_cls.Tnu0.fset is None # immutable
# on the instance
assert cosmo.Tnu0 is cosmo._Tnu0
assert cosmo.Tnu0.unit == u.K
assert u.allclose(cosmo.Tnu0, 0.7137658555036082 * cosmo.Tcmb0, rtol=1e-5)
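    # Added note, not from the astropy sources: the factor 0.7137658555036082
    # used above is (4 / 11) ** (1 / 3), the standard neutrino-to-photon
    # temperature ratio after electron-positron annihilation.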
def test_has_massive_nu(self, cosmo_cls, cosmo):
"""Test property ``has_massive_nu``."""
# on the class
assert isinstance(cosmo_cls.has_massive_nu, property)
assert cosmo_cls.has_massive_nu.fset is None # immutable
# on the instance
if cosmo.Tnu0 == 0:
assert cosmo.has_massive_nu is False
else:
assert cosmo.has_massive_nu is cosmo._massivenu
def test_h(self, cosmo_cls, cosmo):
"""Test property ``h``."""
# on the class
assert isinstance(cosmo_cls.h, property)
assert cosmo_cls.h.fset is None # immutable
# on the instance
assert cosmo.h is cosmo._h
assert np.allclose(cosmo.h, cosmo.H0.value / 100.0)
def test_hubble_time(self, cosmo_cls, cosmo):
"""Test property ``hubble_time``."""
# on the class
assert isinstance(cosmo_cls.hubble_time, property)
assert cosmo_cls.hubble_time.fset is None # immutable
# on the instance
assert cosmo.hubble_time is cosmo._hubble_time
assert u.allclose(cosmo.hubble_time, (1 / cosmo.H0) << u.Gyr)
def test_hubble_distance(self, cosmo_cls, cosmo):
"""Test property ``hubble_distance``."""
# on the class
assert isinstance(cosmo_cls.hubble_distance, property)
assert cosmo_cls.hubble_distance.fset is None # immutable
# on the instance
assert cosmo.hubble_distance is cosmo._hubble_distance
assert cosmo.hubble_distance == (const.c / cosmo._H0).to(u.Mpc)
def test_critical_density0(self, cosmo_cls, cosmo):
"""Test property ``critical_density0``."""
# on the class
assert isinstance(cosmo_cls.critical_density0, property)
assert cosmo_cls.critical_density0.fset is None # immutable
# on the instance
assert cosmo.critical_density0 is cosmo._critical_density0
assert cosmo.critical_density0.unit == u.g / u.cm**3
cd0value = _critdens_const * (cosmo.H0.value * _H0units_to_invs) ** 2
assert cosmo.critical_density0.value == cd0value
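        # Added note, not from the astropy sources: this check encodes
        # rho_crit0 = 3 H0**2 / (8 pi G); _critdens_const is assumed to hold
        # 3 / (8 pi G) in cgs units and _H0units_to_invs the conversion of H0
        # from km / (s Mpc) to 1 / s.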
def test_Ogamma0(self, cosmo_cls, cosmo):
"""Test property ``Ogamma0``."""
# on the class
assert isinstance(cosmo_cls.Ogamma0, property)
assert cosmo_cls.Ogamma0.fset is None # immutable
# on the instance
assert cosmo.Ogamma0 is cosmo._Ogamma0
        # Ogamma0 \propto Tcmb0^4 / rho_crit0
expect = _a_B_c2 * cosmo.Tcmb0.value**4 / cosmo.critical_density0.value
assert np.allclose(cosmo.Ogamma0, expect)
# check absolute equality to 0 if Tcmb0 is 0
if cosmo.Tcmb0 == 0:
assert cosmo.Ogamma0 == 0
def test_Onu0(self, cosmo_cls, cosmo):
"""Test property ``Onu0``."""
# on the class
assert isinstance(cosmo_cls.Onu0, property)
assert cosmo_cls.Onu0.fset is None # immutable
# on the instance
assert cosmo.Onu0 is cosmo._Onu0
# neutrino temperature <= photon temperature since the neutrinos
# decouple first.
if cosmo.has_massive_nu: # Tcmb0 > 0 & has massive
# check the expected formula
assert cosmo.Onu0 == cosmo.Ogamma0 * cosmo.nu_relative_density(0)
            # a sanity check on the ratio of neutrinos to photons
# technically it could be 1, but not for any of the tested cases.
assert cosmo.nu_relative_density(0) <= 1
elif cosmo.Tcmb0 == 0:
assert cosmo.Onu0 == 0
else:
# check the expected formula
assert cosmo.Onu0 == 0.22710731766 * cosmo._Neff * cosmo.Ogamma0
# and check compatibility with nu_relative_density
assert np.allclose(
cosmo.nu_relative_density(0), 0.22710731766 * cosmo._Neff
)
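        # Added note, not from the astropy sources: the constant 0.22710731766
        # used above is (7 / 8) * (4 / 11) ** (4 / 3), the energy-density ratio
        # of one massless neutrino species to photons.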
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`."""
assert (
cosmo.Otot0
== cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ode0 + cosmo.Ok0
)
# ---------------------------------------------------------------
# Methods
_FLRW_redshift_methods = get_redshift_methods(
FLRW, include_private=True, include_z2=False
)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize("method", _FLRW_redshift_methods)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
with pytest.raises(exc):
getattr(cosmo, method)(z)
@pytest.mark.parametrize("z", valid_zs)
@abc.abstractmethod
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.w`.
Since ``w`` is abstract, each test class needs to define further tests.
"""
# super().test_w(cosmo, z) # NOT b/c abstract `w(z)`
w = cosmo.w(z)
assert np.shape(w) == np.shape(z) # test same shape
assert u.Quantity(w).unit == u.one # test no units or dimensionless
# -------------------------------------------
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
# super().test_Otot(cosmo) # NOT b/c abstract `w(z)`
assert np.allclose(
cosmo.Otot(z),
cosmo.Om(z) + cosmo.Ogamma(z) + cosmo.Onu(z) + cosmo.Ode(z) + cosmo.Ok(z),
)
def test_scale_factor0(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.scale_factor`."""
assert isinstance(cosmo.scale_factor0, u.Quantity)
assert cosmo.scale_factor0.unit == u.one
assert cosmo.scale_factor0 == 1
assert np.allclose(cosmo.scale_factor0, cosmo.scale_factor(0))
@pytest.mark.parametrize("z", valid_zs)
def test_scale_factor(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.scale_factor`."""
assert np.allclose(cosmo.scale_factor(z), 1 / (1 + np.array(z)))
# ---------------------------------------------------------------
def test_efunc_vs_invefunc(self, cosmo):
"""Test that ``efunc`` and ``inv_efunc`` give inverse values.
Note that the test doesn't need scipy because it doesn't need to call
``de_density_scale``.
"""
# super().test_efunc_vs_invefunc(cosmo) # NOT b/c abstract `w(z)`
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# ---------------------------------------------------------------
# from Cosmology
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# don't change any values
kwargs = cosmo._init_arguments.copy()
kwargs.pop("name", None) # make sure not setting name
kwargs.pop("meta", None) # make sure not setting name
c = cosmo.clone(**kwargs)
assert c.__class__ == cosmo.__class__
assert c == cosmo
# change ``H0``
# Note that H0 affects Ode0 because it changes Ogamma0
c = cosmo.clone(H0=100)
assert c.__class__ == cosmo.__class__
assert c.name == cosmo.name + " (modified)"
assert c.H0.value == 100
for n in set(cosmo.__parameters__) - {"H0"}:
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(
v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)
)
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
# change multiple things
c = cosmo.clone(name="new name", H0=100, Tcmb0=2.8, meta=dict(zz="tops"))
assert c.__class__ == cosmo.__class__
assert c.name == "new name"
assert c.H0.value == 100
assert c.Tcmb0.value == 2.8
assert c.meta == {**cosmo.meta, **dict(zz="tops")}
for n in set(cosmo.__parameters__) - {"H0", "Tcmb0"}:
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(
v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1)
)
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
assert not u.allclose(c.Tcmb0.value, cosmo.Tcmb0.value)
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to CosmologyTest
# test against a FlatFLRWMixin
# case (3) in FLRW.is_equivalent
if isinstance(cosmo, FlatLambdaCDM):
assert cosmo.is_equivalent(Planck18)
assert Planck18.is_equivalent(cosmo)
else:
assert not cosmo.is_equivalent(Planck18)
assert not Planck18.is_equivalent(cosmo)
# ===============================================================
# Usage Tests
# TODO: this test should be subsumed by other tests
@pytest.mark.parametrize("method", ("Om", "Ode", "w", "de_density_scale"))
def test_distance_broadcast(self, cosmo, method):
"""Test distance methods broadcast z correctly."""
g = getattr(cosmo, method)
z = np.linspace(0.1, 1, 6)
z2d = z.reshape(2, 3)
z3d = z.reshape(3, 2, 1)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z2d)
assert value_2d.shape == z2d.shape
value_3d = g(z3d)
assert value_3d.shape == z3d.shape
assert u.allclose(value_flat, value_2d.flatten())
assert u.allclose(value_flat, value_3d.flatten())
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.")
def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
"""Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`.
These do not come from external codes -- they are just internal checks to make
sure nothing changes if we muck with the distance calculators.
"""
z = np.array([1.0, 2.0, 3.0, 4.0])
cosmo = cosmo_cls(*args, **kwargs)
assert u.allclose(cosmo.comoving_distance(z), expected, rtol=1e-4)
class TestFLRW(FLRWTest):
"""Test :class:`astropy.cosmology.FLRW`."""
abstract_w = True
def setup_class(self):
"""
Setup for testing.
FLRW is abstract, so tests are done on a subclass.
"""
super().setup_class(self)
# make sure SubCosmology is known
_COSMOLOGY_CLASSES["SubFLRW"] = SubFLRW
self.cls = SubFLRW
def teardown_class(self):
super().teardown_class(self)
_COSMOLOGY_CLASSES.pop("SubFLRW", None)
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# Methods
def test_w(self, cosmo):
"""Test abstract :meth:`astropy.cosmology.FLRW.w`."""
with pytest.raises(NotImplementedError, match="not implemented"):
cosmo.w(1)
def test_Otot(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
with pytest.raises(exception):
assert cosmo.Otot(1)
def test_efunc_vs_invefunc(self, cosmo):
"""
Test that efunc and inv_efunc give inverse values.
Here they just fail b/c no ``w(z)`` or no scipy.
"""
exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
with pytest.raises(exception):
cosmo.efunc(0.5)
with pytest.raises(exception):
cosmo.inv_efunc(0.5)
_FLRW_redshift_methods = get_redshift_methods(
FLRW, include_private=True, include_z2=False
) - {"w"}
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize("method", _FLRW_redshift_methods)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
with pytest.raises(exc):
getattr(cosmo, method)(z)
# ===============================================================
# Usage Tests
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.")
@pytest.mark.parametrize("method", ("Om", "Ode", "w", "de_density_scale"))
def test_distance_broadcast(self, cosmo, method):
with pytest.raises(NotImplementedError):
super().test_distance_broadcast(cosmo, method)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.")
@pytest.mark.parametrize(
("args", "kwargs", "expected"),
[((70, 0.27, 0.73), {"Tcmb0": 3.0, "Ob0": 0.03}, None)],
)
def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
with pytest.raises(NotImplementedError):
super().test_comoving_distance_example(cosmo_cls, args, kwargs, expected)
# -----------------------------------------------------------------------------
class ParameterFlatOde0TestMixin(ParameterOde0TestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a flat Cosmology.
This will augment or override some tests in ``ParameterOde0TestMixin``.
    Ode0 is a descriptor, which is tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls):
"""Test Parameter ``Ode0`` on the class."""
super().test_Parameter_Ode0(cosmo_cls)
assert cosmo_cls.Ode0.derived in (True, np.True_)
def test_Ode0(self, cosmo):
"""Test no-longer-Parameter ``Ode0``."""
assert cosmo.Ode0 is cosmo._Ode0
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0)
def test_init_Ode0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ode0``."""
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ok0)
# Ode0 is not in the signature
with pytest.raises(TypeError, match="Ode0"):
cosmo_cls(*ba.args, **ba.kwargs, Ode0=1)
class FlatFLRWMixinTest(FlatCosmologyMixinTest, ParameterFlatOde0TestMixin):
"""Tests for :class:`astropy.cosmology.FlatFLRWMixin` subclasses.
    E.g. to use this class::
class TestFlatSomeFLRW(FlatFLRWMixinTest, TestSomeFLRW):
...
"""
def setup_class(self):
"""Setup for testing.
Set up as for regular FLRW test class, but remove dark energy component
        since flat cosmologies are forbidden from taking ``Ode0`` as an argument;
see ``test_init_subclass``.
"""
super().setup_class(self)
self._cls_args.pop("Ode0")
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test initializing subclass, mostly that can't have Ode0 in init."""
super().test_init_subclass(cosmo_cls)
with pytest.raises(TypeError, match="subclasses of"):
class HASOde0SubClass(cosmo_cls):
def __init__(self, Ode0):
pass
_COSMOLOGY_CLASSES.pop(HASOde0SubClass.__qualname__, None)
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
super().test_init(cosmo_cls)
cosmo = cosmo_cls(*self.cls_args, **self.cls_kwargs)
assert cosmo._Ok0 == 0.0
assert cosmo._Ode0 == 1.0 - (
cosmo._Om0 + cosmo._Ogamma0 + cosmo._Onu0 + cosmo._Ok0
)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
super().test_Ok0(cosmo_cls, cosmo)
# for flat cosmologies, Ok0 is not *close* to 0, it *is* 0
assert cosmo.Ok0 == 0.0
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`. Should always be 1."""
super().test_Otot0(cosmo)
# for flat cosmologies, Otot0 is not *close* to 1, it *is* 1
assert cosmo.Otot0 == 1.0
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`. Should always be 1."""
super().test_Otot(cosmo, z)
# for flat cosmologies, Otot is 1, within precision.
assert u.allclose(cosmo.Otot(z), 1.0)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize("method", FLRWTest._FLRW_redshift_methods - {"Otot"})
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
# ---------------------------------------------------------------
def test_clone_to_nonflat_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_to_nonflat_change_param(cosmo)
# change Ode0, without non-flat
with pytest.raises(TypeError):
cosmo.clone(Ode0=1)
# change to non-flat
nc = cosmo.clone(to_nonflat=True, Ode0=cosmo.Ode0)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
nc = cosmo.clone(to_nonflat=True, Ode0=1)
assert nc.Ode0 == 1.0
assert nc.name == cosmo.name + " (modified)"
# ---------------------------------------------------------------
def test_is_equivalent(self, cosmo, nonflatcosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to TestFLRW
# against non-flat Cosmology
assert not cosmo.is_equivalent(nonflatcosmo)
assert not nonflatcosmo.is_equivalent(cosmo)
# non-flat version of class
nonflat_cosmo_cls = cosmo.__nonflatclass__
# keys check in `test_is_equivalent_nonflat_class_different_params`
# non-flat
nonflat = nonflat_cosmo_cls(*self.cls_args, Ode0=0.9, **self.cls_kwargs)
assert not nonflat.is_equivalent(cosmo)
assert not cosmo.is_equivalent(nonflat)
# flat, but not FlatFLRWMixin
flat = nonflat_cosmo_cls(
*self.cls_args,
Ode0=1.0 - cosmo.Om0 - cosmo.Ogamma0 - cosmo.Onu0,
**self.cls_kwargs
)
flat._Ok0 = 0.0
assert flat.is_equivalent(cosmo)
assert cosmo.is_equivalent(flat)
def test_repr(self, cosmo_cls, cosmo):
"""
Test method ``.__repr__()``. Skip non-flat superclass test.
        e.g. `TestFlatLambdaCDM` -> `FlatFLRWMixinTest`
        vs `TestFlatLambdaCDM` -> `TestLambdaCDM` -> `FlatFLRWMixinTest`
"""
FLRWTest.test_repr(self, cosmo_cls, cosmo)
# test eliminated Ode0 from parameters
assert "Ode0" not in repr(cosmo)

hash: ecc438d3935c0c656a5d297a8cb9fa55f2555443d9b92c486d48d67a4b8314b5

# Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import random
import numpy as np
import pytest
from astropy.cosmology._io.model import _CosmologyModel, from_model, to_model
from astropy.cosmology.core import Cosmology
from astropy.cosmology.tests.helper import get_redshift_methods
from astropy.modeling.models import Gaussian1D
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromModelTestMixin(ToFromTestMixinBase):
"""Tests for a Cosmology[To/From]Format with ``format="astropy.model"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
"""
@pytest.fixture(scope="class")
def method_name(self, cosmo):
# get methods, ignoring private and dunder
methods = get_redshift_methods(cosmo, include_private=False, include_z2=True)
# dynamically detect ABC and optional dependencies
for n in tuple(methods):
params = inspect.signature(getattr(cosmo, n)).parameters.keys()
            # ABC can't introspect for good input
            ERROR_SIEVE = (NotImplementedError, ValueError)
            if not HAS_SCIPY:
                ERROR_SIEVE = ERROR_SIEVE + (ModuleNotFoundError,)
            args = np.arange(len(params)) + 1
            try:
                getattr(cosmo, n)(*args)
            except ERROR_SIEVE:
                methods.discard(n)
# TODO! pytest doesn't currently allow multiple yields (`cosmo`) so
# testing with 1 random method
# yield from methods
return random.choice(tuple(methods)) if methods else None
# ===============================================================
def test_fromformat_model_wrong_cls(self, from_format):
"""Test when Model is not the correct class."""
model = Gaussian1D(amplitude=10, mean=14)
with pytest.raises(AttributeError):
from_format(model)
def test_toformat_model_not_method(self, to_format):
"""Test when method is not a method."""
with pytest.raises(AttributeError):
to_format("astropy.model", method="this is definitely not a method.")
def test_toformat_model_not_callable(self, to_format):
"""Test when method is actually an attribute."""
with pytest.raises(ValueError):
to_format("astropy.model", method="name")
def test_toformat_model(self, cosmo, to_format, method_name):
"""Test cosmology -> astropy.model."""
if method_name is None: # no test if no method
return
model = to_format("astropy.model", method=method_name)
assert isinstance(model, _CosmologyModel)
# Parameters
expect = tuple(n for n in cosmo.__parameters__ if getattr(cosmo, n) is not None)
assert model.param_names == expect
# scalar result
args = np.arange(model.n_inputs) + 1
got = model.evaluate(*args)
expected = getattr(cosmo, method_name)(*args)
assert np.all(got == expected)
got = model(*args)
expected = getattr(cosmo, method_name)(*args)
np.testing.assert_allclose(got, expected)
# vector result
if "scalar" not in method_name:
args = (np.ones((model.n_inputs, 3)).T + np.arange(model.n_inputs)).T
got = model.evaluate(*args)
expected = getattr(cosmo, method_name)(*args)
assert np.all(got == expected)
got = model(*args)
expected = getattr(cosmo, method_name)(*args)
np.testing.assert_allclose(got, expected)
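    # Added usage sketch, not part of the astropy test suite (assumes Planck18 is
    # importable and that "lookback_time" is a valid one-argument redshift method):
    #
    #     from astropy.cosmology import Planck18
    #     model = Planck18.to_format("astropy.model", method="lookback_time")
    #     model(1.0)  # evaluates Planck18.lookback_time(1.0) through the model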
def test_tofromformat_model_instance(
self, cosmo_cls, cosmo, method_name, to_format, from_format
):
"""Test cosmology -> astropy.model -> cosmology."""
if method_name is None: # no test if no method
return
# ------------
# To Model
# this also serves as a test of all added methods / attributes
# in _CosmologyModel.
model = to_format("astropy.model", method=method_name)
assert isinstance(model, _CosmologyModel)
assert model.cosmology_class is cosmo_cls
assert model.cosmology == cosmo
assert model.method_name == method_name
# ------------
# From Model
# it won't error if everything matches up
got = from_format(model, format="astropy.model")
assert got == cosmo
assert set(cosmo.meta.keys()).issubset(got.meta.keys())
# Note: model adds parameter attributes to the metadata
# also it auto-identifies 'format'
got = from_format(model)
assert got == cosmo
assert set(cosmo.meta.keys()).issubset(got.meta.keys())
def test_fromformat_model_subclass_partial_info(self):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
pass # there's no partial information with a Model
@pytest.mark.parametrize("format", [True, False, None, "astropy.model"])
def test_is_equivalent_to_model(self, cosmo, method_name, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a model.
"""
if method_name is None: # no test if no method
return
obj = to_format("astropy.model", method=method_name)
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (format is not False)
class TestToFromModel(ToFromDirectTestBase, ToFromModelTestMixin):
"""Directly test ``to/from_model``."""
def setup_class(self):
self.functions = {"to": to_model, "from": from_model}

hash: 79c067d8b675e1f41c25573893967d6ba452c447f197918489090858ef8be683

# Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
import numpy as np
import pytest
from astropy.cosmology import Cosmology
from astropy.cosmology._io.mapping import from_mapping, to_mapping
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromMappingTestMixin(ToFromTestMixinBase):
"""Tests for a Cosmology[To/From]Format with ``format="mapping"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_mapping_default(self, cosmo, to_format):
"""Test default usage of Cosmology -> mapping."""
m = to_format("mapping")
keys = tuple(m.keys())
assert isinstance(m, dict)
# Check equality of all expected items
assert keys[0] == "cosmology"
assert m.pop("cosmology") is cosmo.__class__
assert keys[1] == "name"
assert m.pop("name") == cosmo.name
for i, k in enumerate(cosmo.__parameters__, start=2):
assert keys[i] == k
assert np.array_equal(m.pop(k), getattr(cosmo, k))
assert keys[-1] == "meta"
assert m.pop("meta") == cosmo.meta
# No unexpected items
assert not m
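    # Added usage sketch, not part of the astropy test suite (assumes Planck18 is
    # importable; keys shown are illustrative of the layout checked above):
    #
    #     from astropy.cosmology import Planck18
    #     m = Planck18.to_format("mapping")
    #     # m["cosmology"] -> the cosmology class, m["name"] -> "Planck18",
    #     # then one key per parameter ("H0", "Om0", ...), and finally m["meta"].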
def test_to_mapping_wrong_cls(self, to_format):
"""Test incorrect argument ``cls`` in ``to_mapping()``."""
with pytest.raises(TypeError, match="'cls' must be"):
to_format("mapping", cls=list)
@pytest.mark.parametrize("map_cls", [dict, OrderedDict])
def test_to_mapping_cls(self, to_format, map_cls):
"""Test argument ``cls`` in ``to_mapping()``."""
m = to_format("mapping", cls=map_cls)
assert isinstance(m, map_cls) # test type
def test_to_mapping_cosmology_as_str(self, cosmo_cls, to_format):
"""Test argument ``cosmology_as_str`` in ``to_mapping()``."""
default = to_format("mapping")
# Cosmology is the class
m = to_format("mapping", cosmology_as_str=False)
assert isinstance(m["cosmology"], type)
assert cosmo_cls is m["cosmology"]
assert m == default # False is the default option
# Cosmology is a string
m = to_format("mapping", cosmology_as_str=True)
assert isinstance(m["cosmology"], str)
assert m["cosmology"] == cosmo_cls.__qualname__ # Correct class
assert tuple(m.keys())[0] == "cosmology" # Stayed at same index
def test_tofrom_mapping_cosmology_as_str(self, cosmo, to_format, from_format):
"""Test roundtrip with ``cosmology_as_str=True``.
The test for the default option (`False`) is in ``test_tofrom_mapping_instance``.
"""
m = to_format("mapping", cosmology_as_str=True)
got = from_format(m, format="mapping")
assert got == cosmo
assert got.meta == cosmo.meta
def test_to_mapping_move_from_meta(self, to_format):
"""Test argument ``move_from_meta`` in ``to_mapping()``."""
default = to_format("mapping")
# Metadata is 'separate' from main mapping
m = to_format("mapping", move_from_meta=False)
assert "meta" in m.keys()
assert not any(k in m for k in m["meta"]) # Not added to main
assert m == default # False is the default option
# Metadata is mixed into main mapping.
m = to_format("mapping", move_from_meta=True)
assert "meta" not in m.keys()
assert all(k in m for k in default["meta"]) # All added to main
# The parameters take precedence over the metadata
assert all(np.array_equal(v, m[k]) for k, v in default.items() if k != "meta")
def test_tofrom_mapping_move_tofrom_meta(self, cosmo, to_format, from_format):
"""Test roundtrip of ``move_from/to_meta`` in ``to/from_mapping()``."""
# Metadata is mixed into main mapping.
m = to_format("mapping", move_from_meta=True)
# (Just adding something to ensure there's 'metadata')
m["mismatching"] = "will error"
# (Tests are different if the last argument is a **kwarg)
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = from_format(m, format="mapping")
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# Reading with mismatching parameters errors...
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(m, format="mapping")
# unless mismatched are moved to meta.
got = from_format(m, format="mapping", move_to_meta=True)
assert got == cosmo # (Doesn't check metadata)
assert got.meta["mismatching"] == "will error"
def test_to_mapping_rename_conflict(self, cosmo, to_format):
"""Test ``rename`` in ``to_mapping()``."""
to_rename = {"name": "name", "H0": "H_0"}
match = (
"'renames' values must be disjoint from 'map' keys, "
"the common keys are: {'name'}"
)
with pytest.raises(ValueError, match=match):
to_format("mapping", rename=to_rename)
def test_from_mapping_rename_conflict(self, cosmo, to_format, from_format):
"""Test ``rename`` in `from_mapping()``."""
m = to_format("mapping")
match = (
"'renames' values must be disjoint from 'map' keys, "
"the common keys are: {'name'}"
)
with pytest.raises(ValueError, match=match):
from_format(m, format="mapping", rename={"name": "name", "H0": "H_0"})
def test_tofrom_mapping_rename_roundtrip(self, cosmo, to_format, from_format):
"""Test roundtrip in ``to/from_mapping()`` with ``rename``."""
to_rename = {"name": "cosmo_name"}
m = to_format("mapping", rename=to_rename)
assert "name" not in m
assert "cosmo_name" in m
# Wrong names = error
with pytest.raises(
TypeError, match="there are unused parameters {'cosmo_name':"
):
from_format(m, format="mapping")
        # Roundtrip: correct names = success
from_rename = {v: k for k, v in to_rename.items()}
got = from_format(m, format="mapping", rename=from_rename)
assert got == cosmo
# -----------------------------------------------------
def test_from_not_mapping(self, cosmo, from_format):
"""Test incorrect map type in ``from_mapping()``."""
with pytest.raises((TypeError, ValueError)):
from_format("NOT A MAP", format="mapping")
def test_from_mapping_default(self, cosmo, to_format, from_format):
"""Test (cosmology -> Mapping) -> cosmology."""
m = to_format("mapping")
# Read from exactly as given.
got = from_format(m, format="mapping")
assert got == cosmo
assert got.meta == cosmo.meta
# Reading auto-identifies 'format'
got = from_format(m)
assert got == cosmo
assert got.meta == cosmo.meta
def test_fromformat_subclass_partial_info_mapping(self, cosmo):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
m = cosmo.to_format("mapping")
# partial information
m.pop("cosmology", None)
m.pop("Tcmb0", None)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo.__class__.from_format(m, format="mapping")
got2 = Cosmology.from_format(m, format="mapping", cosmology=cosmo.__class__)
got3 = Cosmology.from_format(
m, format="mapping", cosmology=cosmo.__class__.__qualname__
)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo.__class__._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
@pytest.mark.parametrize("format", [True, False, None, "mapping"])
def test_is_equivalent_to_mapping(self, cosmo, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a mapping.
"""
obj = to_format("mapping")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (format is not False)
class TestToFromMapping(ToFromDirectTestBase, ToFromMappingTestMixin):
"""Directly test ``to/from_mapping``."""
def setup_class(self):
self.functions = {"to": to_mapping, "from": from_mapping}
@pytest.mark.skip("N/A")
def test_fromformat_subclass_partial_info_mapping(self):
"""This test does not apply to the direct functions."""
|
83b48d4df6f8ca6ec8a59443ea6b00afc4dfe81319af9ba74bcec0a5479068ae | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.cosmology import Cosmology
from astropy.cosmology._io.table import from_table, to_table
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.table import QTable, Table, vstack
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromTableTestMixin(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="astropy.table"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_table_bad_index(self, from_format, to_format):
"""Test if argument ``index`` is incorrect"""
tbl = to_format("astropy.table")
        # the table has a single row, but a non-0/None index is requested
with pytest.raises(IndexError, match="index 2 out of range"):
from_format(tbl, index=2, format="astropy.table")
        # a string index that doesn't match any row
with pytest.raises(KeyError, match="No matches found for key"):
from_format(tbl, index="row 0", format="astropy.table")
# -----------------------
def test_to_table_failed_cls(self, to_format):
"""Test failed table type."""
with pytest.raises(TypeError, match="'cls' must be"):
to_format("astropy.table", cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
def test_to_table_cls(self, to_format, tbl_cls):
tbl = to_format("astropy.table", cls=tbl_cls)
assert isinstance(tbl, tbl_cls) # test type
# -----------------------
@pytest.mark.parametrize("in_meta", [True, False])
def test_to_table_in_meta(self, cosmo_cls, to_format, in_meta):
"""Test where the cosmology class is placed."""
tbl = to_format("astropy.table", cosmology_in_meta=in_meta)
# if it's in metadata, it's not a column. And vice versa.
if in_meta:
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.colnames # not also a column
else:
assert tbl["cosmology"][0] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.meta
# -----------------------
def test_to_table(self, cosmo_cls, cosmo, to_format):
"""Test cosmology -> astropy.table."""
tbl = to_format("astropy.table")
# Test properties of Table.
assert isinstance(tbl, QTable)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl["name"] == cosmo.name
assert tbl.indices # indexed
# Test each Parameter column has expected information.
for n in cosmo.__parameters__:
P = getattr(cosmo_cls, n) # Parameter
col = tbl[n] # Column
# Compare the two
assert col.info.name == P.name
assert col.info.description == P.__doc__
assert col.info.meta == (cosmo.meta.get(n) or {})
# -----------------------
def test_from_not_table(self, cosmo, from_format):
"""Test not passing a Table to the Table parser."""
with pytest.raises((TypeError, ValueError)):
from_format("NOT A TABLE", format="astropy.table")
def test_tofrom_table_instance(self, cosmo_cls, cosmo, from_format, to_format):
"""Test cosmology -> astropy.table -> cosmology."""
tbl = to_format("astropy.table")
# add information
tbl["mismatching"] = "will error"
# tests are different if the last argument is a **kwarg
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = from_format(tbl, format="astropy.table")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(tbl, format="astropy.table")
# unless mismatched are moved to meta
got = from_format(tbl, format="astropy.table", move_to_meta=True)
assert got == cosmo
assert got.meta["mismatching"] == "will error"
# it won't error if everything matches up
tbl.remove_column("mismatching")
got = from_format(tbl, format="astropy.table")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``to_format``.
tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
got = from_format(tbl, format="astropy.table")
assert got == cosmo
# also it auto-identifies 'format'
got = from_format(tbl)
assert got == cosmo
def test_fromformat_table_subclass_partial_info(
self, cosmo_cls, cosmo, from_format, to_format
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
# test to_format
tbl = to_format("astropy.table")
assert isinstance(tbl, QTable)
# partial information
tbl.meta.pop("cosmology", None)
del tbl["Tcmb0"]
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.from_format(tbl, format="astropy.table")
got2 = from_format(tbl, format="astropy.table", cosmology=cosmo_cls)
got3 = from_format(
tbl, format="astropy.table", cosmology=cosmo_cls.__qualname__
)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
@pytest.mark.parametrize("add_index", [True, False])
def test_tofrom_table_mutlirow(self, cosmo_cls, cosmo, from_format, add_index):
"""Test if table has multiple rows."""
# ------------
# To Table
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
tbl = vstack(
[c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts="silent",
)
assert isinstance(tbl, QTable)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl[1]["name"] == cosmo.name
# whether to add an index. `from_format` can work with or without.
if add_index:
tbl.add_index("name", unique=True)
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
from_format(tbl, format="astropy.table")
# unless the index argument is provided
got = from_format(tbl, index=1, format="astropy.table")
assert got == cosmo
# the index can be a string
got = from_format(tbl, index=cosmo.name, format="astropy.table")
assert got == cosmo
# when there's more than one cosmology found
tbls = vstack([tbl, tbl], metadata_conflicts="silent")
with pytest.raises(ValueError, match="more than one"):
from_format(tbls, index=cosmo.name, format="astropy.table")
def test_tofrom_table_rename(self, cosmo, to_format, from_format):
"""Test renaming columns in row."""
rename = {"name": "cosmo_name"}
table = to_format("astropy.table", rename=rename)
assert "name" not in table.colnames
assert "cosmo_name" in table.colnames
# Error if just reading
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(table)
# Roundtrip
inv_rename = {v: k for k, v in rename.items()}
got = from_format(table, rename=inv_rename)
assert got == cosmo
def test_from_table_renamed_index_column(self, cosmo, to_format, from_format):
"""Test reading from a table with a renamed index column."""
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
tbl = vstack(
[c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts="silent",
)
tbl.rename_column("name", "cosmo_name")
inv_rename = {"cosmo_name": "name"}
newcosmo = from_format(
tbl, index="row 0", rename=inv_rename, format="astropy.table"
)
assert newcosmo == cosmo1
@pytest.mark.parametrize("format", [True, False, None, "astropy.table"])
def test_is_equivalent_to_table(self, cosmo, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a |Table|.
"""
obj = to_format("astropy.table")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (format is not False)
class TestToFromTable(ToFromDirectTestBase, ToFromTableTestMixin):
"""Directly test ``to/from_table``."""
def setup_class(self):
self.functions = {"to": to_table, "from": from_table}
|
f281429e74c8ce8affb7811dc55ab29b19ce003533a0231e679feec5860aed03 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import astropy.units as u
from astropy.cosmology import Cosmology, FlatLambdaCDM, Planck18
from astropy.cosmology import units as cu
from astropy.cosmology._io.yaml import (
from_yaml,
to_yaml,
yaml_constructor,
yaml_representer,
)
from astropy.io.misc.yaml import AstropyDumper, dump, load
from .base import ToFromDirectTestBase, ToFromTestMixinBase
##############################################################################
# Test Serializer
def test_yaml_representer():
"""Test :func:`~astropy.cosmology._io.yaml.yaml_representer`."""
# test function `representer`
representer = yaml_representer("!astropy.cosmology.flrw.LambdaCDM")
assert callable(representer)
# test the normal method of dumping to YAML
yml = dump(Planck18)
assert isinstance(yml, str)
assert yml.startswith("!astropy.cosmology.flrw.FlatLambdaCDM")
def test_yaml_constructor():
"""Test :func:`~astropy.cosmology._io.yaml.yaml_constructor`."""
# test function `constructor`
constructor = yaml_constructor(FlatLambdaCDM)
assert callable(constructor)
# it's too hard to manually construct a node, so we only test dump/load
# this is also a good round-trip test
yml = dump(Planck18)
with u.add_enabled_units(cu): # needed for redshift units
cosmo = load(yml)
assert isinstance(cosmo, FlatLambdaCDM)
assert cosmo == Planck18
assert cosmo.meta == Planck18.meta
##############################################################################
# Test Unified I/O
class ToFromYAMLTestMixin(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="yaml"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
"""
@pytest.fixture
def xfail_if_not_registered_with_yaml(self, cosmo_cls):
"""
YAML I/O only works on registered classes. So the thing to check is
if this class is registered. If not, :func:`pytest.xfail` this test.
Some of the tests define custom cosmologies. They are not registered.
"""
if cosmo_cls not in AstropyDumper.yaml_representers:
pytest.xfail(
f"Cosmologies of type {cosmo_cls} are not registered with YAML."
)
# ===============================================================
def test_to_yaml(self, cosmo_cls, to_format, xfail_if_not_registered_with_yaml):
"""Test cosmology -> YAML."""
yml = to_format("yaml")
assert isinstance(yml, str) # test type
assert yml.startswith("!" + ".".join(cosmo_cls.__module__.split(".")[:2]))
# e.g. "astropy.cosmology" for built-in cosmologies, or "__main__" for the test
# SubCosmology class defined in ``astropy.cosmology.tests.test_core``.
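        # Concretely (per the serializer test above), dumping a built-in
        # cosmology such as Planck18 yields a string beginning with the tag
        #     !astropy.cosmology.flrw.FlatLambdaCDM
        # followed by its serialized parameters.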
def test_from_yaml_default(
self, cosmo, to_format, from_format, xfail_if_not_registered_with_yaml
):
"""Test cosmology -> YAML -> cosmology."""
yml = to_format("yaml")
got = from_format(yml, format="yaml") # (cannot autoidentify)
assert got.name == cosmo.name
assert got.meta == cosmo.meta
# it won't error if everything matches up
got = from_format(yml, format="yaml")
assert got == cosmo
assert got.meta == cosmo.meta
# auto-identify test moved because it doesn't work.
# see test_from_yaml_autoidentify
def test_from_yaml_autoidentify(
self, cosmo, to_format, from_format, xfail_if_not_registered_with_yaml
):
"""As a non-path string, it does NOT auto-identifies 'format'.
TODO! this says there should be different types of I/O registries.
not just hacking object conversion on top of file I/O.
"""
assert self.can_autodentify("yaml") is False
# Showing the specific error. The str is interpreted as a file location
# but is too long a file name.
yml = to_format("yaml")
with pytest.raises((FileNotFoundError, OSError)): # OSError in Windows
from_format(yml)
# # TODO! this is a challenging test to write. It's also unlikely to happen.
# def test_fromformat_subclass_partial_info_yaml(self, cosmo):
# """
# Test writing from an instance and reading from that class.
# This works with missing information.
# """
# -----------------------------------------------------
@pytest.mark.parametrize("format", [True, False, None])
def test_is_equivalent_to_yaml(
self, cosmo, to_format, format, xfail_if_not_registered_with_yaml
):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a YAML string. YAML can't be identified without "format" specified.
"""
obj = to_format("yaml")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is False
def test_is_equivalent_to_yaml_specify_format(
self, cosmo, to_format, xfail_if_not_registered_with_yaml
):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
Same as ``test_is_equivalent_to_yaml`` but with ``format="yaml"``.
"""
assert cosmo.is_equivalent(to_format("yaml"), format="yaml") is True
class TestToFromYAML(ToFromDirectTestBase, ToFromYAMLTestMixin):
"""
Directly test ``to/from_yaml``.
These are not public API and are discouraged from use, in favor of
``Cosmology.to/from_format(..., format="yaml")``, but should be tested
regardless b/c 3rd party packages might use these in their Cosmology I/O.
Also, it's cheap to test.
"""
def setup_class(self):
"""Set up fixtures to use ``to/from_yaml``, not the I/O abstractions."""
self.functions = {"to": to_yaml, "from": from_yaml}
@pytest.fixture(scope="class", autouse=True)
def setup(self):
"""
Setup and teardown for tests.
This overrides from super because `ToFromDirectTestBase` adds a custom
Cosmology ``CosmologyWithKwargs`` that is not registered with YAML.
"""
yield # run tests
def test_from_yaml_autoidentify(self, cosmo, to_format, from_format):
"""
        When calling the function directly there is no auto-identification,
        so this overrides the test from ``ToFromYAMLTestMixin``.
"""
|
b8d7b24ae5de68e9ebcf0c175dd284ef0133d4a214f662fbd39b6da0a538cdf2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.cosmology._io.latex import _FORMAT_TABLE, write_latex
from astropy.io.registry.base import IORegistryError
from astropy.table import QTable, Table
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
class WriteLATEXTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Write] with ``format="latex"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.mark.parametrize("format", ["latex", "ascii.latex"])
def test_to_latex_failed_cls(self, write, tmp_path, format):
"""Test failed table type."""
fp = tmp_path / "test_to_latex_failed_cls.tex"
with pytest.raises(TypeError, match="'cls' must be"):
write(fp, format=format, cls=list)
@pytest.mark.parametrize("format", ["latex", "ascii.latex"])
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
def test_to_latex_cls(self, write, tbl_cls, tmp_path, format):
fp = tmp_path / "test_to_latex_cls.tex"
write(fp, format=format, cls=tbl_cls)
@pytest.mark.parametrize("format", ["latex", "ascii.latex"])
def test_latex_columns(self, write, tmp_path, format):
fp = tmp_path / "test_rename_latex_columns.tex"
write(fp, format=format, latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
# For now, Cosmology class and name are stored in first 2 slots
for column_name in tbl.colnames[2:]:
assert column_name in _FORMAT_TABLE.values()
@pytest.mark.parametrize("format", ["latex", "ascii.latex"])
def test_write_latex_invalid_path(self, write, format):
"""Test passing an invalid path"""
invalid_fp = ""
with pytest.raises(FileNotFoundError, match="No such file or directory"):
write(invalid_fp, format=format)
@pytest.mark.parametrize("format", ["latex", "ascii.latex"])
def test_write_latex_false_overwrite(self, write, tmp_path, format):
"""Test to write a LaTeX file without overwriting an existing file"""
        # Writing to an existing file with overwrite=False must raise OSError
fp = tmp_path / "test_write_latex_false_overwrite.tex"
write(fp, format="latex")
with pytest.raises(OSError, match="overwrite=True"):
write(fp, format=format, overwrite=False)
def test_write_latex_unsupported_format(self, write, tmp_path):
"""Test for unsupported format"""
fp = tmp_path / "test_write_latex_unsupported_format.tex"
invalid_format = "unsupported"
        # the direct function raises ValueError ("format must be 'latex' or
        # 'ascii.latex'"), while the I/O registry raises IORegistryError
        # ("No writer defined for format")
        with pytest.raises((ValueError, IORegistryError)):
            write(fp, format=invalid_format)
class TestReadWriteLaTex(ReadWriteDirectTestBase, WriteLATEXTestMixin):
"""
Directly test ``write_latex``.
These are not public API and are discouraged from use, in favor of
``Cosmology.write(..., format="latex")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"write": write_latex}
@pytest.mark.parametrize("format", ["latex", "ascii.latex"])
def test_rename_direct_latex_columns(self, write, tmp_path, format):
"""Tests renaming columns"""
fp = tmp_path / "test_rename_latex_columns.tex"
write(fp, format=format, latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
for column_name in tbl.colnames[2:]:
            # for now, the Cosmology class and name are stored in the first 2 slots
assert column_name in _FORMAT_TABLE.values()
|
cc536c5a98aa16ab47c288fabcfd07fe18e561c88648e92c75236e9a03cecc5f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import astropy.units as u
from astropy.cosmology._io.html import _FORMAT_TABLE, read_html_table, write_html_table
from astropy.cosmology.parameter import Parameter
from astropy.table import QTable, Table, vstack
from astropy.units.decorators import NoneType
from astropy.utils.compat.optional_deps import HAS_BS4
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
class ReadWriteHTMLTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="ascii.html"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_bad_index(self, read, write, tmp_path):
"""Test if argument ``index`` is incorrect"""
fp = tmp_path / "test_to_html_table_bad_index.html"
write(fp, format="ascii.html")
        # the table has a single row, but a non-0/None index is requested
with pytest.raises(IndexError, match="index 2 out of range"):
read(fp, index=2, format="ascii.html")
        # a string index that doesn't match any row
with pytest.raises(KeyError, match="No matches found for key"):
read(fp, index="row 0", format="ascii.html")
# -----------------------
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_failed_cls(self, write, tmp_path):
"""Test failed table type."""
fp = tmp_path / "test_to_html_table_failed_cls.html"
with pytest.raises(TypeError, match="'cls' must be"):
write(fp, format="ascii.html", cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_to_html_table_cls(self, write, tbl_cls, tmp_path):
fp = tmp_path / "test_to_html_table_cls.html"
write(fp, format="ascii.html", cls=tbl_cls)
# -----------------------
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_readwrite_html_table_instance(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""Test cosmology -> ascii.html -> cosmology."""
fp = tmp_path / "test_readwrite_html_table_instance.html"
# ------------
# To Table
write(fp, format="ascii.html")
# some checks on the saved file
tbl = QTable.read(fp)
# assert tbl.meta["cosmology"] == cosmo_cls.__qualname__ # metadata read not implemented
assert tbl["name"] == cosmo.name
# ------------
# From Table
tbl["mismatching"] = "will error"
tbl.write(fp, format="ascii.html", overwrite=True)
# tests are different if the last argument is a **kwarg
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = read(fp, format="ascii.html")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
# assert "mismatching" not in got.meta # metadata read not implemented
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
read(fp, format="ascii.html")
# unless mismatched are moved to meta
got = read(fp, format="ascii.html", move_to_meta=True)
assert got == cosmo
# assert got.meta["mismatching"] == "will error" # metadata read not implemented
# it won't error if everything matches up
tbl.remove_column("mismatching")
tbl.write(fp, format="ascii.html", overwrite=True)
got = read(fp, format="ascii.html")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``write``.
# tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]] #
# metadata read not implemented
got = read(fp, format="ascii.html")
assert got == cosmo
got = read(fp)
assert got == cosmo
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_rename_html_table_columns(self, read, write, tmp_path):
"""Tests renaming columns"""
fp = tmp_path / "test_rename_html_table_columns.html"
write(fp, format="ascii.html", latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
# For now, Cosmology class and name are stored in first 2 slots
for column_name in tbl.colnames[2:]:
assert column_name in _FORMAT_TABLE.values()
cosmo = read(fp, format="ascii.html")
converted_tbl = cosmo.to_format("astropy.table")
# asserts each column name has been reverted
# cosmology name is still stored in first slot
for column_name in converted_tbl.colnames[1:]:
assert column_name in _FORMAT_TABLE.keys()
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
@pytest.mark.parametrize("latex_names", [True, False])
def test_readwrite_html_subclass_partial_info(
self, cosmo_cls, cosmo, read, write, latex_names, tmp_path, add_cu
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_read_html_subclass_partial_info.html"
# test write
write(fp, format="ascii.html", latex_names=latex_names)
# partial information
tbl = QTable.read(fp)
# tbl.meta.pop("cosmology", None) # metadata not implemented
cname = "$$T_{0}$$" if latex_names else "Tcmb0"
del tbl[cname] # format is not converted to original units
tbl.write(fp, overwrite=True)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(fp, format="ascii.html")
got2 = read(fp, format="ascii.html", cosmology=cosmo_cls)
got3 = read(fp, format="ascii.html", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
# assert got.meta == cosmo.meta # metadata read not implemented
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_readwrite_html_mutlirow(self, cosmo, read, write, tmp_path, add_cu):
"""Test if table has multiple rows."""
fp = tmp_path / "test_readwrite_html_mutlirow.html"
# Make
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
table = vstack(
[c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts="silent",
)
cosmo_cls = type(cosmo)
        assert cosmo_cls is not NoneType  # sanity check: the fixture supplied a real class
for n, col in zip(table.colnames, table.itercols()):
if n == "cosmology":
continue
param = getattr(cosmo_cls, n)
if not isinstance(param, Parameter) or param.unit in (None, u.one):
continue
# Replace column with unitless version
table.replace_column(n, (col << param.unit).value, copy=False)
table.write(fp, format="ascii.html")
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
read(fp, format="ascii.html")
# unless the index argument is provided
got = cosmo_cls.read(fp, index=1, format="ascii.html")
# got = read(fp, index=1, format="ascii.html")
assert got == cosmo
# the index can be a string
got = cosmo_cls.read(fp, index=cosmo.name, format="ascii.html")
assert got == cosmo
# it's better if the table already has an index
# this will be identical to the previous ``got``
table.add_index("name")
got2 = cosmo_cls.read(fp, index=cosmo.name, format="ascii.html")
assert got2 == cosmo
class TestReadWriteHTML(ReadWriteDirectTestBase, ReadWriteHTMLTestMixin):
"""
Directly test ``read/write_html``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="ascii.html")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_html_table, "write": write_html_table}
@pytest.mark.skipif(not HAS_BS4, reason="requires beautifulsoup4")
def test_rename_direct_html_table_columns(self, read, write, tmp_path):
"""Tests renaming columns"""
fp = tmp_path / "test_rename_html_table_columns.html"
write(fp, format="ascii.html", latex_names=True)
tbl = QTable.read(fp)
# asserts each column name has not been reverted yet
for column_name in tbl.colnames[2:]:
            # for now, the Cosmology class and name are stored in the first 2 slots
assert column_name in _FORMAT_TABLE.values()
cosmo = read(fp, format="ascii.html")
converted_tbl = cosmo.to_format("astropy.table")
# asserts each column name has been reverted
for column_name in converted_tbl.colnames[1:]:
            # for now, the cosmology name is still stored in the first slot
assert column_name in _FORMAT_TABLE.keys()
|
2bc897f417d830bfc25e38da3aa3c01c18f18da66e8dfd2eea595bc5fa33222b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.cosmology._io.ecsv import read_ecsv, write_ecsv
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.table import QTable, Table, vstack
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
class ReadWriteECSVTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="ascii.ecsv"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_ecsv_bad_index(self, read, write, tmp_path):
"""Test if argument ``index`` is incorrect"""
fp = tmp_path / "test_to_ecsv_bad_index.ecsv"
write(fp, format="ascii.ecsv")
        # the table has a single row, but a non-0/None index is requested
with pytest.raises(IndexError, match="index 2 out of range"):
read(fp, index=2, format="ascii.ecsv")
        # a string index that doesn't match any row
with pytest.raises(KeyError, match="No matches found for key"):
read(fp, index="row 0", format="ascii.ecsv")
# -----------------------
def test_to_ecsv_failed_cls(self, write, tmp_path):
"""Test failed table type."""
fp = tmp_path / "test_to_ecsv_failed_cls.ecsv"
with pytest.raises(TypeError, match="'cls' must be"):
write(fp, format="ascii.ecsv", cls=list)
@pytest.mark.parametrize("tbl_cls", [QTable, Table])
def test_to_ecsv_cls(self, write, tbl_cls, tmp_path):
fp = tmp_path / "test_to_ecsv_cls.ecsv"
write(fp, format="ascii.ecsv", cls=tbl_cls)
# -----------------------
@pytest.mark.parametrize("in_meta", [True, False])
def test_to_ecsv_in_meta(self, cosmo_cls, write, in_meta, tmp_path, add_cu):
"""Test where the cosmology class is placed."""
fp = tmp_path / "test_to_ecsv_in_meta.ecsv"
write(fp, format="ascii.ecsv", cosmology_in_meta=in_meta)
# if it's in metadata, it's not a column. And vice versa.
tbl = QTable.read(fp)
if in_meta:
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.colnames # not also a column
else:
assert tbl["cosmology"][0] == cosmo_cls.__qualname__
assert "cosmology" not in tbl.meta
# -----------------------
def test_readwrite_ecsv_instance(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""Test cosmology -> ascii.ecsv -> cosmology."""
fp = tmp_path / "test_readwrite_ecsv_instance.ecsv"
# ------------
# To Table
write(fp, format="ascii.ecsv")
# some checks on the saved file
tbl = QTable.read(fp)
assert tbl.meta["cosmology"] == cosmo_cls.__qualname__
assert tbl["name"] == cosmo.name
# ------------
# From Table
tbl["mismatching"] = "will error"
tbl.write(fp, format="ascii.ecsv", overwrite=True)
# tests are different if the last argument is a **kwarg
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = read(fp, format="ascii.ecsv")
assert got.__class__ is cosmo_cls
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
read(fp, format="ascii.ecsv")
# unless mismatched are moved to meta
got = read(fp, format="ascii.ecsv", move_to_meta=True)
assert got == cosmo
assert got.meta["mismatching"] == "will error"
# it won't error if everything matches up
tbl.remove_column("mismatching")
tbl.write(fp, format="ascii.ecsv", overwrite=True)
got = read(fp, format="ascii.ecsv")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``write``.
tbl.meta["cosmology"] = _COSMOLOGY_CLASSES[tbl.meta["cosmology"]]
got = read(fp, format="ascii.ecsv")
assert got == cosmo
# also it auto-identifies 'format'
got = read(fp)
assert got == cosmo
def test_readwrite_ecsv_renamed_columns(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""Test rename argument to read/write."""
fp = tmp_path / "test_readwrite_ecsv_rename.ecsv"
rename = {"name": "cosmo_name"}
write(fp, format="ascii.ecsv", rename=rename)
tbl = QTable.read(fp, format="ascii.ecsv")
assert "name" not in tbl.colnames
assert "cosmo_name" in tbl.colnames
# Errors if reading
with pytest.raises(
TypeError, match="there are unused parameters {'cosmo_name':"
):
read(fp)
# Roundtrips
inv_rename = {v: k for k, v in rename.items()}
got = read(fp, rename=inv_rename)
assert got == cosmo
def test_readwrite_ecsv_subclass_partial_info(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_read_ecsv_subclass_partial_info.ecsv"
# test write
write(fp, format="ascii.ecsv")
# partial information
tbl = QTable.read(fp)
tbl.meta.pop("cosmology", None)
del tbl["Tcmb0"]
tbl.write(fp, overwrite=True)
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(fp, format="ascii.ecsv")
got2 = read(fp, format="ascii.ecsv", cosmology=cosmo_cls)
got3 = read(fp, format="ascii.ecsv", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
def test_readwrite_ecsv_mutlirow(self, cosmo, read, write, tmp_path, add_cu):
"""Test if table has multiple rows."""
fp = tmp_path / "test_readwrite_ecsv_mutlirow.ecsv"
# Make
cosmo1 = cosmo.clone(name="row 0")
cosmo2 = cosmo.clone(name="row 2")
tbl = vstack(
[c.to_format("astropy.table") for c in (cosmo1, cosmo, cosmo2)],
metadata_conflicts="silent",
)
tbl.write(fp, format="ascii.ecsv")
# ------------
# From Table
# it will error on a multi-row table
with pytest.raises(ValueError, match="need to select a specific row"):
read(fp, format="ascii.ecsv")
# unless the index argument is provided
got = read(fp, index=1, format="ascii.ecsv")
assert got == cosmo
# the index can be a string
got = read(fp, index=cosmo.name, format="ascii.ecsv")
assert got == cosmo
# it's better if the table already has an index
# this will be identical to the previous ``got``
tbl.add_index("name")
got2 = read(fp, index=cosmo.name, format="ascii.ecsv")
assert got2 == cosmo
class TestReadWriteECSV(ReadWriteDirectTestBase, ReadWriteECSVTestMixin):
"""
Directly test ``read/write_ecsv``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="ascii.ecsv")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_ecsv, "write": write_ecsv}
|
459439a184318110fee35c680b9ba1345dd8775febbecfa33855fe6af3dbc625 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import astropy.units as u
from astropy.cosmology import Cosmology, Parameter, realizations
from astropy.cosmology import units as cu
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.realizations import available
cosmo_instances = [getattr(realizations, name) for name in available]
##############################################################################
class IOTestBase:
"""Base class for Cosmology I/O tests.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
class ToFromTestMixinBase(IOTestBase):
"""Tests for a Cosmology[To/From]Format with some ``format``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.fixture(scope="class")
def from_format(self):
"""Convert to Cosmology using ``Cosmology.from_format()``."""
return Cosmology.from_format
@pytest.fixture(scope="class")
def to_format(self, cosmo):
"""Convert Cosmology instance using ``.to_format()``."""
return cosmo.to_format
def can_autodentify(self, format):
"""Check whether a format can auto-identify."""
return format in Cosmology.from_format.registry._identifiers
class ReadWriteTestMixinBase(IOTestBase):
"""Tests for a Cosmology[Read/Write].
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.fixture(scope="class")
def read(self):
"""Read Cosmology instance using ``Cosmology.read()``."""
return Cosmology.read
@pytest.fixture(scope="class")
def write(self, cosmo):
"""Write Cosmology using ``.write()``."""
return cosmo.write
@pytest.fixture
def add_cu(self):
"""Add :mod:`astropy.cosmology.units` to the enabled units."""
# TODO! autoenable 'cu' if cosmology is imported?
with u.add_enabled_units(cu):
yield
##############################################################################
class IODirectTestBase(IOTestBase):
"""Directly test Cosmology I/O functions.
These functions are not public API and are discouraged from public use, in
favor of the I/O methods on |Cosmology|. They are tested b/c they are used
internally and because some tests for the methods on |Cosmology| don't need
to be run in the |Cosmology| class's large test matrix.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass.
"""
@pytest.fixture(scope="class", autouse=True)
def setup(self):
"""Setup and teardown for tests."""
class CosmologyWithKwargs(Cosmology):
Tcmb0 = Parameter(unit=u.K)
def __init__(
self, Tcmb0=0, name="cosmology with kwargs", meta=None, **kwargs
):
super().__init__(name=name, meta=meta)
self._Tcmb0 = Tcmb0 << u.K
yield # run tests
# pop CosmologyWithKwargs from registered classes
# but don't error b/c it can fail in parallel
_COSMOLOGY_CLASSES.pop(CosmologyWithKwargs.__qualname__, None)
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
"""Cosmology instance."""
if isinstance(request.param, str): # CosmologyWithKwargs
return _COSMOLOGY_CLASSES[request.param](Tcmb0=3)
return request.param
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
"""Cosmology classes."""
return cosmo.__class__
class ToFromDirectTestBase(IODirectTestBase, ToFromTestMixinBase):
"""Directly test ``to/from_<format>``.
These functions are not public API and are discouraged from public use, in
favor of ``Cosmology.to/from_format(..., format="<format>")``. They are
tested because they are used internally and because some tests for the
methods on |Cosmology| don't need to be run in the |Cosmology| class's
large test matrix.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass.
Subclasses should have an attribute ``functions`` which is a dictionary
containing two items: ``"to"=<function for to_format>`` and
``"from"=<function for from_format>``.
"""
@pytest.fixture(scope="class")
def from_format(self):
"""Convert to Cosmology using function ``from``."""
def use_from_format(*args, **kwargs):
kwargs.pop("format", None) # specific to Cosmology.from_format
return self.functions["from"](*args, **kwargs)
return use_from_format
@pytest.fixture(scope="class")
def to_format(self, cosmo):
"""Convert Cosmology to format using function ``to``."""
def use_to_format(*args, **kwargs):
return self.functions["to"](cosmo, *args, **kwargs)
return use_to_format
class ReadWriteDirectTestBase(IODirectTestBase, ToFromTestMixinBase):
"""Directly test ``read/write_<format>``.
These functions are not public API and are discouraged from public use, in
favor of ``Cosmology.read/write(..., format="<format>")``. They are tested
because they are used internally and because some tests for the
methods on |Cosmology| don't need to be run in the |Cosmology| class's
large test matrix.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass.
Subclasses should have an attribute ``functions`` which is a dictionary
containing two items: ``"read"=<function for read>`` and
``"write"=<function for write>``.
"""
@pytest.fixture(scope="class")
def read(self):
"""Read Cosmology from file using function ``read``."""
def use_read(*args, **kwargs):
kwargs.pop("format", None) # specific to Cosmology.from_format
return self.functions["read"](*args, **kwargs)
return use_read
@pytest.fixture(scope="class")
def write(self, cosmo):
"""Write Cosmology to file using function ``write``."""
def use_write(*args, **kwargs):
return self.functions["write"](cosmo, *args, **kwargs)
return use_write
|
5ac895ae936a317eb8e37d447da669cfc143d51ce64b05a212fa3ae73c1a3735 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.cosmology._io.row import from_row, to_row
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
from astropy.table import Row
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromRowTestMixin(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="astropy.row"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
"""
@pytest.mark.parametrize("in_meta", [True, False])
def test_to_row_in_meta(self, cosmo_cls, cosmo, in_meta):
"""Test where the cosmology class is placed."""
row = cosmo.to_format("astropy.row", cosmology_in_meta=in_meta)
# if it's in metadata, it's not a column. And vice versa.
if in_meta:
assert row.meta["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in row.colnames # not also a column
else:
assert row["cosmology"] == cosmo_cls.__qualname__
assert "cosmology" not in row.meta
# -----------------------
def test_from_not_row(self, cosmo, from_format):
"""Test not passing a Row to the Row parser."""
with pytest.raises(AttributeError):
from_format("NOT A ROW", format="astropy.row")
def test_tofrom_row_instance(self, cosmo, to_format, from_format):
"""Test cosmology -> astropy.row -> cosmology."""
# ------------
# To Row
row = to_format("astropy.row")
assert isinstance(row, Row)
assert row["cosmology"] == cosmo.__class__.__qualname__
assert row["name"] == cosmo.name
# ------------
# From Row
row.table["mismatching"] = "will error"
# tests are different if the last argument is a **kwarg
if tuple(cosmo._init_signature.parameters.values())[-1].kind == 4:
got = from_format(row, format="astropy.row")
assert got.__class__ is cosmo.__class__
assert got.name == cosmo.name
assert "mismatching" not in got.meta
return # don't continue testing
# read with mismatching parameters errors
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(row, format="astropy.row")
# unless mismatched are moved to meta
got = from_format(row, format="astropy.row", move_to_meta=True)
assert got == cosmo
assert got.meta["mismatching"] == "will error"
# it won't error if everything matches up
row.table.remove_column("mismatching")
got = from_format(row, format="astropy.row")
assert got == cosmo
# and it will also work if the cosmology is a class
# Note this is not the default output of ``to_format``.
cosmology = _COSMOLOGY_CLASSES[row["cosmology"]]
row.table.remove_column("cosmology")
row.table["cosmology"] = cosmology
got = from_format(row, format="astropy.row")
assert got == cosmo
# also it auto-identifies 'format'
got = from_format(row)
assert got == cosmo
def test_tofrom_row_rename(self, cosmo, to_format, from_format):
"""Test renaming columns in row."""
rename = {"name": "cosmo_name"}
row = to_format("astropy.row", rename=rename)
assert "name" not in row.colnames
assert "cosmo_name" in row.colnames
# Error if just reading
with pytest.raises(TypeError, match="there are unused parameters"):
from_format(row)
# Roundtrip
inv_rename = {v: k for k, v in rename.items()}
got = from_format(row, rename=inv_rename)
assert got == cosmo
def test_fromformat_row_subclass_partial_info(self, cosmo):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
pass # there are no partial info options
@pytest.mark.parametrize("format", [True, False, None, "astropy.row"])
def test_is_equivalent_to_row(self, cosmo, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a Row.
"""
obj = to_format("astropy.row")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (format is not False)
class TestToFromRow(ToFromDirectTestBase, ToFromRowTestMixin):
"""
Directly test ``to/from_row``.
These are not public API and are discouraged from use, in favor of
``Cosmology.to/from_format(..., format="astropy.row")``, but should be
tested regardless b/c 3rd party packages might use these in their Cosmology
I/O. Also, it's cheap to test.
"""
def setup_class(self):
self.functions = {"to": to_row, "from": from_row}
|
261e3a59d075548be7f74327afe909d8b9a4b22a5c1316dc5a2617985bbb7a41 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import os
import pytest
import astropy.units as u
from astropy.cosmology import units as cu
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
def read_json(filename, **kwargs):
"""Read JSON.
Parameters
----------
filename : str
**kwargs
Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format`
Returns
-------
`~astropy.cosmology.Cosmology` instance
"""
# read
if isinstance(filename, (str, bytes, os.PathLike)):
with open(filename) as file:
data = file.read()
else: # file-like : this also handles errors in dumping
data = filename.read()
mapping = json.loads(data) # parse json mappable to dict
# deserialize Quantity
with u.add_enabled_units(cu.redshift):
for k, v in mapping.items():
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping[k] = u.Quantity(v["value"], v["unit"])
for k, v in mapping.get("meta", {}).items(): # also the metadata
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping["meta"][k] = u.Quantity(v["value"], v["unit"])
return Cosmology.from_format(mapping, format="mapping", **kwargs)
def write_json(cosmology, file, *, overwrite=False):
"""Write Cosmology to JSON.
Parameters
----------
cosmology : `astropy.cosmology.Cosmology` subclass instance
file : path-like or file-like
overwrite : bool (optional, keyword-only)
"""
data = cosmology.to_format("mapping") # start by turning into dict
data["cosmology"] = data["cosmology"].__qualname__
# serialize Quantity
for k, v in data.items():
if isinstance(v, u.Quantity):
data[k] = {"value": v.value.tolist(), "unit": str(v.unit)}
for k, v in data.get("meta", {}).items(): # also serialize the metadata
if isinstance(v, u.Quantity):
data["meta"][k] = {"value": v.value.tolist(), "unit": str(v.unit)}
# check that file exists and whether to overwrite.
if os.path.exists(file) and not overwrite:
raise OSError(f"{file} exists. Set 'overwrite' to write over.")
with open(file, "w") as write_file:
json.dump(data, write_file)
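# Schematically, the JSON written above looks like the following (values are
# illustrative, not exact):
#     {"cosmology": "FlatLambdaCDM", "name": "Planck18",
#      "H0": {"value": 67.66, "unit": "km / (Mpc s)"}, ..., "meta": {...}}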
def json_identify(origin, filepath, fileobj, *args, **kwargs):
return filepath is not None and filepath.endswith(".json")
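# Illustrative usage sketch (the same registrations are performed by the
# ``register_and_unregister_json`` fixture below; ``Planck18`` and the file
# name are only examples):
#
#     readwrite_registry.register_reader("json", Cosmology, read_json)
#     readwrite_registry.register_writer("json", Cosmology, write_json)
#     readwrite_registry.register_identifier("json", Cosmology, json_identify)
#     Planck18.write("planck18.json", format="json")
#     cosmo = Cosmology.read("planck18.json", format="json")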
###############################################################################
class ReadWriteJSONTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="json"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
    be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.fixture(scope="class", autouse=True)
def register_and_unregister_json(self):
"""Setup & teardown for JSON read/write tests."""
# Register
readwrite_registry.register_reader("json", Cosmology, read_json, force=True)
readwrite_registry.register_writer("json", Cosmology, write_json, force=True)
readwrite_registry.register_identifier(
"json", Cosmology, json_identify, force=True
)
yield # Run all tests in class
# Unregister
readwrite_registry.unregister_reader("json", Cosmology)
readwrite_registry.unregister_writer("json", Cosmology)
readwrite_registry.unregister_identifier("json", Cosmology)
# ========================================================================
def test_readwrite_json_subclass_partial_info(
self, cosmo_cls, cosmo, read, write, tmp_path, add_cu
):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_readwrite_json_subclass_partial_info.json"
# test write
cosmo.write(fp, format="json")
# partial information
with open(fp) as file:
L = file.readlines()[0]
L = (
L[: L.index('"cosmology":')] + L[L.index(", ") + 2 :]
) # remove cosmology : #203
i = L.index('"Tcmb0":') # delete Tcmb0
L = (
L[:i] + L[L.index(", ", L.index(", ", i) + 1) + 2 :]
) # second occurrence : #203
tempfname = tmp_path / f"{cosmo.name}_temp.json"
with open(tempfname, "w") as file:
file.writelines([L])
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(tempfname, format="json")
got2 = read(tempfname, format="json", cosmology=cosmo_cls)
got3 = read(tempfname, format="json", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
class TestReadWriteJSON(ReadWriteDirectTestBase, ReadWriteJSONTestMixin):
"""
Directly test ``read/write_json``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="json")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_json, "write": write_json}
|
5824381f82bb617021489ae1c9242eadd13cfe00b0bbbf9afd356ec7e8543292 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.cosmology._io.cosmology import from_cosmology, to_cosmology
from .base import IODirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromCosmologyTestMixin(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="astropy.cosmology"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
def test_to_cosmology_default(self, cosmo, to_format):
"""Test cosmology -> cosmology."""
newcosmo = to_format("astropy.cosmology")
assert newcosmo is cosmo
def test_from_not_cosmology(self, cosmo, from_format):
"""Test incorrect type in ``Cosmology``."""
with pytest.raises(TypeError):
from_format("NOT A COSMOLOGY", format="astropy.cosmology")
def test_from_cosmology_default(self, cosmo, from_format):
"""Test cosmology -> cosmology."""
newcosmo = from_format(cosmo)
assert newcosmo is cosmo
@pytest.mark.parametrize("format", [True, False, None, "astropy.cosmology"])
def test_is_equivalent_to_cosmology(self, cosmo, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a Cosmology! Since it's the identity conversion, the cosmology is
always equivalent to itself, regardless of ``format``.
"""
obj = to_format("astropy.cosmology")
assert obj is cosmo
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is True # equivalent to self
class TestToFromCosmology(IODirectTestBase, ToFromCosmologyTestMixin):
"""Directly test ``to/from_cosmology``."""
def setup_class(self):
self.functions = {"to": to_cosmology, "from": from_cosmology}
|
253de22657422a1dc8d4b17c56d75f1df34cda2ff440de49c975d10783e8f8ec | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
import glob
import ah_bootstrap
from setuptools import setup
from astropy_helpers.setup_helpers import (
register_commands, get_package_info, get_debug_option)
from astropy_helpers.distutils_helpers import is_distutils_display_option
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
import astropy
NAME = 'astropy'
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '3.1.dev'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(NAME, VERSION, RELEASE)
# Freeze build information in version.py
generate_version_py(NAME, VERSION, RELEASE, get_debug_option(NAME),
uses_git=not RELEASE)
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault('astropy', []).append('data/*')
# Add any necessary entry points
entry_points = {}
# Command-line scripts
entry_points['console_scripts'] = [
'fits2bitmap = astropy.visualization.scripts.fits2bitmap:main',
'fitscheck = astropy.io.fits.scripts.fitscheck:main',
'fitsdiff = astropy.io.fits.scripts.fitsdiff:main',
'fitsheader = astropy.io.fits.scripts.fitsheader:main',
'fitsinfo = astropy.io.fits.scripts.fitsinfo:main',
'samp_hub = astropy.samp.hub_script:hub_script',
'showtable = astropy.table.scripts.showtable:main',
'volint = astropy.io.votable.volint:main',
'wcslint = astropy.wcs.wcslint:main',
]
# Register ASDF extensions
entry_points['asdf_extensions'] = [
'astropy = astropy.io.misc.asdf.extension:AstropyExtension',
'astropy-asdf = astropy.io.misc.asdf.extension:AstropyAsdfExtension',
]
min_numpy_version = 'numpy>=' + astropy.__minimum_numpy_version__
setup_requires = [min_numpy_version]
# Make sure to have the packages needed for building astropy, but do not require them
# when installing from an sdist as the c files are included there.
if not os.path.exists(os.path.join(os.path.dirname(__file__), 'PKG-INFO')):
setup_requires.extend(['cython>=0.21', 'jinja2>=2.7'])
install_requires = [min_numpy_version]
extras_require = {
'test': ['pytest-astropy']
}
# Avoid installing setup_requires dependencies if the user just
# queries for information
if is_distutils_display_option():
setup_requires = []
setup(name=NAME,
version=VERSION,
description='Community-developed python astronomy tools',
requires=['numpy'], # scipy not required, but strongly recommended
setup_requires=setup_requires,
install_requires=install_requires,
extras_require=extras_require,
provides=[NAME],
author='The Astropy Developers',
author_email='[email protected]',
license='BSD',
url='http://astropy.org',
long_description=astropy.__doc__,
keywords=['astronomy', 'astrophysics', 'cosmology', 'space', 'science',
'units', 'table', 'wcs', 'samp', 'coordinate', 'fits',
'modeling', 'models', 'fitting', 'ascii'],
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: C',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics'
],
cmdclass=cmdclassd,
zip_safe=False,
entry_points=entry_points,
python_requires='>=' + astropy.__minimum_python_version__,
tests_require=['pytest-astropy'],
**package_info
)
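# Illustrative usage of this setup script (example commands, not part of the
# script itself):
#
#     pip install -e .[test]                # editable install plus the 'test' extra
#     python setup.py build_ext --inplace   # rebuild the compiled extensions in place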
# Licensed under a 3-clause BSD style license - see LICENSE.rst
pytest_plugins = [
'astropy.tests.plugins.config',
'astropy.tests.plugins.display',
]
"""
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, in which it
looks for a configuration section called ``[ah_bootstrap]``. The presence of
that section, and the options therein, determines the next step taken: if it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module,
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken and by default the system-installed version
of astropy-helpers will be used (however, ``ah_bootstrap.use_astropy_helpers``
may be called manually from within the setup.py script).
This behavior can also be controlled using the ``--auto-use`` and
``--no-auto-use`` command-line flags. For clarity, an alias for
``--no-auto-use`` is ``--use-system-astropy-helpers``, and we recommend using
the latter if needed.
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import imp
import io
import locale
import os
import re
import subprocess as sp
import sys
try:
from ConfigParser import ConfigParser, RawConfigParser
except ImportError:
from configparser import ConfigParser, RawConfigParser
_str_types = (str, bytes)
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Some pre-setuptools checks to ensure that either distribute or setuptools >=
# 0.7 is used (over pre-distribute setuptools) if it is available on the path;
# otherwise the latest setuptools will be downloaded and bootstrapped with
# ``ez_setup.py``. This used to be included in a separate file called
# setuptools_bootstrap.py; but it was combined into ah_bootstrap.py
try:
import pkg_resources
_setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7')
# This may raise a DistributionNotFound in which case no version of
# setuptools or distribute is properly installed
_setuptools = pkg_resources.get_distribution('setuptools')
if _setuptools not in _setuptools_req:
# Older version of setuptools; check if we have distribute; again if
# this results in DistributionNotFound we want to give up
_distribute = pkg_resources.get_distribution('distribute')
if _setuptools != _distribute:
# It's possible on some pathological systems to have an old version
# of setuptools and distribute on sys.path simultaneously; make
# sure distribute is the one that's used
sys.path.insert(1, _distribute.location)
_distribute.activate()
imp.reload(pkg_resources)
except:
# There are several types of exceptions that can occur here; if all else
# fails bootstrap and use the bootstrapped version
from ez_setup import use_setuptools
use_setuptools()
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_bootstrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
import typing # noqa
except ImportError:
pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat # noqa
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
    # Ignore if this fails for *any* reason
pass
# End compatibility imports...
# In case it didn't successfully import before the ez_setup checks
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
from setuptools.sandbox import run_setup
from distutils import log
from distutils.debug import DEBUG
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
UPPER_VERSION_EXCLUSIVE = None
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if not isinstance(path, str):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not os.path.exists('setup.cfg'):
return {}
cfg = ConfigParser()
try:
cfg.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
return {}
if not cfg.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not cfg.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = cfg.getboolean('ah_bootstrap', option)
else:
value = cfg.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
        # of the same name then we will break that. However there's a catch-22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
if '--auto-use' in argv:
config['auto_use'] = True
argv.remove('--auto-use')
if '--no-auto-use' in argv:
config['auto_use'] = False
argv.remove('--no-auto-use')
if '--use-system-astropy-helpers' in argv:
config['auto_use'] = False
argv.remove('--use-system-astropy-helpers')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST". However, in other cases it gets a precedence of
# "EGG_DIST". However, when activing the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done
        # Just activate the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
        Returns the ``pkg_resources.Distribution`` object for astropy_helpers
        if the import succeeded, or `None` otherwise.
        """
        # Returns a Distribution on success and None on failure; the caller
        # may then fall back to another strategy (e.g. downloading a copy)
path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
with _silence():
run_setup(os.path.join(path, 'setup.py'),
['egg_info'])
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
if UPPER_VERSION_EXCLUSIVE is None:
req = DIST_NAME
else:
req = '{0}<{1}'.format(DIST_NAME, UPPER_VERSION_EXCLUSIVE)
attrs = {'setup_requires': [req]}
try:
if DEBUG:
_Distribution(attrs=attrs)
else:
with _silence():
_Distribution(attrs=attrs)
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
        # Build up a requirement for a higher bugfix release within the same
        # minor release (so API compatibility is guaranteed)
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
``_check_submodule_no_git`` option uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
            # This is a warning emitted by perl (from running git submodule)
            # that only occurs with a malformed locale setting, which can
            # sometimes happen on OSX.  See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
        # 1. Status indicator: '-' if the submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
        _git_submodule_status_re = re.compile(
            r'^(?P<status>[-+U ])(?P<commit>[0-9a-f]{40}) '
            r'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
                if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
cfg.readfp(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
    # Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, str):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, str):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
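# Illustrative use of run_cmd (the git invocation is only an example): query
# the current revision and degrade gracefully if git is not installed.
#
#     try:
#         returncode, stdout, stderr = run_cmd(['git', 'rev-parse', 'HEAD'])
#     except _CommandNotFound:
#         returncode, stdout = 1, ''
#     if returncode == 0:
#         print(stdout.strip())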
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
    --------
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
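# Illustrative use of _silence(), mirroring how it is used elsewhere in this
# module to hide the chatty output of a nested setup.py invocation:
#
#     with _silence():
#         run_setup('setup.py', ['egg_info'])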
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools).  If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
offline : bool, optional
        If `True`, disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule.  Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
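# Illustrative call from a project's setup.py (keyword values are examples
# only); note that with ``auto_use = True`` in setup.cfg an explicit call is
# normally unnecessary:
#
#     import ah_bootstrap
#     ah_bootstrap.use_astropy_helpers(path='astropy_helpers', offline=False)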
#!/usr/bin/env python
"""
Setuptools bootstrapping installer.
Maintained at https://github.com/pypa/setuptools/tree/bootstrap.
Run this script to install or upgrade setuptools.
This method is DEPRECATED. Check https://github.com/pypa/setuptools/issues/581 for more details.
"""
import os
import shutil
import sys
import tempfile
import zipfile
import optparse
import subprocess
import platform
import textwrap
import contextlib
from distutils import log
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
try:
from site import USER_SITE
except ImportError:
USER_SITE = None
# 33.1.1 is the last version that supports setuptools self upgrade/installation.
DEFAULT_VERSION = "33.1.1"
DEFAULT_URL = "https://pypi.io/packages/source/s/setuptools/"
DEFAULT_SAVE_DIR = os.curdir
DEFAULT_DEPRECATION_MESSAGE = "ez_setup.py is deprecated and when using it setuptools will be pinned to {0} since it's the last version that supports setuptools self upgrade/installation, check https://github.com/pypa/setuptools/issues/581 for more info; use pip to install setuptools"
MEANINGFUL_INVALID_ZIP_ERR_MSG = 'Maybe {0} is corrupted, delete it and try again.'
log.warn(DEFAULT_DEPRECATION_MESSAGE.format(DEFAULT_VERSION))
def _python_cmd(*args):
"""
Execute a command.
Return True if the command succeeded.
"""
args = (sys.executable,) + args
return subprocess.call(args) == 0
def _install(archive_filename, install_args=()):
"""Install Setuptools."""
with archive_context(archive_filename):
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2
def _build_egg(egg, archive_filename, to_dir):
"""Build Setuptools egg."""
with archive_context(archive_filename):
# building an egg
log.warn('Building a Setuptools egg in %s', to_dir)
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
raise IOError('Could not build the egg.')
class ContextualZipFile(zipfile.ZipFile):
"""Supplement ZipFile class to support context manager for Python 2.6."""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def __new__(cls, *args, **kwargs):
"""Construct a ZipFile or ContextualZipFile as appropriate."""
if hasattr(zipfile.ZipFile, '__exit__'):
return zipfile.ZipFile(*args, **kwargs)
return super(ContextualZipFile, cls).__new__(cls)
@contextlib.contextmanager
def archive_context(filename):
"""
Unzip filename to a temporary directory, set to the cwd.
The unzipped target is cleaned up after.
"""
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
old_wd = os.getcwd()
try:
os.chdir(tmpdir)
try:
with ContextualZipFile(filename) as archive:
archive.extractall()
except zipfile.BadZipfile as err:
if not err.args:
err.args = ('', )
err.args = err.args + (
MEANINGFUL_INVALID_ZIP_ERR_MSG.format(filename),
)
raise
# going in the directory
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
os.chdir(subdir)
log.warn('Now working in %s', subdir)
yield
finally:
os.chdir(old_wd)
shutil.rmtree(tmpdir)
def _do_download(version, download_base, to_dir, download_delay):
"""Download Setuptools."""
py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys)
tp = 'setuptools-{version}-{py_desig}.egg'
egg = os.path.join(to_dir, tp.format(**locals()))
if not os.path.exists(egg):
archive = download_setuptools(version, download_base,
to_dir, download_delay)
_build_egg(egg, archive, to_dir)
sys.path.insert(0, egg)
# Remove previously-imported pkg_resources if present (see
# https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
if 'pkg_resources' in sys.modules:
_unload_pkg_resources()
import setuptools
setuptools.bootstrap_install_from = egg
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=DEFAULT_SAVE_DIR, download_delay=15):
"""
Ensure that a setuptools version is installed.
Return None. Raise SystemExit if the requested version
or later cannot be installed.
"""
to_dir = os.path.abspath(to_dir)
# prior to importing, capture the module state for
# representative modules.
rep_modules = 'pkg_resources', 'setuptools'
imported = set(sys.modules).intersection(rep_modules)
try:
import pkg_resources
pkg_resources.require("setuptools>=" + version)
# a suitable version is already installed
return
except ImportError:
# pkg_resources not available; setuptools is not installed; download
pass
except pkg_resources.DistributionNotFound:
# no version of setuptools was found; allow download
pass
except pkg_resources.VersionConflict as VC_err:
if imported:
_conflict_bail(VC_err, version)
# otherwise, unload pkg_resources to allow the downloaded version to
# take precedence.
del pkg_resources
_unload_pkg_resources()
return _do_download(version, download_base, to_dir, download_delay)
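# Illustrative bootstrap sequence in a legacy setup.py (a deprecated workflow,
# as the module docstring notes):
#
#     from ez_setup import use_setuptools
#     use_setuptools()
#     from setuptools import setup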
def _conflict_bail(VC_err, version):
"""
Setuptools was imported prior to invocation, so it is
unsafe to unload it. Bail out.
"""
conflict_tmpl = textwrap.dedent("""
The required version of setuptools (>={version}) is not available,
and can't be installed while this script is running. Please
install a more recent version first, using
'easy_install -U setuptools'.
(Currently using {VC_err.args[0]!r})
""")
msg = conflict_tmpl.format(**locals())
sys.stderr.write(msg)
sys.exit(2)
def _unload_pkg_resources():
sys.meta_path = [
importer
for importer in sys.meta_path
if importer.__class__.__module__ != 'pkg_resources.extern'
]
del_modules = [
name for name in sys.modules
if name.startswith('pkg_resources')
]
for mod_name in del_modules:
del sys.modules[mod_name]
def _clean_check(cmd, target):
"""
Run the command to download target.
If the command fails, clean up before re-raising the error.
"""
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
if os.access(target, os.F_OK):
os.unlink(target)
raise
def download_file_powershell(url, target):
"""
Download the file at url to target using Powershell.
Powershell will validate trust.
Raise an exception if the command cannot complete.
"""
target = os.path.abspath(target)
ps_cmd = (
"[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
"[System.Net.CredentialCache]::DefaultCredentials; "
'(new-object System.Net.WebClient).DownloadFile("%(url)s", "%(target)s")'
% locals()
)
cmd = [
'powershell',
'-Command',
ps_cmd,
]
_clean_check(cmd, target)
def has_powershell():
"""Determine if Powershell is available."""
if platform.system() != 'Windows':
return False
cmd = ['powershell', '-Command', 'echo test']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_powershell.viable = has_powershell
def download_file_curl(url, target):
cmd = ['curl', url, '--location', '--silent', '--output', target]
_clean_check(cmd, target)
def has_curl():
cmd = ['curl', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_curl.viable = has_curl
def download_file_wget(url, target):
cmd = ['wget', url, '--quiet', '--output-document', target]
_clean_check(cmd, target)
def has_wget():
cmd = ['wget', '--version']
with open(os.path.devnull, 'wb') as devnull:
try:
subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
except Exception:
return False
return True
download_file_wget.viable = has_wget
def download_file_insecure(url, target):
"""Use Python to download the file, without connection authentication."""
src = urlopen(url)
try:
# Read all the data in one block.
data = src.read()
finally:
src.close()
# Write all the data in one block to avoid creating a partial file.
with open(target, "wb") as dst:
dst.write(data)
download_file_insecure.viable = lambda: True
def get_best_downloader():
downloaders = (
download_file_powershell,
download_file_curl,
download_file_wget,
download_file_insecure,
)
viable_downloaders = (dl for dl in downloaders if dl.viable())
return next(viable_downloaders, None)
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=DEFAULT_SAVE_DIR, delay=15,
downloader_factory=get_best_downloader):
"""
Download setuptools from a specified location and return its filename.
`version` should be a valid setuptools version number that is available
as an sdist for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download
attempt.
``downloader_factory`` should be a function taking no arguments and
returning a function for downloading a URL to a target.
"""
# making sure we use the absolute path
to_dir = os.path.abspath(to_dir)
zip_name = "setuptools-%s.zip" % version
url = download_base + zip_name
saveto = os.path.join(to_dir, zip_name)
if not os.path.exists(saveto): # Avoid repeated downloads
log.warn("Downloading %s", url)
downloader = downloader_factory()
downloader(url, saveto)
return os.path.realpath(saveto)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the setuptools package.
Returns list of command line arguments.
"""
return ['--user'] if options.user_install else []
def _parse_args():
"""Parse the command line for options."""
parser = optparse.OptionParser()
parser.add_option(
'--user', dest='user_install', action='store_true', default=False,
help='install in user site package')
parser.add_option(
'--download-base', dest='download_base', metavar="URL",
default=DEFAULT_URL,
help='alternative URL from where to download the setuptools package')
parser.add_option(
'--insecure', dest='downloader_factory', action='store_const',
const=lambda: download_file_insecure, default=get_best_downloader,
help='Use internal, non-validating downloader'
)
parser.add_option(
'--version', help="Specify which version to download",
default=DEFAULT_VERSION,
)
parser.add_option(
'--to-dir',
help="Directory to save (and re-use) package",
default=DEFAULT_SAVE_DIR,
)
options, args = parser.parse_args()
# positional arguments are ignored
return options
def _download_args(options):
"""Return args for download_setuptools function from cmdline args."""
return dict(
version=options.version,
download_base=options.download_base,
downloader_factory=options.downloader_factory,
to_dir=options.to_dir,
)
def main():
"""Install or upgrade setuptools and EasyInstall."""
options = _parse_args()
archive = download_setuptools(**_download_args(options))
return _install(archive, _build_install_args(options))
if __name__ == '__main__':
sys.exit(main())
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
import sys
import os
from warnings import warn
__minimum_python_version__ = '3.5'
__minimum_numpy_version__ = '1.10.0'
class UnsupportedPythonError(Exception):
pass
if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):
raise UnsupportedPythonError("Astropy does not support Python < {}".format(__minimum_python_version__))
def _is_astropy_source(path=None):
"""
Returns whether the source for this module is directly in an astropy
source distribution or checkout.
"""
# If this __init__.py file is in ./astropy/ then import is within a source
    # dir. The .astropy-root file is distributed with the source, but should
    # not be installed
if path is None:
path = os.path.join(os.path.dirname(__file__), os.pardir)
elif os.path.isfile(path):
path = os.path.dirname(path)
source_dir = os.path.abspath(path)
return os.path.exists(os.path.join(source_dir, '.astropy-root'))
def _is_astropy_setup():
"""
Returns whether we are currently being imported in the context of running
Astropy's setup.py.
"""
main_mod = sys.modules.get('__main__')
if not main_mod:
return False
return (getattr(main_mod, '__file__', False) and
os.path.basename(main_mod.__file__).rstrip('co') == 'setup.py' and
_is_astropy_source(main_mod.__file__))
# this indicates whether or not we are in astropy's setup.py
try:
_ASTROPY_SETUP_
except NameError:
from sys import version_info
import builtins
# This will set the _ASTROPY_SETUP_ to True by default if
# we are running Astropy's setup.py
builtins._ASTROPY_SETUP_ = _is_astropy_setup()
try:
from .version import version as __version__
except ImportError:
# TODO: Issue a warning using the logging framework
__version__ = ''
try:
from .version import githash as __githash__
except ImportError:
# TODO: Issue a warning using the logging framework
__githash__ = ''
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
if 'dev' in __version__:
online_docs_root = 'http://docs.astropy.org/en/latest/'
else:
online_docs_root = 'http://docs.astropy.org/en/{0}/'.format(__version__)
def _check_numpy():
"""
Check that Numpy is installed and it is of the minimum version we
require.
"""
# Note: We could have used distutils.version for this comparison,
# but it seems like overkill to import distutils at runtime.
requirement_met = False
try:
import numpy
except ImportError:
pass
else:
from .utils import minversion
requirement_met = minversion(numpy, __minimum_numpy_version__)
if not requirement_met:
msg = ("Numpy version {0} or later must be installed to use "
"Astropy".format(__minimum_numpy_version__))
raise ImportError(msg)
return numpy
if not _ASTROPY_SETUP_:
_check_numpy()
from . import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
'When True, use Unicode characters when outputting values, and '
'displaying widgets at the console.')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when writing to the console.',
aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR'])
max_lines = _config.ConfigItem(
None,
description='Maximum number of lines in the display of pretty-printed '
'objects. If not provided, try to determine automatically from the '
'terminal size. Negative numbers mean no limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_lines'])
max_width = _config.ConfigItem(
None,
description='Maximum number of characters per line in the display of '
'pretty-printed objects. If not provided, try to determine '
'automatically from the terminal size. Negative numbers mean no '
'limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_width'])
conf = Conf()
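# Illustrative runtime use of the configuration items defined above (the
# values are examples only):
#
#     from astropy import conf
#     conf.max_lines = 30      # cap pretty-printed output at 30 lines
#     conf.use_color = False   # disable ANSI color escapes on the console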
# Create the test() function
from .tests.runner import TestRunner
test = TestRunner.make_test_runner_in(__path__[0])
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
from . import config
def _rollback_import(message):
log.error(message)
# Now disable exception logging to avoid an annoying error in the
# exception logger before we raise the import error:
_teardown_log()
# Roll back any astropy sub-modules that have been imported thus
# far
for key in list(sys.modules):
if key.startswith('astropy.'):
del sys.modules[key]
raise ImportError('astropy')
try:
from .utils import _compiler
except ImportError:
if _is_astropy_source():
log.warning('You appear to be trying to import astropy from '
'within a source checkout without building the '
'extension modules first. Attempting to (re)build '
'extension modules:')
try:
_rebuild_extensions()
except BaseException as exc:
_rollback_import(
'An error occurred while attempting to rebuild the '
'extension modules. Please try manually running '
'`./setup.py develop` or `./setup.py build_ext '
'--inplace` to see what the issue was. Extension '
'modules must be successfully compiled and importable '
'in order to import astropy.')
# Reraise the Exception only in case it wasn't an Exception,
# for example if a "SystemExit" or "KeyboardInterrupt" was
# invoked.
if not isinstance(exc, Exception):
raise
else:
# Outright broken installation; don't be nice.
raise
# add these here so we only need to cleanup the namespace at the end
config_dir = os.path.dirname(__file__)
try:
config.configuration.update_default_config(__package__, config_dir)
except config.configuration.ConfigurationDefaultMissingError as e:
wmsg = (e.args[0] + " Cannot install default profile. If you are "
"importing from source, this is expected.")
warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
def _rebuild_extensions():
global __version__
global __githash__
import subprocess
import time
from .utils.console import Spinner
devnull = open(os.devnull, 'w')
old_cwd = os.getcwd()
os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
try:
sp = subprocess.Popen([sys.executable, 'setup.py', 'build_ext',
'--inplace'], stdout=devnull,
stderr=devnull)
with Spinner('Rebuilding extension modules') as spinner:
while sp.poll() is None:
next(spinner)
time.sleep(0.05)
finally:
os.chdir(old_cwd)
devnull.close()
if sp.returncode != 0:
raise OSError('Running setup.py build_ext --inplace failed '
'with error code {0}: try rerunning this command '
'manually to check what the error was.'.format(
sp.returncode))
# Try re-loading module-level globals from the astropy.version module,
# which may not have existed before this function ran
try:
from .version import version as __version__
except ImportError:
pass
try:
from .version import githash as __githash__
except ImportError:
pass
# Set the bibtex entry to the article referenced in CITATION
def _get_bibtex():
import re
if os.path.exists('CITATION'):
with open('CITATION', 'r') as citation:
refs = re.findall(r'\{[^()]*\}', citation.read())
if len(refs) == 0: return ''
bibtexreference = "@ARTICLE{0}".format(refs[0])
return bibtexreference
else:
return ''
__bibtex__ = _get_bibtex()
import logging
# Use the root logger as a dummy log before initializing Astropy's logger
log = logging.getLogger()
if not _ASTROPY_SETUP_:
from .logger import _init_log, _teardown_log
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
from urllib.parse import urlencode
import webbrowser
version = __version__
if 'dev' in version:
version = 'latest'
else:
version = 'v' + version
url = 'http://docs.astropy.org/en/{0}/search.html?{1}'.format(
version, urlencode({'q': query}))
webbrowser.open(url)
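# Example (illustrative): open the documentation search results for "wcs" in
# the default web browser.
#
#     from astropy import online_help
#     online_help('wcs')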
__dir__ = ['__version__', '__githash__', '__minimum_numpy_version__',
'__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
'online_docs_root', 'conf']
from types import ModuleType as __module_type__
# Clean up top-level namespace--delete everything that isn't in __dir__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __dir__ or
(varname[0] != '_' and
isinstance(locals()[varname], __module_type__) and
locals()[varname].__name__.startswith(__name__ + '.'))):
        # The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains pytest configuration settings that are astropy-specific
(i.e. those that would not necessarily be shared by affiliated packages
making use of astropy's test runner).
"""
from importlib.util import find_spec
from astropy.tests.plugins.display import PYTEST_HEADER_MODULES
from astropy.tests.helper import enable_deprecations_as_exceptions
if find_spec('asdf') is not None:
pytest_plugins = ['asdf.tests.schema_tester']
enable_deprecations_as_exceptions(
include_astropy_deprecations=False,
# This is a workaround for the OpenSSL deprecation warning that comes from
# the `requests` module. It only appears when both asdf and sphinx are
# installed. This can be removed once pyopenssl 1.7.20+ is released.
modules_to_ignore_on_import=['requests'])
try:
import matplotlib
except ImportError:
pass
else:
matplotlib.use('Agg')
PYTEST_HEADER_MODULES['Cython'] = 'cython'
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
return {'astropy': ['astropy.cfg']}
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module defines a logging class based on the built-in logging module"""
import inspect
import os
import sys
import logging
import warnings
from contextlib import contextmanager
from . import config as _config
from . import conf as _conf
from .utils import find_current_module
from .utils.exceptions import AstropyWarning, AstropyUserWarning
__all__ = ['Conf', 'conf', 'log', 'AstropyLogger', 'LoggingError']
# import the logging levels from logging so that one can do:
# log.setLevel(log.DEBUG), for example
logging_levels = ['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL',
'FATAL', ]
for level in logging_levels:
globals()[level] = getattr(logging, level)
__all__ += logging_levels
# Initialize by calling _init_log()
log = None
class LoggingError(Exception):
"""
This exception is for various errors that occur in the astropy logger,
typically when activating or deactivating logger-related features.
"""
class _AstLogIPYExc(Exception):
"""
An exception that is used only as a placeholder to indicate to the
IPython exception-catching mechanism that the astropy
exception-capturing is activated. It should not actually be used as
an exception anywhere.
"""
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.logger`.
"""
log_level = _config.ConfigItem(
'INFO',
"Threshold for the logging messages. Logging "
"messages that are less severe than this level "
"will be ignored. The levels are ``'DEBUG'``, "
"``'INFO'``, ``'WARNING'``, ``'ERROR'``.")
log_warnings = _config.ConfigItem(
True,
"Whether to log `warnings.warn` calls.")
log_exceptions = _config.ConfigItem(
False,
"Whether to log exceptions before raising "
"them.")
log_to_file = _config.ConfigItem(
False,
"Whether to always log messages to a log "
"file.")
log_file_path = _config.ConfigItem(
'',
"The file to log messages to. When ``''``, "
"it defaults to a file ``'astropy.log'`` in "
"the astropy config directory.")
log_file_level = _config.ConfigItem(
'INFO',
"Threshold for logging messages to "
"`log_file_path`.")
log_file_format = _config.ConfigItem(
"%(asctime)r, "
"%(origin)r, %(levelname)r, %(message)r",
"Format for log file entries.")
conf = Conf()
def _init_log():
"""Initializes the Astropy log--in most circumstances this is called
automatically when importing astropy.
"""
global log
orig_logger_cls = logging.getLoggerClass()
logging.setLoggerClass(AstropyLogger)
try:
log = logging.getLogger('astropy')
log._set_defaults()
finally:
logging.setLoggerClass(orig_logger_cls)
return log
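# Illustrative use of the logger once it has been initialized (this happens
# automatically on ``import astropy`` outside of setup):
#
#     from astropy import log
#     log.info('a message whose origin is recorded automatically')
#     log.setLevel('DEBUG')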
def _teardown_log():
"""Shut down exception and warning logging (if enabled) and clear all
Astropy loggers from the logging module's cache.
    This involves poking some logging module internals, so much of it is 'at
your own risk' and is allowed to pass silently if any exceptions occur.
"""
global log
if log.exception_logging_enabled():
log.disable_exception_logging()
if log.warnings_logging_enabled():
log.disable_warnings_logging()
del log
# Now for the fun stuff...
try:
logging._acquireLock()
try:
loggerDict = logging.Logger.manager.loggerDict
            for key in list(loggerDict.keys()):
if key == 'astropy' or key.startswith('astropy.'):
del loggerDict[key]
finally:
logging._releaseLock()
except Exception:
pass
Logger = logging.getLoggerClass()
class AstropyLogger(Logger):
'''
This class is used to set up the Astropy logging.
The main functionality added by this class over the built-in
logging.Logger class is the ability to keep track of the origin of the
messages, the ability to enable logging of warnings.warn calls and
exceptions, and the addition of colorized output and context managers to
easily capture messages to a file or list.
'''
def makeRecord(self, name, level, pathname, lineno, msg, args, exc_info,
func=None, extra=None, sinfo=None):
if extra is None:
extra = {}
if 'origin' not in extra:
current_module = find_current_module(1, finddiff=[True, 'logging'])
if current_module is not None:
extra['origin'] = current_module.__name__
else:
extra['origin'] = 'unknown'
return Logger.makeRecord(self, name, level, pathname, lineno, msg,
args, exc_info, func=func, extra=extra,
sinfo=sinfo)
_showwarning_orig = None
def _showwarning(self, *args, **kwargs):
# Bail out if we are not catching a warning from Astropy
if not isinstance(args[0], AstropyWarning):
return self._showwarning_orig(*args, **kwargs)
warning = args[0]
# Deliberately not using isinstance here: We want to display
# the class name only when it's not the default class,
# AstropyWarning. The name of subclasses of AstropyWarning should
# be displayed.
if type(warning) not in (AstropyWarning, AstropyUserWarning):
message = '{0}: {1}'.format(warning.__class__.__name__, args[0])
else:
message = str(args[0])
mod_path = args[2]
# Now that we have the module's path, we look through sys.modules to
# find the module object and thus the fully-package-specified module
# name. The module.__file__ is the original source file name.
mod_name = None
mod_path, ext = os.path.splitext(mod_path)
for name, mod in list(sys.modules.items()):
try:
# Believe it or not this can fail in some cases:
# https://github.com/astropy/astropy/issues/2671
path = os.path.splitext(getattr(mod, '__file__', ''))[0]
except Exception:
continue
if path == mod_path:
mod_name = mod.__name__
break
if mod_name is not None:
self.warning(message, extra={'origin': mod_name})
else:
self.warning(message)
def warnings_logging_enabled(self):
return self._showwarning_orig is not None
def enable_warnings_logging(self):
'''
Enable logging of warnings.warn() calls
Once called, any subsequent calls to ``warnings.warn()`` are
redirected to this logger and emitted with level ``WARN``. Note that
this replaces the output from ``warnings.warn``.
This can be disabled with ``disable_warnings_logging``.
'''
if self.warnings_logging_enabled():
raise LoggingError("Warnings logging has already been enabled")
self._showwarning_orig = warnings.showwarning
warnings.showwarning = self._showwarning
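    # A small usage sketch (illustrative, assuming ``log`` is the initialized
    # astropy logger):
    #
    #     import warnings
    #     from astropy.utils.exceptions import AstropyUserWarning
    #     log.enable_warnings_logging()
    #     warnings.warn("something to note", AstropyUserWarning)  # routed to the logger
    #     log.disable_warnings_logging()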
def disable_warnings_logging(self):
'''
Disable logging of warnings.warn() calls
Once called, any subsequent calls to ``warnings.warn()`` are no longer
redirected to this logger.
This can be re-enabled with ``enable_warnings_logging``.
'''
if not self.warnings_logging_enabled():
raise LoggingError("Warnings logging has not been enabled")
if warnings.showwarning != self._showwarning:
raise LoggingError("Cannot disable warnings logging: "
"warnings.showwarning was not set by this "
"logger, or has been overridden")
warnings.showwarning = self._showwarning_orig
self._showwarning_orig = None
_excepthook_orig = None
def _excepthook(self, etype, value, traceback):
if traceback is None:
mod = None
else:
tb = traceback
while tb.tb_next is not None:
tb = tb.tb_next
mod = inspect.getmodule(tb)
        # Include the error type in the message.
if len(value.args) > 0:
message = '{0}: {1}'.format(etype.__name__, str(value))
else:
message = str(etype.__name__)
if mod is not None:
self.error(message, extra={'origin': mod.__name__})
else:
self.error(message)
self._excepthook_orig(etype, value, traceback)
def exception_logging_enabled(self):
'''
Determine if the exception-logging mechanism is enabled.
Returns
-------
exclog : bool
True if exception logging is on, False if not.
'''
try:
ip = get_ipython()
except NameError:
ip = None
if ip is None:
return self._excepthook_orig is not None
else:
return _AstLogIPYExc in ip.custom_exceptions
def enable_exception_logging(self):
'''
Enable logging of exceptions
Once called, any uncaught exceptions will be emitted with level
``ERROR`` by this logger, before being raised.
This can be disabled with ``disable_exception_logging``.
'''
try:
ip = get_ipython()
except NameError:
ip = None
if self.exception_logging_enabled():
raise LoggingError("Exception logging has already been enabled")
if ip is None:
# standard python interpreter
self._excepthook_orig = sys.excepthook
sys.excepthook = self._excepthook
else:
# IPython has its own way of dealing with excepthook
# We need to locally define the function here, because IPython
# actually makes this a member function of their own class
def ipy_exc_handler(ipyshell, etype, evalue, tb, tb_offset=None):
# First use our excepthook
self._excepthook(etype, evalue, tb)
# Now also do IPython's traceback
ipyshell.showtraceback((etype, evalue, tb), tb_offset=tb_offset)
# now register the function with IPython
# note that we include _AstLogIPYExc so `disable_exception_logging`
# knows that it's disabling the right thing
ip.set_custom_exc((BaseException, _AstLogIPYExc), ipy_exc_handler)
# and set self._excepthook_orig to a no-op
self._excepthook_orig = lambda etype, evalue, tb: None
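    # Sketch of the intended flow in a plain interpreter (illustrative,
    # assuming ``log`` is the initialized astropy logger):
    #
    #     log.enable_exception_logging()
    #     # any uncaught exception is now logged at ERROR level before the
    #     # original sys.excepthook prints the traceback
    #     log.disable_exception_logging()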
def disable_exception_logging(self):
'''
Disable logging of exceptions
Once called, any uncaught exceptions will no longer be emitted by this
logger.
This can be re-enabled with ``enable_exception_logging``.
'''
try:
ip = get_ipython()
except NameError:
ip = None
if not self.exception_logging_enabled():
raise LoggingError("Exception logging has not been enabled")
if ip is None:
# standard python interpreter
if sys.excepthook != self._excepthook:
raise LoggingError("Cannot disable exception logging: "
"sys.excepthook was not set by this logger, "
"or has been overridden")
sys.excepthook = self._excepthook_orig
self._excepthook_orig = None
else:
# IPython has its own way of dealing with exceptions
ip.set_custom_exc(tuple(), None)
def enable_color(self):
'''
Enable colorized output
'''
_conf.use_color = True
def disable_color(self):
'''
Disable colorized output
'''
_conf.use_color = False
@contextmanager
def log_to_file(self, filename, filter_level=None, filter_origin=None):
'''
Context manager to temporarily log messages to a file.
Parameters
----------
filename : str
The file to log messages to.
filter_level : str
If set, any log messages less important than ``filter_level`` will
not be output to the file. Note that this is in addition to the
top-level filtering for the logger, so if the logger has level
'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG``
will have no effect, since these messages are already filtered
out.
filter_origin : str
If set, only log messages with an origin starting with
``filter_origin`` will be output to the file.
Notes
-----
By default, the logger already outputs log messages to a file set in
the Astropy configuration file. Using this context manager does not
stop log messages from being output to that file, nor does it stop log
messages from being printed to standard output.
Examples
--------
The context manager is used as::
with logger.log_to_file('myfile.log'):
# your code here
'''
fh = logging.FileHandler(filename)
if filter_level is not None:
fh.setLevel(filter_level)
if filter_origin is not None:
fh.addFilter(FilterOrigin(filter_origin))
f = logging.Formatter(conf.log_file_format)
fh.setFormatter(f)
self.addHandler(fh)
yield
fh.close()
self.removeHandler(fh)
@contextmanager
def log_to_list(self, filter_level=None, filter_origin=None):
'''
Context manager to temporarily log messages to a list.
Parameters
----------
        filter_level : str
            If set, any log messages less important than ``filter_level`` will
            not be included in the list. Note that this is in addition to the
            top-level filtering for the logger, so if the logger has level
            'INFO', then setting ``filter_level`` to ``INFO`` or ``DEBUG``
            will have no effect, since these messages are already filtered
            out.
        filter_origin : str
            If set, only log messages with an origin starting with
            ``filter_origin`` will be included in the list.
Notes
-----
Using this context manager does not stop log messages from being
output to standard output.
Examples
--------
The context manager is used as::
with logger.log_to_list() as log_list:
# your code here
'''
lh = ListHandler()
if filter_level is not None:
lh.setLevel(filter_level)
if filter_origin is not None:
lh.addFilter(FilterOrigin(filter_origin))
self.addHandler(lh)
yield lh.log_list
self.removeHandler(lh)
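    # Sketch of reading back the captured records (illustrative, assuming
    # ``log`` is the initialized astropy logger):
    #
    #     with log.log_to_list() as log_list:
    #         log.info('captured message')
    #     print([record.msg for record in log_list])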
def _set_defaults(self):
'''
Reset logger to its initial state
'''
# Reset any previously installed hooks
if self.warnings_logging_enabled():
self.disable_warnings_logging()
if self.exception_logging_enabled():
self.disable_exception_logging()
# Remove all previous handlers
for handler in self.handlers[:]:
self.removeHandler(handler)
# Set levels
self.setLevel(conf.log_level)
# Set up the stdout handler
sh = StreamHandler()
self.addHandler(sh)
# Set up the main log file handler if requested (but this might fail if
# configuration directory or log file is not writeable).
if conf.log_to_file:
log_file_path = conf.log_file_path
# "None" as a string because it comes from config
try:
_ASTROPY_TEST_
testing_mode = True
except NameError:
testing_mode = False
try:
if log_file_path == '' or testing_mode:
log_file_path = os.path.join(
_config.get_config_dir(), "astropy.log")
else:
log_file_path = os.path.expanduser(log_file_path)
fh = logging.FileHandler(log_file_path)
except OSError as e:
warnings.warn(
'log file {0!r} could not be opened for writing: '
'{1}'.format(log_file_path, str(e)), RuntimeWarning)
else:
formatter = logging.Formatter(conf.log_file_format)
fh.setFormatter(formatter)
fh.setLevel(conf.log_file_level)
self.addHandler(fh)
if conf.log_warnings:
self.enable_warnings_logging()
if conf.log_exceptions:
self.enable_exception_logging()
class StreamHandler(logging.StreamHandler):
"""
A specialized StreamHandler that logs INFO and DEBUG messages to
stdout, and all other messages to stderr. Also provides coloring
of the output, if enabled in the parent logger.
"""
def emit(self, record):
'''
        Emit the record, sending DEBUG and INFO messages to stdout and all
        other messages to stderr, colorizing the level name if enabled.
'''
if record.levelno <= logging.INFO:
stream = sys.stdout
else:
stream = sys.stderr
if record.levelno < logging.DEBUG or not _conf.use_color:
print(record.levelname, end='', file=stream)
else:
# Import utils.console only if necessary and at the latest because
# the import takes a significant time [#4649]
from .utils.console import color_print
if record.levelno < logging.INFO:
color_print(record.levelname, 'magenta', end='', file=stream)
elif record.levelno < logging.WARN:
color_print(record.levelname, 'green', end='', file=stream)
elif record.levelno < logging.ERROR:
color_print(record.levelname, 'brown', end='', file=stream)
else:
color_print(record.levelname, 'red', end='', file=stream)
record.message = "{0} [{1:s}]".format(record.msg, record.origin)
print(": " + record.message, file=stream)
class FilterOrigin:
'''A filter for the record origin'''
def __init__(self, origin):
self.origin = origin
def filter(self, record):
return record.origin.startswith(self.origin)
class ListHandler(logging.Handler):
'''A handler that can be used to capture the records in a list'''
def __init__(self, filter_level=None, filter_origin=None):
logging.Handler.__init__(self)
self.log_list = []
def emit(self, record):
self.log_list.append(record)
|
7b4869b5007e574121a52db58035059de1f6211e72b049914935739e18bbbbfc | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_docs" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
from datetime import datetime
import os
ON_RTD = os.environ.get('READTHEDOCS') == 'True'
ON_TRAVIS = os.environ.get('TRAVIS') == 'true'
try:
import astropy_helpers
except ImportError:
# Building from inside the docs/ directory?
import os
import sys
if os.path.basename(os.getcwd()) == 'docs':
a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
if os.path.isdir(a_h_path):
sys.path.insert(1, a_h_path)
# If that doesn't work trying to import from astropy_helpers below will
# still blow up
# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *
import astropy
plot_rcparams = {}
plot_rcparams['figure.figsize'] = (6, 6)
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_rcparams['axes.labelsize'] = 'large'
plot_rcparams['figure.subplot.hspace'] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.1'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
check_sphinx_version("1.2.1")
# The intersphinx_mapping in astropy_helpers.sphinx.conf refers to astropy for
# the benefit of affiliated packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy']
# add any custom intersphinx for astropy
intersphinx_mapping['pytest'] = ('https://docs.pytest.org/en/latest/', None)
intersphinx_mapping['ipython'] = ('http://ipython.readthedocs.io/en/stable/', None)
intersphinx_mapping['pandas'] = ('http://pandas.pydata.org/pandas-docs/stable/', None)
intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
exclude_patterns.append('_pkgtemplate.rst')
# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
.. |minimum_numpy_version| replace:: {0.__minimum_numpy_version__}
.. Astropy
.. _Astropy: http://astropy.org
.. _`Astropy mailing list`: https://mail.python.org/mailman/listinfo/astropy
.. _`astropy-dev mailing list`: http://groups.google.com/group/astropy-dev
""".format(astropy)
# -- Project information ------------------------------------------------------
project = u'Astropy'
author = u'The Astropy Developers'
copyright = u'2011–{0}, '.format(datetime.utcnow().year) + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = astropy.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = astropy.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
#html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
latex_logo = '_static/astropy_logo.pdf'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -- Options for the edit_on_github extension ----------------------------------------
extensions += ['astropy_helpers.sphinx.ext.edit_on_github']
# Don't import the module as "version" or it will override the
# "version" configuration parameter
from astropy import version as versionmod
edit_on_github_project = "astropy/astropy"
if versionmod.release:
edit_on_github_branch = "v{0}.{1}.x".format(
versionmod.major, versionmod.minor)
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
edit_on_github_skip_regex = '_.*|api/.*'
github_issues_url = 'https://github.com/astropy/astropy/issues/'
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
nitpick_ignore = []
for line in open('nitpick-exceptions'):
if line.strip() == "" or line.startswith("#"):
continue
dtype, target = line.split(None, 1)
target = target.strip()
nitpick_ignore.append((dtype, target))
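# The expected format of ``nitpick-exceptions`` (inferred from the parsing
# above) is one whitespace-separated "<type> <target>" pair per line, with
# blank lines and ``#`` comments ignored; hypothetical example entries:
#
#     py:class numpy.ma.core.MaskedArray
#     py:obj astropy.table.Column.data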
# -- Options for the Sphinx gallery -------------------------------------------
try:
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
'backreferences_dir': 'generated/modules', # path to store the module using example template
'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_"
'examples_dirs': '..{}examples'.format(os.sep), # path to the examples scripts
'gallery_dirs': 'generated/examples', # path to save gallery generated examples
'reference_url': {
'astropy': None,
'matplotlib': 'http://matplotlib.org/',
'numpy': 'http://docs.scipy.org/doc/numpy/',
},
'abort_on_example_error': True
}
except ImportError:
def setup(app):
app.warn('The sphinx_gallery extension is not installed, so the '
'gallery will not be built. You will probably see '
'additional warnings about undefined references due '
'to this.')
linkcheck_anchors = False
|
b58a788365928d3a739f9df60cedc17eb8a48fe9e634dd89dd796c14cc3c09d0 | # -*- coding: utf-8 -*-
"""
========================
Title of Example
========================
This example <verb> <active tense> <does something>.
The example uses <packages> to <do something> and <other package> to <do other
thing>. Include links to referenced packages like this: `astropy.io.fits` to
show the astropy.io.fits or like this `~astropy.io.fits` to show just 'fits'.
-------------------
*By: <names>*
*License: BSD*
-------------------
"""
##############################################################################
# Make print work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
# uncomment if including figures:
# import matplotlib.pyplot as plt
# from astropy.visualization import astropy_mpl_style
# plt.style.use(astropy_mpl_style)
##############################################################################
# This code block is executed, although it produces no output. Lines starting
# with a simple hash are code comments and get treated as part of the code
# block. To include this new comment string we started the new block with a
# long line of hashes.
#
# The sphinx-gallery parser will assume everything after this splitter and that
# continues to start with a **comment hash and space** (respecting code style)
# is text that has to be rendered in
# html format. Keep in mind to always keep your comments together by
# comment hashes. That means to break a paragraph you still need to comment
# that line break.
#
# In this example the next block of code produces some plotable data. Code is
# executed, figure is saved and then code is presented next, followed by the
# inlined figure.
x = np.linspace(-np.pi, np.pi, 300)
xx, yy = np.meshgrid(x, x)
z = np.cos(xx) + np.cos(yy)
plt.figure()
plt.imshow(z)
plt.colorbar()
plt.xlabel('$x$')
plt.ylabel('$y$')
###########################################################################
# Again it is possible to continue the discussion with a new Python string. This
# time we use it to introduce the next code block, which generates 2 separate figures.
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('hot'))
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('Spectral'), interpolation='none')
##########################################################################
# There are some subtle differences between rendered html comment strings and
# code comment strings, which I'll demonstrate below. (Some of this
# only makes sense if you look at the
# :download:`raw Python script <plot_notebook.py>`)
#
# Comments in comment blocks remain nested in the text.
def dummy():
"""Dummy function to make sure docstrings don't get rendered as text"""
pass
# Code comments not preceded by the hash splitter are left in code blocks.
string = """
Triple-quoted string which tries to break parser but doesn't.
"""
############################################################################
# Output of the script is captured:
print('Some output from Python')
############################################################################
# Finally, I'll call ``show`` at the end just so someone running the Python
# code directly will see the plots; this is not necessary for creating the docs
plt.show()
|
4cc48754070010c8109d4cd80648a363617a4e1103a29743e3e6c6cb23ad12f6 | # -*- coding: utf-8 -*-
"""
========================================================================
Transforming positions and velocities to and from a Galactocentric frame
========================================================================
This document shows a few examples of how to use and customize the
`~astropy.coordinates.Galactocentric` frame to transform Heliocentric sky
positions, distance, proper motions, and radial velocities to a Galactocentric,
Cartesian frame, and the same in reverse.
The main configurable parameters of the `~astropy.coordinates.Galactocentric`
frame control the position and velocity of the solar system barycenter within
the Galaxy. These are specified by setting the ICRS coordinates of the
Galactic center, the distance to the Galactic center (the sun-galactic center
line is always assumed to be the x-axis of the Galactocentric frame), and the
Cartesian 3-velocity of the sun in the Galactocentric frame. We'll first
demonstrate how to customize these values, then show how to set the solar motion
instead by inputting the proper motion of Sgr A*.
Note that, for brevity, we may refer to the solar system barycenter as just "the
sun" in the examples below.
-------------------
*By: Adrian Price-Whelan*
*License: BSD*
-------------------
"""
##############################################################################
# Make `print` work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the necessary astropy subpackages
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# Let's first define a barycentric coordinate and velocity in the ICRS frame.
# We'll use the data for the star HD 39881 from the `Simbad
# <http://simbad.harvard.edu/simbad/>`_ database:
c1 = coord.ICRS(ra=89.014303*u.degree, dec=13.924912*u.degree,
distance=(37.59*u.mas).to(u.pc, u.parallax()),
pm_ra_cosdec=372.72*u.mas/u.yr,
pm_dec=-483.69*u.mas/u.yr,
radial_velocity=0.37*u.km/u.s)
##############################################################################
# This is a high proper-motion star; suppose we'd like to transform its position
# and velocity to a Galactocentric frame to see if it has a large 3D velocity
# as well. To use the Astropy default solar position and motion parameters, we
# can simply do:
gc1 = c1.transform_to(coord.Galactocentric)
##############################################################################
# From here, we can access the components of the resulting
# `~astropy.coordinates.Galactocentric` instance to see the 3D Cartesian
# velocity components:
print(gc1.v_x, gc1.v_y, gc1.v_z)
##############################################################################
# The default parameters for the `~astropy.coordinates.Galactocentric` frame
# are detailed in the linked documentation, but we can modify the most commonly
# changed values using the keywords ``galcen_distance``, ``galcen_v_sun``, and
# ``z_sun`` which set the sun-Galactic center distance, the 3D velocity vector
# of the sun, and the height of the sun above the Galactic midplane,
# respectively. The velocity of the sun must be specified as a
# `~astropy.coordinates.CartesianDifferential` instance, as in the example
# below. Note that, as with the positions, the Galactocentric frame is a
# right-handed system - the x-axis is positive towards the Galactic center, so
# ``v_x`` is opposite of the Galactocentric radial velocity:
v_sun = coord.CartesianDifferential([11.1, 244, 7.25]*u.km/u.s)
gc_frame = coord.Galactocentric(galcen_distance=8*u.kpc,
galcen_v_sun=v_sun,
z_sun=0*u.pc)
##############################################################################
# We can then transform to this frame instead, with our custom parameters:
gc2 = c1.transform_to(gc_frame)
print(gc2.v_x, gc2.v_y, gc2.v_z)
##############################################################################
# It's sometimes useful to specify the solar motion using the `proper motion
# of Sgr A* <https://arxiv.org/abs/astro-ph/0408107>`_ instead of Cartesian
# velocity components. With an assumed distance, we can convert proper motion
# components to Cartesian velocity components using `astropy.units`:
galcen_distance = 8*u.kpc
pm_gal_sgrA = [-6.379, -0.202] * u.mas/u.yr # from Reid & Brunthaler 2004
vy, vz = -(galcen_distance * pm_gal_sgrA).to(u.km/u.s, u.dimensionless_angles())
##############################################################################
# We still have to assume a line-of-sight velocity for the Galactic center,
# which we will again take to be 11.1 km/s:
vx = 11.1 * u.km/u.s
gc_frame2 = coord.Galactocentric(galcen_distance=galcen_distance,
galcen_v_sun=coord.CartesianDifferential(vx, vy, vz),
z_sun=0*u.pc)
gc3 = c1.transform_to(gc_frame2)
print(gc3.v_x, gc3.v_y, gc3.v_z)
##############################################################################
# The transformations also work in the opposite direction. This can be useful
# for transforming simulated or theoretical data to observable quantities. As
# an example, we'll generate 4 theoretical circular orbits at different
# Galactocentric radii with the same circular velocity, and transform them to
# Heliocentric coordinates:
ring_distances = np.arange(10, 25+1, 5) * u.kpc
circ_velocity = 220 * u.km/u.s
phi_grid = np.linspace(90, 270, 512) * u.degree # grid of azimuths
ring_rep = coord.CylindricalRepresentation(
rho=ring_distances[:,np.newaxis],
phi=phi_grid[np.newaxis],
z=np.zeros_like(ring_distances)[:,np.newaxis])
angular_velocity = (-circ_velocity / ring_distances).to(u.mas/u.yr,
u.dimensionless_angles())
ring_dif = coord.CylindricalDifferential(
d_rho=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s,
d_phi=angular_velocity[:,np.newaxis],
d_z=np.zeros(phi_grid.shape)[np.newaxis]*u.km/u.s
)
ring_rep = ring_rep.with_differentials(ring_dif)
gc_rings = coord.Galactocentric(ring_rep)
##############################################################################
# First, let's visualize the geometry in Galactocentric coordinates. Here are
# the positions and velocities of the rings; note that in the velocity plot,
# the velocities of the 4 rings are identical and thus overlaid under the same
# curve:
fig,axes = plt.subplots(1, 2, figsize=(12,6))
# Positions
axes[0].plot(gc_rings.x.T, gc_rings.y.T, marker='None', linewidth=3)
axes[0].text(-8., 0, r'$\odot$', fontsize=20)
axes[0].set_xlim(-30, 30)
axes[0].set_ylim(-30, 30)
axes[0].set_xlabel('$x$ [kpc]')
axes[0].set_ylabel('$y$ [kpc]')
# Velocities
axes[1].plot(gc_rings.v_x.T, gc_rings.v_y.T, marker='None', linewidth=3)
axes[1].set_xlim(-250, 250)
axes[1].set_ylim(-250, 250)
axes[1].set_xlabel('$v_x$ [{0}]'.format((u.km/u.s).to_string("latex_inline")))
axes[1].set_ylabel('$v_y$ [{0}]'.format((u.km/u.s).to_string("latex_inline")))
fig.tight_layout()
##############################################################################
# Now we can transform to Galactic coordinates and visualize the rings in
# observable coordinates:
gal_rings = gc_rings.transform_to(coord.Galactic)
fig,ax = plt.subplots(1, 1, figsize=(8,6))
for i in range(len(ring_distances)):
ax.plot(gal_rings[i].l.degree, gal_rings[i].pm_l_cosb.value,
label=str(ring_distances[i]), marker='None', linewidth=3)
ax.set_xlim(360, 0)
ax.set_xlabel('$l$ [deg]')
ax.set_ylabel(r'$\mu_l \, \cos b$ [{0}]'.format((u.mas/u.yr).to_string('latex_inline')))
ax.legend()
|
acf72c7d45b714149a27d722fc07da683b5f9d96dc19f47977057d7d25427204 | # -*- coding: utf-8 -*-
"""
===================================================================
Determining and plotting the altitude/azimuth of a celestial object
===================================================================
This example demonstrates coordinate transformations and the creation of
visibility curves to assist with observing run planning.
In this example, we make a `~astropy.coordinates.SkyCoord` instance for M33.
The altitude-azimuth coordinates are then found using
`astropy.coordinates.EarthLocation` and `astropy.time.Time` objects.
This example is meant to demonstrate the capabilities of the
`astropy.coordinates` package. For more convenient and/or complex observation
planning, consider the `astroplan <https://astroplan.readthedocs.org/>`_
package.
-------------------
*By: Erik Tollerud, Kelle Cruz*
*License: BSD*
-------------------
"""
##############################################################################
# Let's suppose you are planning to visit picturesque Bear Mountain State Park
# in New York, USA. You're bringing your telescope with you (of course), and
# someone told you M33 is a great target to observe there. You happen to know
# you're free at 11:00 pm local time, and you want to know if it will be up.
# Astropy can answer that.
#
# Make print work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for finding coordinates and making
# coordinate transformations
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
##############################################################################
# `astropy.coordinates.SkyCoord.from_name` uses Simbad to resolve object
# names and retrieve coordinates.
#
# Get the coordinates of M33:
m33 = SkyCoord.from_name('M33')
##############################################################################
# Use `astropy.coordinates.EarthLocation` to provide the location of Bear
# Mountain and set the time to 11pm EDT on 2012 July 12:
bear_mountain = EarthLocation(lat=41.3*u.deg, lon=-74*u.deg, height=390*u.m)
utcoffset = -4*u.hour # Eastern Daylight Time
time = Time('2012-7-12 23:00:00') - utcoffset
##############################################################################
# `astropy.coordinates.EarthLocation.get_site_names` and
# `~astropy.coordinates.EarthLocation.of_site` can be used to get
# locations of major observatories.
#
# Use `astropy.coordinates` to find the Alt, Az coordinates of M33 as
# observed from Bear Mountain at 11pm on 2012 July 12.
m33altaz = m33.transform_to(AltAz(obstime=time,location=bear_mountain))
print("M33's Altitude = {0.alt:.2}".format(m33altaz))
##############################################################################
# This is helpful since it turns out M33 is barely above the horizon at this
# time. It's more informative to find M33's airmass over the course of
# the night.
#
# Find the alt,az coordinates of M33 at 100 times evenly spaced between 10pm
# and 7am EDT:
midnight = Time('2012-7-13 00:00:00') - utcoffset
delta_midnight = np.linspace(-2, 10, 100)*u.hour
frame_July13night = AltAz(obstime=midnight+delta_midnight,
location=bear_mountain)
m33altazs_July13night = m33.transform_to(frame_July13night)
##############################################################################
# convert alt, az to airmass with `~astropy.coordinates.AltAz.secz` attribute:
m33airmasss_July13night = m33altazs_July13night.secz
##############################################################################
# Plot the airmass as a function of time:
plt.plot(delta_midnight, m33airmasss_July13night)
plt.xlim(-2, 10)
plt.ylim(1, 4)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Airmass [Sec(z)]')
plt.show()
##############################################################################
# Use `~astropy.coordinates.get_sun` to find the location of the Sun at 1000
# evenly spaced times between noon on July 12 and noon on July 13:
from astropy.coordinates import get_sun
delta_midnight = np.linspace(-12, 12, 1000)*u.hour
times_July12_to_13 = midnight + delta_midnight
frame_July12_to_13 = AltAz(obstime=times_July12_to_13, location=bear_mountain)
sunaltazs_July12_to_13 = get_sun(times_July12_to_13).transform_to(frame_July12_to_13)
##############################################################################
# Do the same with `~astropy.coordinates.get_moon` to find when the moon is
# up. Be aware that this will need to download a 10MB file from the internet
# to get a precise location of the moon.
from astropy.coordinates import get_moon
moon_July12_to_13 = get_moon(times_July12_to_13)
moonaltazs_July12_to_13 = moon_July12_to_13.transform_to(frame_July12_to_13)
##############################################################################
# Find the alt,az coordinates of M33 at those same times:
m33altazs_July12_to_13 = m33.transform_to(frame_July12_to_13)
##############################################################################
# Make a beautiful figure illustrating nighttime and the altitudes of M33 and
# the Sun over that time:
plt.plot(delta_midnight, sunaltazs_July12_to_13.alt, color='r', label='Sun')
plt.plot(delta_midnight, moonaltazs_July12_to_13.alt, color=[0.75]*3, ls='--', label='Moon')
plt.scatter(delta_midnight, m33altazs_July12_to_13.alt,
c=m33altazs_July12_to_13.az, label='M33', lw=0, s=8,
cmap='viridis')
plt.fill_between(delta_midnight.to('hr').value, 0, 90,
sunaltazs_July12_to_13.alt < -0*u.deg, color='0.5', zorder=0)
plt.fill_between(delta_midnight.to('hr').value, 0, 90,
sunaltazs_July12_to_13.alt < -18*u.deg, color='k', zorder=0)
plt.colorbar().set_label('Azimuth [deg]')
plt.legend(loc='upper left')
plt.xlim(-12, 12)
plt.xticks(np.arange(13)*2 -12)
plt.ylim(0, 90)
plt.xlabel('Hours from EDT Midnight')
plt.ylabel('Altitude [deg]')
plt.show()
|
5e10cacac7b3876e1564e6a3008f5448ab228d0e8c1cfa21e89a276700ff877f | # -*- coding: utf-8 -*-
"""
================================================================
Convert a radial velocity to the Galactic Standard of Rest (GSR)
================================================================
Radial or line-of-sight velocities of sources are often reported in a
Heliocentric or Solar-system barycentric reference frame. A common
transformation incorporates the projection of the Sun's motion along the
line-of-sight to the target, hence transforming it to a Galactic rest frame
instead (sometimes referred to as the Galactic Standard of Rest, GSR). This
transformation depends on the assumptions about the orientation of the Galactic
frame relative to the bary- or Heliocentric frame. It also depends on the
assumed solar velocity vector. Here we'll demonstrate how to perform this
transformation using a sky position and barycentric radial-velocity.
-------------------
*By: Adrian Price-Whelan*
*License: BSD*
-------------------
"""
################################################################################
# Make print work the same in all versions of Python and import the required
# Astropy packages:
import astropy.units as u
import astropy.coordinates as coord
################################################################################
# For this example, let's work with the coordinates and barycentric radial
# velocity of the star HD 155967, as obtained from
# `Simbad <http://simbad.harvard.edu/simbad/>`_:
icrs = coord.ICRS(ra=258.58356362*u.deg, dec=14.55255619*u.deg,
radial_velocity=-16.1*u.km/u.s)
################################################################################
# We next need to decide on the velocity of the Sun in the assumed GSR frame.
# We'll use the same velocity vector as used in the
# `~astropy.coordinates.Galactocentric` frame, and convert it to a
# `~astropy.coordinates.CartesianRepresentation` object using the
# ``.to_cartesian()`` method of the
# `~astropy.coordinates.CartesianDifferential` object ``galcen_v_sun``:
v_sun = coord.Galactocentric.galcen_v_sun.to_cartesian()
################################################################################
# We now need to get a unit vector in the assumed Galactic frame from the sky
# position in the ICRS frame above. We'll use this unit vector to project the
# solar velocity onto the line-of-sight:
gal = icrs.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
################################################################################
# Now we project the solar velocity using this unit vector:
v_proj = v_sun.dot(unit_vector)
################################################################################
# Finally, we add the projection of the solar velocity to the radial velocity
# to get a GSR radial velocity:
rv_gsr = icrs.radial_velocity + v_proj
print(rv_gsr)
################################################################################
# We could wrap this in a function so we can control the solar velocity and
# re-use the above code:
def rv_to_gsr(c, v_sun=None):
"""Transform a barycentric radial velocity to the Galactic Standard of Rest
(GSR).
    The input radial velocity must be attached to the input coordinate frame
    ``c`` (for example, as its ``radial_velocity`` component).
Parameters
----------
c : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
The radial velocity, associated with a sky coordinates, to be
transformed.
v_sun : `~astropy.units.Quantity` (optional)
The 3D velocity of the solar system barycenter in the GSR frame.
Defaults to the same solar motion as in the
`~astropy.coordinates.Galactocentric` frame.
Returns
-------
v_gsr : `~astropy.units.Quantity`
The input radial velocity transformed to a GSR frame.
"""
if v_sun is None:
v_sun = coord.Galactocentric.galcen_v_sun.to_cartesian()
    gal = c.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
v_proj = v_sun.dot(unit_vector)
return c.radial_velocity + v_proj
rv_gsr = rv_to_gsr(icrs)
print(rv_gsr)
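################################################################################
# As a quick sketch (an addition, not part of the original example), the same
# function can be called with a custom solar velocity; the numbers below are
# purely illustrative:
v_sun_alt = coord.CartesianDifferential([11.1, 232.24, 7.25]*u.km/u.s).to_cartesian()
print(rv_to_gsr(icrs, v_sun=v_sun_alt))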
|
d9f332bf55abee762b182ceac1bb11e3c82cea9c833521d0be52d43c888650ca | # -*- coding: utf-8 -*-
"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy-coordinates-design` and the
docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example, we
will define a coordinate system defined by the plane of orbit of the Sagittarius
Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003). The Sgr
coordinate system is often referred to in terms of two angular coordinates,
:math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page http://www.stsci.edu/~dlaw/Sgr/
-------------------
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
-------------------
"""
##############################################################################
# Make `print` work the same in all versions of Python, set up numpy,
# matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
http://adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
http://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
pm_Beta : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta``
        must also be given).
radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')],
coord.SphericalCosLatDifferential: [
coord.RepresentationMapping('d_lon_coslat', 'pm_Lambda_cosBeta'),
coord.RepresentationMapping('d_lat', 'pm_Beta'),
coord.RepresentationMapping('d_distance', 'radial_velocity')],
coord.SphericalDifferential: [
coord.RepresentationMapping('d_lon', 'pm_Lambda'),
coord.RepresentationMapping('d_lat', 'pm_Beta'),
coord.RepresentationMapping('d_distance', 'radial_velocity')]
}
frame_specific_representation_info[coord.UnitSphericalRepresentation] = \
frame_specific_representation_info[coord.SphericalRepresentation]
frame_specific_representation_info[coord.UnitSphericalCosLatDifferential] = \
frame_specific_representation_info[coord.SphericalCosLatDifferential]
frame_specific_representation_info[coord.UnitSphericalDifferential] = \
frame_specific_representation_info[coord.SphericalDifferential]
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.ICRS(280.161732*u.degree, 11.91934*u.degree)
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(r"$\mu_\Lambda \, \cos B$ [{0}]"
.format(sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')))
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(r"$\mu_\alpha \, \cos\delta$ [{0}]"
.format(icrs.pm_ra_cosdec.unit.to_string('latex_inline')))
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(r"$\mu_\delta$ [{0}]"
.format(icrs.pm_dec.unit.to_string('latex_inline')))
plt.show()
|
f02043221ece54730f468542cac47fbe467917e71c50da1c69c6667d82c7b190 | # -*- coding: utf-8 -*-
"""
=====================================================
Create a multi-extension FITS (MEF) file from scratch
=====================================================
This example demonstrates how to create a multi-extension FITS (MEF)
file from scratch using `astropy.io.fits`.
-------------------
*By: Erik Bray*
*License: BSD*
-------------------
"""
import os
##############################################################################
# HDUList objects are used to hold all the HDUs in a FITS file. This
# ``HDUList`` class is a subclass of Python's builtin `list`, and can be
# created from scratch. For example, to create a FITS file with
# two image extensions:
from astropy.io import fits
new_hdul = fits.HDUList()
new_hdul.append(fits.ImageHDU())
new_hdul.append(fits.ImageHDU())
##############################################################################
# Write out the new file to disk:
new_hdul.writeto('test.fits')
##############################################################################
# Alternatively, the HDU instances can be created first (or read from an
# existing FITS file).
#
# Create a multi-extension FITS file with an explicit PRIMARY HDU and one
# empty IMAGE extension (a default PRIMARY HDU would be prepended
# automatically if one were not specified); we use ``overwrite=True`` to
# overwrite the file if it already exists:
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
new_hdul = fits.HDUList([hdu1, hdu2])
new_hdul.writeto('test.fits', overwrite=True)
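##############################################################################
# As a quick check (an addition to the original example), the structure of the
# file we just wrote can be inspected with `astropy.io.fits.info()`:
fits.info('test.fits')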
##############################################################################
# Finally, we'll remove the file we created:
os.remove('test.fits')
|
6d5e36720c0cc9f7e44ed1440757538afabbe214ccafe1d09047e6ffebb75a3b | # -*- coding: utf-8 -*-
"""
==================
Edit a FITS header
==================
This example describes how to edit a value in a FITS header
using `astropy.io.fits`.
-------------------
*By: Adrian Price-Whelan*
*License: BSD*
-------------------
"""
from astropy.io import fits
##############################################################################
# Download a FITS file:
from astropy.utils.data import get_pkg_data_filename
fits_file = get_pkg_data_filename('tutorials/FITS-Header/input_file.fits')
##############################################################################
# Look at contents of the FITS file
fits.info(fits_file)
##############################################################################
# Look at the headers of the two extensions:
print("Before modifications:")
print()
print("Extension 0:")
print(repr(fits.getheader(fits_file, 0)))
print()
print("Extension 1:")
print(repr(fits.getheader(fits_file, 1)))
##############################################################################
# `astropy.io.fits` provides an object-oriented interface for reading and
# interacting with FITS files, but for small operations (like this example) it
# is often easier to use the
# `convenience functions <http://docs.astropy.org/en/latest/io/fits/index.html#convenience-functions>`_.
#
# To edit a single header value in the header for extension 0, use the
# `~astropy.io.fits.setval()` function. For example, set the OBJECT keyword
# to 'M31':
fits.setval(fits_file, 'OBJECT', value='M31')
##############################################################################
# With no extra arguments, this will modify the header for extension 0, but
# this can be changed using the ``ext`` keyword argument. For example, we can
# specify extension 1 instead:
fits.setval(fits_file, 'OBJECT', value='M31', ext=1)
##############################################################################
# This can also be used to create a new keyword-value pair ("card" in FITS
# lingo):
fits.setval(fits_file, 'ANEWKEY', value='some value')
##############################################################################
# Again, this is useful for one-off modifications, but can be inefficient
# for operations like editing multiple headers in the same file
# because `~astropy.io.fits.setval()` loads the whole file each time it
# is called. To make several modifications, it's better to load the file once:
with fits.open(fits_file, 'update') as f:
for hdu in f:
hdu.header['OBJECT'] = 'CAT'
print("After modifications:")
print()
print("Extension 0:")
print(repr(fits.getheader(fits_file, 0)))
print()
print("Extension 1:")
print(repr(fits.getheader(fits_file, 1)))
|
fd7e56aed30b48c9be3e1c7d95eb537af8983306b2119be09b348a916e3068c3 | # -*- coding: utf-8 -*-
"""
=======================================
Read and plot an image from a FITS file
=======================================
This example opens an image stored in a FITS file and displays it to the screen.
This example uses `astropy.utils.data` to download the file, `astropy.io.fits` to open
the file, and `matplotlib.pyplot` to display the image.
-------------------
*By: Lia R. Corrales, Adrian Price-Whelan, Kelle Cruz*
*License: BSD*
-------------------
"""
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Download the example FITS files used by this example:
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
image_file = get_pkg_data_filename('tutorials/FITS-images/HorseHead.fits')
##############################################################################
# Use `astropy.io.fits.info()` to display the structure of the file:
fits.info(image_file)
##############################################################################
# Generally the image information is located in the Primary HDU, also known
# as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image
# data from this first extension using the keyword argument ``ext=0``:
image_data = fits.getdata(image_file, ext=0)
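##############################################################################
# As an equivalent alternative, the same data can be obtained through the
# object-oriented interface, which is preferable when several HDUs from the
# same file are needed (a minimal sketch; the copy keeps the array usable
# after the file is closed):
with fits.open(image_file) as hdul:
    image_data = hdul[0].data.copy()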
##############################################################################
# The data is now stored as a 2D numpy array. Print the dimensions using the
# shape attribute:
print(image_data.shape)
##############################################################################
# Display the image data:
plt.figure()
plt.imshow(image_data, cmap='gray')
plt.colorbar()
|
ee95409a1acf713ff32ea85cf3b7ee4c223479577cb7383eca499271b8a2b63f | # -*- coding: utf-8 -*-
"""
==========================================
Create a very large FITS file from scratch
==========================================
This example demonstrates how to create a large file (larger than will fit in
memory) from scratch using `astropy.io.fits`.
-------------------
*By: Erik Bray*
*License: BSD*
-------------------
"""
##############################################################################
# Normally to create a single image FITS file one would do something like:
import os
import numpy
from astropy.io import fits
data = numpy.zeros((40000, 40000), dtype=numpy.float64)
hdu = fits.PrimaryHDU(data=data)
##############################################################################
# Then use the `astropy.io.fits.writeto()` method to write out the new
# file to disk
hdu.writeto('large.fits')
##############################################################################
# However, a 40000 x 40000 array of doubles is nearly twelve gigabytes! Most
# systems won't be able to create that in memory just to write out to disk. In
# order to create such a large file efficiently requires a little extra work,
# and a few assumptions.
#
# First, it is helpful to anticipate roughly how many keywords the header
# will contain. FITS headers must be written in 2880-byte
# blocks, large enough for 36 keywords per block (including the END keyword in
# the final block). Typical headers have somewhere between 1 and 4 blocks,
# though sometimes more.
#
# Since the first thing we write to a FITS file is the header, we want to write
# enough header blocks so that there is plenty of padding in which to add new
# keywords without having to resize the whole file. Say you want the header to
# use 4 blocks by default. Then create the header and pad it out to
# 36 * 4 - 1 cards, leaving room for the END card that Astropy adds
# automatically.
#
# Create a stub array to initialize the HDU; its
# exact size is irrelevant, as long as it has the desired number of
# dimensions
data = numpy.zeros((100, 100), dtype=numpy.float64)
hdu = fits.PrimaryHDU(data=data)
header = hdu.header
while len(header) < (36 * 4 - 1):
header.append() # Adds a blank card to the end
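##############################################################################
# As a quick sanity check, the padded header should now serialize to exactly
# four 2880-byte blocks (``tostring()`` appends the END card):
print(len(header.tostring()))   # 4 * 2880 = 11520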
##############################################################################
# Now adjust the NAXISn keywords to the desired size of the array, and write
# only the header out to a file. Using the ``hdu.writeto()`` method will cause
# astropy to "helpfully" reset the NAXISn keywords to match the size of the
# dummy array. That is because it works hard to ensure that only valid FITS
# files are written. Instead, we can write just the header to a file using the
# `astropy.io.fits.Header.tofile` method:
header['NAXIS1'] = 40000
header['NAXIS2'] = 40000
header.tofile('large.fits', overwrite=True)
##############################################################################
# Finally, grow out the end of the file to match the length of the
# data (plus the length of the header). This can be done very efficiently on
# most systems by seeking past the end of the file and writing a single byte,
# like so:
with open('large.fits', 'rb+') as fobj:
    # Seek past the length of the header, plus the length of the
    # data we want to write.
# 8 is the number of bytes per value, i.e. abs(header['BITPIX'])/8
# (this example is assuming a 64-bit float)
# The -1 is to account for the final byte that we are about to
# write:
fobj.seek(len(header.tostring()) + (40000 * 40000 * 8) - 1)
fobj.write(b'\0')
##############################################################################
# More generally, this can be written:
shape = tuple(header['NAXIS{0}'.format(ii)] for ii in range(1, header['NAXIS']+1))
with open('large.fits', 'rb+') as fobj:
    fobj.seek(len(header.tostring()) + (numpy.prod(shape) * numpy.abs(header['BITPIX'] // 8)) - 1)
fobj.write(b'\0')
##############################################################################
# On modern operating systems this will cause the file (past the header) to be
# filled with zeros out to the ~12GB needed to hold a 40000 x 40000 image. On
# filesystems that support sparse file creation (most Linux filesystems, but not
# the HFS+ filesystem used by most Macs) this is a very fast, efficient
# operation. On other systems your mileage may vary.
#
# This isn't the only way to build up a large file, but probably one of the
# safest. This method can also be used to create large multi-extension FITS
# files, with a little care.
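##############################################################################
# As a quick check (a minimal sketch), the resulting file can be opened with
# memory mapping so the ~12GB array is never read into memory; here we only
# inspect the shape reported by the header:
with fits.open('large.fits', memmap=True) as hdul:
    print(hdul[0].shape)   # (40000, 40000)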
##############################################################################
# Finally, we'll remove the file we created:
os.remove('large.fits')
|
ea7456f9855eb9c4935d4065e10d34eb3a42f04ad2ed54e266c5ab12450c8714 | # -*- coding: utf-8 -*-
"""
=====================================================================
Accessing data stored as a table in a multi-extension FITS (MEF) file
=====================================================================
FITS files can often contain large amounts of multi-dimensional data and
tables. This example opens a FITS file with information
from Chandra's HETG-S instrument.
The example uses `astropy.utils.data` to download a multi-extension FITS (MEF)
file, `astropy.io.fits` to investigate the header, and
`astropy.table.Table` to explore the data.
-------------------
*By: Lia Corrales, Adrian Price-Whelan, and Kelle Cruz*
*License: BSD*
-------------------
"""
##############################################################################
# Use `astropy.utils.data` subpackage to download the FITS file used in this
# example. Also import `~astropy.table.Table` from the `astropy.table` subpackage
# and `astropy.io.fits`
from astropy.utils.data import get_pkg_data_filename
from astropy.table import Table
from astropy.io import fits
##############################################################################
# Download a FITS file
event_filename = get_pkg_data_filename('tutorials/FITS-tables/chandra_events.fits')
##############################################################################
# Display information about the contents of the FITS file.
fits.info(event_filename)
##############################################################################
# Extension 1, EVENTS, is a Table that contains information about each X-ray
# photon that hit Chandra's HETG-S detector.
#
# Use `~astropy.table.Table` to read the table
events = Table.read(event_filename, hdu=1)
##############################################################################
# Print the column names of the Events Table.
print(events.columns)
##############################################################################
# If a column contains unit information, it will have an associated
# `astropy.units` object.
print(events['energy'].unit)
##############################################################################
# Print the data stored in the Energy column.
print(events['energy'])
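##############################################################################
# Because ``events`` is a regular `~astropy.table.Table`, the usual table
# operations apply. For instance (a minimal sketch; the threshold below is
# arbitrary and is interpreted in the energy column's unit), the events can
# be filtered with a boolean mask:
high_energy = events[events['energy'] > 1000]
print(len(high_energy))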
|
34d826133a5a8acd7d52504aaf8b66c5d8e2b62438758251e4314e6c10f1995d | # -*- coding: utf-8 -*-
"""
=====================================================
Convert a 3-color image (JPG) to separate FITS images
=====================================================
This example opens an RGB JPEG image and writes out each channel as a separate
FITS (image) file.
This example uses `pillow <http://python-pillow.org>`_ to read the image,
`matplotlib.pyplot` to display the image, and `astropy.io.fits` to save FITS files.
-------------------
*By: Erik Bray, Adrian Price-Whelan*
*License: BSD*
-------------------
"""
import numpy as np
from PIL import Image
from astropy.io import fits
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Load and display the original 3-color jpeg image:
image = Image.open('Hs-2009-14-a-web.jpg')
xsize, ysize = image.size
print("Image size: {} x {}".format(xsize, ysize))
plt.imshow(image)
##############################################################################
# Split the three channels (RGB) and get the data as Numpy arrays. The arrays
# are flattened, so they are 1-dimensional:
r, g, b = image.split()
r_data = np.array(r.getdata()) # data is now an array of length ysize*xsize
g_data = np.array(g.getdata())
b_data = np.array(b.getdata())
print(r_data.shape)
##############################################################################
# Reshape the image arrays to be 2-dimensional:
r_data = r_data.reshape(ysize, xsize)
g_data = g_data.reshape(ysize, xsize)
b_data = b_data.reshape(ysize, xsize)
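##############################################################################
# As an equivalent alternative (a minimal sketch), the split/reshape steps
# can be avoided by converting the image to an array directly; the last axis
# then holds the R, G, B channels:
rgb = np.asarray(image)   # shape (ysize, xsize, 3)
print(rgb.shape)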
##############################################################################
# Write out the channels as separate FITS images
red = fits.PrimaryHDU(data=r_data)
red.header['LATOBS'] = "32:11:56" # add spurious header info
red.header['LONGOBS'] = "110:56"
red.writeto('red.fits')
green = fits.PrimaryHDU(data=g_data)
green.header['LATOBS'] = "32:11:56"
green.header['LONGOBS'] = "110:56"
green.writeto('green.fits')
blue = fits.PrimaryHDU(data=b_data)
blue.header['LATOBS'] = "32:11:56"
blue.header['LONGOBS'] = "110:56"
blue.writeto('blue.fits')
##############################################################################
# Delete the files created
import os
os.remove('red.fits')
os.remove('green.fits')
os.remove('blue.fits')
|
e515fb4027e7cc5b3edee6ed7969b2adf9b6949336fb2d0a452a104ab747587e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import warnings
from ..utils.exceptions import AstropyUserWarning
__all__ = ['SigmaClip', 'sigma_clip', 'sigma_clipped_stats']
class SigmaClip:
"""
Class to perform sigma clipping.
The data will be iterated over, each time rejecting points that are
discrepant by more than a specified number of standard deviations
from a center value. If the data contains invalid values (NaNs or
infs), they are automatically masked before performing the sigma
clipping.
For a functional interface to sigma clipping, see
:func:`sigma_clip`.
.. note::
`scipy.stats.sigmaclip
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
provides a subset of the functionality in this class.
Parameters
----------
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. Defaults to 3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. Defaults to `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. Defaults to `None`.
iters : int or `None`, optional
The number of iterations to perform sigma clipping, or `None` to
clip until convergence is achieved (i.e., continue until the
last iteration clips nothing). Defaults to 5.
cenfunc : callable, optional
The function used to compute the center for the clipping. Must
be a callable that takes in a masked array and outputs the
central value. Defaults to the median (`numpy.ma.median`).
stdfunc : callable, optional
The function used to compute the standard deviation about the
center. Must be a callable that takes in a masked array and
outputs a width estimator. Masked (rejected) pixels are those
where::
deviation < (-sigma_lower * stdfunc(deviation))
deviation > (sigma_upper * stdfunc(deviation))
where::
deviation = data - cenfunc(data [,axis=int])
Defaults to the standard deviation (`numpy.std`).
See Also
--------
sigma_clip
Examples
--------
This example generates random variates from a Gaussian distribution
and returns a masked array in which all points that are more than 2
sample standard deviations from the median are masked::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=2, iters=5)
>>> filtered_data = sigclip(randvar)
This example sigma clips on a similar distribution, but uses 3 sigma
relative to the sample *mean*, clips until convergence, and does not
copy the data::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=3, iters=None, cenfunc=mean)
>>> filtered_data = sigclip(randvar, copy=False)
This example sigma clips along one axis on a similar distribution
(with bad points inserted)::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> sigclip = SigmaClip(sigma=2.3)
>>> filtered_data = sigclip(data, axis=0)
Note that along the other axis, no points would be masked, as the
variance is higher.
"""
def __init__(self, sigma=3., sigma_lower=None, sigma_upper=None, iters=5,
cenfunc=np.ma.median, stdfunc=np.std):
self.sigma = sigma
self.sigma_lower = sigma_lower
self.sigma_upper = sigma_upper
self.iters = iters
self.cenfunc = cenfunc
self.stdfunc = stdfunc
def __repr__(self):
return ('SigmaClip(sigma={0}, sigma_lower={1}, sigma_upper={2}, '
'iters={3}, cenfunc={4}, stdfunc={5})'
.format(self.sigma, self.sigma_lower, self.sigma_upper,
self.iters, self.cenfunc, self.stdfunc))
def __str__(self):
lines = ['<' + self.__class__.__name__ + '>']
attrs = ['sigma', 'sigma_lower', 'sigma_upper', 'iters', 'cenfunc',
'stdfunc']
for attr in attrs:
lines.append(' {0}: {1}'.format(attr, getattr(self, attr)))
return '\n'.join(lines)
def _perform_clip(self, _filtered_data, axis=None):
"""
Perform sigma clip by comparing the data to the minimum and
maximum values (median + sig * standard deviation). Use
sigma_lower and sigma_upper to get the correct limits. Data
values less or greater than the minimum / maximum values
will have True set in the mask array.
"""
if _filtered_data.size == 0:
return _filtered_data
max_value = self.cenfunc(_filtered_data, axis=axis)
std = self.stdfunc(_filtered_data, axis=axis)
min_value = max_value - std * self.sigma_lower
max_value += std * self.sigma_upper
if axis is not None:
if axis != 0:
min_value = np.expand_dims(min_value, axis=axis)
max_value = np.expand_dims(max_value, axis=axis)
if max_value is np.ma.masked:
max_value = np.ma.MaskedArray(np.nan, mask=True)
min_value = np.ma.MaskedArray(np.nan, mask=True)
_filtered_data.mask |= _filtered_data > max_value
_filtered_data.mask |= _filtered_data < min_value
return _filtered_data
def __call__(self, data, axis=None, copy=True):
"""
Perform sigma clipping on the provided data.
Parameters
----------
data : array-like
The data to be sigma clipped.
axis : int or `None`, optional
If not `None`, clip along the given axis. For this case,
``axis`` will be passed on to ``cenfunc`` and ``stdfunc``,
which are expected to return an array with the axis
dimension removed (like the numpy functions). If `None`,
clip over all axes. Defaults to `None`.
copy : bool, optional
If `True`, the ``data`` array will be copied. If `False`,
the returned masked array data will contain the same array
as ``data``. Defaults to `True`.
Returns
-------
filtered_data : `numpy.ma.MaskedArray`
A masked array with the same shape as ``data`` input, where
the points rejected by the algorithm have been masked.
"""
if self.sigma_lower is None:
self.sigma_lower = self.sigma
if self.sigma_upper is None:
self.sigma_upper = self.sigma
if np.any(~np.isfinite(data)):
data = np.ma.masked_invalid(data)
warnings.warn('Input data contains invalid values (NaNs or '
'infs), which were automatically masked.',
AstropyUserWarning)
filtered_data = np.ma.array(data, copy=copy)
if self.iters is None:
lastrej = filtered_data.count() + 1
while filtered_data.count() != lastrej:
lastrej = filtered_data.count()
self._perform_clip(filtered_data, axis=axis)
else:
for i in range(self.iters):
self._perform_clip(filtered_data, axis=axis)
# prevent filtered_data.mask = False (scalar) if no values are clipped
if filtered_data.mask.shape == ():
# make .mask shape match .data shape
filtered_data.mask = False
return filtered_data
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, iters=5,
cenfunc=np.ma.median, stdfunc=np.std, axis=None, copy=True):
"""
Perform sigma-clipping on the provided data.
The data will be iterated over, each time rejecting points that are
discrepant by more than a specified number of standard deviations from a
center value. If the data contains invalid values (NaNs or infs),
they are automatically masked before performing the sigma clipping.
For an object-oriented interface to sigma clipping, see
    :class:`SigmaClip`.
.. note::
`scipy.stats.sigmaclip
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
provides a subset of the functionality in this function.
Parameters
----------
data : array-like
The data to be sigma clipped.
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. Defaults to 3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. Defaults to `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. Defaults to `None`.
iters : int or `None`, optional
The number of iterations to perform sigma clipping, or `None` to
clip until convergence is achieved (i.e., continue until the
last iteration clips nothing). Defaults to 5.
cenfunc : callable, optional
The function used to compute the center for the clipping. Must
be a callable that takes in a masked array and outputs the
central value. Defaults to the median (`numpy.ma.median`).
stdfunc : callable, optional
The function used to compute the standard deviation about the
center. Must be a callable that takes in a masked array and
outputs a width estimator. Masked (rejected) pixels are those
where::
deviation < (-sigma_lower * stdfunc(deviation))
deviation > (sigma_upper * stdfunc(deviation))
where::
deviation = data - cenfunc(data [,axis=int])
Defaults to the standard deviation (`numpy.std`).
axis : int or `None`, optional
If not `None`, clip along the given axis. For this case,
``axis`` will be passed on to ``cenfunc`` and ``stdfunc``, which
are expected to return an array with the axis dimension removed
(like the numpy functions). If `None`, clip over all axes.
Defaults to `None`.
copy : bool, optional
If `True`, the ``data`` array will be copied. If `False`, the
returned masked array data will contain the same array as
``data``. Defaults to `True`.
Returns
-------
filtered_data : `numpy.ma.MaskedArray`
A masked array with the same shape as ``data`` input, where the
points rejected by the algorithm have been masked.
Notes
-----
1. The routine works by calculating::
deviation = data - cenfunc(data [,axis=int])
and then setting a mask for points outside the range::
deviation < (-sigma_lower * stdfunc(deviation))
deviation > (sigma_upper * stdfunc(deviation))
It will iterate a given number of times, or until no further
data are rejected.
2. Most numpy functions deal well with masked arrays, but if one
would like to have an array with just the good (or bad) values, one
can use::
good_only = filtered_data.data[~filtered_data.mask]
bad_only = filtered_data.data[filtered_data.mask]
However, for multidimensional data, this flattens the array,
which may not be what one wants (especially if filtering was
done along an axis).
See Also
--------
SigmaClip
Examples
--------
This example generates random variates from a Gaussian distribution
and returns a masked array in which all points that are more than 2
sample standard deviations from the median are masked::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=2, iters=5)
This example sigma clips on a similar distribution, but uses 3 sigma
relative to the sample *mean*, clips until convergence, and does not
copy the data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=3, iters=None,
... cenfunc=mean, copy=False)
This example sigma clips along one axis on a similar distribution
(with bad points inserted)::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
Note that along the other axis, no points would be masked, as the
variance is higher.
"""
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, iters=iters,
cenfunc=cenfunc, stdfunc=stdfunc)
return sigclip(data, axis=axis, copy=copy)
def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0,
sigma_lower=None, sigma_upper=None, iters=5,
cenfunc=np.ma.median, stdfunc=np.std, std_ddof=0,
axis=None):
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use as the lower and upper
clipping limit. These limits are overridden by ``sigma_lower``
and ``sigma_upper``, if input. Defaults to 3.
sigma_lower : float, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is used.
Defaults to `None`.
sigma_upper : float, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is used.
Defaults to `None`.
iters : int, optional
The number of iterations to perform sigma clipping, or `None` to
clip until convergence is achieved (i.e., continue until the
last iteration clips nothing) when calculating the statistics.
Defaults to 5.
cenfunc : callable, optional
The function used to compute the center for the clipping. Must
be a callable that takes in a masked array and outputs the
central value. Defaults to the median (`numpy.ma.median`).
stdfunc : callable, optional
The function used to compute the standard deviation about the
center. Must be a callable that takes in a masked array and
outputs a width estimator. Masked (rejected) pixels are those
where::
deviation < (-sigma_lower * stdfunc(deviation))
deviation > (sigma_upper * stdfunc(deviation))
where::
deviation = data - cenfunc(data [,axis=int])
Defaults to the standard deviation (`numpy.std`).
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is zero.
axis : int or `None`, optional
If not `None`, clip along the given axis. For this case,
``axis`` will be passed on to ``cenfunc`` and ``stdfunc``, which
are expected to return an array with the axis dimension removed
(like the numpy functions). If `None`, clip over all axes.
Defaults to `None`.
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
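    Examples
    --------
    A minimal illustrative example, clipping two artificial outliers from
    otherwise Gaussian data (exact values depend on the random draw)::

        >>> import numpy as np
        >>> from astropy.stats import sigma_clipped_stats
        >>> data = np.concatenate((np.random.normal(size=1000), [1e4, -1e4]))
        >>> mean, median, std = sigma_clipped_stats(data, sigma=3.0, iters=5)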
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
data_clip = sigma_clip(data, sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, iters=iters,
cenfunc=cenfunc, stdfunc=stdfunc, axis=axis)
mean = np.ma.mean(data_clip, axis=axis)
median = np.ma.median(data_clip, axis=axis)
std = np.ma.std(data_clip, ddof=std_ddof, axis=axis)
if axis is None and np.ma.isMaskedArray(median):
# np.ma.median now always return a MaskedArray, even with one
# element. So for compatibility with previous versions of astropy,
# we keep taking the scalar value.
median = median.item()
return mean, median, std
|
e3e6eb66476d9d5040c0a4a432615db8e3acb985ad92129119ac911007683332 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Bayesian Blocks for Time Series Analysis
========================================
Dynamic programming algorithm for solving a piecewise-constant model for
various datasets. This is based on the algorithm presented in Scargle
et al 2012 [1]_. This code was ported from the astroML project [2]_.
Applications include:
- finding an optimal histogram with adaptive bin widths
- finding optimal segmentation of time series data
- detecting inflection points in the rate of event data
The primary interface to these routines is the :func:`bayesian_blocks`
function. This module provides fitness functions suitable for three types
of data:
- Irregularly-spaced event data via the :class:`Events` class
- Regularly-spaced event data via the :class:`RegularEvents` class
- Irregularly-spaced point measurements via the :class:`PointMeasures` class
For more fine-tuned control over the fitness functions used, it is possible
to define custom :class:`FitnessFunc` classes directly and use them with
the :func:`bayesian_blocks` routine.
One common application of the Bayesian Blocks algorithm is the determination
of optimal adaptive-width histogram bins. This uses the same fitness function
as for irregularly-spaced time series events. The easiest interface for
creating Bayesian Blocks histograms is the :func:`astropy.stats.histogram`
function.
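For example, an adaptive-width histogram can be computed in one line
(a minimal sketch)::

    >>> import numpy as np
    >>> from astropy import stats
    >>> data = np.random.normal(size=200)
    >>> counts, edges = stats.histogram(data, bins='blocks')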
References
----------
.. [1] http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
.. [2] http://astroML.org/ https://github.com//astroML/astroML/
"""
import warnings
import numpy as np
from inspect import signature
from ..utils.exceptions import AstropyUserWarning
# TODO: implement other fitness functions from appendix B of Scargle 2012
__all__ = ['FitnessFunc', 'Events', 'RegularEvents', 'PointMeasures',
'bayesian_blocks']
def bayesian_blocks(t, x=None, sigma=None,
fitness='events', **kwargs):
r"""Compute optimal segmentation of data with Scargle's Bayesian Blocks
This is a flexible implementation of the Bayesian Blocks algorithm
described in Scargle 2012 [1]_.
Parameters
----------
t : array_like
data times (one dimensional, length N)
x : array_like (optional)
data values
sigma : array_like or float (optional)
data errors
fitness : str or object
the fitness function to use for the model.
If a string, the following options are supported:
- 'events' : binned or unbinned event data. Arguments are ``gamma``,
which gives the slope of the prior on the number of bins, or
``ncp_prior``, which is :math:`-\ln({\tt gamma})`.
- 'regular_events' : non-overlapping events measured at multiples of a
fundamental tick rate, ``dt``, which must be specified as an
additional argument. Extra arguments are ``p0``, which gives the
false alarm probability to compute the prior, or ``gamma``, which
gives the slope of the prior on the number of bins, or ``ncp_prior``,
which is :math:`-\ln({\tt gamma})`.
- 'measures' : fitness for a measured sequence with Gaussian errors.
Extra arguments are ``p0``, which gives the false alarm probability
to compute the prior, or ``gamma``, which gives the slope of the
prior on the number of bins, or ``ncp_prior``, which is
:math:`-\ln({\tt gamma})`.
In all three cases, if more than one of ``p0``, ``gamma``, and
``ncp_prior`` is chosen, ``ncp_prior`` takes precedence over ``gamma``
which takes precedence over ``p0``.
Alternatively, the fitness parameter can be an instance of
:class:`FitnessFunc` or a subclass thereof.
**kwargs :
any additional keyword arguments will be passed to the specified
:class:`FitnessFunc` derived class.
Returns
-------
edges : ndarray
array containing the (N+1) edges defining the N bins
Examples
--------
Event data:
>>> t = np.random.normal(size=100)
>>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
Event data with repeats:
>>> t = np.random.normal(size=100)
>>> t[80:] = t[:20]
>>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
Regular event data:
>>> dt = 0.05
>>> t = dt * np.arange(1000)
>>> x = np.zeros(len(t))
>>> x[np.random.randint(0, len(t), len(t) // 10)] = 1
>>> edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
Measured point data with errors:
>>> t = 100 * np.random.random(100)
>>> x = np.exp(-0.5 * (t - 50) ** 2)
>>> sigma = 0.1
>>> x_obs = np.random.normal(x, sigma)
>>> edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
References
----------
.. [1] Scargle, J et al. (2012)
http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
See Also
--------
astropy.stats.histogram : compute a histogram using bayesian blocks
"""
FITNESS_DICT = {'events': Events,
'regular_events': RegularEvents,
'measures': PointMeasures}
fitness = FITNESS_DICT.get(fitness, fitness)
if type(fitness) is type and issubclass(fitness, FitnessFunc):
fitfunc = fitness(**kwargs)
elif isinstance(fitness, FitnessFunc):
fitfunc = fitness
else:
raise ValueError("fitness parameter not understood")
return fitfunc.fit(t, x, sigma)
class FitnessFunc:
"""Base class for bayesian blocks fitness functions
Derived classes should overload the following method:
``fitness(self, **kwargs)``:
Compute the fitness given a set of named arguments.
Arguments accepted by fitness must be among ``[T_k, N_k, a_k, b_k, c_k]``
(See [1]_ for details on the meaning of these parameters).
Additionally, other methods may be overloaded as well:
``__init__(self, **kwargs)``:
Initialize the fitness function with any parameters beyond the normal
``p0`` and ``gamma``.
``validate_input(self, t, x, sigma)``:
Enable specific checks of the input data (``t``, ``x``, ``sigma``)
to be performed prior to the fit.
``compute_ncp_prior(self, N)``: If ``ncp_prior`` is not defined explicitly,
this function is called in order to define it before fitting. This may be
calculated from ``gamma``, ``p0``, or whatever method you choose.
``p0_prior(self, N)``:
Specify the form of the prior given the false-alarm probability ``p0``
(See [1]_ for details).
For examples of implemented fitness functions, see :class:`Events`,
:class:`RegularEvents`, and :class:`PointMeasures`.
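    As a minimal sketch, a custom fitness equivalent to the unbinned-events
    case (eq. 19 of Scargle 2012) could be written as::

        class MyEvents(FitnessFunc):
            def fitness(self, N_k, T_k):
                return N_k * (np.log(N_k) - np.log(T_k))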
References
----------
.. [1] Scargle, J et al. (2012)
http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
"""
def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
self.p0 = p0
self.gamma = gamma
self.ncp_prior = ncp_prior
def validate_input(self, t, x=None, sigma=None):
"""Validate inputs to the model.
Parameters
----------
t : array_like
times of observations
x : array_like (optional)
values observed at each time
sigma : float or array_like (optional)
errors in values x
Returns
-------
t, x, sigma : array_like, float or None
validated and perhaps modified versions of inputs
"""
# validate array input
t = np.asarray(t, dtype=float)
if x is not None:
x = np.asarray(x)
if sigma is not None:
sigma = np.asarray(sigma)
# find unique values of t
t = np.array(t)
if t.ndim != 1:
raise ValueError("t must be a one-dimensional array")
unq_t, unq_ind, unq_inv = np.unique(t, return_index=True,
return_inverse=True)
# if x is not specified, x will be counts at each time
if x is None:
if sigma is not None:
raise ValueError("If sigma is specified, x must be specified")
else:
sigma = 1
if len(unq_t) == len(t):
x = np.ones_like(t)
else:
x = np.bincount(unq_inv)
t = unq_t
# if x is specified, then we need to simultaneously sort t and x
else:
# TODO: allow broadcasted x?
x = np.asarray(x)
if x.shape not in [(), (1,), (t.size,)]:
raise ValueError("x does not match shape of t")
x += np.zeros_like(t)
if len(unq_t) != len(t):
raise ValueError("Repeated values in t not supported when "
"x is specified")
t = unq_t
x = x[unq_ind]
# verify the given sigma value
if sigma is None:
sigma = 1
else:
sigma = np.asarray(sigma)
if sigma.shape not in [(), (1,), (t.size,)]:
raise ValueError('sigma does not match the shape of x')
return t, x, sigma
def fitness(self, **kwargs):
raise NotImplementedError()
def p0_prior(self, N):
"""
Empirical prior, parametrized by the false alarm probability ``p0``
See eq. 21 in Scargle (2012)
Note that there was an error in this equation in the original Scargle
paper (the "log" was missing). The following corrected form is taken
from https://arxiv.org/abs/1304.2818
"""
return 4 - np.log(73.53 * self.p0 * (N ** -0.478))
# the fitness_args property will return the list of arguments accepted by
# the method fitness(). This allows more efficient computation below.
@property
def _fitness_args(self):
return signature(self.fitness).parameters.keys()
def compute_ncp_prior(self, N):
"""
If ``ncp_prior`` is not explicitly defined, compute it from ``gamma``
or ``p0``.
"""
if self.ncp_prior is not None:
return self.ncp_prior
elif self.gamma is not None:
return -np.log(self.gamma)
elif self.p0 is not None:
return self.p0_prior(N)
else:
raise ValueError("``ncp_prior`` is not defined, and cannot compute "
"it as neither ``gamma`` nor ``p0`` is defined.")
def fit(self, t, x=None, sigma=None):
"""Fit the Bayesian Blocks model given the specified fitness function.
Parameters
----------
t : array_like
data times (one dimensional, length N)
x : array_like (optional)
data values
sigma : array_like or float (optional)
data errors
Returns
-------
edges : ndarray
array containing the (M+1) edges defining the M optimal bins
"""
t, x, sigma = self.validate_input(t, x, sigma)
# compute values needed for computation, below
if 'a_k' in self._fitness_args:
ak_raw = np.ones_like(x) / sigma ** 2
if 'b_k' in self._fitness_args:
bk_raw = x / sigma ** 2
if 'c_k' in self._fitness_args:
ck_raw = x * x / sigma ** 2
# create length-(N + 1) array of cell edges
edges = np.concatenate([t[:1],
0.5 * (t[1:] + t[:-1]),
t[-1:]])
block_length = t[-1] - edges
# arrays to store the best configuration
N = len(t)
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
# Compute ncp_prior if not defined
        if self.ncp_prior is None:
            ncp_prior = self.compute_ncp_prior(N)
        else:
            ncp_prior = self.ncp_prior
# ----------------------------------------------------------------
# Start with first data cell; add one cell at each iteration
# ----------------------------------------------------------------
for R in range(N):
# Compute fit_vec : fitness of putative last block (end at R)
kwds = {}
# T_k: width/duration of each block
if 'T_k' in self._fitness_args:
kwds['T_k'] = block_length[:R + 1] - block_length[R + 1]
# N_k: number of elements in each block
if 'N_k' in self._fitness_args:
kwds['N_k'] = np.cumsum(x[:R + 1][::-1])[::-1]
# a_k: eq. 31
if 'a_k' in self._fitness_args:
kwds['a_k'] = 0.5 * np.cumsum(ak_raw[:R + 1][::-1])[::-1]
# b_k: eq. 32
if 'b_k' in self._fitness_args:
kwds['b_k'] = - np.cumsum(bk_raw[:R + 1][::-1])[::-1]
# c_k: eq. 33
if 'c_k' in self._fitness_args:
kwds['c_k'] = 0.5 * np.cumsum(ck_raw[:R + 1][::-1])[::-1]
# evaluate fitness function
fit_vec = self.fitness(**kwds)
A_R = fit_vec - ncp_prior
A_R[1:] += best[:R]
i_max = np.argmax(A_R)
last[R] = i_max
best[R] = A_R[i_max]
# ----------------------------------------------------------------
# Now find changepoints by iteratively peeling off the last block
# ----------------------------------------------------------------
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
return edges[change_points]
class Events(FitnessFunc):
r"""Bayesian blocks fitness for binned or unbinned events
Parameters
----------
p0 : float (optional)
False alarm probability, used to compute the prior on
:math:`N_{\rm blocks}` (see eq. 21 of Scargle 2012). For the Events
type data, ``p0`` does not seem to be an accurate representation of the
actual false alarm probability. If you are using this fitness function
for a triggering type condition, it is recommended that you run
statistical trials on signal-free noise to determine an appropriate
value of ``gamma`` or ``ncp_prior`` to use for a desired false alarm
rate.
gamma : float (optional)
If specified, then use this gamma to compute the general prior form,
:math:`p \sim {\tt gamma}^{N_{\rm blocks}}`. If gamma is specified, p0
is ignored.
ncp_prior : float (optional)
If specified, use the value of ``ncp_prior`` to compute the prior as
above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt
gamma})`.
        If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are ignored.
"""
def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
if p0 is not None and gamma is None and ncp_prior is None:
warnings.warn('p0 does not seem to accurately represent the false '
'positive rate for event data. It is highly '
'recommended that you run random trials on signal-'
'free noise to calibrate ncp_prior to achieve a '
'desired false positive rate.', AstropyUserWarning)
super().__init__(p0, gamma, ncp_prior)
def fitness(self, N_k, T_k):
# eq. 19 from Scargle 2012
return N_k * (np.log(N_k) - np.log(T_k))
def validate_input(self, t, x, sigma):
t, x, sigma = super().validate_input(t, x, sigma)
if x is not None and np.any(x % 1 > 0):
raise ValueError("x must be integer counts for fitness='events'")
return t, x, sigma
class RegularEvents(FitnessFunc):
r"""Bayesian blocks fitness for regular events
This is for data which has a fundamental "tick" length, so that all
measured values are multiples of this tick length. In each tick, there
are either zero or one counts.
Parameters
----------
dt : float
tick rate for data
p0 : float (optional)
False alarm probability, used to compute the prior on :math:`N_{\rm
blocks}` (see eq. 21 of Scargle 2012). If gamma is specified, p0 is
ignored.
ncp_prior : float (optional)
If specified, use the value of ``ncp_prior`` to compute the prior as
above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt
gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are
ignored.
"""
def __init__(self, dt, p0=0.05, gamma=None, ncp_prior=None):
self.dt = dt
super().__init__(p0, gamma, ncp_prior)
def validate_input(self, t, x, sigma):
t, x, sigma = super().validate_input(t, x, sigma)
if not np.all((x == 0) | (x == 1)):
raise ValueError("Regular events must have only 0 and 1 in x")
return t, x, sigma
def fitness(self, T_k, N_k):
# Eq. 75 of Scargle 2012
M_k = T_k / self.dt
N_over_M = N_k / M_k
eps = 1E-8
if np.any(N_over_M > 1 + eps):
warnings.warn('regular events: N/M > 1. '
'Is the time step correct?', AstropyUserWarning)
one_m_NM = 1 - N_over_M
N_over_M[N_over_M <= 0] = 1
one_m_NM[one_m_NM <= 0] = 1
return N_k * np.log(N_over_M) + (M_k - N_k) * np.log(one_m_NM)
class PointMeasures(FitnessFunc):
r"""Bayesian blocks fitness for point measures
Parameters
----------
p0 : float (optional)
False alarm probability, used to compute the prior on :math:`N_{\rm
blocks}` (see eq. 21 of Scargle 2012). If gamma is specified, p0 is
ignored.
ncp_prior : float (optional)
If specified, use the value of ``ncp_prior`` to compute the prior as
above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt
gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are
ignored.
"""
def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
super().__init__(p0, gamma, ncp_prior)
def fitness(self, a_k, b_k):
# eq. 41 from Scargle 2012
return (b_k * b_k) / (4 * a_k)
def validate_input(self, t, x, sigma):
if x is None:
raise ValueError("x must be specified for point measures")
return super().validate_input(t, x, sigma)
|
57290e5210c5681afd7178c4491ad792ac1b14e257c1a1b94cc1985dcdbc47da | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains statistical tools provided for or used by Astropy.
While the `scipy.stats` package contains a wide range of statistical
tools, it is a general-purpose package, and is missing some that are
particularly useful to astronomy or are used in an atypical way in
astronomy. This package is intended to provide such functionality, but
*not* to replace `scipy.stats` if its implementation satisfies
astronomers' needs.
"""
from .funcs import *
from .biweight import *
from .sigma_clipping import *
from .jackknife import *
from .circstats import *
from .bayesian_blocks import *
from .histogram import *
from .info_theory import *
from .lombscargle import *
from .spatial import *
|
fae1ea9b444cb1ad61241a0e5ebc647c23d560c2d6f1c435ba970dc0ae9514e5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for dealing with circular statistics, for
instance, mean, variance, standard deviation, correlation coefficient, and so
on. This module also covers tests of uniformity, e.g., the Rayleigh and V tests.
The Maximum Likelihood Estimator for the Von Mises distribution along with the
Cramer-Rao Lower Bounds are also implemented. Almost all of the implementations
are based on reference [1]_, which is also the basis for the R package
'CircStats' [2]_.
"""
import numpy as np
from astropy.units import Quantity
__all__ = ['circmean', 'circvar', 'circmoment', 'circcorrcoef', 'rayleightest',
'vtest', 'vonmisesmle']
__doctest_requires__ = {'vtest': ['scipy.stats']}
def _components(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized rectangular components
# of the circular data.
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError('Weights and data have inconsistent shape.')
C = np.sum(weights * np.cos(p * (data - phi)), axis)/np.sum(weights, axis)
S = np.sum(weights * np.sin(p * (data - phi)), axis)/np.sum(weights, axis)
return C, S
def _angle(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized sample mean angle
C, S = _components(data, p, phi, axis, weights)
# theta will be an angle in the interval [-np.pi, np.pi)
# [-180, 180)*u.deg in case data is a Quantity
theta = np.arctan2(S, C)
if isinstance(data, Quantity):
theta = theta.to(data.unit)
return theta
def _length(data, p=1, phi=0.0, axis=None, weights=None):
# Utility function for computing the generalized sample length
C, S = _components(data, p, phi, axis, weights)
return np.hypot(S, C)
def circmean(data, axis=None, weights=None):
""" Computes the circular mean angle of an array of circular data.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular means are computed. The default is to compute
the mean of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22, for
detailed explanation.
Returns
-------
circmean : numpy.ndarray or Quantity
Circular mean.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmean
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmean(data) # doctest: +FLOAT_CMP
<Quantity 48.62718088722989 deg>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
return _angle(data, 1, 0.0, axis, weights)
def circvar(data, axis=None, weights=None):
""" Computes the circular variance of an array of circular data.
There are some concepts for defining measures of dispersion for circular
data. The variance implemented here is based on the definition given by
[1]_, which is also the same used by the R package 'CircStats' [2]_.
Parameters
----------
data : numpy.ndarray or dimensionless Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular variances are computed. The default is to
compute the variance of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circvar : numpy.ndarray or dimensionless Quantity
Circular variance.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circvar
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circvar(data) # doctest: +FLOAT_CMP
<Quantity 0.16356352748437508>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
Notes
-----
The definition used here differs from the one in scipy.stats.circvar.
Precisely, Scipy circvar uses an approximation based on the limit of small
angles which approaches the linear variance.
"""
return 1.0 - _length(data, 1, 0.0, axis, weights)
def circmoment(data, p=1.0, centered=False, axis=None, weights=None):
""" Computes the ``p``-th trigonometric circular moment for an array
of circular data.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
p : float, optional
Order of the circular moment.
centered : Boolean, optional
If ``True``, central circular moments are computed. Default value is
``False``.
axis : int, optional
Axis along which circular moments are computed. The default is to
compute the circular moment of the flattened array.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
circmoment : numpy.ndarray or Quantity
The first and second elements correspond to the direction and length of
the ``p``-th circular moment, respectively.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circmoment
>>> from astropy import units as u
>>> data = np.array([51, 67, 40, 109, 31, 358])*u.deg
>>> circmoment(data, p=2) # doctest: +FLOAT_CMP
(<Quantity 90.99263082432564 deg>, <Quantity 0.48004283892950717>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if centered:
phi = circmean(data, axis, weights)
else:
phi = 0.0
return _angle(data, p, phi, axis, weights), _length(data, p, phi, axis,
weights)
def circcorrcoef(alpha, beta, axis=None, weights_alpha=None,
weights_beta=None):
""" Computes the circular correlation coefficient between two array of
circular data.
Parameters
----------
alpha : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
beta : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which circular correlation coefficients are computed.
        The default is to compute the circular correlation coefficient of the
flattened array.
weights_alpha : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights_alpha``
represents a weighting factor for each group such that
``sum(weights_alpha, axis)`` equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
weights_beta : numpy.ndarray, optional
See description of ``weights_alpha``.
Returns
-------
rho : numpy.ndarray or dimensionless Quantity
Circular correlation coefficient.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import circcorrcoef
>>> from astropy import units as u
>>> alpha = np.array([356, 97, 211, 232, 343, 292, 157, 302, 335, 302,
... 324, 85, 324, 340, 157, 238, 254, 146, 232, 122,
... 329])*u.deg
>>> beta = np.array([119, 162, 221, 259, 270, 29, 97, 292, 40, 313, 94,
... 45, 47, 108, 221, 270, 119, 248, 270, 45, 23])*u.deg
>>> circcorrcoef(alpha, beta) # doctest: +FLOAT_CMP
<Quantity 0.2704648826748831>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
if(np.size(alpha, axis) != np.size(beta, axis)):
raise ValueError("alpha and beta must be arrays of the same size")
mu_a = circmean(alpha, axis, weights_alpha)
mu_b = circmean(beta, axis, weights_beta)
sin_a = np.sin(alpha - mu_a)
sin_b = np.sin(beta - mu_b)
rho = np.sum(sin_a*sin_b)/np.sqrt(np.sum(sin_a*sin_a)*np.sum(sin_b*sin_b))
return rho
def rayleightest(data, axis=None, weights=None):
""" Performs the Rayleigh test of uniformity.
This test is used to identify a non-uniform distribution, i.e. it is
    designed for detecting a unimodal deviation from uniformity. More
precisely, it assumes the following hypotheses:
- H0 (null hypothesis): The population is distributed uniformly around the
circle.
- H1 (alternative hypothesis): The population is not distributed uniformly
around the circle.
Small p-values suggest to reject the null hypothesis.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the Rayleigh test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``np.sum(weights, axis)``
equals the number of observations.
See [1]_, remark 1.4, page 22, for detailed explanation.
Returns
-------
p-value : float or dimensionless Quantity
p-value.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import rayleightest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> rayleightest(data) # doctest: +FLOAT_CMP
<Quantity 0.2563487733797317>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    .. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
.. [4] D. Wilkie. "Rayleigh Test for Randomness of Circular Data". Applied
Statistics. 1983.
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.211.4762>
"""
n = np.size(data, axis=axis)
Rbar = _length(data, 1, 0.0, axis, weights)
z = n*Rbar*Rbar
# see [3] and [4] for the formulae below
tmp = 1.0
if(n < 50):
tmp = 1.0 + (2.0*z - z*z)/(4.0*n) - (24.0*z - 132.0*z**2.0 +
76.0*z**3.0 - 9.0*z**4.0)/(288.0 *
n * n)
p_value = np.exp(-z)*tmp
return p_value
def vtest(data, mu=0.0, axis=None, weights=None):
""" Performs the Rayleigh test of uniformity where the alternative
hypothesis H1 is assumed to have a known mean angle ``mu``.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
mu : float or Quantity, optional
Mean angle. Assumed to be known.
axis : int, optional
Axis along which the V test will be performed.
weights : numpy.ndarray, optional
In case of grouped data, the i-th element of ``weights`` represents a
weighting factor for each group such that ``sum(weights, axis)``
equals the number of observations. See [1]_, remark 1.4, page 22,
for detailed explanation.
Returns
-------
p-value : float or dimensionless Quantity
p-value.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vtest
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vtest(data) # doctest: +FLOAT_CMP
<Quantity 0.6223678199713766>
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
    .. [3] M. Christman, C. Miller. "Testing a Sample of Directions for
Uniformity." Lecture Notes, STA 6934/5805. University of Florida, 2007.
"""
from scipy.stats import norm
if weights is None:
weights = np.ones((1,))
try:
weights = np.broadcast_to(weights, data.shape)
except ValueError:
raise ValueError('Weights and data have inconsistent shape.')
n = np.size(data, axis=axis)
R0bar = np.sum(weights * np.cos(data - mu), axis)/np.sum(weights, axis)
z = np.sqrt(2.0 * n) * R0bar
pz = norm.cdf(z)
fz = norm.pdf(z)
# see reference [3]
p_value = 1 - pz + fz*((3*z - z**3)/(16.0*n) +
(15*z + 305*z**3 - 125*z**5 + 9*z**7)/(4608.0*n*n))
return p_value
def _A1inv(x):
    # Approximation for _A1inv(x), following the R package 'CircStats'
# See http://www.scienceasia.org/2012.38.n1/scias38_118.pdf, equation (4)
if 0 <= x < 0.53:
return 2.0*x + x*x*x + (5.0*x**5)/6.0
elif x < 0.85:
return -0.4 + 1.39*x + 0.43/(1.0 - x)
else:
return 1.0/(x*x*x - 4.0*x*x + 3.0*x)
def vonmisesmle(data, axis=None):
""" Computes the Maximum Likelihood Estimator (MLE) for the parameters of
the von Mises distribution.
Parameters
----------
data : numpy.ndarray or Quantity
Array of circular (directional) data, which is assumed to be in
radians whenever ``data`` is ``numpy.ndarray``.
axis : int, optional
Axis along which the mle will be computed.
Returns
-------
mu : float or Quantity
the mean (aka location parameter).
kappa : float or dimensionless Quantity
the concentration parameter.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import vonmisesmle
>>> from astropy import units as u
>>> data = np.array([130, 90, 0, 145])*u.deg
>>> vonmisesmle(data) # doctest: +FLOAT_CMP
(<Quantity 101.16894320013179 deg>, <Quantity 1.49358958737054>)
References
----------
.. [1] S. R. Jammalamadaka, A. SenGupta. "Topics in Circular Statistics".
Series on Multivariate Analysis, Vol. 5, 2001.
.. [2] C. Agostinelli, U. Lund. "Circular Statistics from 'Topics in
Circular Statistics (2001)'". 2015.
<https://cran.r-project.org/web/packages/CircStats/CircStats.pdf>
"""
mu = circmean(data, axis=None)
kappa = _A1inv(np.mean(np.cos(data - mu), axis))
return mu, kappa
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements functions and classes for spatial statistics.
"""
import numpy as np
import math
class RipleysKEstimator:
"""
Estimators for Ripley's K function for two-dimensional spatial data.
See [1]_, [2]_, [3]_, [4]_, [5]_ for detailed mathematical and
practical aspects of those estimators.
Parameters
----------
area : float
Area of study from which the points were observed.
x_max, y_max : float, float, optional
Maximum rectangular coordinates of the area of study.
Required if ``mode`` is 'translation', 'ohser', 'var-width', or 'ripley'.
x_min, y_min : float, float, optional
Minimum rectangular coordinates of the area of study.
Required if ``mode`` is 'translation', 'ohser', 'var-width', or 'ripley'.
Examples
--------
>>> import numpy as np
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> from astropy.stats import RipleysKEstimator
>>> z = np.random.uniform(low=5, high=10, size=(100, 2))
>>> Kest = RipleysKEstimator(area=25, x_max=10, y_max=10,
... x_min=5, y_min=5)
>>> r = np.linspace(0, 2.5, 100)
>>> plt.plot(r, Kest.poisson(r)) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='none')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='translation')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='ohser')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='var-width')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='ripley')) # doctest: +SKIP
References
----------
.. [1] Peebles, P.J.E. *The large scale structure of the universe*.
<http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1980lssu.book.....P&db_key=AST>
.. [2] Spatial descriptive statistics.
<https://en.wikipedia.org/wiki/Spatial_descriptive_statistics>
.. [3] Package spatstat.
<https://cran.r-project.org/web/packages/spatstat/spatstat.pdf>
.. [4] Cressie, N.A.C. (1991). Statistics for Spatial Data,
Wiley, New York.
.. [5] Stoyan, D., Stoyan, H. (1992). Fractals, Random Shapes and
Point Fields, Akademie Verlag GmbH, Chichester.
"""
def __init__(self, area, x_max=None, y_max=None, x_min=None, y_min=None):
self.area = area
self.x_max = x_max
self.y_max = y_max
self.x_min = x_min
self.y_min = y_min
@property
def area(self):
return self._area
@area.setter
def area(self, value):
if isinstance(value, (float, int)) and value > 0:
self._area = value
else:
raise ValueError('area is expected to be a positive number. '
'Got {}.'.format(value))
@property
def y_max(self):
return self._y_max
@y_max.setter
def y_max(self, value):
if value is None or isinstance(value, (float, int)):
self._y_max = value
else:
raise ValueError('y_max is expected to be a real number '
'or None. Got {}.'.format(value))
@property
def x_max(self):
return self._x_max
@x_max.setter
def x_max(self, value):
if value is None or isinstance(value, (float, int)):
self._x_max = value
else:
raise ValueError('x_max is expected to be a real number '
'or None. Got {}.'.format(value))
@property
def y_min(self):
return self._y_min
@y_min.setter
def y_min(self, value):
if value is None or isinstance(value, (float, int)):
self._y_min = value
else:
raise ValueError('y_min is expected to be a real number. '
'Got {}.'.format(value))
@property
def x_min(self):
return self._x_min
@x_min.setter
def x_min(self, value):
if value is None or isinstance(value, (float, int)):
self._x_min = value
else:
raise ValueError('x_min is expected to be a real number. '
'Got {}.'.format(value))
def __call__(self, data, radii, mode='none'):
return self.evaluate(data=data, radii=radii, mode=mode)
def _pairwise_diffs(self, data):
npts = len(data)
diff = np.zeros(shape=(npts * (npts - 1) // 2, 2), dtype=np.double)
k = 0
for i in range(npts - 1):
size = npts - i - 1
diff[k:k + size] = abs(data[i] - data[i+1:])
k += size
return diff
def poisson(self, radii):
"""
Evaluates the Ripley K function for the homogeneous Poisson process,
also known as Complete Spatial Randomness (CSR).
Parameters
----------
radii : 1D array
Set of distances in which Ripley's K function will be evaluated.
Returns
-------
output : 1D array
Ripley's K function evaluated at ``radii``.
"""
return np.pi * radii * radii
def Lfunction(self, data, radii, mode='none'):
"""
Evaluates the L function at ``radii``. For parameter description
see ``evaluate`` method.
"""
return np.sqrt(self.evaluate(data, radii, mode=mode) / np.pi)
def Hfunction(self, data, radii, mode='none'):
"""
Evaluates the H function at ``radii``. For parameter description
see ``evaluate`` method.
"""
return self.Lfunction(data, radii, mode=mode) - radii
def evaluate(self, data, radii, mode='none'):
"""
Evaluates the Ripley K estimator for a given set of values ``radii``.
Parameters
----------
data : 2D array
Set of observed points as an n by 2 array which will be used to
estimate Ripley's K function.
radii : 1D array
Set of distances in which Ripley's K estimator will be evaluated.
It is common to consider max(radii) < (area/2)**0.5.
mode : str
Keyword which indicates the method for edge effects correction.
Available methods are 'none', 'translation', 'ohser', 'var-width',
and 'ripley'.
* 'none'
this method does not take into account any edge effects
whatsoever.
* 'translation'
computes the intersection of rectangular areas centered at
the given points provided the upper bounds of the
dimensions of the rectangular area of study. It assumes that
all the points lie in a bounded rectangular region satisfying
x_min < x_i < x_max; y_min < y_i < y_max. A detailed
description of this method can be found in ref [4].
* 'ohser'
this method uses the isotropized set covariance function of
the window of study as a weight to correct for
edge-effects. A detailed description of this method can be
found in ref [4].
* 'var-width'
this method considers the distance of each observed point to
the nearest boundary of the study window as a factor to
account for edge-effects. See [3] for a brief description of
this method.
* 'ripley'
this method is known as Ripley's edge-corrected estimator.
The weight for edge-correction is a function of the
proportions of circumferences centered at each data point
which crosses another data point of interest. See [3] for
a detailed description of this method.
Returns
-------
ripley : 1D array
Ripley's K function estimator evaluated at ``radii``.
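Examples
--------
A minimal sketch using arbitrary uniform random points and the
'ripley' correction; the window bounds and radii below are
illustrative only (see the class docstring for a fuller example):

>>> import numpy as np
>>> from astropy.stats import RipleysKEstimator
>>> rng = np.random.RandomState(0)
>>> pts = rng.uniform(low=0, high=1, size=(50, 2))
>>> est = RipleysKEstimator(area=1., x_max=1., y_max=1.,
...                         x_min=0., y_min=0.)
>>> k = est.evaluate(pts, np.linspace(0.05, 0.2, 4), mode='ripley')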
"""
data = np.asarray(data)
if not data.shape[1] == 2:
raise ValueError('data must be an n by 2 array, where n is the '
'number of observed points.')
npts = len(data)
ripley = np.zeros(len(radii))
if mode == 'none':
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
for r in range(len(radii)):
ripley[r] = (distances < radii[r]).sum()
ripley = self.area * 2. * ripley / (npts * (npts - 1))
# eq. 15.11 Stoyan book page 283
elif mode == 'translation':
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
intersec_area = (((self.x_max - self.x_min) - diff[:, 0]) *
((self.y_max - self.y_min) - diff[:, 1]))
for r in range(len(radii)):
dist_indicator = distances < radii[r]
ripley[r] = ((1 / intersec_area) * dist_indicator).sum()
ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
# Stoyan book page 123 and eq 15.13
elif mode == 'ohser':
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
a = self.area
b = max((self.y_max - self.y_min) / (self.x_max - self.x_min),
(self.x_max - self.x_min) / (self.y_max - self.y_min))
x = distances / math.sqrt(a / b)
u = np.sqrt((x * x - 1) * (x > 1))
v = np.sqrt((x * x - b ** 2) * (x < math.sqrt(b ** 2 + 1)) * (x > b))
c1 = np.pi - 2 * x * (1 + 1 / b) + x * x / b
c2 = 2 * np.arcsin((1 / x) * (x > 1)) - 1 / b - 2 * (x - u)
c3 = (2 * np.arcsin(((b - u * v) / (x * x))
* (x > b) * (x < math.sqrt(b ** 2 + 1)))
+ 2 * u + 2 * v / b - b - (1 + x * x) / b)
cov_func = ((a / np.pi) * (c1 * (x >= 0) * (x <= 1)
+ c2 * (x > 1) * (x <= b)
+ c3 * (b < x) * (x < math.sqrt(b ** 2 + 1))))
for r in range(len(radii)):
dist_indicator = distances < radii[r]
ripley[r] = ((1 / cov_func) * dist_indicator).sum()
ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
# Cressie book eq 8.2.20 page 616
elif mode == 'var-width':
lt_dist = np.minimum(np.minimum(self.x_max - data[:, 0], self.y_max - data[:, 1]),
np.minimum(data[:, 0] - self.x_min, data[:, 1] - self.y_min))
for r in range(len(radii)):
for i in range(npts):
for j in range(npts):
if i != j:
diff = abs(data[i] - data[j])
dist = math.sqrt((diff * diff).sum())
if dist < radii[r] < lt_dist[i]:
ripley[r] = ripley[r] + 1
lt_dist_sum = (lt_dist > radii[r]).sum()
if not lt_dist_sum == 0:
ripley[r] = ripley[r] / lt_dist_sum
ripley = self.area * ripley / npts
# Cressie book eq 8.4.22 page 640
elif mode == 'ripley':
hor_dist = np.zeros(shape=(npts * (npts - 1)) // 2,
dtype=np.double)
ver_dist = np.zeros(shape=(npts * (npts - 1)) // 2,
dtype=np.double)
for k in range(npts - 1):
min_hor_dist = min(self.x_max - data[k][0],
data[k][0] - self.x_min)
min_ver_dist = min(self.y_max - data[k][1],
data[k][1] - self.y_min)
start = (k * (2 * (npts - 1) - (k - 1))) // 2
end = ((k + 1) * (2 * (npts - 1) - k)) // 2
hor_dist[start: end] = min_hor_dist * np.ones(npts - 1 - k)
ver_dist[start: end] = min_ver_dist * np.ones(npts - 1 - k)
diff = self._pairwise_diffs(data)
dist = np.hypot(diff[:, 0], diff[:, 1])
dist_ind = dist <= np.hypot(hor_dist, ver_dist)
w1 = (1 - (np.arccos(np.minimum(ver_dist, dist) / dist) +
np.arccos(np.minimum(hor_dist, dist) / dist)) / np.pi)
w2 = (3 / 4 - 0.5 * (np.arccos(ver_dist / dist * ~dist_ind) +
np.arccos(hor_dist / dist * ~dist_ind)) / np.pi)
weight = dist_ind * w1 + ~dist_ind * w2
for r in range(len(radii)):
ripley[r] = ((dist < radii[r]) / weight).sum()
ripley = self.area * 2. * ripley / (npts * (npts - 1))
else:
raise ValueError('mode {} is not implemented.'.format(mode))
return ripley
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import itertools
import numpy as np
from warnings import warn
from ..utils.decorators import deprecated_renamed_argument
from ..utils import isiterable
__all__ = ['gaussian_fwhm_to_sigma', 'gaussian_sigma_to_fwhm',
'binom_conf_interval', 'binned_binom_proportion',
'poisson_conf_interval', 'median_absolute_deviation', 'mad_std',
'signal_to_noise_oir_ccd', 'bootstrap', 'kuiper', 'kuiper_two',
'kuiper_false_positive_probability', 'cdf_from_intervals',
'interval_overlap_length', 'histogram_intervals', 'fold_intervals']
__doctest_skip__ = ['binned_binom_proportion']
__doctest_requires__ = {'binom_conf_interval': ['scipy.special'],
'poisson_conf_interval': ['scipy.special',
'scipy.optimize',
'scipy.integrate']}
gaussian_sigma_to_fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1. / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
"""
# TODO Note scipy dependency
def binom_conf_interval(k, n, conf=0.68269, interval='wilson'):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
conf : float in [0, 1], optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : numpy.ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (N) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/N)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
CI_{\rm Wilson} = \frac{k + \kappa^2/2}{N + \kappa^2}
\pm \frac{\kappa N^{1/2}}{N + \kappa^2}
\left(\hat{\epsilon}(1 - \hat{\epsilon}) + \frac{\kappa^2}{4N}\right)^{1/2}
where :math:`\hat{\epsilon} = k / N` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, N - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = N the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, N - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{N}}
The Wald interval gives acceptable results in some limiting
cases. Particularly, when N is very large, and the true proportion
:math:`\epsilon` is not "too close" to 0 or 1. However, as the
latter is not verifiable when trying to estimate :math:`\epsilon`,
this is not very helpful. Its use is not recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson')
array([ 0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, N:
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='wilson')
array([[ 0. , 0.07921741, 0.21597328, 0.83333304],
[ 0.16666696, 0.42078276, 0.61736012, 1. ]])
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='jeffreys')
array([[ 0. , 0.0842525 , 0.21789949, 0.82788246],
[ 0.17211754, 0.42218001, 0.61753691, 1. ]])
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='flat')
array([[ 0. , 0.12139799, 0.24309021, 0.73577037],
[ 0.26422963, 0.45401727, 0.61535699, 1. ]])
In contrast, the Wald interval gives poor results for small k, N.
For k = 0 or k = N, the interval always has zero length.
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='wald')
array([[ 0. , 0.02111437, 0.18091075, 1. ],
[ 0. , 0.37888563, 0.61908925, 1. ]])
For confidence intervals approaching 1, the Wald interval for
0 < k < N can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='wald', conf=0.99)
array([[ 0. , -0.26077835, -0.16433593, 1. ],
[ 0. , 0.66077835, 0.96433593, 1. ]])
"""
if conf < 0. or conf > 1.:
raise ValueError('conf must be between 0. and 1.')
alpha = 1. - conf
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError('n must be positive')
if (k < 0).any() or (k > n).any():
raise ValueError('k must be in {0, 1, .., n}')
if interval == 'wilson' or interval == 'wald':
from scipy.special import erfinv
kappa = np.sqrt(2.) * min(erfinv(conf), 1.e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == 'wilson':
midpoint = (k + kappa ** 2 / 2.) / (n + kappa ** 2)
halflength = (kappa * np.sqrt(n)) / (n + kappa ** 2) * \
np.sqrt(p * (1 - p) + kappa ** 2 / (4 * n))
conf_interval = np.array([midpoint - halflength,
midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.] = 0.
conf_interval[conf_interval > 1.] = 1.
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1. - p) / n)
conf_interval = np.array([midpoint - halflength,
midpoint + halflength])
elif interval == 'jeffreys' or interval == 'flat':
from scipy.special import betaincinv
if interval == 'jeffreys':
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1. - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1. - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.
elif k == n:
upperbound = 1.
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError('Unrecognized interval: {0:s}'.format(interval))
return conf_interval
# TODO Note scipy dependency (needed in binom_conf_interval)
def binned_binom_proportion(x, success, bins=10, range=None, conf=0.68269,
interval='wilson'):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : list_like
Values.
success : list_like (bool)
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalars, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
conf : float in [0, 1], optional
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : numpy.ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : numpy.ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
``bin_ctr + bin_halfwidth`` give the left and right side of each bin,
respectively.
p : numpy.ndarray
Efficiency in each bin.
perr : numpy.ndarray
2-d array of shape (2, len(p)) representing the lower and upper
uncertainty on p in each bin.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError('sizes of x and success must match')
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(k, n, conf=conf, interval=interval)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
def _check_poisson_conf_inputs(sigma, background, conflevel, name):
if sigma != 1:
raise ValueError("Only sigma=1 supported for interval {0}"
.format(name))
if background != 0:
raise ValueError("background not supported for interval {0}"
.format(name))
if conflevel is not None:
raise ValueError("conflevel not supported for interval {0}"
.format(name))
def poisson_conf_interval(n, interval='root-n', sigma=1, background=0,
conflevel=None):
r"""Poisson parameter confidence interval given observed counts
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
conflevel : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : numpy.ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group [recommends][pois_eb]
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also [discusses][ErrorBars] several possibilities but
concludes that no single representation is suitable for all cases.
The suggestion has also been [floated][ac12] that error bars should be
attached to theoretical predictions instead of observed data,
which this function will not help with (but it's easy; then you
really should use the square root of the theoretical prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as [explained][pois_eb] by
the CDF working group). It also has the nice feature that
if your theory curve touches an endpoint of the interval, then your
data point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The [documentation][sherpa_gehrels] claims it is
based on a numerical approximation published in
[Gehrels 1986][gehrels86] but it does not actually appear there.
It is symmetrical, and while the upper limits
are within about 1% of those given by 'frequentist-confidence', the
lower limits can be badly wrong. The interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
[Maxwell 2011][maxw11] for further details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See [KraftBurrowsNousek][kbn1991] for further details.
These formulas implement a positive, uniform prior.
[KraftBurrowsNousek][kbn1991] discuss this choice in more detail and show
that the problem is relatively insensitive to the choice of prior.
This function has an optional dependency: either scipy or
`mpmath <http://mpmath.org/>`_ needs to be available. (Scipy only works
for N < 100.)
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(np.arange(10),
... interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(7,
... interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
>>> poisson_conf_interval(10, background=1.5, conflevel=0.95,
... interval='kraft-burrows-nousek').T
array([ 3.47894005, 16.113329533]) # doctest: +FLOAT_CMP
[pois_eb]: http://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt
[ErrorBars]: http://www.pp.rhul.ac.uk/~cowan/atlas/ErrorBars.pdf
[ac12]: http://adsabs.harvard.edu/abs/2012EPJP..127...24A
[maxw11]: http://adsabs.harvard.edu/abs/2011arXiv1102.0822M
[gehrels86]: http://adsabs.harvard.edu/abs/1986ApJ...303..336G
[sherpa_gehrels]: http://cxc.harvard.edu/sherpa4.4/statistics/#chigehrels
[kbn1991]: http://adsabs.harvard.edu/abs/1991ApJ...374..344K
"""
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == 'root-n':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n - np.sqrt(n),
n + np.sqrt(n)])
elif interval == 'root-n-0':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n - np.sqrt(n),
n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == 'pearson':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n + 0.5 - np.sqrt(n + 0.25),
n + 0.5 + np.sqrt(n + 0.25)])
elif interval == 'sherpagehrels':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75),
n + 1 + np.sqrt(n + 0.75)])
elif interval == 'frequentist-confidence':
_check_poisson_conf_inputs(1., background, conflevel, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array([0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha)])
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == 'kraft-burrows-nousek':
if conflevel is None:
raise ValueError('Set conflevel for method {0}. (sigma is '
'ignored.)'.format(interval))
conflevel = np.asanyarray(conflevel)
if np.any(conflevel <= 0) or np.any(conflevel >= 1):
raise ValueError('Conflevel must be a number between 0 and 1.')
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError('Background must be >= 0.')
conf_interval = np.vectorize(_kraft_burrows_nousek,
cache=True)(n, background, conflevel)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError("Invalid method for Poisson confidence intervals: "
"{}".format(interval))
return conf_interval
@deprecated_renamed_argument('a', 'data', '2.0')
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(data - median(data)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis along which the MADs are computed. The default (`None`) is
to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.RandomState(12345)
>>> from numpy.random import randn
>>> mad = median_absolute_deviation(rand.randn(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.65244241428454486
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 why using np.ma.median
# for normal arrays should not be done (summary: np.ma.median always
# returns a masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_invalid(data)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# broadcast the median array before subtraction
if axis is not None:
if isiterable(axis):
for ax in sorted(list(axis)):
data_median = np.expand_dims(data_median, axis=ax)
else:
data_median = np.expand_dims(data_median, axis=axis)
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis along which the robust standard deviations are computed.
The default (`None`) is to compute the robust standard deviation
of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.RandomState(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
2.0232764659422626
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(
data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix,
gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
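Examples
--------
A minimal usage sketch; the numbers below are purely illustrative and
do not correspond to any particular instrument:

>>> from astropy.stats import signal_to_noise_oir_ccd
>>> snr = signal_to_noise_oir_ccd(t=1800., source_eps=1.5, sky_eps=0.3,
...                               dark_eps=0.05, rd=5., npix=25)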
"""
signal = t * source_eps * gain
noise = np.sqrt(t * (source_eps * gain + npix *
(sky_eps * gain + dark_eps)) + npix * rd ** 2)
return signal / noise
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : numpy.ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : numpy.ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x:test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
def _scipy_kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int
Total observed count number
B : float
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires `scipy`. This implementation will cause Overflow Errors for about
N > 100 (the exact limit depends on details of how scipy was compiled).
See `~astropy.stats.mpmath_poisson_upper_limit` for an implementation that
is slower, but can deal with arbitrarily high numbers since it is based on
the `mpmath <http://mpmath.org/>`_ library.
'''
from scipy.optimize import brentq
from scipy.integrate import quad
from math import exp
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
# Create an array containing the factorials. scipy.special.factorial
# requires SciPy 0.14 (#5064) therefore this is calculated by using
# numpy.cumprod. This could be replaced by factorial again as soon as
# older SciPy are not supported anymore but the cumprod alternative
# might also be a bit faster.
factorial_n = np.ones(n.shape, dtype=np.float64)
np.cumprod(n[1:], out=factorial_n[1:])
return 1. / (exp(-B) * np.sum(np.power(B, n) / factorial_n))
# The parameters of eqn8 do not vary between calls so we can calculate the
# result once and reuse it. The same is True for the factorial of N.
# eqn7 is called hundred times so "caching" these values yields a
# significant speedup (factor 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
'''
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching s_min that has eqn7(S_max) =
eqn7(S_min) here.
'''
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int
Total observed count number
B : float
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
Requires the `mpmath <http://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
'''
from mpmath import mpf, factorial, findroot, fsum, power, exp, quad
N = mpf(N)
B = mpf(B)
CL = mpf(CL)
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1. / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
'''
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching s_min that has eqn7(S_max) =
eqn7(S_min) here.
'''
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, (N - B) / 2.)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
S_max = findroot(func, N - B, tol=1e-4)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
'''Upper limit on a poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server used the same formalism.
Parameters
----------
N : int
Total observed count number
B : float
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float
Confidence level (number between 0 and 1)
Returns
-------
S : source count limit
Notes
-----
This function has an optional dependency: either `scipy` or `mpmath
<http://mpmath.org/>`_ needs to be available. (Scipy only works for
N < 100.)
'''
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
import mpmath
HAS_MPMATH = True
except ImportError:
HAS_MPMATH = False
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError('Need mpmath package for input numbers this '
'large.')
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError('Either scipy or mpmath are required.')
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.240, p.789-790, 2004.
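Examples
--------
A minimal sketch with an illustrative score and sample size
(requires scipy):

>>> from astropy.stats import kuiper_false_positive_probability
>>> fpp = kuiper_false_positive_probability(0.5, 10.)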
"""
try:
from scipy.special import factorial, comb
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import factorial, comb
if D < 0. or D > 2.:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2. / N:
return 1. - factorial(N) * (D - 1. / N)**(N - 1)
elif D < 3. / N:
k = -(N * D - 1.) / 2.
r = np.sqrt(k**2 - (N * D - 2.) / 2.)
a, b = -k + r, -k - r
return 1. - factorial(N - 1) * (b**(N - 1.) * (1. - a) -
a**(N - 1.) * (1. - b)) / float(N)**(N - 2) * (b - a)
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.) / (2. * N) and N % 2 == 1):
def T(t):
y = D + t / float(N)
return y**(t - 3) * (y**3 * N - y**2 * t * (3. - 2. /
N) / N - t * (t - 1) * (t - 2) / float(N)**2)
s = 0.
# NOTE: the upper limit of this sum is taken from Stephens 1965
for t in range(int(np.floor(N * (1 - D))) + 1):
term = T(t) * comb(N, t) * (1 - D - t / float(N))**(N - t - 1)
s += term
return s
else:
z = D * np.sqrt(N)
S1 = 0.
term_eps = 1e-12
abs_eps = 1e-100
for m in itertools.count(1):
T1 = 2. * (4. * m**2 * z**2 - 1.) * np.exp(-2. * m**2 * z**2)
so = S1
S1 += T1
if np.abs(S1 - so) / (np.abs(S1) + np.abs(so)
) < term_eps or np.abs(S1 - so) < abs_eps:
break
S2 = 0.
for m in itertools.count(1):
T2 = m**2 * (4. * m**2 * z**2 - 3.) * np.exp(-2 * m**2 * z**2)
so = S2
S2 += T2
if np.abs(S2 - so) / (np.abs(S2) + np.abs(so)
) < term_eps or np.abs(S2 - so) < abs_eps:
break
return S1 - 8 * D / (3. * np.sqrt(N)) * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if data was
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
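Examples
--------
A minimal sketch testing uniform random data against the default
uniform CDF; the seed and sample size are arbitrary, and scipy is
required for the false positive probability:

>>> import numpy as np
>>> from astropy.stats import kuiper
>>> rng = np.random.RandomState(42)
>>> D, fpp = kuiper(rng.uniform(size=100))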
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = (np.amax(cdfv - np.arange(N) / float(N)) +
np.amax((np.arange(N) + 1) / float(N) - cdfv))
return D, kuiper_false_positive_probability(D, N)
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
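Examples
--------
A minimal sketch comparing two arbitrary samples drawn from the same
uniform distribution (requires scipy):

>>> import numpy as np
>>> from astropy.stats import kuiper_two
>>> rng = np.random.RandomState(0)
>>> D, fpp = kuiper_two(rng.uniform(size=50), rng.uniform(size=80))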
"""
data1, data2 = np.sort(data1), np.sort(data2)
if len(data2) < len(data1):
data1, data2 = data2, data1
# this could be more efficient
cdfv1 = np.searchsorted(data2, data1) / float(len(data2))
# this could be more efficient
cdfv2 = np.searchsorted(data1, data2) / float(len(data1))
D = (np.amax(cdfv1 - np.arange(len(data1)) / float(len(data1))) +
np.amax(cdfv2 - np.arange(len(data2)) / float(len(data2))))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of three-element tuples (ai,bi,wi)
The intervals to fold; ai and bi are the limits of the interval, and
wi is the weight to apply to the interval.
Returns
-------
breaks : array of floats of length N
The endpoints of a set of subintervals covering [0,1]; breaks[0] = 0 and
breaks[-1] = 1.
weights : array of floats of length N-1
The ith element is the total weight of all input intervals that contain
the subinterval (breaks[i], breaks[i+1]), counting multiplicity.
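Examples
--------
A minimal sketch: fold two overlapping weighted intervals onto (0, 1).
>>> breaks, weights = fold_intervals([(0.25, 0.75, 1.0), (0.5, 1.5, 2.0)])
>>> breaks.tolist()
[0.0, 0.25, 0.5, 0.75, 1.0]
>>> weights.tolist()
[2.0, 3.0, 3.0, 2.0]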
"""
r = []
breaks = set()
tot = 0
for (a, b, wt) in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.)
breaks.add(1.)
breaks = sorted(breaks)
breaks_map = dict([(f, i) for (i, f) in enumerate(breaks)])
totals = np.zeros(len(breaks) - 1)
totals += tot
for (a, b, wt) in r:
totals[breaks_map[a]:breaks_map[b]] += wt
return np.array(breaks), totals
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : array of floats of length N
The boundaries of successive intervals.
totals : array of floats of length N-1
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by breaks and totals.
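Examples
--------
A minimal sketch using the output of `fold_intervals`:
>>> breaks, totals = fold_intervals([(0.25, 0.75, 1.0), (0.5, 1.5, 2.0)])
>>> f = cdf_from_intervals(breaks, totals)
>>> float(f(0.0)), float(f(0.5)), float(f(1.0))
(0.0, 0.5, 1.0)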
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError(
"Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : pairs of two floats
The two intervals.
Returns
-------
l : float
The length of the overlap between the two intervals.
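Examples
--------
Two minimal checks: a partial overlap and a disjoint pair.
>>> interval_overlap_length((0.0, 0.5), (0.25, 1.0))
0.25
>>> interval_overlap_length((0.0, 0.25), (0.5, 1.0))
0.0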
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : array of floats of length N
Endpoints of the intervals in the PDF
totals : array of floats of length N-1
Probability densities in each interval
Returns
-------
h : array of floats
The average weight for each bin
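Examples
--------
A minimal sketch: average a piecewise-constant weight function over two
bins.
>>> import numpy as np
>>> breaks = np.array([0., 0.25, 0.5, 0.75, 1.])
>>> totals = np.array([2., 3., 3., 2.])
>>> histogram_intervals(2, breaks, totals).tolist()
[2.5, 2.5]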
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n,
float(j + 1) / n), (start, end))
h[j] += ol / (1. / n) * totals[i]
start = end
return h
|
2f536f287346a69180458203b7f150bd7c7607a25aab558ce1720e4ce0d18c75 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for model selection.
"""
import numpy as np
__all__ = ['bayesian_info_criterion', 'bayesian_info_criterion_lsq',
'akaike_info_criterion', 'akaike_info_criterion_lsq']
__doctest_requires__ = {'bayesian_info_criterion_lsq': ['scipy'],
'akaike_info_criterion_lsq': ['scipy']}
def bayesian_info_criterion(log_likelihood, n_params, n_samples):
r""" Computes the Bayesian Information Criterion (BIC) given the log of the
likelihood function evaluated at the estimated (or analytically derived)
parameters, the number of parameters, and the number of samples.
The BIC is usually applied to decide whether increasing the number of free
parameters (hence, increasing the model complexity) yields significantly
better fittings. The decision is in favor of the model with the lowest
BIC.
BIC is given as
.. math::
\mathrm{BIC} = k \ln(n) - 2L,
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
When comparing two models define
:math:`\Delta \mathrm{BIC} = \mathrm{BIC}_h - \mathrm{BIC}_l`, in which
:math:`\mathrm{BIC}_h` is the higher BIC, and :math:`\mathrm{BIC}_l` is
the lower BIC. The higher is :math:`\Delta \mathrm{BIC}` the stronger is
the evidence against the model with higher BIC.
The general rule of thumb is:
:math:`0 < \Delta\mathrm{BIC} \leq 2`: weak evidence that model low is
better
:math:`2 < \Delta\mathrm{BIC} \leq 6`: moderate evidence that model low is
better
:math:`6 < \Delta\mathrm{BIC} \leq 10`: strong evidence that model low is
better
:math:`\Delta\mathrm{BIC} > 10`: very strong evidence that model low is
better
For a detailed explanation, see [1]_ - [5]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Bayesian Information Criterion.
Examples
--------
The following example was originally presented in [1]_. Consider a
Gaussian model (mu, sigma) and a t-Student model (mu, sigma, delta).
In addition, assume that the t model has presented a higher likelihood.
The question that the BIC is proposed to answer is: "Is the increase in
likelihood due to larger number of parameters?"
>>> from astropy.stats.info_theory import bayesian_info_criterion
>>> lnL_g = -176.4
>>> lnL_t = -173.0
>>> n_params_g = 2
>>> n_params_t = 3
>>> n_samples = 100
>>> bic_g = bayesian_info_criterion(lnL_g, n_params_g, n_samples)
>>> bic_t = bayesian_info_criterion(lnL_t, n_params_t, n_samples)
>>> bic_g - bic_t # doctest: +FLOAT_CMP
2.1948298140119391
Therefore, there is moderate evidence that the increase in likelihood
for the t-Student model is due to its larger number of parameters.
References
----------
.. [1] Richards, D. Maximum Likelihood Estimation and the Bayesian
Information Criterion.
<https://hea-www.harvard.edu/astrostat/Stat310_0910/dr_20100323_mle.pdf>
.. [2] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [3] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [4] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [5] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
return n_params*np.log(n_samples) - 2.0*log_likelihood
def bayesian_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Bayesian Information Criterion (BIC) assuming that the
observations come from a Gaussian distribution.
In this case, BIC is given as
.. math::
\mathrm{BIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + k\ln(n)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic. See [1]_ and [2]_.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Examples
--------
Consider the simple 1-D fitting example presented in the Astropy
modeling webpage [3]_. There, two models (Box and Gaussian) were fitted to
a source flux using the least squares statistic. However, the fits
themselves do not tell us much about which model better represents this
hypothetical source. Therefore, we apply the BIC in order to decide in
favor of one model.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import bayesian_info_criterion_lsq
>>> # Generate fake data
>>> np.random.seed(0)
>>> x = np.linspace(-5., 5., 200)
>>> y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
>>> y += np.random.normal(0., 0.2, x.shape)
>>> # Fit the data using a Box model
>>> t_init = models.Trapezoid1D(amplitude=1., x_0=0., width=1., slope=0.5)
>>> fit_t = fitting.LevMarLSQFitter()
>>> t = fit_t(t_init, x, y)
>>> # Fit the data using a Gaussian
>>> g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
>>> fit_g = fitting.LevMarLSQFitter()
>>> g = fit_g(g_init, x, y)
>>> # Compute the sums of squared residuals
>>> ssr_t = np.sum((t(x) - y)*(t(x) - y))
>>> ssr_g = np.sum((g(x) - y)*(g(x) - y))
>>> # Compute the bics
>>> bic_t = bayesian_info_criterion_lsq(ssr_t, 4, x.shape[0])
>>> bic_g = bayesian_info_criterion_lsq(ssr_g, 3, x.shape[0])
>>> bic_t - bic_g # doctest: +FLOAT_CMP
30.644474706065466
Hence, there is very strong evidence that the Gaussian model represents
the data significantly better than the Box model. This is, of course,
expected since the true model is Gaussian.
References
----------
.. [1] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [3] Astropy Models and Fitting
<http://docs.astropy.org/en/stable/modeling>
"""
return bayesian_info_criterion(-0.5 * n_samples * np.log(ssr / n_samples),
n_params, n_samples)
def akaike_info_criterion(log_likelihood, n_params, n_samples):
r"""
Computes the Akaike Information Criterion (AIC).
Like the Bayesian Information Criterion, the AIC is a measure of
relative fitting quality which is used for fitting evaluation and model
selection. The decision is in favor of the model with the lowest AIC.
AIC is given as
.. math::
\mathrm{AIC} = 2(k - L)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i. e., the parameters for
which L is maximized).
In case that the sample size is not "large enough" a correction is
applied, i.e.
.. math::
\mathrm{AIC} = 2(k - L) + \dfrac{2k(k+1)}{n - k - 1}
Rule of thumb [1]_:
:math:`\Delta\mathrm{AIC}_i = \mathrm{AIC}_i - \mathrm{AIC}_{min}`
:math:`\Delta\mathrm{AIC}_i < 2`: substantial support for model i
:math:`3 < \Delta\mathrm{AIC}_i < 7`: considerably less support for model i
:math:`\Delta\mathrm{AIC}_i > 10`: essentially no support for model i
in which :math:`\mathrm{AIC}_{min}` stands for the lower AIC among the
models which are being compared.
For detailed explanations see [1]_-[6]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
The following example was originally presented in [2]_. Basically, two
models are being compared. One with six parameters (model 1) and another
with five parameters (model 2). Despite the fact that model 2 has a
lower AIC, we could decide in favor of model 1 since the difference (in
AIC) between them is only about 1.0.
>>> n_samples = 121
>>> lnL1 = -3.54
>>> n1_params = 6
>>> lnL2 = -4.17
>>> n2_params = 5
>>> aic1 = akaike_info_criterion(lnL1, n1_params, n_samples)
>>> aic2 = akaike_info_criterion(lnL2, n2_params, n_samples)
>>> aic1 - aic2 # doctest: +FLOAT_CMP
0.9551029748283746
Therefore, model 1 still has substantial support, despite the fact that
it has more free parameters.
References
----------
.. [1] Cavanaugh, J. E. Model Selection Lecture II: The Akaike
Information Criterion.
<http://machinelearning102.pbworks.com/w/file/fetch/47699383/ms_lec_2_ho.pdf>
.. [2] Mazerolle, M. J. Making sense out of Akaike's Information
Criterion (AIC): its use and interpretation in model selection and
inference from ecological data.
<http://theses.ulaval.ca/archimede/fichiers/21842/apa.html>
.. [3] Wikipedia. Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [4] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [5] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [6] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
# Correction in case of small number of observations
if n_samples/float(n_params) >= 40.0:
aic = 2.0 * (n_params - log_likelihood)
else:
aic = (2.0 * (n_params - log_likelihood) +
2.0 * n_params * (n_params + 1.0) /
(n_samples - n_params - 1.0))
return aic
def akaike_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Akaike Information Criterion assuming that the observations
are Gaussian distributed.
In this case, AIC is given as
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k
In case that the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k +
\dfrac{2k(k+1)}{n-k-1}
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., the dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
This example is based on Astropy Modeling webpage, Compound models
section.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import akaike_info_criterion_lsq
>>> np.random.seed(42)
>>> # Generate fake data
>>> g1 = models.Gaussian1D(.1, 0, 0.2) # low-amplitude component
>>> g2 = models.Gaussian1D(.1, 0.3, 0.2) # second low-amplitude component
>>> g3 = models.Gaussian1D(2.5, 0.5, 0.1)
>>> x = np.linspace(-1, 1, 200)
>>> y = g1(x) + g2(x) + g3(x) + np.random.normal(0., 0.2, x.shape)
>>> # Fit with three Gaussians
>>> g3_init = (models.Gaussian1D(.1, 0, 0.1)
... + models.Gaussian1D(.1, 0.2, 0.15)
... + models.Gaussian1D(2., .4, 0.1))
>>> fitter = fitting.LevMarLSQFitter()
>>> g3_fit = fitter(g3_init, x, y)
>>> # Fit with two Gaussians
>>> g2_init = (models.Gaussian1D(.1, 0, 0.1) +
... models.Gaussian1D(2, 0.5, 0.1))
>>> g2_fit = fitter(g2_init, x, y)
>>> # Fit with only one Gaussian
>>> g1_init = models.Gaussian1D(amplitude=2., mean=0.3, stddev=.5)
>>> g1_fit = fitter(g1_init, x, y)
>>> # Compute the sums of squared residuals
>>> ssr_g3 = np.sum((g3_fit(x) - y)**2.0)
>>> ssr_g2 = np.sum((g2_fit(x) - y)**2.0)
>>> ssr_g1 = np.sum((g1_fit(x) - y)**2.0)
>>> akaike_info_criterion_lsq(ssr_g3, 9, x.shape[0]) # doctest: +FLOAT_CMP
-660.41075962620482
>>> akaike_info_criterion_lsq(ssr_g2, 6, x.shape[0]) # doctest: +FLOAT_CMP
-662.83834510232043
>>> akaike_info_criterion_lsq(ssr_g1, 3, x.shape[0]) # doctest: +FLOAT_CMP
-647.47312032659499
Hence, from the AIC values, we would prefer the model g2_fit. However,
the model g3_fit still has considerable support, since its AIC is only
about 2.4 higher. The model g1_fit should be rejected.
References
----------
.. [1] Akaike Information Criteria
<http://avesbiodiv.mncn.csic.es/estadistica/ejemploaic.pdf>
.. [2] Hu, S. Akaike Information Criterion.
<http://www4.ncsu.edu/~shu3/Presentation/AIC.pdf>
.. [3] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
"""
return akaike_info_criterion(-0.5 * n_samples * np.log(ssr / n_samples),
n_params, n_samples)
|
225e38e7a4c0605b81d127f43f389a0447b09816acc9bce09a64bc11b17cc043 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions for computing robust statistics using
Tukey's biweight function.
"""
import numpy as np
from .funcs import median_absolute_deviation
from ..utils.decorators import deprecated_renamed_argument
__all__ = ['biweight_location', 'biweight_scale', 'biweight_midvariance',
'biweight_midcovariance', 'biweight_midcorrelation']
@deprecated_renamed_argument('a', 'data', '2.0')
def biweight_location(data, c=6.0, M=None, axis=None):
r"""
Compute the biweight location.
The biweight location is a robust statistic for determining the
central location of a distribution. It is given by:
.. math::
\zeta_{biloc}= M + \frac{\Sigma_{|u_i|<1} \ (x_i - M) (1 - u_i^2)^2}
{\Sigma_{|u_i|<1} \ (1 - u_i^2)^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input initial location guess) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight location tuning constant ``c`` is typically 6.0 (the
default).
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
c : float, optional
Tuning constant for the biweight estimator (default = 6.0).
M : float or array-like, optional
Initial guess for the location. If ``M`` is a scalar value,
then its value will be used for the entire array (or along each
``axis``, if specified). If ``M`` is an array, then it must be
an array containing the initial location estimate along each
``axis`` of the input array. If `None` (default), then the
median of the input array will be used (or along each ``axis``,
if specified).
axis : int, optional
The axis along which the biweight locations are computed. If
`None` (default), then the biweight location of the flattened
input array will be computed.
Returns
-------
biweight_location : float or `~numpy.ndarray`
The biweight location of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (http://adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwloc.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight location of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_location
>>> rand = np.random.RandomState(12345)
>>> biloc = biweight_location(rand.randn(1000))
>>> print(biloc) # doctest: +FLOAT_CMP
-0.0175741540445
"""
data = np.asanyarray(data).astype(np.float64)
if M is None:
M = np.median(data, axis=axis)
if axis is not None:
M = np.expand_dims(M, axis=axis)
# set up the differences
d = data - M
# set up the weighting
mad = median_absolute_deviation(data, axis=axis)
if axis is not None:
mad = np.expand_dims(mad, axis=axis)
u = d / (c * mad)
# now remove the outlier points
mask = (np.abs(u) >= 1)
u = (1 - u ** 2) ** 2
u[mask] = 0
return M.squeeze() + (d * u).sum(axis=axis) / u.sum(axis=axis)
def biweight_scale(data, c=9.0, M=None, axis=None, modify_sample_size=False):
r"""
Compute the biweight scale.
The biweight scale is a robust statistic for determining the
standard deviation of a distribution. It is the square root of the
`biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_.
It is given by:
.. math::
\zeta_{biscl} = \sqrt{n} \ \frac{\sqrt{\Sigma_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4}} {|(\Sigma_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))|}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
For the standard definition of biweight scale, :math:`n` is the
total number of points in the array (or along the input ``axis``, if
specified). That definition is used if ``modify_sample_size`` is
`False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \Sigma_{|u_i| < 1} \ 1
which results in a value closer to the true standard deviation for
small sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : int, optional
The axis along which the biweight scales are computed. If
`None` (default), then the biweight scale of the flattened input
array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
scale. If `True`, then the sample size is reduced to correct
for any rejected values (i.e. the sample size used includes only
the non-rejected values), which results in a value closer to the
true standard deviation for small sample sizes or for a large
number of rejected values.
Returns
-------
biweight_scale : float or `~numpy.ndarray`
The biweight scale of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_midvariance, biweight_midcovariance, biweight_location, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (http://adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwscale.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight scale of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_scale
>>> rand = np.random.RandomState(12345)
>>> biscl = biweight_scale(rand.randn(1000))
>>> print(biscl) # doctest: +FLOAT_CMP
0.986726249291
"""
return np.sqrt(
biweight_midvariance(data, c=c, M=M, axis=axis,
modify_sample_size=modify_sample_size))
@deprecated_renamed_argument('a', 'data', '2.0')
def biweight_midvariance(data, c=9.0, M=None, axis=None,
modify_sample_size=False):
r"""
Compute the biweight midvariance.
The biweight midvariance is a robust statistic for determining the
variance of a distribution. Its square root is a robust estimator
of scale (i.e. standard deviation). It is given by:
.. math::
\zeta_{bivar} = n \ \frac{\Sigma_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4} {(\Sigma_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
For the standard definition of `biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_,
:math:`n` is the total number of points in the array (or along the
input ``axis``, if specified). That definition is used if
``modify_sample_size`` is `False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \Sigma_{|u_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : int, optional
The axis along which the biweight midvariances are computed. If
`None` (default), then the biweight midvariance of the flattened
input array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midvariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true variance for small sample sizes or for a
large number of rejected values.
Returns
-------
biweight_midvariance : float or `~numpy.ndarray`
The biweight midvariance of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
See Also
--------
biweight_midcovariance, biweight_midcorrelation, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance
.. [2] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (http://adsabs.harvard.edu/abs/1990AJ....100...32B)
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight midvariance of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_midvariance
>>> rand = np.random.RandomState(12345)
>>> bivar = biweight_midvariance(rand.randn(1000))
>>> print(bivar) # doctest: +FLOAT_CMP
0.97362869104
"""
data = np.asanyarray(data).astype(np.float64)
if M is None:
M = np.median(data, axis=axis)
if axis is not None:
M = np.expand_dims(M, axis=axis)
# set up the differences
d = data - M
# set up the weighting
mad = median_absolute_deviation(data, axis=axis)
if axis is not None:
mad = np.expand_dims(mad, axis=axis)
u = d / (c * mad)
# now remove the outlier points
mask = np.abs(u) < 1
u = u ** 2
if modify_sample_size:
n = mask.sum(axis=axis)
else:
if axis is None:
n = data.size
else:
n = data.shape[axis]
f1 = d * d * (1. - u)**4
f1[~mask] = 0.
f1 = f1.sum(axis=axis)
f2 = (1. - u) * (1. - 5.*u)
f2[~mask] = 0.
f2 = np.abs(f2.sum(axis=axis))**2
return n * f1 / f2
@deprecated_renamed_argument('a', 'data', '2.0')
def biweight_midcovariance(data, c=9.0, M=None, modify_sample_size=False):
r"""
Compute the biweight midcovariance between pairs of multiple
variables.
The biweight midcovariance is a robust and resistant estimator of
the covariance between two variables.
This function computes the biweight midcovariance between all pairs
of the input variables (rows) in the input data. The output array
will have a shape of (N_variables, N_variables). The diagonal
elements will be the biweight midvariances of each input variable
(see :func:`biweight_midvariance`). The off-diagonal elements will
be the biweight midcovariances between each pair of input variables.
For example, if the input array ``data`` contains three variables
(rows) ``x``, ``y``, and ``z``, the output `~numpy.ndarray`
midcovariance matrix will be:
.. math::
\begin{pmatrix}
\zeta_{xx} & \zeta_{xy} & \zeta_{xz} \\
\zeta_{yx} & \zeta_{yy} & \zeta_{yz} \\
\zeta_{zx} & \zeta_{zy} & \zeta_{zz}
\end{pmatrix}
where :math:`\zeta_{xx}`, :math:`\zeta_{yy}`, and :math:`\zeta_{zz}`
are the biweight midvariances of each variable. The biweight
midcovariance between :math:`x` and :math:`y` is :math:`\zeta_{xy}`
(:math:`= \zeta_{yx}`). The biweight midcovariance between
:math:`x` and :math:`z` is :math:`\zeta_{xz}` (:math:`=
\zeta_{zx}`). The biweight midcovariance between :math:`y` and
:math:`z` is :math:`\zeta_{yz}` (:math:`= \zeta_{zy}`).
The biweight midcovariance between two variables :math:`x` and
:math:`y` is given by:
.. math::
\zeta_{xy} = n \ \frac{\Sigma_{|u_i| < 1, \ |v_i| < 1} \
(x_i - M_x) (1 - u_i^2)^2 (y_i - M_y) (1 - v_i^2)^2}
{(\Sigma_{|u_i| < 1} \ (1 - u_i^2) (1 - 5u_i^2))
(\Sigma_{|v_i| < 1} \ (1 - v_i^2) (1 - 5v_i^2))}
where :math:`M_x` and :math:`M_y` are the medians (or the input
locations) of the two variables and :math:`u_i` and :math:`v_i` are
given by:
.. math::
u_{i} = \frac{(x_i - M_x)}{c * MAD_x}
v_{i} = \frac{(y_i - M_y)}{c * MAD_y}
where :math:`c` is the biweight tuning constant and :math:`MAD_x`
and :math:`MAD_y` are the `median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ of the
:math:`x` and :math:`y` variables. The biweight midvariance tuning
constant ``c`` is typically 9.0 (the default).
For the standard definition of biweight midcovariance :math:`n` is
the total number of observations of each variable. That definition
is used if ``modify_sample_size`` is `False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of observations for which :math:`|u_i| < 1` and :math:`|v_i|
< 1`, i.e.
.. math::
n = \Sigma_{|u_i| < 1, \ |v_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : 2D or 1D array-like
Input data either as a 2D or 1D array. For a 2D array, it
should have a shape (N_variables, N_observations). A 1D array
may be input for observations of a single variable, in which
case the biweight midvariance will be calculated (no
covariance). Each row of ``data`` represents a variable, and
each column a single observation of all those variables (same as
the `numpy.cov` convention).
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or 1D array-like, optional
The location estimate of each variable, either as a scalar or
array. If ``M`` is an array, then it must be a 1D array
containing the location estimate of each row (i.e. ``a.ndim``
elements). If ``M`` is a scalar value, then its value will be
used for each variable (row). If `None` (default), then the
median of each variable (row) will be used.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of observations of each variable, which follows the
standard definition of biweight midcovariance. If `True`, then
the sample size is reduced to correct for any rejected values
(see formula above), which results in a value closer to the true
covariance for small sample sizes or for a large number of
rejected values.
Returns
-------
biweight_midcovariance : `~numpy.ndarray`
A 2D array representing the biweight midcovariances between each
pair of the variables (rows) in the input array. The output
array will have a shape of (N_variables, N_variables). The
diagonal elements will be the biweight midvariances of each
input variable. The off-diagonal elements will be the biweight
midcovariances between each pair of input variables.
See Also
--------
biweight_midvariance, biweight_midcorrelation, biweight_scale, biweight_location
References
----------
.. [1] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwmidc.htm
Examples
--------
Compute the biweight midcovariance between two random variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcovariance
>>> # Generate two random variables x and y
>>> rng = np.random.RandomState(1)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> # Calculate the biweight midcovariances between x and y
>>> bicov = biweight_midcovariance([x, y])
>>> print(bicov) # doctest: +FLOAT_CMP
[[ 0.82483155 -0.18961219]
[-0.18961219 9.80265764]]
>>> # Print standard deviation estimates
>>> print(np.sqrt(bicov.diagonal())) # doctest: +FLOAT_CMP
[ 0.90820237 3.13091961]
"""
data = np.asanyarray(data).astype(np.float64)
# ensure data is 2D
if data.ndim == 1:
data = data[np.newaxis, :]
if data.ndim != 2:
raise ValueError('The input array must be 2D or 1D.')
# estimate location if not given
if M is None:
M = np.median(data, axis=1)
M = np.asanyarray(M)
if M.ndim > 1:
raise ValueError('M must be a scalar or 1D array.')
# set up the differences
d = (data.T - M).T
# set up the weighting
mad = median_absolute_deviation(data, axis=1)
u = (d.T / (c * mad)).T
# now remove the outlier points
mask = np.abs(u) < 1
u = u ** 2
if modify_sample_size:
maskf = mask.astype(float)
n = np.inner(maskf, maskf)
else:
n = data[0].size
usub1 = (1. - u)
usub5 = (1. - 5. * u)
usub1[~mask] = 0.
numerator = d * usub1 ** 2
denominator = (usub1 * usub5).sum(axis=1)[:, np.newaxis]
numerator_matrix = np.dot(numerator, numerator.T)
denominator_matrix = np.dot(denominator, denominator.T)
return n * (numerator_matrix / denominator_matrix)
def biweight_midcorrelation(x, y, c=9.0, M=None, modify_sample_size=False):
r"""
Compute the biweight midcorrelation between two variables.
The `biweight midcorrelation
<https://en.wikipedia.org/wiki/Biweight_midcorrelation>`_ is a
measure of similarity between samples. It is given by:
.. math::
r_{bicorr} = \frac{\zeta_{xy}}{\sqrt{\zeta_{xx} \ \zeta_{yy}}}
where :math:`\zeta_{xx}` is the biweight midvariance of :math:`x`,
:math:`\zeta_{yy}` is the biweight midvariance of :math:`y`, and
:math:`\zeta_{xy}` is the biweight midcovariance of :math:`x` and
:math:`y`.
Parameters
----------
x, y : 1D array-like
Input arrays for the two variables. ``x`` and ``y`` must be 1D
arrays and have the same number of elements.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0). See
`biweight_midcovariance` for more details.
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified). See
`biweight_midcovariance` for more details.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midcovariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true midcovariance for small sample sizes or for a
large number of rejected values. See `biweight_midcovariance`
for more details.
Returns
-------
biweight_midcorrelation : float
The biweight midcorrelation between ``x`` and ``y``.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance, biweight_location
References
----------
.. [1] https://en.wikipedia.org/wiki/Biweight_midcorrelation
Examples
--------
Calculate the biweight midcorrelation between two variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcorrelation
>>> rng = np.random.RandomState(12345)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> bicorr = biweight_midcorrelation(x, y)
>>> print(bicorr) # doctest: +FLOAT_CMP
-0.0495780713907
"""
x = np.asanyarray(x)
y = np.asanyarray(y)
if x.ndim != 1:
raise ValueError('x must be a 1D array.')
if y.ndim != 1:
raise ValueError('y must be a 1D array.')
if x.shape != y.shape:
raise ValueError('x and y must have the same shape.')
bicorr = biweight_midcovariance([x, y], c=c, M=M,
modify_sample_size=modify_sample_size)
return bicorr[0, 1] / (np.sqrt(bicorr[0, 0] * bicorr[1, 1]))
|
7e5b44e45cab1846754f344d45f9a03e2a140c511445b28b6571316019f948b6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Methods for selecting the bin width of histograms
Ported from the astroML project: http://astroML.org/
"""
import numpy as np
from . import bayesian_blocks
__all__ = ['histogram', 'scott_bin_width', 'freedman_bin_width',
'knuth_bin_width']
def histogram(a, bins=10, range=None, weights=None, **kwargs):
"""Enhanced histogram function, providing adaptive binnings
This is a histogram function that enables the use of more sophisticated
algorithms for determining bins. Aside from the ``bins`` argument allowing
a string specified how bins are computed, the parameters are the same
as ``numpy.histogram()``.
Parameters
----------
a : array_like
array of data to be histogrammed
bins : int or list or str (optional)
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None (optional)
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
weights : array_like, optional
Not Implemented
Other keyword arguments are described in ``numpy.histogram()``.
Returns
-------
hist : array
The values of the histogram. See ``normed`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
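Examples
--------
A minimal sketch (hypothetical data) using Scott's rule to choose the bins:
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> hist, bin_edges = histogram(rng.randn(1000), bins='scott')
>>> int(hist.sum()), len(bin_edges) == len(hist) + 1
(1000, True)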
"""
# if bins is a string, first compute bin edges with the desired heuristic
if isinstance(bins, str):
a = np.asarray(a).ravel()
# TODO: if weights is specified, we need to modify things.
# e.g. we could use point measures fitness for Bayesian blocks
if weights is not None:
raise NotImplementedError("weights are not yet supported "
"for the enhanced histogram")
# if range is specified, we need to truncate the data for
# the bin-finding routines
if range is not None:
a = a[(a >= range[0]) & (a <= range[1])]
if bins == 'blocks':
bins = bayesian_blocks(a)
elif bins == 'knuth':
da, bins = knuth_bin_width(a, True)
elif bins == 'scott':
da, bins = scott_bin_width(a, True)
elif bins == 'freedman':
da, bins = freedman_bin_width(a, True)
else:
raise ValueError("unrecognized bin code: '{}'".format(bins))
# Now we call numpy's histogram with the resulting bin edges
return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs)
def scott_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using Scott's rule
Scott's rule is a normal reference rule: it minimizes the integrated
mean squared error in the bin approximation under the assumption that the
data is approximately Gaussian.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using Scott's rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{3.5\sigma}{n^{1/3}}
where :math:`\sigma` is the standard deviation of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] Scott, David W. (1979). "On optimal and data-based histograms".
Biometrika 66 (3): 605-610
See Also
--------
knuth_bin_width
freedman_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
sigma = np.std(data)
dx = 3.5 * sigma / (n ** (1 / 3))
if return_bins:
Nbins = np.ceil((data.max() - data.min()) / dx)
Nbins = max(1, Nbins)
bins = data.min() + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
def freedman_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using the Freedman-Diaconis rule
The Freedman-Diaconis rule is a normal reference rule like Scott's
rule, but uses rank-based statistics for results which are more robust
to deviations from a normal distribution.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using the Freedman-Diaconis rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}
where :math:`q_{N}` is the :math:`N`-th percentile of the data, and
:math:`n` is the number of data points [1]_.
References
----------
.. [1] D. Freedman & P. Diaconis (1981)
"On the histogram as a density estimator: L2 theory".
Probability Theory and Related Fields 57 (4): 453-476
See Also
--------
knuth_bin_width
scott_bin_width
bayesian_blocks
histogram
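Examples
--------
A minimal sketch (the bin width depends on the interquartile range of the
data):
>>> import numpy as np
>>> dx, bins = freedman_bin_width(np.arange(100.), return_bins=True)
>>> bool(dx > 0 and bins[0] == 0.0 and bins[-1] >= 99.0)
True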
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
if n < 4:
raise ValueError("data should have more than three entries")
v25, v75 = np.percentile(data, [25, 75])
dx = 2 * (v75 - v25) / (n ** (1 / 3))
if return_bins:
dmin, dmax = data.min(), data.max()
Nbins = max(1, np.ceil((dmax - dmin) / dx))
bins = dmin + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
def knuth_bin_width(data, return_bins=False, quiet=True):
r"""Return the optimal histogram bin width using Knuth's rule.
Knuth's rule is a fixed-width, Bayesian approach to determining
the optimal bin width of a histogram.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
quiet : bool (optional)
if True (default) then suppress stdout output from scipy.optimize
Returns
-------
dx : float
optimal bin width. Bins are measured starting at the first data point.
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal number of bins is the value M which maximizes the function
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`
[1]_.
References
----------
.. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
arXiv:physics/0605197, 2006
See Also
--------
freedman_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
# import here because of optional scipy dependency
from scipy import optimize
knuthF = _KnuthF(data)
dx0, bins0 = freedman_bin_width(data, True)
M = optimize.fmin(knuthF, len(bins0), disp=not quiet)[0]
bins = knuthF.bins(M)
dx = bins[1] - bins[0]
if return_bins:
return dx, bins
else:
return dx
class _KnuthF:
r"""Class which implements the function minimized by knuth_bin_width
Parameters
----------
data : array-like, one dimension
data to be histogrammed
Notes
-----
the function F is given by
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
See Also
--------
knuth_bin_width
"""
def __init__(self, data):
self.data = np.array(data, copy=True)
if self.data.ndim != 1:
raise ValueError("data should be 1-dimensional")
self.data.sort()
self.n = self.data.size
# import here rather than globally: scipy is an optional dependency.
# Note that scipy is imported in the function which calls this,
# so there shouldn't be any issue importing here.
from scipy import special
# create a reference to gammaln to use in self.eval()
self.gammaln = special.gammaln
def bins(self, M):
"""Return the bin edges given a width dx"""
return np.linspace(self.data[0], self.data[-1], int(M) + 1)
def __call__(self, M):
return self.eval(M)
def eval(self, M):
"""Evaluate the Knuth function
Parameters
----------
M : int
Number of bins
Returns
-------
F : float
evaluation of the negative Knuth likelihood function:
smaller values indicate a better fit.
"""
M = int(M)
if M <= 0:
return np.inf
bins = self.bins(M)
nk, bins = np.histogram(self.data, bins)
return -(self.n * np.log(M) +
self.gammaln(0.5 * M) -
M * self.gammaln(0.5) -
self.gammaln(self.n + 0.5 * M) +
np.sum(self.gammaln(nk + 0.5)))
|
88ce92a9dfeda529e18ce5a7734e783744571da83453bfaac208dc7e775ddba6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__all__ = ['jackknife_resampling', 'jackknife_stats']
__doctest_requires__ = {'jackknife_stats': ['scipy.special']}
def jackknife_resampling(data):
""" Performs jackknife resampling on numpy arrays.
Jackknife resampling is a technique to generate 'n' deterministic samples
of size 'n-1' from a measured sample of size 'n'. The i-th sample
(1<=i<=n) is generated by removing the i-th measurement from the original
sample. Like bootstrap resampling, this statistical
technique finds applications in estimating variance, bias, and confidence
intervals.
Parameters
----------
data : numpy.ndarray
Original sample (1-D array) from which the jackknife resamples will be
generated.
Returns
-------
resamples : numpy.ndarray
The i-th row is the i-th jackknife sample, i.e., the original sample
with the i-th measurement deleted.
References
----------
.. [1] McIntosh, Avery. "The Jackknife Estimation Method".
<http://people.bu.edu/aimcinto/jackknife.pdf>
.. [2] Efron, Bradley. "The Jackknife, the Bootstrap, and other
Resampling Plans". Technical Report No. 63, Division of Biostatistics,
Stanford University, December, 1980.
.. [3] Jackknife resampling <https://en.wikipedia.org/wiki/Jackknife_resampling>
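Examples
--------
A minimal sketch with three measurements:
>>> import numpy as np
>>> jackknife_resampling(np.array([1., 2., 3.])).tolist()
[[2.0, 3.0], [1.0, 3.0], [1.0, 2.0]]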
"""
n = data.shape[0]
if n <= 0:
raise ValueError("data must contain at least one measurement.")
resamples = np.empty([n, n-1])
for i in range(n):
resamples[i] = np.delete(data, i)
return resamples
def jackknife_stats(data, statistic, conf_lvl=0.95):
""" Performs jackknife estimation on the basis of jackknife resamples.
This function requires `SciPy <https://www.scipy.org/>`_ to be installed.
Parameters
----------
data : numpy.ndarray
Original sample (1-D array).
statistic : function
Any function (or vector of functions) on the basis of the measured
data, e.g, sample mean, sample variance, etc. The jackknife estimate of
this statistic will be returned.
conf_lvl : float, optional
Confidence level for the confidence interval of the Jackknife estimate.
Must be a real-valued number in (0,1). Default value is 0.95.
Returns
-------
estimate : numpy.float64 or numpy.ndarray
The i-th element is the bias-corrected "jackknifed" estimate.
bias : numpy.float64 or numpy.ndarray
The i-th element is the jackknife bias.
std_err : numpy.float64 or numpy.ndarray
The i-th element is the jackknife standard error.
conf_interval : numpy.ndarray
If ``statistic`` is single-valued, the first and second elements are
the lower and upper bounds, respectively. If ``statistic`` is
vector-valued, each column corresponds to the confidence interval for
each component of ``statistic``. The first and second rows contain the
lower and upper bounds, respectively.
Examples
--------
1. Obtain Jackknife resamples:
>>> import numpy as np
>>> from astropy.stats import jackknife_resampling
>>> from astropy.stats import jackknife_stats
>>> data = np.array([1,2,3,4,5,6,7,8,9,0])
>>> resamples = jackknife_resampling(data)
>>> resamples
array([[ 2., 3., 4., 5., 6., 7., 8., 9., 0.],
[ 1., 3., 4., 5., 6., 7., 8., 9., 0.],
[ 1., 2., 4., 5., 6., 7., 8., 9., 0.],
[ 1., 2., 3., 5., 6., 7., 8., 9., 0.],
[ 1., 2., 3., 4., 6., 7., 8., 9., 0.],
[ 1., 2., 3., 4., 5., 7., 8., 9., 0.],
[ 1., 2., 3., 4., 5., 6., 8., 9., 0.],
[ 1., 2., 3., 4., 5., 6., 7., 9., 0.],
[ 1., 2., 3., 4., 5., 6., 7., 8., 0.],
[ 1., 2., 3., 4., 5., 6., 7., 8., 9.]])
>>> resamples.shape
(10, 9)
2. Obtain Jackknife estimate for the mean, its bias, its standard error,
and its 95% confidence interval:
>>> test_statistic = np.mean
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
4.5
>>> bias
0.0
>>> stderr
0.95742710775633832
>>> conf_interval
array([ 2.62347735, 6.37652265])
3. Example for two estimates
>>> test_statistic = lambda x: (np.mean(x), np.var(x))
>>> estimate, bias, stderr, conf_interval = jackknife_stats(
... data, test_statistic, 0.95)
>>> estimate
array([ 4.5 , 9.16666667])
>>> bias
array([ 0. , -0.91666667])
>>> stderr
array([ 0.95742711, 2.69124476])
>>> conf_interval
array([[ 2.62347735, 3.89192387],
[ 6.37652265, 14.44140947]])
IMPORTANT: Note that confidence intervals are given as columns
"""
from scipy.special import erfinv
# make sure original data is proper
n = data.shape[0]
if n <= 0:
raise ValueError("data must contain at least one measurement.")
resamples = jackknife_resampling(data)
stat_data = statistic(data)
jack_stat = np.apply_along_axis(statistic, 1, resamples)
mean_jack_stat = np.mean(jack_stat, axis=0)
# jackknife bias
bias = (n-1)*(mean_jack_stat - stat_data)
# jackknife standard error
std_err = np.sqrt((n-1)*np.mean((jack_stat - mean_jack_stat)*(jack_stat -
mean_jack_stat), axis=0))
# bias-corrected "jackknifed estimate"
estimate = stat_data - bias
# jackknife confidence interval
if not (0 < conf_lvl < 1):
raise ValueError("confidence level must be in (0, 1).")
z_score = np.sqrt(2.0)*erfinv(conf_lvl)
conf_interval = estimate + z_score*np.array((-std_err, std_err))
return estimate, bias, std_err, conf_interval
|
cffd4a1b35b001b5c3ae01cdc646de113f584b95f037072d72af99e904742534 | """
Table property for providing information about table.
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import os
import numpy as np
from ..utils.data_info import DataInfo
__all__ = ['table_info', 'TableInfo']
def table_info(tbl, option='attributes', out=''):
"""
Write summary information about the table to the ``out`` filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: basic column meta data like ``dtype`` or ``format``
- ``stats``: basic statistics: minimum, mean, and maximum
If a function is specified then that function will be called with the
column as its single argument. The function must return an OrderedDict
containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table.table_helpers import simple_table
>>> t = simple_table(size=2, kinds='if')
>>> t['a'].unit = 'm'
>>> t.info()
<Table length=2>
name dtype unit
---- ------- ----
a int64 m
b float64
>>> t.info('stats')
<Table length=2>
name mean std min max
---- ---- --- --- ---
a 1.5 0.5 1 2
b 1.5 0.5 1.0 2.0
Parameters
----------
option : str, function, list of (str or function)
Info option, defaults to 'attributes'.
out : file-like object, None
Output destination, default is sys.stdout. If None then a
Table with information attributes is returned
Returns
-------
info : `~astropy.table.Table` if out==None else None
"""
from .table import Table
if out == '':
out = sys.stdout
descr_vals = [tbl.__class__.__name__]
if tbl.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(tbl)))
outlines = ['<' + ' '.join(descr_vals) + '>']
cols = tbl.columns.values()
if tbl.colnames:
infos = []
for col in cols:
infos.append(col.info(option, out=None))
info = Table(infos, names=list(infos[0]))
else:
info = Table()
if out is None:
return info
# Since info is going to a filehandle for viewing, remove uninteresting
# columns.
if 'class' in info.colnames:
# Remove 'class' info column if all table columns are the same class
# and they are the default column class for that table.
uniq_types = set(type(col) for col in cols)
if len(uniq_types) == 1 and isinstance(cols[0], tbl.ColumnClass):
del info['class']
if 'n_bad' in info.colnames and np.all(info['n_bad'] == 0):
del info['n_bad']
# Standard attributes include 'length' but this is typically redundant
if 'length' in info.colnames and np.all(info['length'] == len(tbl)):
del info['length']
for name in info.colnames:
if info[name].dtype.kind in 'SU' and np.all(info[name] == ''):
del info[name]
if tbl.colnames:
outlines.extend(info.pformat(max_width=-1, max_lines=-1, show_unit=False))
else:
outlines.append('<No columns>')
out.writelines(outline + os.linesep for outline in outlines)
class TableInfo(DataInfo):
_parent = None
def __call__(self, option='attributes', out=''):
return table_info(self._parent, option, out)
__call__.__doc__ = table_info.__doc__
|
cd641a6b0ca9fb2f562b0057f35b27b9b2c1c0062af86e376130f3ad56a41da8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The Index class can use several implementations as its
engine. Any implementation should implement the following:
__init__(data, row_index) : initialize index based on key/row list pairs
add(key, row) -> None : add (key, row) to existing data
remove(key, data=None) -> boolean : remove data from self[key], or all of
self[key] if data is None
shift_left(row) -> None : decrement row numbers after row
shift_right(row) -> None : increase row numbers >= row
find(key) -> list : list of rows corresponding to key
range(lower, upper, bounds) -> list : rows in self[k] where k is between
lower and upper (<= or < based on bounds)
sort() -> None : make row order align with key order
sorted_data() -> list of rows in sorted order (by key)
replace_rows(row_map) -> None : replace row numbers based on slice
items() -> list of tuples of the form (key, data)
Notes
-----
When a Table is initialized from another Table, indices are
(deep) copied and their columns are set to the columns of the new Table.
Column creation:
Column(c) -> deep copy of indices
c[[1, 2]] -> deep copy and reordering of indices
c[1:2] -> reference
array.view(Column) -> no indices
"""
from copy import deepcopy
import numpy as np
from .bst import MinValue, MaxValue
from .sorted_array import SortedArray
from ..time import Time
class QueryError(ValueError):
'''
Indicates that a given index cannot handle the supplied query.
'''
pass
class Index:
'''
The Index class makes it possible to maintain indices
on columns of a Table, so that column values can be queried
quickly and efficiently. Column values are stored in lexicographic
sorted order, which allows for binary searching in O(log n).
Parameters
----------
columns : list or None
List of columns on which to create an index. If None,
create an empty index for purposes of deep copying.
engine : type, instance, or None
Indexing engine class to use (from among SortedArray, BST,
FastBST, and FastRBT) or actual engine instance.
If the supplied argument is None (by default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
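Examples
--------
A minimal sketch (indices are normally created via ``Table.add_index``
rather than by instantiating this class directly); ``Table.loc`` then uses
the primary index for key-based row access.
>>> from astropy.table import Table
>>> t = Table({'a': [2, 1, 3], 'b': ['x', 'y', 'z']})
>>> t.add_index('a')
>>> print(t.loc[1]['b'])
y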
'''
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
# If (and only if) unpickling for protocol >= 2, then args and kwargs
# are both empty. The class __init__ requires at least the `columns`
# arg. In this case return a bare `Index` object which is then morphed
# by the unpickling magic into the correct SlicedIndex object.
if not args and not kwargs:
return self
self.__init__(*args, **kwargs)
return SlicedIndex(self, slice(0, 0, None), original=True)
def __init__(self, columns, engine=None, unique=False):
from .table import Table, Column
if engine is not None and not isinstance(engine, type):
# create from data
self.engine = engine.__class__
self.data = engine
self.columns = columns
return
# by default, use SortedArray
self.engine = engine or SortedArray
if columns is None: # this creates a special exception for deep copying
columns = []
data = []
row_index = []
elif len(columns) == 0:
raise ValueError("Cannot create index without at least one column")
elif len(columns) == 1:
col = columns[0]
row_index = Column(col.argsort())
data = Table([col[row_index]])
else:
num_rows = len(columns[0])
# replace Time columns with approximate form and remainder
new_columns = []
for col in columns:
if isinstance(col, Time):
new_columns.append(col.jd)
remainder = col - col.__class__(col.jd, format='jd')
new_columns.append(remainder.jd)
else:
new_columns.append(col)
# sort the table lexicographically and keep row numbers
table = Table(columns + [np.arange(num_rows)], copy_indices=False)
sort_columns = new_columns[::-1]
try:
lines = table[np.lexsort(sort_columns)]
except TypeError: # arbitrary mixins might not work with lexsort
lines = table[table.argsort()]
data = lines[lines.colnames[:-1]]
row_index = lines[lines.colnames[-1]]
self.data = self.engine(data, row_index, unique=unique)
self.columns = columns
def __len__(self):
'''
Number of rows in index.
'''
return len(self.columns[0])
def replace_col(self, prev_col, new_col):
'''
Replace an indexed column with an updated reference.
Parameters
----------
prev_col : Column
Column reference to replace
new_col : Column
New column reference
'''
self.columns[self.col_position(prev_col.info.name)] = new_col
def reload(self):
'''
Recreate the index based on data in self.columns.
'''
self.__init__(self.columns, engine=self.engine)
def col_position(self, col_name):
'''
Return the position of col_name in self.columns.
Parameters
----------
col_name : str
Name of column to look up
'''
for i, c in enumerate(self.columns):
if c.info.name == col_name:
return i
raise ValueError("Column does not belong to index: {0}".format(col_name))
def insert_row(self, pos, vals, columns):
'''
Insert a new row from the given values.
Parameters
----------
pos : int
Position at which to insert row
vals : list or tuple
List of values to insert into a new row
columns : list
Table column references
'''
key = [None] * len(self.columns)
for i, col in enumerate(columns):
try:
key[i] = vals[self.col_position(col.info.name)]
except ValueError: # not a member of index
continue
num_rows = len(self.columns[0])
if pos < num_rows:
# shift all rows >= pos to the right
self.data.shift_right(pos)
self.data.add(tuple(key), pos)
def get_row_specifier(self, row_specifier):
'''
Return an iterable corresponding to the
input row specifier.
Parameters
----------
row_specifier : int, list, ndarray, or slice
'''
if isinstance(row_specifier, (int, np.integer)):
# single row
return (row_specifier,)
elif isinstance(row_specifier, (list, np.ndarray)):
return row_specifier
elif isinstance(row_specifier, slice):
col_len = len(self.columns[0])
return range(*row_specifier.indices(col_len))
raise ValueError("Expected int, array of ints, or slice but "
"got {0} in remove_rows".format(row_specifier))
def remove_rows(self, row_specifier):
'''
Remove the given rows from the index.
Parameters
----------
row_specifier : int, list, ndarray, or slice
Indicates which row(s) to remove
'''
rows = []
# To maintain the correct row order, we loop twice,
# deleting rows first and then reordering the remaining rows
for row in self.get_row_specifier(row_specifier):
self.remove_row(row, reorder=False)
rows.append(row)
# second pass - row order is reversed to maintain
# correct row numbers
for row in reversed(sorted(rows)):
self.data.shift_left(row)
def remove_row(self, row, reorder=True):
'''
Remove the given row from the index.
Parameters
----------
row : int
Position of row to remove
reorder : bool
Whether to reorder indices after removal
'''
# for removal, form a key consisting of column values in this row
if not self.data.remove(tuple([col[row] for col in self.columns]), row):
raise ValueError("Could not remove row {0} from index".format(row))
# decrement the row number of all later rows
if reorder:
self.data.shift_left(row)
def find(self, key):
'''
Return the row values corresponding to key, in sorted order.
Parameters
----------
key : tuple
Values to search for in each column
'''
return self.data.find(key)
def same_prefix(self, key):
'''
Return rows whose keys contain the supplied key as a prefix.
Parameters
----------
key : tuple
Prefix for which to search
'''
return self.same_prefix_range(key, key, (True, True))
def same_prefix_range(self, lower, upper, bounds=(True, True)):
'''
Return rows whose keys have a prefix in the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
n = len(lower)
ncols = len(self.columns)
a = MinValue() if bounds[0] else MaxValue()
b = MaxValue() if bounds[1] else MinValue()
# [x, y] search corresponds to [(x, min), (y, max)]
# (x, y) search corresponds to ((x, max), (y, min))
lower = lower + tuple((ncols - n) * [a])
upper = upper + tuple((ncols - n) * [b])
return self.data.range(lower, upper, bounds)
def range(self, lower, upper, bounds=(True, True)):
'''
Return rows within the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
return self.data.range(lower, upper, bounds)
def replace(self, row, col_name, val):
'''
Replace the value of a column at a given position.
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
'''
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
'''
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
col_slice : list
Indices to slice
'''
row_map = dict((row, i) for i, row in enumerate(col_slice))
self.data.replace_rows(row_map)
def sort(self):
'''
Make row numbers follow the same sort order as the keys
of the index.
'''
self.data.sort()
def sorted_data(self):
'''
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
'''
return self.data.sorted_data()
def __getitem__(self, item):
'''
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
'''
return SlicedIndex(self, item)
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self)
def __deepcopy__(self, memo):
'''
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
'''
# Bypass Index.__new__ to create an actual Index, not a SlicedIndex.
index = super().__new__(self.__class__)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
class SlicedIndex:
'''
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
'''
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
else: # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
@property
def length(self):
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
'''
The stopping position of the slice, or the end of the
index if this is an original slice.
'''
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
'''
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
'''
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
'''
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
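For example, a slice with ``start=2`` and ``step=3`` maps the original
rows ``[2, 5, 8]`` to the sliced rows ``[0, 1, 2]``.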
'''
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
'''
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
'''
return row if self.original else self.start + row * self.step
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.copy().insert_row(self.orig_coords(pos), vals,
columns)
def get_row_specifier(self, row_specifier):
return [self.orig_coords(x) for x in
self.index.get_row_specifier(row_specifier)]
def remove_rows(self, row_specifier):
if not self._frozen:
self.copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.copy().sort()
def __repr__(self):
if self.original:
return repr(self.index)
return 'Index slice {0} of\n{1}'.format(
(self.start, self.stop, self.step), self.index)
def __str__(self):
return repr(self)
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
'''
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
'''
from .table import Table
if len(self.columns) == 1:
return Index([col_slice], engine=self.data.__class__)
t = Table(self.columns, copy_indices=False)
with t.index_mode('discard_on_copy'):
new_cols = t[item].columns.values()
return Index(new_cols, engine=self.data.__class__)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
def get_index(table, table_copy):
'''
Given a table and a subset of its columns (``table_copy``), return the
index corresponding to that column subset, or None if no such index
exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`
Subset of the columns in the table argument
'''
cols = set(table_copy.columns)
indices = set()
for column in cols:
for index in table[column].info.indices:
if set([x.info.name for x in index.columns]) == cols:
return index
return None
class _IndexModeContext:
'''
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
'''
_col_subclasses = {}
def __init__(self, table, mode):
'''
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
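A brief usage sketch (the context is normally entered through
``Table.index_mode``)::

    with table.index_mode('freeze'):
        table['a'][:] = 0    # indices are not updated here
    # on exit, the indices rebuild themselves from the column values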
'''
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ('freeze', 'discard_on_copy', 'copy_on_getitem'):
raise ValueError("Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{0}'".format(mode))
def __enter__(self):
if self.mode == 'discard_on_copy':
self.table._copy_indices = False
elif self.mode == 'copy_on_getitem':
for col in self.table.columns.values():
self._orig_classes.append(col.__class__)
col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
else:
for index in self.table.indices:
index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == 'discard_on_copy':
self.table._copy_indices = True
elif self.mode == 'copy_on_getitem':
for col in reversed(self.table.columns.values()):
col.__class__ = self._orig_classes.pop()
else:
for index in self.table.indices:
index._frozen = False
index.reload()
def _get_copy_on_getitem_shim(self, cls):
"""
This creates a subclass of the column's class which overrides that
class's ``__getitem__``, such that when returning a slice of the
column, the relevant indices are also copied over to the slice.
Ideally, rather than shimming in a new ``__class__`` we would be able
to just flip a flag that is checked by the base class's
``__getitem__``. Unfortunately, since the flag needs to be a Python
variable, this slows down ``__getitem__`` too much in the more common
case where a copy of the indices is not needed. See the docstring for
``astropy.table._column_mixins`` for more information on that.
"""
if cls in self._col_subclasses:
return self._col_subclasses[cls]
def __getitem__(self, item):
value = cls.__getitem__(self, item)
if type(value) is type(self):
value = self.info.slice_indices(value, item, len(self))
return value
clsname = '_{0}WithIndexCopy'.format(cls.__name__)
new_cls = type(str(clsname), (cls,), {'__getitem__': __getitem__})
self._col_subclasses[cls] = new_cls
return new_cls
class TableIndices(list):
'''
A special list of table indices allowing
for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
'''
def __init__(self, lst):
super().__init__(lst)
def __getitem__(self, item):
'''
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
'''
if isinstance(item, str):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError("No index found for {0}".format(item))
return super().__getitem__(item)
class TableLoc:
"""
A pseudo-list of Table rows allowing for retrieval
of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
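A brief usage sketch, assuming table ``t`` has an index on column ``'a'``::

    t.add_index('a')
    t.loc[3]          # row(s) where a == 3
    t.loc[2:4]        # rows where 2 <= a <= 4 (endpoints included)
    t.loc['a', 3]     # same lookup, naming the index explicitly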
"""
def __init__(self, table):
self.table = table
self.indices = table.indices
if len(self.indices) == 0:
raise ValueError("Cannot create TableLoc object with no indices")
def _get_rows(self, item):
"""
Retrieve the row indices matching the given key(s) or value slice.
"""
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
if len(index.columns) > 1:
raise ValueError("Cannot use .loc on multi-column indices")
if isinstance(item, slice):
# None signifies no upper/lower bound
start = MinValue() if item.start is None else item.start
stop = MaxValue() if item.stop is None else item.stop
rows = index.range((start,), (stop,))
else:
if not isinstance(item, (list, np.ndarray)): # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for key in item:
p = index.find((key,))
if len(p) == 0:
raise KeyError('No matches found for key {0}'.format(key))
else:
rows.extend(p)
return rows
def __getitem__(self, item):
"""
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(item))
elif len(rows) == 1: # single row
return self.table[rows[0]]
return self.table[rows]
def __setitem__(self, key, value):
"""
Assign Table rows by value slice.
Parameters
----------
key : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
value : New values of the row elements.
Can be a list of tuples/lists when updating multiple matched rows.
"""
rows = self._get_rows(key)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(key))
elif len(rows) == 1: # single row
self.table[rows[0]] = value
else: # multiple rows
if len(rows) == len(value):
for row, val in zip(rows, value):
self.table[row] = val
else:
raise ValueError('Right side should contain {0} values'.format(len(rows)))
class TableLocIndices(TableLoc):
def __getitem__(self, item):
"""
Retrieve Table row indices by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(item))
elif len(rows) == 1: # single row
return rows[0]
return rows
class TableILoc(TableLoc):
'''
A variant of TableLoc allowing for row retrieval by
indexed order rather than data values.
Parameters
----------
table : Table
Indexed table to use
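A brief usage sketch, assuming table ``t`` has an index on column ``'a'``::

    t.add_index('a')
    t.iloc[0]       # row with the smallest value of 'a'
    t.iloc[-2:]     # the two rows with the largest values of 'a'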
'''
def __init__(self, table):
super().__init__(table)
def __getitem__(self, item):
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
rows = index.sorted_data()[item]
table_slice = self.table[rows]
if len(table_slice) == 0: # no matches found
raise IndexError('Invalid index for iloc: {0}'.format(item))
return table_slice
|
9533b12b8825b2fd259b8be28d2f415e8e310c9dedc59e3603a852ee6343df2d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from os.path import abspath, dirname, join
from .table import Table
from ..io import registry as io_registry
from .. import config as _config
from .. import extern
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.table.jsviewer`.
"""
jquery_url = _config.ConfigItem(
'https://code.jquery.com/jquery-3.1.1.min.js',
'The URL to the jquery library.')
datatables_url = _config.ConfigItem(
'https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
'The URL to the jquery datatables library.')
css_urls = _config.ConfigItem(
['https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css'],
'The URLs to the css file(s) to include.', cfgtype='list')
conf = Conf()
EXTERN_JS_DIR = abspath(join(dirname(extern.__file__), 'js'))
EXTERN_CSS_DIR = abspath(join(dirname(extern.__file__), 'css'))
_SORTING_SCRIPT_PART_1 = """
var astropy_sort_num = function(a, b) {{
var a_num = parseFloat(a);
var b_num = parseFloat(b);
if (isNaN(a_num) && isNaN(b_num))
return ((a < b) ? -1 : ((a > b) ? 1 : 0));
else if (!isNaN(a_num) && !isNaN(b_num))
return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0));
else
return isNaN(a_num) ? -1 : 1;
}}
"""
_SORTING_SCRIPT_PART_2 = """
jQuery.extend( jQuery.fn.dataTableExt.oSort, {{
"optionalnum-asc": astropy_sort_num,
"optionalnum-desc": function (a,b) {{ return -astropy_sort_num(a, b); }}
}});
"""
IPYNB_JS_SCRIPT = """
<script>
%(sorting_script1)s
require.config({{paths: {{
datatables: '{datatables_url}'
}}}});
require(["datatables"], function(){{
console.log("$('#{tid}').dataTable()");
%(sorting_script2)s
$('#{tid}').dataTable({{
order: [],
pageLength: {display_length},
lengthMenu: {display_length_menu},
pagingType: "full_numbers",
columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}]
}});
}});
</script>
""" % dict(sorting_script1=_SORTING_SCRIPT_PART_1,
sorting_script2=_SORTING_SCRIPT_PART_2)
HTML_JS_SCRIPT = _SORTING_SCRIPT_PART_1 + _SORTING_SCRIPT_PART_2 + """
$(document).ready(function() {{
$('#{tid}').dataTable({{
order: [],
pageLength: {display_length},
lengthMenu: {display_length_menu},
pagingType: "full_numbers",
columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}]
}});
}} );
"""
# Default CSS for the JSViewer writer
DEFAULT_CSS = """\
body {font-family: sans-serif;}
table.dataTable {width: auto !important; margin: 0 !important;}
.dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em}
"""
# Default CSS used when rendering a table in the IPython notebook
DEFAULT_CSS_NB = """\
table.dataTable {clear: both; width: auto !important; margin: 0 !important;}
.dataTables_info, .dataTables_length, .dataTables_filter, .dataTables_paginate{
display: inline-block; margin-right: 1em; }
.paginate_button { margin-right: 5px; }
"""
class JSViewer:
"""Provides an interactive HTML export of a Table.
This class provides an interface to the `DataTables
<https://datatables.net/>`_ library, which allows an HTML table to be
visualized interactively. It is used by the `~astropy.table.Table.show_in_browser`
method.
Parameters
----------
use_local_files : bool, optional
Use local files or a CDN for JavaScript libraries. Default False.
display_length : int, optional
Number of rows to show. Default is 50.
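A brief usage sketch::

    from astropy.table import Table
    t = Table({'a': [1, 2, 3]})
    t.show_in_browser(jsviewer=True)           # interactive view in a browser
    t.write('table.html', format='jsviewer')   # standalone HTML file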
"""
def __init__(self, use_local_files=False, display_length=50):
self._use_local_files = use_local_files
self.display_length_menu = [[10, 25, 50, 100, 500, 1000, -1],
[10, 25, 50, 100, 500, 1000, "All"]]
self.display_length = display_length
for L in self.display_length_menu:
if display_length not in L:
L.insert(0, display_length)
@property
def jquery_urls(self):
if self._use_local_files:
return ['file://' + join(EXTERN_JS_DIR, 'jquery-3.1.1.min.js'),
'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min.js')]
else:
return [conf.jquery_url, conf.datatables_url]
@property
def css_urls(self):
if self._use_local_files:
return ['file://' + join(EXTERN_CSS_DIR,
'jquery.dataTables.css')]
else:
return conf.css_urls
def _jstable_file(self):
if self._use_local_files:
return 'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min')
else:
return conf.datatables_url[:-3]
def ipynb(self, table_id, css=None, sort_columns='[]'):
html = '<style>{0}</style>'.format(css if css is not None
else DEFAULT_CSS_NB)
html += IPYNB_JS_SCRIPT.format(
display_length=self.display_length,
display_length_menu=self.display_length_menu,
datatables_url=self._jstable_file(),
tid=table_id, sort_columns=sort_columns)
return html
def html_js(self, table_id='table0', sort_columns='[]'):
return HTML_JS_SCRIPT.format(
display_length=self.display_length,
display_length_menu=self.display_length_menu,
tid=table_id, sort_columns=sort_columns).strip()
def write_table_jsviewer(table, filename, table_id=None, max_lines=5000,
table_class="display compact", jskwargs=None,
css=DEFAULT_CSS, htmldict=None):
if table_id is None:
table_id = 'table{id}'.format(id=id(table))
jskwargs = jskwargs or {}
jsv = JSViewer(**jskwargs)
sortable_columns = [i for i, col in enumerate(table.columns.values())
if col.dtype.kind in 'iufc']
html_options = {
'table_id': table_id,
'table_class': table_class,
'css': css,
'cssfiles': jsv.css_urls,
'jsfiles': jsv.jquery_urls,
'js': jsv.html_js(table_id=table_id, sort_columns=sortable_columns)
}
if htmldict:
html_options.update(htmldict)
if max_lines < len(table):
table = table[:max_lines]
table.write(filename, format='html', htmldict=html_options)
io_registry.register_writer('jsviewer', Table, write_table_jsviewer)
|
3a8c3e9e624b2538c50a960f4ee694f9632cb1ceb62d0f8a27b0323692e5b54f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import collections.abc
import numpy as np
class Row:
"""A class to represent one row of a Table object.
A Row object is returned when a Table object is indexed with an integer
or when iterating over a table::
>>> from astropy.table import Table
>>> table = Table([(1, 2), (3, 4)], names=('a', 'b'),
... dtype=('int32', 'int32'))
>>> row = table[1]
>>> row
<Row index=1>
a b
int32 int32
----- -----
2 4
>>> row['a']
2
>>> row[1]
4
"""
def __init__(self, table, index):
self._table = table
self._index = index
n = len(table)
if index < -n or index >= n:
raise IndexError('index {0} out of range for table with length {1}'
.format(index, len(table)))
def __getitem__(self, item):
if self._table._is_list_or_tuple_of_str(item):
cols = [self._table[name] for name in item]
out = self._table.__class__(cols, copy=False)[self._index]
else:
out = self._table.columns[item][self._index]
return out
def __setitem__(self, item, val):
if self._table._is_list_or_tuple_of_str(item):
self._table._set_row(self._index, colnames=item, vals=val)
else:
self._table.columns[item][self._index] = val
def _ipython_key_completions_(self):
return self.colnames
def __eq__(self, other):
if self._table.masked:
# Sent bug report to numpy-discussion group on 2012-Oct-21, subject:
# "Comparing rows in a structured masked array raises exception"
# No response, so this is still unresolved.
raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
return self.as_void() == other
def __ne__(self, other):
if self._table.masked:
raise ValueError('Unable to compare rows for masked table due to numpy.ma bug')
return self.as_void() != other
def __array__(self, dtype=None):
"""Support converting Row to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
If the parent table is masked then the mask information is dropped.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
return np.asarray(self.as_void())
def __len__(self):
return len(self._table.columns)
def __iter__(self):
index = self._index
for col in self._table.columns.values():
yield col[index]
@property
def table(self):
return self._table
@property
def index(self):
return self._index
def as_void(self):
"""
Returns a *read-only* copy of the row values in the form of np.void or
np.ma.mvoid objects. This corresponds to the object types returned for
row indexing of a pure numpy structured array or masked array. This
method is slow and its use is discouraged when possible.
Returns
-------
void_row : np.void (unmasked) or np.ma.mvoid (masked)
Copy of row values
"""
index = self._index
cols = self._table.columns.values()
vals = tuple(np.asarray(col)[index] for col in cols)
if self._table.masked:
# The logic here is a little complicated to work around
# bug in numpy < 1.8 (numpy/numpy#483). Need to build up
# a np.ma.mvoid object by hand.
from .table import descr
# Make np.void version of masks. Use the table dtype but
# substitute bool for data type
masks = tuple(col.mask[index] if hasattr(col, 'mask') else False
for col in cols)
descrs = (descr(col) for col in cols)
mask_dtypes = [(name, bool, shape) for name, type_, shape in descrs]
row_mask = np.array([masks], dtype=mask_dtypes)[0]
# Make np.void version of values, and then the final mvoid row
row_vals = np.array([vals], dtype=self.dtype)[0]
void_row = np.ma.mvoid(data=row_vals, mask=row_mask)
else:
void_row = np.array([vals], dtype=self.dtype)[0]
return void_row
@property
def meta(self):
return self._table.meta
@property
def columns(self):
return self._table.columns
@property
def colnames(self):
return self._table.colnames
@property
def dtype(self):
return self._table.dtype
def _base_repr_(self, html=False):
"""
Display row as a single-line table but with appropriate header line.
"""
index = self.index if (self.index >= 0) else self.index + len(self._table)
table = self._table[index:index + 1]
descr_vals = [self.__class__.__name__,
'index={0}'.format(self.index)]
if table.masked:
descr_vals.append('masked=True')
return table._base_repr_(html, descr_vals, max_width=-1,
tableid='table{0}'.format(id(self._table)))
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
index = self.index if (self.index >= 0) else self.index + len(self._table)
return '\n'.join(self.table[index:index + 1].pformat(max_width=-1))
def __bytes__(self):
return str(self).encode('utf-8')
collections.abc.Sequence.register(Row)
|
d5f7121487277db9d629f31edd6260a989e50d50faf3944a9dcf871017da5284 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import platform
import warnings
import numpy as np
from .index import get_index
from ..utils.exceptions import AstropyUserWarning
__all__ = ['TableGroups', 'ColumnGroups']
def table_group_by(table, keys):
# index copies are unnecessary and slow down _table_group_by
with table.index_mode('discard_on_copy'):
return _table_group_by(table, keys)
def _table_group_by(table, keys):
"""
Get groups for ``table`` on specified ``keys``.
Parameters
----------
table : `Table`
Table to group
keys : str, list of str, `Table`, or Numpy array
Grouping key specifier
Returns
-------
grouped_table : Table object with groups attr set accordingly
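A brief usage sketch (this function is normally reached through
``Table.group_by``; ``key_array`` below stands for any array with one
entry per table row)::

    grouped = table.group_by('a')            # group by one table column
    grouped = table.group_by(['a', 'b'])     # group by several columns
    grouped = table.group_by(key_array)      # group by an external array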
"""
from .table import Table
# Pre-convert string to tuple of strings, or Table to the underlying structured array
if isinstance(keys, str):
keys = (keys,)
if isinstance(keys, (list, tuple)):
for name in keys:
if name not in table.colnames:
raise ValueError('Table does not have key column {0!r}'.format(name))
if table.masked and np.any(table[name].mask):
raise ValueError('Missing values in key column {0!r} are not allowed'.format(name))
keys = tuple(keys)
table_keys = table[keys]
grouped_by_table_cols = True # Grouping keys are columns from the table being grouped
elif isinstance(keys, (np.ndarray, Table)):
table_keys = keys
if len(table_keys) != len(table):
raise ValueError('Input keys array length {0} does not match table length {1}'
.format(len(table_keys), len(table)))
grouped_by_table_cols = False # Grouping key(s) are external
else:
raise TypeError('Keys input must be string, list, tuple or numpy array, but got {0}'
.format(type(keys)))
try:
# take advantage of index internal sort if possible
table_index = get_index(table, table_keys) if \
isinstance(table_keys, Table) else None
if table_index is not None:
idx_sort = table_index.sorted_data()
else:
idx_sort = table_keys.argsort(kind='mergesort')
stable_sort = True
except TypeError:
# Some versions (likely 1.6 and earlier) of numpy don't support
# 'mergesort' for all data types. MacOSX (Darwin) doesn't have a stable
# sort by default, nor does Windows, while Linux does (or appears to).
idx_sort = table_keys.argsort()
stable_sort = platform.system() not in ('Darwin', 'Windows')
table_keys = table_keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], table_keys[1:] != table_keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# If the sort is not stable (preserves original table order) then sort idx_sort in
# place within each group.
if not stable_sort:
for i0, i1 in zip(indices[:-1], indices[1:]):
idx_sort[i0:i1].sort()
# Make a new table and set the _groups to the appropriate TableGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = table.__class__(table[idx_sort])
out_keys = table_keys[indices[:-1]]
if isinstance(out_keys, Table):
out_keys.meta['grouped_by_table_cols'] = grouped_by_table_cols
out._groups = TableGroups(out, indices=indices, keys=out_keys)
return out
def column_group_by(column, keys):
"""
Get groups for ``column`` on specified ``keys``
Parameters
----------
column : Column object
Column to group
keys : Table or Numpy array of same length as col
Grouping key specifier
Returns
-------
grouped_column : Column object with groups attr set accordingly
"""
from .table import Table
if isinstance(keys, Table):
keys = keys.as_array()
if not isinstance(keys, np.ndarray):
raise TypeError('Keys input must be numpy array, but got {0}'
.format(type(keys)))
if len(keys) != len(column):
raise ValueError('Input keys array length {0} does not match column length {1}'
.format(len(keys), len(column)))
# take advantage of table or column indices, if possible
index = None
if isinstance(keys, Table):
index = get_index(keys)
elif hasattr(keys, 'indices') and keys.indices:
index = keys.indices[0]
if index is not None:
idx_sort = index.sorted_data()
else:
idx_sort = keys.argsort()
keys = keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], keys[1:] != keys[:-1], [True]))
indices = np.flatnonzero(diffs)
# Make a new column and set the _groups to the appropriate ColumnGroups object.
# Take the subset of the original keys at the indices values (group boundaries).
out = column.__class__(column[idx_sort])
out._groups = ColumnGroups(out, indices=indices, keys=keys[indices[:-1]])
return out
class BaseGroups:
"""
A class to represent groups within a table of heterogeneous data.
- ``keys``: key values corresponding to each group
- ``indices``: index values in parent table or column corresponding to group boundaries
- ``aggregate()``: method to create new table by aggregating within groups
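A brief usage sketch::

    grouped = table.group_by('a')    # Table whose ``groups`` attribute is set
    grouped.groups.keys              # key value(s) for each group
    grouped.groups[0]                # first group as a sub-table
    for group in grouped.groups:     # iterate over the group sub-tables
        ...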
"""
@property
def parent(self):
return self.parent_column if isinstance(self, ColumnGroups) else self.parent_table
def __iter__(self):
self._iter_index = 0
return self
def next(self):
ii = self._iter_index
if ii < len(self.indices) - 1:
i0, i1 = self.indices[ii], self.indices[ii + 1]
self._iter_index += 1
return self.parent[i0:i1]
else:
raise StopIteration
__next__ = next
def __getitem__(self, item):
parent = self.parent
if isinstance(item, (int, np.integer)):
i0, i1 = self.indices[item], self.indices[item + 1]
out = parent[i0:i1]
out.groups._keys = parent.groups.keys[item]
else:
indices0, indices1 = self.indices[:-1], self.indices[1:]
try:
i0s, i1s = indices0[item], indices1[item]
except Exception:
raise TypeError('Index item for groups attribute must be a slice, '
'numpy mask or int array')
mask = np.zeros(len(parent), dtype=bool)
# Is there a way to vectorize this in numpy?
for i0, i1 in zip(i0s, i1s):
mask[i0:i1] = True
out = parent[mask]
out.groups._keys = parent.groups.keys[item]
out.groups._indices = np.concatenate([[0], np.cumsum(i1s - i0s)])
return out
def __repr__(self):
return '<{0} indices={1}>'.format(self.__class__.__name__, self.indices)
def __len__(self):
return len(self.indices) - 1
class ColumnGroups(BaseGroups):
def __init__(self, parent_column, indices=None, keys=None):
self.parent_column = parent_column # parent Column
self.parent_table = parent_column.parent_table
self._indices = indices
self._keys = keys
@property
def indices(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.indices
else:
if self._indices is None:
return np.array([0, len(self.parent_column)])
else:
return self._indices
@property
def keys(self):
# If the parent column is in a table then use group indices from table
if self.parent_table:
return self.parent_table.groups.keys
else:
return self._keys
def aggregate(self, func):
from .column import MaskedColumn
i0s, i1s = self.indices[:-1], self.indices[1:]
par_col = self.parent_column
masked = isinstance(par_col, MaskedColumn)
reduceat = hasattr(func, 'reduceat')
sum_case = func is np.sum
mean_case = func is np.mean
try:
if not masked and (reduceat or sum_case or mean_case):
if mean_case:
vals = np.add.reduceat(par_col, i0s) / np.diff(self.indices)
else:
if sum_case:
func = np.add
vals = func.reduceat(par_col, i0s)
else:
vals = np.array([func(par_col[i0: i1]) for i0, i1 in zip(i0s, i1s)])
except Exception:
raise TypeError("Cannot aggregate column '{0}' with type '{1}'"
.format(par_col.info.name,
par_col.info.dtype))
out = par_col.__class__(data=vals,
name=par_col.info.name,
description=par_col.info.description,
unit=par_col.info.unit,
format=par_col.info.format,
meta=par_col.info.meta)
return out
def filter(self, func):
"""
Filter groups in the Column based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept one argument:
- ``column`` : `Column` object
It must then return either `True` or `False`. As an example, the following
will select all column groups with only positive values::
def all_positive(column):
if np.any(column < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
out : Column
New column containing only those groups for which ``func`` returned `True`.
"""
mask = np.empty(len(self), dtype=bool)
for i, group_column in enumerate(self):
mask[i] = func(group_column)
return self[mask]
class TableGroups(BaseGroups):
def __init__(self, parent_table, indices=None, keys=None):
self.parent_table = parent_table # parent Table
self._indices = indices
self._keys = keys
@property
def key_colnames(self):
"""
Return the names of columns in the parent table that were used for grouping.
"""
# If the table was grouped by key columns *in* the table then treat those columns
# differently in aggregation. In this case keys will be a Table with
# keys.meta['grouped_by_table_cols'] == True. Keys might not be a Table so we
# need to handle this.
grouped_by_table_cols = getattr(self.keys, 'meta', {}).get('grouped_by_table_cols', False)
return self.keys.colnames if grouped_by_table_cols else ()
@property
def indices(self):
if self._indices is None:
return np.array([0, len(self.parent_table)])
else:
return self._indices
def aggregate(self, func):
"""
Aggregate each group in the Table into a single row by applying the reduction
function ``func`` to group values in each column.
Parameters
----------
func : function
Function that reduces an array of values to a single value
Returns
-------
out : Table
New table with the aggregated rows.
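A brief usage sketch::

    import numpy as np
    grouped = table.group_by('a')
    means = grouped.groups.aggregate(np.mean)   # one aggregated row per group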
"""
i0s, i1s = self.indices[:-1], self.indices[1:]
out_cols = []
parent_table = self.parent_table
for col in parent_table.columns.values():
# For key columns just pick off first in each group since they are identical
if col.info.name in self.key_colnames:
new_col = col.take(i0s)
else:
try:
new_col = col.groups.aggregate(func)
except TypeError as err:
warnings.warn(str(err), AstropyUserWarning)
continue
out_cols.append(new_col)
return parent_table.__class__(out_cols, meta=parent_table.meta)
def filter(self, func):
"""
Filter groups in the Table based on evaluating function ``func`` on each
group sub-table.
The function which is passed to this method must accept two arguments:
- ``table`` : `Table` object
- ``key_colnames`` : tuple of column names in ``table`` used as keys for grouping
It must then return either `True` or `False`. As an example, the following
will select all table groups with only positive values in the non-key columns::
def all_positive(table, key_colnames):
colnames = [name for name in table.colnames if name not in key_colnames]
for colname in colnames:
if np.any(table[colname] < 0):
return False
return True
Parameters
----------
func : function
Filter function
Returns
-------
out : Table
New table containing only those groups for which ``func`` returned `True`.
"""
mask = np.empty(len(self), dtype=bool)
key_colnames = self.key_colnames
for i, group_table in enumerate(self):
mask[i] = func(group_table, key_colnames)
return self[mask]
@property
def keys(self):
return self._keys
|
96d437f615c402d9d35972f261d7e8985a036fb420117c65de2055ce3de91b58 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .index import TableIndices, TableLoc, TableILoc, TableLocIndices
import re
import sys
from collections import OrderedDict
from collections.abc import Mapping
import warnings
from copy import deepcopy
import numpy as np
from numpy import ma
from .. import log
from ..io import registry as io_registry
from ..units import Quantity, QuantityInfo
from ..utils import isiterable, ShapedLikeNDArray
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo
from ..utils.exceptions import AstropyDeprecationWarning, NoValue
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy)
from .row import Row
from .np_utils import fix_column_name, recarray_fromrecords
from .info import TableInfo
from .index import Index, _IndexModeContext, get_index
from . import conf
__doctest_skip__ = ['Table.read', 'Table.write',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
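For example, a one-dimensional ``float64`` column named ``'a'`` yields
``('a', dtype('float64'), ())``.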
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
return hasattr(obj, 'info') and isinstance(obj.info, cls)
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return self.values()[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.values()[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value):
if item in self:
raise ValueError("Cannot replace column '{0}'. Use Table.replace_column() instead."
.format(item))
super().__setitem__(item, value)
def __repr__(self):
names = ("'{0}'".format(x) for x in self.keys())
return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__)
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError("Column {0} already exists".format(new_name))
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
# Define keys and values for Python 2 and 3 source compatibility
def keys(self):
return list(OrderedDict.keys(self))
def values(self):
return list(OrderedDict.values(self))
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are instances of given classes.
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class Table:
"""A class to represent tables of heterogeneous data.
`Table` provides a class for heterogeneous tabular data, making use of a
`numpy` structured array internally to store the data values. A key
enhancement provided by the `Table` class is the ability to easily modify
the structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`Table` differs from `~astropy.nddata.NDData` by the assumption that the
input data consists of columns of homogeneous data, where each column
has a unique identifier and may contain additional metadata such as the
data unit, format, and description.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
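A brief construction sketch::

    from astropy.table import Table
    t = Table([[1, 2, 3], [4.0, 5.0, 6.0]], names=('a', 'b'),
              meta={'origin': 'example'})
    t['a']    # column access by name
    t[0]      # first row as a Row object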
"""
meta = MetaData()
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
def as_array(self, keep_byteorder=False):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
Returns
-------
table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)
Copy of table as a numpy structured array
"""
if len(self.columns) == 0:
return None
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
native_order = ('=', sys_byteorder)
dtype = []
cols = self.columns.values()
for col in cols:
col_descr = descr(col)
byteorder = col.info.dtype.byteorder
if not keep_byteorder and byteorder not in native_order:
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
empty_init = ma.empty if self.masked else np.empty
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
return data
def __init__(self, data=None, masked=None, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.meta = meta
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
# Must copy if dtype are changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
is_list_of_dict = False
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if all(isinstance(row, dict) for row in rows):
is_list_of_dict = True # Avoid doing the all(...) test twice.
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
rec_data = recarray_fromrecords(rows)
data = [rec_data[name] for name in rec_data.dtype.names]
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied (though the meta
# will be deep-copied anyway).
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray) and
data.shape == (0,) and
not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
init_func = self._init_from_list
if data and (is_list_of_dict or all(isinstance(row, dict) for row in data)):
n_cols = len(data[0])
else:
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
init_func = self._init_from_table
n_cols = len(data.colnames)
default_names = data.colnames
# don't copy indices if the input Table is in non-copy mode
self._init_indices = self._init_indices and data._copy_indices
elif data is None:
if names is None:
if dtype is None:
return # Empty table
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError('Data type {0} not allowed to init Table'
.format(type(data)))
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if names is None:
names = default_names or [None] * n_cols
if dtype is None:
dtype = [None] * n_cols
# Numpy does not support bytes column names on Python 3, so fix them
# up now.
names = [fix_column_name(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Whatever happens above, the masked property should be set to a boolean
if type(self.masked) is not bool:
raise TypeError("masked property has not been set to True or False")
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked:
mask_table = Table([col.mask for col in self.columns.values()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
fill_value : str
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : Table
New table with masked values filled
"""
if self.masked:
data = [col.filled(fill_value) for col in self.columns.values()]
else:
data = self
return self.__class__(data, meta=deepcopy(self.meta))
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
'''
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
FastBST, and FastRBT. If the supplied argument is None (by
default), use SortedArray.
unique : bool
Whether the values of the index must be unique. Default is False.
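A brief usage sketch::

    t.add_index('a')          # primary index on column 'a'
    t.add_index(['b', 'c'])   # composite index on two columns
    t.loc[3]                  # fast row lookup via the primary index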
'''
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{0}", of '
'type "{1}"'.format(col.info.name, type(col)))
index = Index(columns, engine=engine, unique=unique)
if not self.indices:
self.primary_key = colnames
for col in columns:
col.info.indices.append(index)
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
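Examples
--------
A minimal usage sketch (the indexed column 'a' is illustrative)::
>>> t = Table([[1, 2, 3]], names=['a'])
>>> t.add_index('a')
>>> t.remove_indices('a')  # 'a' is no longer indexed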
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
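Examples
--------
A minimal usage sketch of 'freeze' mode; the column name and values
are only illustrative::
>>> t = Table([[1, 2, 3]], names=['a'])
>>> t.add_index('a')
>>> with t.index_mode('freeze'):
...     t['a'][0] = 10
...     t['a'][1] = 20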
'''
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
return self.as_array().data if self.masked else self.as_array()
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError('{0} must be a list or None'.format(inp_str))
if len(names) != n_cols or len(dtype) != n_cols:
raise ValueError(
'Arguments "names" and "dtype" must match number of columns'
.format(inp_str))
def _set_masked_from_cols(self, cols):
if self.masked is None:
if any(isinstance(col, (MaskedColumn, ma.MaskedArray)) for col in cols):
self._set_masked(True)
else:
self._set_masked(False)
elif not self.masked:
if any(np.any(col.mask) for col in cols if isinstance(col, (MaskedColumn, ma.MaskedArray))):
self._set_masked(True)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
names_from_data = set()
for row in data:
names_from_data.update(row)
cols = {}
for name in names_from_data:
cols[name] = []
for i, row in enumerate(data):
try:
cols[name].append(row[name])
except KeyError:
raise ValueError('Row {0} has no value for column {1}'.format(i, name))
if all(name is None for name in names):
names = sorted(names_from_data)
self._init_from_dict(cols, names, dtype, n_cols, copy)
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of columns. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
if data and all(isinstance(row, dict) for row in data):
self._init_from_list_of_dicts(data, names, dtype, n_cols, copy)
return
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(data)
cols = []
def_names = _auto_names(n_cols)
for col, name, def_name, dtype in zip(data, names, def_names, dtype):
# Structured ndarray gets viewed as a mixin unless already a valid
# mixin class
if (isinstance(col, np.ndarray) and len(col.dtype) > 1 and
not self._add_as_mixin_column(col)):
col = col.view(NdarrayMixin)
if isinstance(col, (Column, MaskedColumn)):
col = self.ColumnClass(name=(name or col.info.name or def_name),
data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
elif self._add_as_mixin_column(col):
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
if copy:
col = col_copy(col, copy_indices=self._init_indices)
col.info.name = name or col.info.name or def_name
elif isinstance(col, np.ndarray) or isiterable(col):
col = self.ColumnClass(name=(name or def_name), data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
else:
raise ValueError('Elements in list initialization must be '
'either Column or list-like')
cols.append(col)
self._init_from_cols(cols)
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(cols)
if copy:
self._init_from_list(cols, names, dtype, n_cols, copy)
else:
dtype = [(name, col.dtype, col.shape[1:]) for name, col in zip(names, cols)]
newdata = data.view(dtype).ravel()
columns = self.TableColumns()
for name in names:
columns[name] = self.ColumnClass(name=name, data=newdata[name])
columns[name].info.parent_table = self
self.columns = columns
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
# TODO: is this restriction still needed with no ndarray?
if not copy:
raise ValueError('Cannot use copy=False with a dict data input')
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _init_from_table(self, data, names, dtype, n_cols, copy):
"""Initialize table from an existing Table object """
table = data # data is really a Table, rename for clarity
self.meta.clear()
self.meta.update(deepcopy(table.meta))
self.primary_key = table.primary_key
cols = list(table.columns.values())
self._init_from_list(cols, names, dtype, n_cols, copy)
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if col.__class__ is not self.ColumnClass and isinstance(col, Column):
col = self.ColumnClass(col) # copy attributes and reference data
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) != 1:
raise ValueError('Inconsistent data column lengths: {0}'
.format(lengths))
# Set the table masking
self._set_masked_from_cols(cols)
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
table.meta.clear()
table.meta.update(deepcopy(self.meta))
table.primary_key = self.primary_key
cols = self.columns.values()
newcols = []
for col in cols:
col.info._copy_indices = self._copy_indices
newcol = col[slice_]
if col.info.indices:
newcol = col.info.slice_indices(newcol, slice_, len(col))
newcols.append(newcol)
col.info._copy_indices = True
self._make_table_from_cols(table, newcols)
return table
@staticmethod
def _make_table_from_cols(table, cols):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
colnames = set(col.info.name for col in cols)
if None in colnames:
raise TypeError('Cannot have None for column name')
if len(colnames) != len(cols):
raise ValueError('Duplicate column names')
columns = table.TableColumns((col.info.name, col) for col in cols)
for col in cols:
col.info.parent_table = table
if table.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
table.columns = columns
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(self)))
descr = ' '.join(descr_vals)
if html:
from ..utils.xml.writer import xml_escape
descr = '<i>{0}</i>\n'.format(xml_escape(descr))
else:
descr = '<{0}>\n'.format(descr)
if tableid is None:
tableid = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return '\n'.join(self.pformat())
def __bytes__(self):
return str(self).encode('utf-8')
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
# Is it a mixin but not a Quantity (which gets converted to Column with
# unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int
Maximum number of lines in table output.
max_width : int or `None`
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + list(self.columns.values()),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or `None`
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <http://getbootstrap.com/css/#tables>`_
for the list of classes.
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
display_length : int, optional
Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = 'table{0}-{1}'.format(id(self),
np.random.randint(1, 1e6))
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or `None`
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from .jsviewer import DEFAULT_CSS
from urllib.parse import urljoin
from urllib.request import pathname2url
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error("Browser '{}' not found.".format(browser))
else:
br.open(urljoin('file:', pathname2url(path)))
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int or `None`
Maximum number of rows to output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or `None`
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
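Examples
--------
A minimal usage sketch (column names and values are illustrative)::
>>> t = Table([[1, 2], [3.0, 4.0]], names=('a', 'b'))
>>> lines = t.pformat()  # one string per rendered row, including the header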
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
return lines
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
elif ((isinstance(item, np.ndarray) and item.size == 0) or
(isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
NewColumn = self.MaskedColumn if self.masked else self.Column
# If value doesn't have a dtype and won't be added as a mixin then
# convert to a numpy array.
if not hasattr(value, 'dtype') and not self._add_as_mixin_column(value):
value = np.asarray(value)
# Structured ndarray gets viewed as a mixin (unless already a valid
# mixin class).
if (isinstance(value, np.ndarray) and len(value.dtype) > 1 and
not self._add_as_mixin_column(value)):
value = value.view(NdarrayMixin)
# Make new column and assign the value. If the table currently
# has no rows (len=0) or the value is already a Column then
# define new column directly from value. In the latter case
# this allows for propagation of Column metadata. Otherwise
# define a new column with the right length and shape and then
# set it from value. This allows for broadcasting, e.g. t['a']
# = 1.
name = item
# If this is a column-like object that could be added directly to table
if isinstance(value, BaseColumn) or self._add_as_mixin_column(value):
# If we're setting a new column to a scalar, broadcast it.
# (things will fail in _init_from_cols if this doesn't work)
if (len(self) > 0 and (getattr(value, 'isscalar', False) or
getattr(value, 'shape', None) == () or
len(value) == 1)):
new_shape = (len(self),) + getattr(value, 'shape', ())[1:]
if isinstance(value, np.ndarray):
value = np.broadcast_to(value, shape=new_shape,
subok=True)
elif isinstance(value, ShapedLikeNDArray):
value = value._apply(np.broadcast_to, shape=new_shape,
subok=True)
new_column = col_copy(value)
new_column.info.name = name
elif len(self) == 0:
new_column = NewColumn(value, name=name)
else:
new_column = NewColumn(name=name, length=len(self), dtype=value.dtype,
shape=value.shape[1:],
unit=getattr(value, 'unit', None))
new_column[:] = value
# Now add new column to the table
self.add_columns([new_column], copy=False)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
(isinstance(item, tuple) and # output from np.where
all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
import itertools
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif (isinstance(item, (list, tuple, np.ndarray)) and
all(isinstance(x, str) for x in item)):
self.remove_columns(item)
elif (isinstance(item, (list, np.ndarray)) and
np.asarray(item).dtype.kind == 'i'):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError('illegal key or index value')
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if hasattr(self, '_masked'):
# The only allowed change is from None to False or True, or False to True
if self._masked is None and masked in [False, True]:
self._masked = masked
elif self._masked is False and masked is True:
log.info("Upgrading Table to masked Table. Use Table.filled() to convert to unmasked table.")
self._masked = masked
elif self._masked is masked:
raise Exception("Masked attribute is already set to {0}".format(masked))
else:
raise Exception("Cannot change masked attribute to {0} once it is set to {1}"
.format(masked, self._masked))
else:
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
if self._masked:
self._column_class = self.MaskedColumn
else:
self._column_class = self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (isinstance(names, (tuple, list)) and names and
all(isinstance(x, str) for x in names))
def keys(self):
return list(self.columns.keys())
def __len__(self):
if len(self.columns) == 0:
return 0
lengths = set(len(col) for col in self.columns.values())
if len(lengths) != 1:
len_strs = [' {0} : {1}'.format(name, len(col)) for name, col in self.columns.items()]
raise ValueError('Column length mismatch:\n{0}'.format('\n'.join(len_strs)))
return lengths.pop()
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError("Column {0} does not exist".format(name))
def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True):
"""
Add a new Column object ``col`` to the table. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
Parameters
----------
col : Column
Column object to add.
index : int or `None`
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create a third column 'c' and append it to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> col_d = Column(name='d', data=['a', 'b', 'c'])
>>> t.add_column(col_d, 1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
3 c 0.3 z
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> t.add_column(col_b, rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.2
3 0.3 1.3
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(data=['x', 'y'])
>>> t.add_column(col_c)
>>> t.add_column(col_c, name='c')
>>> col_b = Column(name='b', data=[1.1, 1.2])
>>> t.add_column(col_b, name='d')
>>> print(t)
a b col2 c d
--- --- ---- --- ---
1 0.1 x x 1.1
2 0.2 y y 1.2
To add several columns use add_columns.
"""
if index is None:
index = len(self.columns)
if name is not None:
name = (name,)
self.add_columns([col], [index], name, copy=copy, rename_duplicate=rename_duplicate)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
Add a list of new Column objects ``cols`` to the table. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
Parameters
----------
cols : list of Columns
Column objects to add.
indexes : list of ints or `None`
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create column 'c' and 'd' and append them to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
3 0.3 z w
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d], [0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
z 3 w 0.3
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_columns([col_b, col_c], rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
3 0.3 1.3 z
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_a = Column(data=['x', 'y'])
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([col_a, col_b])
>>> t.add_columns([col_a, col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
x u x u
y v y v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if copy:
cols = [col_copy(col) for col in cols]
if len(self.columns) == 0:
# No existing table data, init from cols
newcols = cols
else:
newcols = list(self.columns.values())
new_indexes = list(range(len(newcols) + 1))
for col, index in zip(cols, indexes):
i = new_indexes.index(index)
new_indexes.insert(i, None)
newcols.insert(i, col)
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
for i, (col, name) in enumerate(zip(cols, names)):
if name is None:
if col.info.name is not None:
continue
name = 'col{}'.format(i + len(self.columns))
if col.info.parent_table is not None:
col = col_copy(col)
col.info.name = name
if rename_duplicate:
existing_names = set(self.colnames)
for col in cols:
i = 1
orig_name = col.info.name
while col.info.name in existing_names:
# If the column belongs to another table then copy it
# before renaming
if col.info.parent_table is not None:
col = col_copy(col)
new_name = '{0}_{1}'.format(orig_name, i)
col.info.name = new_name
i += 1
existing_names.add(new_name)
self._init_from_cols(newcols)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn("replaced column '{}'".format(name),
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col):
"""
Replace column ``name`` with the new ``col`` object.
Parameters
----------
name : str
Name of column to replace
col : column object (list, ndarray, Column, etc)
New column object to replace the existing column
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError('column name {0} is not in the table'.format(name))
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
t = self.__class__([col], names=[name])
cols = OrderedDict(self.columns)
cols[name] = t[name]
self._init_from_cols(cols.values())
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice, int, or array of ints
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
This gives the same as using remove_column.
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
for name in names:
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
"""
# If there are no `in_kind` columns then do nothing
cols = self.columns.values()
if not any(col.dtype.kind == in_kind for col in cols):
return
newcols = []
for col in cols:
if col.dtype.kind == in_kind:
newdtype = re.sub(in_kind, out_kind, col.dtype.str)
newcol = col.__class__(col, dtype=newdtype)
else:
newcol = col
newcols.append(newcol)
self._init_from_cols(newcols)
def convert_bytestring_to_unicode(self, python3_only=NoValue):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') assuming
ASCII encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
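Examples
--------
A minimal usage sketch (the bytestring column 'a' is illustrative)::
>>> import numpy as np
>>> t = Table([np.array([b'abc', b'def'])], names=['a'])  # dtype.kind == 'S'
>>> t.convert_bytestring_to_unicode()  # column 'a' now has dtype.kind == 'U'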
"""
if python3_only is not NoValue:
warnings.warn('The "python3_only" keyword is now deprecated.',
AstropyDeprecationWarning)
self._convert_string_dtype('S', 'U')
def convert_unicode_to_bytestring(self, python3_only=NoValue):
"""
Convert ASCII-only unicode columns (dtype.kind='U') to bytestring (dtype.kind='S').
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings. This routine takes
advantage of numpy automated conversion which works for strings that
are pure ASCII.
"""
if python3_only is not NoValue:
warnings.warn('The "python3_only" keyword is now deprecated.',
AstropyDeprecationWarning)
self._convert_string_dtype('U', 'S')
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : list
A list containing the names of the columns to keep. All other
columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Specifying only a single column name keeps only this column.
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
Specifying a list of column names is also possible.
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
remove = list(set(self.keys()) - set(names))
self.remove_columns(remove)
def rename_column(self, name, new_name):
'''
Rename a column.
This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
>>> t.rename_column('a' , 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError("Column {0} does not exist".format(name))
self.columns[name].info.name = new_name
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError('right hand side must be a sequence of values with '
'the same length as the number of selected columns')
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
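Examples
--------
A minimal usage sketch; the column names and inserted values are only
illustrative::
>>> t = Table([[1, 2], [4.0, 5.0]], names=('a', 'b'))
>>> t.insert_row(1, vals=[9, 9.5])            # insert before row 1
>>> t.insert_row(0, vals={'a': 0, 'b': 0.0})  # mapping form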
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {0} is out of bounds for table with length {1}"
.format(index, N))
if index < 0:
index += N
def _is_mapping(obj):
"""Minimal checker for mapping (dict-like) interface for obj"""
attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
return all(hasattr(obj, attr) for attr in attrs)
if mask is not None and not self.masked:
# Possibly issue upgrade warning and update self.ColumnClass. This
# does not change the existing columns.
self._set_masked(True)
if _is_mapping(vals) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not _is_mapping(mask):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError("Value must be supplied for column '{0}'".format(name))
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or _is_mapping(mask)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
columns = self.TableColumns()
try:
# Insert val at index for each column
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
# If the new row caused a change in self.ColumnClass then
# Column-based classes need to be converted first. This is
# typical for adding a row with mask values to an unmasked table.
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col = self.ColumnClass(col, copy=False)
newcol = col.insert(index, val, axis=0)
if not isinstance(newcol, BaseColumn):
newcol.info.name = name
if self.masked:
newcol.mask = FalseArray(newcol.shape)
if len(newcol) != N + 1:
raise ValueError('Incorrect length for column {0} after inserting {1}'
' (expected {2}, got {3})'
.format(name, val, N + 1, len(newcol)))
newcol.info.parent_table = self
# Set mask if needed
if self.masked:
newcol.mask[index] = mask_
columns[name] = newcol
# insert row in indices
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{0}':\n{1}"
.format(name, err))
else:
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def argsort(self, keys=None, kind=None):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
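Examples
--------
A minimal usage sketch (the column name and values are illustrative)::
>>> t = Table([[3, 1, 2]], names=['a'])
>>> order = t.argsort('a')  # order is array([1, 2, 0])
>>> t_sorted = t[order]     # a new table sorted by column 'a'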
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, self[keys])
if index is not None:
return index.sorted_data()
kwargs = {}
if keys:
kwargs['order'] = keys
if kind:
kwargs['kind'] = kind
if keys:
data = self[keys].as_array()
else:
data = self.as_array()
return data.argsort(**kwargs)
def sort(self, keys=None):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name','firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys)
sort_index = get_index(self, self[keys])
if sort_index is not None:
# avoid inefficient relabelling of sorted index
prev_frozen = sort_index._frozen
sort_index._frozen = True
for col in self.columns.values():
col[:] = col.take(indexes, axis=0)
if sort_index is not None:
# undo index freeze
sort_index._frozen = prev_frozen
# now relabel the sort index appropriately
sort_index.sort()
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
col[:] = col[::-1]
for index in self.indices:
index.reverse()
@classmethod
def read(cls, *args, **kwargs):
"""
Read and parse a data table and return as a Table.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily reading a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table.read('table.dat', format='ascii')
>>> events = Table.read('events.fits', format='fits')
The arguments and keywords (other than ``format``) provided to this function are
passed through to the underlying data reader (e.g. `~astropy.io.ascii.read`).
"""
out = io_registry.read(cls, *args, **kwargs)
# For some readers (e.g., ascii.ecsv), the returned `out` class is not
# guaranteed to be the same as the desired output `cls`. If so,
# try coercing to desired class without copying (io.registry.read
# would normally do a copy). The normal case here is swapping
# Table <=> QTable.
if cls is not out.__class__:
try:
out = cls(out, copy=False)
except Exception:
raise TypeError('could not convert reader output to {0} '
'class.'.format(cls.__name__))
return out
def write(self, *args, **kwargs):
"""
Write this Table object out in the specified format.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
>>> dat.write('table.dat', format='ascii')
The arguments and keywords (other than ``format``) provided to this function are
passed through to the underlying data reader (e.g. `~astropy.io.ascii.write`).
"""
io_registry.write(self, *args, **kwargs)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
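Examples
--------
A minimal usage sketch (the column name is illustrative)::
>>> t = Table([[1, 2]], names=['a'])
>>> t_deep = t.copy()                    # data is copied
>>> t_shallow = t.copy(copy_data=False)  # data is shared with ``t``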
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
if isinstance(other, Table):
other = other.as_array()
if self.masked:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def __ne__(self, other):
return ~self.__eq__(other)
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Table` whose ``groups`` attribute is set, containing a copy of this
table sorted by row according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `Table`
Key grouping object
Returns
-------
out : `Table`
New table with groups set
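Examples
--------
A minimal sketch (column values here are illustrative)::
>>> from astropy.table import Table
>>> t = Table({'a': [1, 2, 1], 'b': [10, 20, 30]})
>>> tg = t.group_by('a')     # rows sorted by 'a'; one group each for a == 1 and a == 2
>>> len(tg.groups)
2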
"""
if self.has_mixin_columns:
raise NotImplementedError('group_by not available for tables with mixin columns')
return groups.table_group_by(self, keys)
def to_pandas(self):
"""
Return a :class:`pandas.DataFrame` instance
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table contains mixin or multi-dimensional columns
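Examples
--------
A minimal sketch (requires pandas; values are illustrative)::
>>> from astropy.table import Table
>>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
>>> df = t.to_pandas()     # DataFrame with columns 'a' and 'b'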
"""
from pandas import DataFrame
if self.has_mixin_columns:
raise ValueError("Cannot convert a table with mixin columns to a pandas DataFrame")
if any(getattr(col, 'ndim', 1) > 1 for col in self.columns.values()):
raise ValueError("Cannot convert a table with multi-dimensional columns to a pandas DataFrame")
out = OrderedDict()
for name, column in self.columns.items():
if isinstance(column, MaskedColumn):
if column.dtype.kind in ['i', 'u']:
out[name] = column.astype(float).filled(np.nan)
elif column.dtype.kind in ['f', 'c']:
out[name] = column.filled(np.nan)
else:
out[name] = column.astype(object).filled(np.nan)
else:
out[name] = column
if out[name].dtype.byteorder not in ('=', '|'):
out[name] = out[name].byteswap().newbyteorder()
return DataFrame(out)
@classmethod
def from_pandas(cls, dataframe):
"""
Create a `Table` from a :class:`pandas.DataFrame` instance
Parameters
----------
dataframe : :class:`pandas.DataFrame`
The pandas :class:`pandas.DataFrame` instance
Returns
-------
table : `Table`
A `Table` (or subclass) instance
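Examples
--------
A minimal sketch (requires pandas; values are illustrative)::
>>> import pandas as pd
>>> from astropy.table import Table
>>> df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
>>> t = Table.from_pandas(df)     # 'a' and 'b' become Column objects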
"""
out = OrderedDict()
for name in dataframe.columns:
column = dataframe[name]
mask = np.array(column.isnull())
data = np.array(column)
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask)
else:
out[name] = Column(data=data, name=name)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`QTable` provides a class for heterogeneous tabular data which can be
easily modified, for instance adding columns or new rows.
The `QTable` class is identical to `Table` except that columns with an
associated ``unit`` attribute are converted to `~astropy.units.Quantity`
objects.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
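Examples
--------
A minimal sketch (values are illustrative)::
>>> import astropy.units as u
>>> from astropy.table import QTable
>>> qt = QTable([[1.0, 2.0] * u.m], names=['length'])
>>> qt['length']     # a Quantity; a plain Table would hold a Column with unit 'm'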
"""
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if (isinstance(col, Column) and getattr(col, 'unit', None) is not None):
# We need to turn the column into a quantity, or a subclass
# identified in the unit (such as u.mag()).
q_cls = getattr(col.unit, '_quantity_class', Quantity)
qcol = q_cls(col.data, col.unit, copy=False)
qcol.info = col.info
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ndarray().
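For example, a rough sketch (the array values are illustrative)::
>>> import numpy as np
>>> a = NdarrayMixin(np.arange(4).reshape(2, 2))
>>> a.info.name = 'arr'     # table metadata is carried on the ``info`` attribute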
"""
info = ParentDtypeInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
|
e9780196543c25162f060aa9f0bafc5f94208a991f5aab9bc9d66ee63468ee17 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .. import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.table`.
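For example, a sketch of overriding one item at runtime (values are
illustrative)::
>>> from astropy.table import conf
>>> conf.auto_colname = 'column_{0}'     # auto-generated names become column_0, column_1, ...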
"""
auto_colname = _config.ConfigItem(
'col{0}',
'The template that determines the name of a column if it cannot be '
'determined. Uses new-style (format method) string formatting.',
aliases=['astropy.table.column.auto_colname'])
default_notebook_table_class = _config.ConfigItem(
'table-striped table-bordered table-condensed',
'The table class to be used in Jupyter notebooks when displaying '
'tables (and not overridden). See <http://getbootstrap.com/css/#tables> '
'for a list of useful bootstrap classes.')
replace_warnings = _config.ConfigItem(
['slice'],
'List of conditions for issuing a warning when replacing a table '
"column using setitem, e.g. t['a'] = value. Allowed options are "
"'always', 'slice', 'refcount', 'attributes'.",
'list',
)
replace_inplace = _config.ConfigItem(
False,
'Always use in-place update of a table column when using setitem, '
"e.g. t['a'] = value. This overrides the default behavior of "
"replacing the column entirely with the new value when possible. "
"This configuration option will be deprecated and then removed in "
"subsequent major releases."
)
conf = Conf()
from .column import Column, MaskedColumn, StringTruncateWarning, ColumnInfo
from .groups import TableGroups, ColumnGroups
from .table import (Table, QTable, TableColumns, Row, TableFormatter,
NdarrayMixin, TableReplaceWarning)
from .operations import join, setdiff, hstack, vstack, unique, TableMergeError
from .bst import BST, FastBST, FastRBT
from .sorted_array import SortedArray
from .serialize import SerializedColumn
# Finally import the formats for the read and write methods but delay building
# the documentation until all are loaded. (#5275)
from ..io import registry
with registry.delay_doc_updates(Table):
# Import routines that connect readers/writers to astropy.table
from .jsviewer import JSViewer
from ..io.ascii import connect
from ..io.fits import connect
from ..io.misc import connect
from ..io.votable import connect
|
32eb03761f2275626c281b0a3027682fa6b28d795b7ecef7781cf4d0ae2e71a2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import weakref
import re
from copy import deepcopy
import numpy as np
from numpy import ma
# Remove this when Numpy no longer emits this warning and that Numpy version
# becomes the minimum required version for Astropy.
# https://github.com/astropy/astropy/issues/6285
try:
from numpy.ma.core import MaskedArrayFutureWarning
except ImportError:
# For Numpy versions that do not raise this warning.
MaskedArrayFutureWarning = None
from ..units import Unit, Quantity
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, dtype_info_name
from ..utils.misc import dtype_bytes_or_chars
from . import groups
from . import pprint
from .np_utils import fix_column_name
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
class StringTruncateWarning(UserWarning):
"""
Warning class for when a string column is assigned a value
that gets truncated because the base (numpy) string length
is too short.
This does not inherit from AstropyWarning because we want to use
stacklevel=2 to show the user where the issue occurred in their code.
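A rough sketch of when this is emitted (values are illustrative)::
>>> from astropy.table import Column
>>> c = Column(['ab', 'cd'])     # dtype holds 2-character strings
>>> c[0] = 'xyz'                 # warns and stores the truncated value 'xy'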
"""
pass
# Always emit this warning, not just the first instance
warnings.simplefilter('always', StringTruncateWarning)
def _auto_names(n_cols):
from . import conf
return [str(conf.auto_colname).format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = set(
[np.greater, np.greater_equal, np.less, np.less_equal,
np.not_equal, np.equal,
np.isfinite, np.isinf, np.isnan, np.sign, np.signbit])
def col_copy(col, copy_indices=True):
"""
Mixin-safe version of Column.copy() (with copy_data=True).
Parameters
----------
col : Column or mixin column
Input column
copy_indices : bool
Copy the column ``indices`` attribute
Returns
-------
col : Copy of input column
"""
if isinstance(col, BaseColumn):
return col.copy()
# The new column should have None for the parent_table ref. If the
# original parent_table weakref is still set at the point of copying then it
# generates an infinite recursion. Instead temporarily remove the weakref
# on the original column and restore after the copy in an exception-safe
# manner.
parent_table = col.info.parent_table
indices = col.info.indices
col.info.parent_table = None
col.info.indices = []
try:
newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col)
newcol.info = col.info
newcol.info.indices = deepcopy(indices or []) if copy_indices else []
for index in newcol.info.indices:
index.replace_col(col, newcol)
finally:
col.info.parent_table = parent_table
col.info.indices = indices
return newcol
class FalseArray(np.ndarray):
"""
Boolean mask array that is always False.
This is used to create a stub ``mask`` property which is a boolean array of
``False`` used by default for mixin columns and corresponding to the mixin
column data shape. The ``mask`` looks like a normal numpy array but an
exception will be raised if ``True`` is assigned to any element. The
consequences of the limitation are most obvious in the high-level table
operations.
Parameters
----------
shape : tuple
Data shape
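For example (a minimal sketch)::
>>> mask = FalseArray((2,))
>>> mask[0] = False     # allowed; the array stays all-False
>>> mask[1] = True      # raises ValueError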
"""
def __new__(cls, shape):
obj = np.zeros(shape, dtype=bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError('Cannot set any element of {0} class to True'
.format(self.__class__.__name__))
class ColumnInfo(BaseColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information.
"""
attrs_from_parent = BaseColumnInfo.attr_names
_supports_indexing = True
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Column instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Column (or subclass)
New instance of this class consistent with ``cols``
"""
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'unit', 'format', 'description'))
return self._parent_cls(length=length, **attrs)
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if data is None:
dtype = (np.dtype(dtype).str, shape)
self_data = np.zeros(length, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, '_name'):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
unit = unit or data.unit
if format is None:
format = data.format
if meta is None:
meta = deepcopy(data.meta)
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = np.array(data.to(unit), dtype=dtype, copy=copy)
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = deepcopy(data.info.meta)
else:
if np.dtype(dtype).char == 'S':
data = cls._encode_str(data)
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = fix_column_name(name)
self._parent_table = None
self.unit = unit
self._format = format
self.description = description
self.meta = meta
self.indices = deepcopy(getattr(data, 'indices', [])) if \
copy_indices else []
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def parent_table(self):
# Note: It seems there are some cases where _parent_table is not set,
# such as after restoring from a pickled Column. Perhaps that should be
# fixed, but this is also okay for now.
if getattr(self, '_parent_table', None) is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order='C', data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
5-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ('_name', '_unit', '_format', 'description', 'meta', 'indices')
attrs = {name: val for name, val in zip(names, state[-1])}
state = state[:-1]
# Using super().__setstate__(state) gives
# "TypeError 'int' object is not iterable", raised in
# astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__()
# Previously, it seems to have given an infinite recursion.
# Hence, manually call the right super class to actually set up
# the array object.
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state)
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
functionality but then add in a 5-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (self.name, self.unit, self.format, self.description,
self.meta, self.indices)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
def __array_finalize__(self, obj):
# Obj will be none for direct call to Column() creator
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, 'indices'): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
def __array_wrap__(self, out_arr, context=None):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
For this case, we use "[()]" to select everything, and to ensure we
convert a zero rank array to a scalar. (For some reason np.sum()
returns a zero rank scalar array while np.mean() returns a scalar,
so the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
out_arr = super().__array_wrap__(out_arr, context)
if (self.shape != out_arr.shape or
(isinstance(out_arr, BaseColumn) and
(context is not None and context[0] in _comparison_functions))):
return out_arr.data[()]
else:
return out_arr
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val):
val = fix_column_name(val)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def format(self):
"""
Format string for displaying values in this column.
"""
return self._format
@format.setter
def format(self, format_string):
prev_format = getattr(self, '_format', None)
self._format = format_string # set new format string
try:
# test whether it formats without error exemplarily
self.pformat(max_lines=1)
except Exception as err:
# revert to restore previous format if there was one
self._format = prev_format
raise ValueError(
"Invalid format for column '{0}': could not display "
"values in this column using this format ({1})".format(
self.name, err.args[0]))
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
for str_val in _pformat_col_iter(self, -1, show_name=False, show_unit=False,
show_dtype=False, outs={}):
yield str_val
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : boolean
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError('Comparison `col` must be a Column or '
'MaskedColumn object')
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,
html=False):
"""Return a list of formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
html : bool
Format the output as an HTML table. Default is False.
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
html=html)
return lines
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(self, max_lines=max_lines, show_name=show_name,
show_unit=show_unit)
@property
def unit(self):
"""
The unit associated with this column. May be a string or a
`astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict='silent')
@unit.deleter
def unit(self):
self._unit = None
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
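Examples
--------
A minimal sketch (values are illustrative)::
>>> c = Column([1000.0, 2000.0], unit='m')
>>> c.convert_unit_to('km')     # data become [1.0, 2.0] and the unit becomes 'km'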
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(
new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
"""
return groups.column_group_by(self, keys)
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``
"""
if self.parent_table:
if hasattr(self.parent_table, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self.parent_table._groups._indices)
elif hasattr(self, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(self, copy=False, dtype=self.dtype, order='A')
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : `~astropy.units.Unit` or str
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of equivalence pairs, optional
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
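Examples
--------
A minimal sketch (values are illustrative)::
>>> c = Column([1.0, 2.0], unit='m')
>>> q = c.to('cm')     # Quantity with values [100.0, 200.0] in cm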
"""
return self.quantity.to(unit, equivalencies)
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self
"""
for attr in ('name', 'unit', '_format', 'description'):
val = getattr(obj, attr, None)
setattr(self, attr, val)
self.meta = deepcopy(getattr(obj, 'meta', {}))
@staticmethod
def _encode_str(value):
"""
Encode anything that is unicode-ish as utf-8. This method is only
called for Py3+.
"""
if isinstance(value, str):
value = value.encode('utf-8')
elif isinstance(value, bytes) or value is np.ma.masked:
pass
else:
arr = np.asarray(value)
if arr.dtype.char == 'U':
arr = np.char.encode(arr, encoding='utf-8')
if isinstance(value, np.ma.MaskedArray):
arr = np.ma.array(arr, mask=value.mask, copy=False)
value = arr
return value
class Column(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError("Cannot convert a MaskedColumn with masked value to a Column")
self = super().__new__(
cls, data=data, name=name, dtype=dtype, shape=shape, length=length,
description=description, unit=unit, format=format, meta=meta,
copy=copy, copy_indices=copy_indices)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError("cannot set mask value to a column in non-masked Table")
super().__setattr__(item, value)
if item == 'unit' and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (('name', self.name),
('dtype', dtype_info_name(self.dtype)),
('shape', shape),
('unit', unit),
('format', self.format),
('description', self.description),
('length', len(self))):
if val is not None:
descr_vals.append('{0}={1!r}'.format(attr, val))
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from ..utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return '\n'.join(lines)
def __bytes__(self):
return str(self).encode('utf-8')
def _check_string_truncate(self, value):
"""
Emit a warning if any elements of ``value`` will be truncated when
``value`` is assigned to self.
"""
# Convert input ``value`` to the string dtype of this column and
# find the length of the longest string in the array.
value = np.asanyarray(value, dtype=self.dtype.type)
if value.size == 0:
return
value_str_len = np.char.str_len(value).max()
# Parse the array-protocol typestring (e.g. '|U15') of self.dtype which
# has the character repeat count on the right side.
self_str_len = dtype_bytes_or_chars(self.dtype)
if value_str_len > self_str_len:
warnings.warn('truncated right side string(s) longer than {} '
'character(s) during assignment'
.format(self_str_len),
StringTruncateWarning,
stacklevel=3)
def __setitem__(self, index, value):
if self.dtype.char == 'S':
value = self._encode_str(value)
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
def _make_compare(oper):
"""
Make comparison methods which encode the ``other`` object to utf-8
in the case of a bytestring dtype for Py3+.
"""
swapped_oper = {'__eq__': '__eq__',
'__ne__': '__ne__',
'__gt__': '__lt__',
'__lt__': '__gt__',
'__ge__': '__le__',
'__le__': '__ge__'}[oper]
def _compare(self, other):
op = oper # copy enclosed ref to allow swap below
# Special case to work around #6838. Other combinations work OK,
# see tests.test_column.test_unicode_sandwich_compare(). In this
# case just swap self and other.
#
# This is related to an issue in numpy that was addressed in np 1.13.
# However that fix does not make this problem go away, but maybe
# future numpy versions will do so. NUMPY_LT_1_13 to get the
# attention of future maintainers to check (by deleting or versioning
# the if block below). See #6899 discussion.
if (isinstance(self, MaskedColumn) and self.dtype.kind == 'U' and
isinstance(other, MaskedColumn) and other.dtype.kind == 'S'):
self, other = other, self
op = swapped_oper
if self.dtype.char == 'S':
other = self._encode_str(other)
return getattr(self.data, op)(other)
return _compare
__eq__ = _make_compare('__eq__')
__ne__ = _make_compare('__ne__')
__gt__ = _make_compare('__gt__')
__lt__ = _make_compare('__lt__')
__ge__ = _make_compare('__ge__')
__le__ = _make_compare('__le__')
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
from that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.Column`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new column is returned.
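Examples
--------
A minimal sketch (values are illustrative)::
>>> c = Column([1, 2, 3], name='a')
>>> c.insert(1, 10)     # new Column with values [1, 10, 2, 3]; ``c`` is unchanged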
"""
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=axis)
data[obj] = values
else:
# Explicitly convert to dtype of this column. Needed because numpy 1.7
# enforces safe casting by default. This isn't the case for 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
data = np.insert(self, obj, values, axis=axis)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
class MaskedColumnInfo(ColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information. In this case
it just adds the ``mask_val`` attribute.
"""
mask_val = np.ma.masked
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
"""Define a masked data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
mask : list, ndarray or None
Boolean mask for which True indicates missing or invalid data
fill_value : float, int, str or None
Value used when filling masked column elements
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A MaskedColumn is similar to a Column except that it includes ``mask`` and
``fill_value`` attributes. It can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = MaskedColumn(data=[1, 2], name='name')
col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)
The ``mask`` argument will be cast as a boolean array and specifies
which elements are considered to be missing or invalid.
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``. When ``data`` is provided then the ``shape``
and ``length`` arguments are ignored.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = MaskedColumn(name='name', length=5)
col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
info = MaskedColumnInfo()
def __new__(cls, data=None, name=None, mask=None, fill_value=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if mask is None and hasattr(data, 'mask'):
mask = data.mask
else:
mask = deepcopy(mask)
# Create self using MaskedArray as a wrapper class, following the example of
# class MSubArray in
# https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
# This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
# https://github.com/astropy/astropy/commit/ff6039e8)
# First just pass through all args and kwargs to BaseColumn, then wrap that object
# with MaskedArray.
self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name,
unit=unit, format=format, description=description,
meta=meta, copy=copy, copy_indices=copy_indices)
self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
# Note: do not set fill_value in the MaskedArray constructor because this does not
# go through the fill_value workarounds.
if fill_value is None and getattr(data, 'fill_value', None) is not None:
# Coerce the fill_value to the correct type since `data` may be a
# different dtype than self.
fill_value = self.dtype.type(data.fill_value)
self.fill_value = fill_value
self.parent_table = None
# needs to be done here since self doesn't come from BaseColumn.__new__
for index in self.indices:
index.replace_col(self_data, self)
return self
@property
def fill_value(self):
return self.get_fill_value() # defer to native ma.MaskedArray method
@fill_value.setter
def fill_value(self, val):
"""Set fill value both in the masked column view and in the parent table
if it exists. Setting one or the other alone doesn't work."""
# another ma bug workaround: If the value of fill_value for a string array is
# requested but not yet set then it gets created as 'N/A'. From this point onward
# any new fill_values are truncated to 3 characters. Note that this does not
# occur if the masked array is a structured array (as in the previous block that
# deals with the parent table).
#
# >>> x = ma.array(['xxxx'])
# >>> x.fill_value # fill_value now gets represented as an 'S3' array
# 'N/A'
# >>> x.fill_value='yyyy'
# >>> x.fill_value
# 'yyy'
#
# To handle this we are forced to reset a private variable first:
self._fill_value = None
self.set_fill_value(val) # defer to native ma.MaskedArray method
@property
def data(self):
out = self.view(ma.MaskedArray)
# The following is necessary because of a bug in Numpy, which was
# fixed in numpy/numpy#2703. The fix should be included in Numpy 1.8.0.
out.fill_value = self.fill_value
return out
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (`None` by default). If
`None`, the ``fill_value`` attribute of the array is used
instead.
Returns
-------
filled_column : Column
A copy of ``self`` with masked entries replaced by `fill_value`
(be it the function argument or the attribute of ``self``).
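Examples
--------
A minimal sketch (values are illustrative)::
>>> mc = MaskedColumn([1, 2, 3], mask=[False, True, False])
>>> mc.filled(0)     # plain Column with values [1, 0, 3]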
"""
if fill_value is None:
fill_value = self.fill_value
data = super().filled(fill_value)
# Use parent table definition of Column if available
column_cls = self.parent_table.Column if (self.parent_table is not None) else Column
out = column_cls(name=self.name, data=data, unit=self.unit,
format=self.format, description=self.description,
meta=deepcopy(self.meta))
return out
def insert(self, obj, values, mask=None, axis=0):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.table.MaskedColumn` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
from that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
mask : boolean array_like
Mask value(s) to insert. If not supplied then False is used.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.MaskedColumn`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new masked column is returned.
"""
self_ma = self.data # self viewed as MaskedArray
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
new_data = np.insert(self_ma.data, obj, None, axis=axis)
new_data[obj] = values
else:
# Explicitly convert to dtype of this column. Needed because numpy 1.7
# enforces safe casting by default. This isn't the case for 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
new_data = np.insert(self_ma.data, obj, values, axis=axis)
if mask is None:
if self.dtype.kind == 'O':
mask = False
else:
mask = np.zeros(values.shape, dtype=bool)
new_mask = np.insert(self_ma.mask, obj, mask, axis=axis)
new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
out = new_ma.view(self.__class__)
out.parent_table = None
out.indices = []
out._copy_attrs(self)
return out
def _copy_attrs_slice(self, out):
# Fixes issue #3023: when calling getitem with a MaskedArray subclass
# the original object attributes are not copied.
if out.__class__ is self.__class__:
out.parent_table = None
# we need this because __getitem__ does a shallow copy of indices
if out.indices is self.indices:
out.indices = []
out._copy_attrs(self)
return out
def __setitem__(self, index, value):
# Issue warning for string assignment that truncates ``value``
if self.dtype.char == 'S':
value = self._encode_str(value)
if issubclass(self.dtype.type, np.character):
# Account for a bug in np.ma.MaskedArray setitem.
# https://github.com/numpy/numpy/issues/8624
value = np.ma.asanyarray(value, dtype=self.dtype.type)
# Check for string truncation after filling masked items with
# empty (zero-length) string. Note that filled() does not make
# a copy if there are no masked items.
self._check_string_truncate(value.filled(''))
# update indices
self.info.adjust_indices(index, value, len(self))
# Remove this when Numpy no longer emits this warning and that
# Numpy version becomes the minimum required version for Astropy.
# https://github.com/astropy/astropy/issues/6285
if MaskedArrayFutureWarning is None:
ma.MaskedArray.__setitem__(self, index, value)
else:
with warnings.catch_warnings():
warnings.simplefilter('ignore', MaskedArrayFutureWarning)
ma.MaskedArray.__setitem__(self, index, value)
# We do this to make the methods show up in the API docs
name = BaseColumn.name
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
|
06177cfc44ad79c472030c621c8175466133bfa65b0742948e30613e5d9bd87a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import numpy as np
class MaxValue:
'''
Represents an infinite value for purposes
of tuple comparison.
'''
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __repr__(self):
return "MAX"
__le__ = __lt__
__ge__ = __gt__
__str__ = __repr__
class MinValue:
'''
The opposite of MaxValue, i.e. a representation of
negative infinity.
'''
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __repr__(self):
return "MIN"
__le__ = __lt__
__ge__ = __gt__
__str__ = __repr__
class Epsilon:
'''
Represents the "next largest" version of a given value,
so that for all valid comparisons we have
x < y < Epsilon(y) < z whenever x < y < z and x, z are
not Epsilon objects.
Parameters
----------
val : object
Original value
'''
__slots__ = ('val',)
def __init__(self, val):
self.val = val
def __lt__(self, other):
if self.val == other:
return False
return self.val < other
def __gt__(self, other):
if self.val == other:
return True
return self.val > other
def __eq__(self, other):
return False
def __repr__(self):
return repr(self.val) + " + epsilon"
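# A rough illustration of how these sentinels order against ordinary keys
# (values are illustrative): MinValue/MaxValue bracket any key, and Epsilon
# sits just above its value, which lets range searches express exclusive
# bounds on tuple keys::
#     >>> (1, MinValue()) < (1, 5) < (1, MaxValue())
#     True
#     >>> (1, 5) < (1, Epsilon(5)) < (1, 6)
#     True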
class Node:
'''
An element in a binary search tree, containing
a key, data, and references to children nodes and
a parent node.
Parameters
----------
key : tuple
Node key
data : list or int
Node data
'''
__lt__ = lambda x, y: x.key < y.key
__le__ = lambda x, y: x.key <= y.key
__eq__ = lambda x, y: x.key == y.key
__ge__ = lambda x, y: x.key >= y.key
__gt__ = lambda x, y: x.key > y.key
__ne__ = lambda x, y: x.key != y.key
__slots__ = ('key', 'data', 'left', 'right')
# each node has a key and data list
def __init__(self, key, data):
self.key = key
self.data = data if isinstance(data, list) else [data]
self.left = None
self.right = None
def replace(self, child, new_child):
'''
Replace this node's child with a new child.
'''
if self.left is not None and self.left == child:
self.left = new_child
elif self.right is not None and self.right == child:
self.right = new_child
else:
raise ValueError("Cannot call replace() on non-child")
def remove(self, child):
'''
Remove the given child.
'''
self.replace(child, None)
def set(self, other):
'''
Copy the given node.
'''
self.key = other.key
self.data = other.data[:]
def __str__(self):
return str((self.key, self.data))
def __repr__(self):
return str(self)
class BST:
'''
A basic binary search tree in pure Python, used
as an engine for indexing.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool (defaults to False)
Whether the values of the index must be unique
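Examples
--------
A minimal sketch (keys and rows are illustrative)::
>>> bst = BST([], [])                # start from an empty index
>>> for i, key in enumerate([(2,), (1,), (2,)]):
...     bst.add(key, i)
>>> bst.find((2,))                   # rows whose key is (2,)
[0, 2]
>>> bst.sorted_data()                # all rows in key order
[1, 0, 2]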
'''
NodeClass = Node
def __init__(self, data, row_index, unique=False):
self.root = None
self.size = 0
self.unique = unique
for key, row in zip(data, row_index):
self.add(tuple(key), row)
def add(self, key, data=None):
'''
Add a key, data pair.
'''
if data is None:
data = key
self.size += 1
node = self.NodeClass(key, data)
curr_node = self.root
if curr_node is None:
self.root = node
return
while True:
if node < curr_node:
if curr_node.left is None:
curr_node.left = node
break
curr_node = curr_node.left
elif node > curr_node:
if curr_node.right is None:
curr_node.right = node
break
curr_node = curr_node.right
elif self.unique:
raise ValueError("Cannot insert non-unique value")
else: # add data to node
curr_node.data.extend(node.data)
curr_node.data = sorted(curr_node.data)
return
def find(self, key):
'''
Return all data values corresponding to a given key.
Parameters
----------
key : tuple
Input key
Returns
-------
data_vals : list
List of rows corresponding to the input key
'''
node, parent = self.find_node(key)
return node.data if node is not None else []
def find_node(self, key):
'''
Find the node associated with the given key.
'''
if self.root is None:
return (None, None)
return self._find_recursive(key, self.root, None)
def shift_left(self, row):
'''
Decrement all rows larger than the given row.
'''
for node in self.traverse():
node.data = [x - 1 if x > row else x for x in node.data]
def shift_right(self, row):
'''
Increment all rows greater than or equal to the given row.
'''
for node in self.traverse():
node.data = [x + 1 if x >= row else x for x in node.data]
def _find_recursive(self, key, node, parent):
try:
if key == node.key:
return (node, parent)
elif key > node.key:
if node.right is None:
return (None, None)
return self._find_recursive(key, node.right, node)
else:
if node.left is None:
return (None, None)
return self._find_recursive(key, node.left, node)
except TypeError: # wrong key type
return (None, None)
def traverse(self, order='inorder'):
'''
Return nodes of the BST in the given order.
Parameters
----------
order : str
The order in which to recursively search the BST.
Possible values are:
"preorder": current node, left subtree, right subtree
"inorder": left subtree, current node, right subtree
"postorder": left subtree, right subtree, current node
'''
if order == 'preorder':
return self._preorder(self.root, [])
elif order == 'inorder':
return self._inorder(self.root, [])
elif order == 'postorder':
return self._postorder(self.root, [])
raise ValueError("Invalid traversal method: \"{0}\"".format(order))
def items(self):
'''
Return BST items in order as (key, data) pairs.
'''
return [(x.key, x.data) for x in self.traverse()]
def sort(self):
'''
Make row order align with key order.
'''
i = 0
for node in self.traverse():
num_rows = len(node.data)
node.data = [x for x in range(i, i + num_rows)]
i += num_rows
def sorted_data(self):
'''
Return BST rows sorted by key values.
'''
return [x for node in self.traverse() for x in node.data]
def _preorder(self, node, lst):
if node is None:
return lst
lst.append(node)
self._preorder(node.left, lst)
self._preorder(node.right, lst)
return lst
def _inorder(self, node, lst):
if node is None:
return lst
self._inorder(node.left, lst)
lst.append(node)
self._inorder(node.right, lst)
return lst
def _postorder(self, node, lst):
if node is None:
return lst
self._postorder(node.left, lst)
self._postorder(node.right, lst)
lst.append(node)
return lst
def _substitute(self, node, parent, new_node):
if node is self.root:
self.root = new_node
else:
parent.replace(node, new_node)
def remove(self, key, data=None):
'''
Remove data corresponding to the given key.
Parameters
----------
key : tuple
The key to remove
data : int or None
If None, remove the node corresponding to the given key.
If not None, remove only the given data value from the node.
Returns
-------
successful : bool
True if removal was successful, false otherwise
'''
node, parent = self.find_node(key)
if node is None:
return False
if data is not None:
if data not in node.data:
raise ValueError("Data does not belong to correct node")
elif len(node.data) > 1:
node.data.remove(data)
return True
if node.left is None and node.right is None:
self._substitute(node, parent, None)
elif node.left is None and node.right is not None:
self._substitute(node, parent, node.right)
elif node.right is None and node.left is not None:
self._substitute(node, parent, node.left)
else:
# find largest element of left subtree
curr_node = node.left
parent = node
while curr_node.right is not None:
parent = curr_node
curr_node = curr_node.right
self._substitute(curr_node, parent, curr_node.left)
node.set(curr_node)
self.size -= 1
return True
def is_valid(self):
'''
Returns whether this is a valid BST.
'''
return self._is_valid(self.root)
def _is_valid(self, node):
if node is None:
return True
return (node.left is None or node.left <= node) and \
(node.right is None or node.right >= node) and \
self._is_valid(node.left) and self._is_valid(node.right)
def range(self, lower, upper, bounds=(True, True)):
'''
Return all nodes with keys in the given range.
Parameters
----------
lower : tuple
Lower bound
upper : tuple
Upper bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
nodes = self.range_nodes(lower, upper, bounds)
return [x for node in nodes for x in node.data]
def range_nodes(self, lower, upper, bounds=(True, True)):
'''
Return nodes in the given range.
'''
if self.root is None:
return []
# op1 is <= or <, op2 is >= or >
op1 = operator.le if bounds[0] else operator.lt
op2 = operator.ge if bounds[1] else operator.gt
return self._range(lower, upper, op1, op2, self.root, [])
def same_prefix(self, val):
'''
Assuming the given value has smaller length than keys, return
nodes whose keys have this value as a prefix.
'''
if self.root is None:
return []
nodes = self._same_prefix(val, self.root, [])
return [x for node in nodes for x in node.data]
def _range(self, lower, upper, op1, op2, node, lst):
if op1(lower, node.key) and op2(upper, node.key):
lst.append(node)
if upper > node.key and node.right is not None:
self._range(lower, upper, op1, op2, node.right, lst)
if lower < node.key and node.left is not None:
self._range(lower, upper, op1, op2, node.left, lst)
return lst
def _same_prefix(self, val, node, lst):
prefix = node.key[:len(val)]
if prefix == val:
lst.append(node)
if prefix <= val and node.right is not None:
self._same_prefix(val, node.right, lst)
if prefix >= val and node.left is not None:
self._same_prefix(val, node.left, lst)
return lst
def __str__(self):
if self.root is None:
return 'Empty'
return self._print(self.root, 0)
def __repr__(self):
return str(self)
def _print(self, node, level):
line = '\t'*level + str(node) + '\n'
if node.left is not None:
line += self._print(node.left, level + 1)
if node.right is not None:
line += self._print(node.right, level + 1)
return line
@property
def height(self):
'''
Return the BST height.
'''
return self._height(self.root)
def _height(self, node):
if node is None:
return -1
return max(self._height(node.left),
self._height(node.right)) + 1
def replace_rows(self, row_map):
'''
Replace all rows with the values they map to in the
given dictionary. Any rows not present as keys in
the dictionary will have their nodes deleted.
Parameters
----------
row_map : dict
Mapping of row numbers to new row numbers
'''
for key, data in self.items():
data[:] = [row_map[x] for x in data if x in row_map]
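# Illustrative sketch: for a node holding data rows [2, 5, 7] and
# row_map = {2: 1, 7: 6}, the in-place rewrite above leaves the node with
# data [1, 6]; row 5 is dropped because it has no entry in the mapping.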
class FastBase:
'''
A fast binary search tree implementation for indexing,
using the bintrees library.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
def __init__(self, data, row_index, unique=False):
self.data = self.engine()
self.unique = unique
for key, row in zip(data, row_index):
self.add(tuple(key), row)
def add(self, key, val):
'''
Add a key, value pair.
'''
if self.unique:
if key in self.data:
# already exists
raise ValueError('Cannot add duplicate value "{0}" in a '
'unique index'.format(key))
self.data[key] = val
else:
rows = self.data.set_default(key, [])
rows.insert(np.searchsorted(rows, val), val)
def find(self, key):
'''
Find rows corresponding to the given key.
'''
rows = self.data.get(key, [])
if self.unique:
# only one row
rows = [rows]
return rows
def remove(self, key, data=None):
'''
Remove data from the given key.
'''
if self.unique:
try:
self.data.pop(key)
except KeyError:
return False
else:
node = self.data.get(key, None)
if node is None or len(node) == 0:
return False
if data is None:
self.data.pop(key)
return True
if data not in node:
if len(node) == 0:
return False
raise ValueError("Data does not belong to correct node")
node.remove(data)
return True
def shift_left(self, row):
'''
Decrement rows larger than the given row.
'''
if self.unique:
for key, x in self.data.items():
if x > row:
self.data[key] = x - 1
else:
for key, node in self.data.items():
self.data[key] = [x - 1 if x > row else x for x in node]
def shift_right(self, row):
'''
Increment rows greater than or equal to the given row.
'''
if self.unique:
for key, x in self.data.items():
if x >= row:
self.data[key] = x + 1
else:
for key, node in self.data.items():
self.data[key] = [x + 1 if x >= row else x for x in node]
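# Illustrative sketch: these two methods keep stored row numbers consistent
# when a table row is deleted or inserted. With a key mapping to rows
# [1, 2, 5], shift_left(3) gives [1, 2, 4] (rows above 3 move down by one),
# while shift_right(3) gives [1, 2, 6] (rows at or above 3 move up by one).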
def traverse(self):
'''
Return all nodes in this BST.
'''
l = []
for key, data in self.data.items():
n = Node(key, key)
n.data = data
l.append(n)
return l
def items(self):
'''
Return a list of key, data tuples.
'''
if self.unique:
return self.data.items()
return [x for x in self.data.items() if len(x[1]) > 0]
def sort(self):
'''
Make row order align with key order.
'''
if self.unique:
for i, (key, row) in enumerate(self.data.items()):
self.data[key] = i
else:
i = 0
for key, rows in self.data.items():
num_rows = len(rows)
self.data[key] = [x for x in range(i, i + num_rows)]
i += num_rows
def sorted_data(self):
'''
Return a list of rows in order sorted by key.
'''
if self.unique:
return [x for x in self.data.values()]
return [x for node in self.data.values() for x in node]
def range(self, lower, upper, bounds=(True, True)):
'''
Return row values in the given range.
'''
# we need Epsilon since bintrees searches for
# lower <= key < upper, while we might want lower <= key <= upper
# or similar
if not bounds[0]: # lower < key
lower = Epsilon(lower)
if bounds[1]: # key <= upper
upper = Epsilon(upper)
l = [v for v in self.data.value_slice(lower, upper)]
if self.unique:
return l
return [x for sublist in l for x in sublist]
def replace_rows(self, row_map):
'''
Replace rows with the values in row_map.
'''
if self.unique:
del_keys = []
for key, data in self.data.items():
if data in row_map:
self.data[key] = row_map[data]
else:
del_keys.append(key)
for key in del_keys:
self.data.pop(key)
else:
for data in self.data.values():
data[:] = [row_map[x] for x in data if x in row_map]
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self)
try:
# bintrees is an optional dependency
from bintrees import FastBinaryTree, FastRBTree
class FastBST(FastBase):
engine = FastBinaryTree
class FastRBT(FastBase):
engine = FastRBTree
except ImportError:
FastBST = BST
FastRBT = BST
|
cac02eb7154ca07bf77b9f2b6d56c01e767d639c56d787b0a69c9be3c1636c07 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
import re
import numpy as np
from .. import log
from ..utils.console import Getch, color_print, terminal_size, conf
from ..utils.data_info import dtype_info_name
__all__ = []
def default_format_func(format_, val):
if isinstance(val, bytes):
return val.decode('utf-8', errors='replace')
else:
return str(val)
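# Illustrative sketch: bytes values are decoded for display and everything
# else simply goes through str(), e.g.
#     default_format_func(None, b'abc')   # -> 'abc'
#     default_format_func(None, 1.5)      # -> '1.5'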
# The first three functions are helpers for _auto_format_func
def _use_str_for_masked_values(format_func):
"""Wrap format function to trap masked values.
String format functions and most user functions will not be able to deal
with masked values, so we wrap them to ensure they are passed to str().
"""
return lambda format_, val: (str(val) if val is np.ma.masked
else format_func(format_, val))
def _possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
"""
yield lambda format_, val: format(val, format_)
yield lambda format_, val: format_.format(val)
yield lambda format_, val: format_ % val
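# Illustrative sketch (standard library only): for a value of 3.14159 each
# candidate style below produces '3.14', and the auto-format machinery keeps
# the first candidate that succeeds for the column's format string:
#     format(3.14159, '.2f')       # plain format specifier
#     '{:.2f}'.format(3.14159)     # new-style format string
#     '%.2f' % 3.14159             # old-style format string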
def get_auto_format_func(
col=None,
possible_string_format_functions=_possible_string_format_functions):
"""
Return a wrapped ``auto_format_func`` function which is used in
formatting table columns. This is primarily an internal function but
gets used directly in other parts of astropy, e.g. `astropy.io.ascii`.
Parameters
----------
col : object, optional
Column object being formatted; its ``info`` attribute provides the
per-column cache of format functions. Default is None.
possible_string_format_functions : func, optional
Function that yields possible string formatting functions
(defaults to internal function to do this).
Returns
-------
Wrapped ``auto_format_func`` function
"""
def _auto_format_func(format_, val):
"""Format ``val`` according to ``format_`` for a plain format specifier,
old- or new-style format strings, or using a user supplied function.
More importantly, determine and cache (in _format_funcs) a function
that will do this subsequently. In this way this complicated logic is
only done for the first value.
Returns the formatted value.
"""
if format_ is None:
return default_format_func(format_, val)
if format_ in col.info._format_funcs:
return col.info._format_funcs[format_](format_, val)
if callable(format_):
format_func = lambda format_, val: format_(val)
try:
out = format_func(format_, val)
if not isinstance(out, str):
raise ValueError('Format function for value {0} returned {1} '
'instead of string type'
.format(val, type(out)))
except Exception as err:
# For a masked element, the format function call likely failed
# to handle it. Just return the string representation for now,
# and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
raise ValueError('Format function for value {0} failed: {1}'
.format(val, err))
# If the user-supplied function handles formatting masked elements, use
# it directly. Otherwise, wrap it in a function that traps them.
try:
format_func(format_, np.ma.masked)
except Exception:
format_func = _use_str_for_masked_values(format_func)
else:
# For a masked element, we cannot set string-based format functions yet,
# as all tests below will fail. Just return the string representation
# of masked for now, and retry when a non-masked value comes along.
if val is np.ma.masked:
return str(val)
for format_func in possible_string_format_functions(format_):
try:
# Does this string format method work?
out = format_func(format_, val)
# Require that the format statement actually did something.
if out == format_:
raise ValueError('the format passed in did nothing.')
except Exception:
continue
else:
break
else:
# None of the possible string functions passed muster.
raise ValueError('unable to parse format string {0} for its '
'column.'.format(format_))
# String-based format functions will fail on masked elements;
# wrap them in a function that traps them.
format_func = _use_str_for_masked_values(format_func)
col.info._format_funcs[format_] = format_func
return out
return _auto_format_func
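# Illustrative usage sketch (``col`` stands for any astropy column-like object
# carrying an ``info`` attribute with a ``_format_funcs`` dict):
#     func = get_auto_format_func(col)
#     func('{:.1f}', 2.5)   # -> '2.5'; the formatter chosen for '{:.1f}' is
#                           #    cached in col.info._format_funcs for reuse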
class TableFormatter:
@staticmethod
def _get_pprint_size(max_lines=None, max_width=None):
"""Get the output size (number of lines and character width) for Column and
Table pformat/pprint methods.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be determined
using the ``astropy.table.conf.max_lines`` configuration item. If a
negative value of ``max_lines`` is supplied then there is no line
limit applied.
The same applies for max_width except the configuration item is
``astropy.table.conf.max_width``.
Parameters
----------
max_lines : int or None
Maximum lines of output (header + data rows)
max_width : int or None
Maximum width (characters) output
Returns
-------
max_lines, max_width : int
"""
if max_lines is None:
max_lines = conf.max_lines
if max_width is None:
max_width = conf.max_width
if max_lines is None or max_width is None:
lines, width = terminal_size()
if max_lines is None:
max_lines = lines
elif max_lines < 0:
max_lines = sys.maxsize
if max_lines < 8:
max_lines = 8
if max_width is None:
max_width = width
elif max_width < 0:
max_width = sys.maxsize
if max_width < 10:
max_width = 10
return max_lines, max_width
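# Illustrative sketch: negative limits mean "unlimited" and small limits are
# clamped, so a call such as
#     TableFormatter._get_pprint_size(max_lines=-1, max_width=5)
# returns (sys.maxsize, 10); passing None for either value falls back to the
# astropy configuration and, failing that, the terminal size.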
def _pformat_col(self, col, max_lines=None, show_name=True, show_unit=None,
show_dtype=False, show_length=None, html=False, align=None):
"""Return a list of formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
html : bool
Output column as HTML
align : str
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively.
Returns
-------
lines : list
List of lines with formatted column values
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
if show_unit is None:
show_unit = col.info.unit is not None
outs = {} # Some values from _pformat_col_iter iterator that are needed here
col_strs_iter = self._pformat_col_iter(col, max_lines, show_name=show_name,
show_unit=show_unit,
show_dtype=show_dtype,
show_length=show_length,
outs=outs)
col_strs = list(col_strs_iter)
if len(col_strs) > 0:
col_width = max(len(x) for x in col_strs)
if html:
from ..utils.xml.writer import xml_escape
n_header = outs['n_header']
for i, col_str in enumerate(col_strs):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = 'th' if i < n_header else 'td'
val = '<{0}>{1}</{2}>'.format(td, xml_escape(col_str.strip()), td)
row = ('<tr>' + val + '</tr>')
if i < n_header:
row = ('<thead>' + row + '</thead>')
col_strs[i] = row
if n_header > 0:
# Get rid of '---' header line
col_strs.pop(n_header - 1)
col_strs.insert(0, '<table>')
col_strs.append('</table>')
# Now bring all the column string values to the same fixed width
else:
col_width = max(len(x) for x in col_strs) if col_strs else 1
# Center line header content and generate dashed headerline
for i in outs['i_centers']:
col_strs[i] = col_strs[i].center(col_width)
if outs['i_dashes'] is not None:
col_strs[outs['i_dashes']] = '-' * col_width
# Format columns according to alignment. The `align` arg takes precedence;
# otherwise use `col.format` if it starts with a legal alignment string. If
# neither applies then right justify.
re_fill_align = re.compile(r'(?P<fill>.?)(?P<align>[<^>=])')
match = None
if align:
# If there is an align specified then it must match
match = re_fill_align.match(align)
if not match:
raise ValueError("column align must be one of '<', '^', '>', or '='")
elif isinstance(col.info.format, str):
# col.info.format need not match, in which case rjust gets used
match = re_fill_align.match(col.info.format)
if match:
fill_char = match.group('fill')
align_char = match.group('align')
if align_char == '=':
if fill_char != '0':
raise ValueError("fill character must be '0' for '=' align")
fill_char = '' # str.zfill gets used which does not take fill char arg
else:
fill_char = ''
align_char = '>'
justify_methods = {'<': 'ljust', '^': 'center', '>': 'rjust', '=': 'zfill'}
justify_method = justify_methods[align_char]
justify_args = (col_width, fill_char) if fill_char else (col_width,)
for i, col_str in enumerate(col_strs):
col_strs[i] = getattr(col_str, justify_method)(*justify_args)
if outs['show_length']:
col_strs.append('Length = {0} rows'.format(len(col)))
return col_strs, outs
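# Illustrative sketch of the align handling above: the value is an optional
# fill character plus one of '<', '^', '>' or '=' (the '=' form requires a
# '0' fill), so a 3-character entry '7.1' padded to a column width of 5 comes
# out as
#     '7.1  '  for '<'        ' 7.1 '  for '^'
#     '  7.1'  for '>'        '007.1'  for '0='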
def _pformat_col_iter(self, col, max_lines, show_name, show_unit, outs,
show_dtype=False, show_length=None):
"""Iterator which yields formatted string representation of column values.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
outs : dict
Must be a dict which is used to pass back additional values
defined within the iterator.
show_dtype : bool
Include column dtype. Default is False.
show_length : bool
Include column length at end. Default is to show this only
if the column is not shown completely.
"""
max_lines, _ = self._get_pprint_size(max_lines, -1)
multidims = getattr(col, 'shape', [0])[1:]
if multidims:
multidim0 = tuple(0 for n in multidims)
multidim1 = tuple(n - 1 for n in multidims)
trivial_multidims = np.prod(multidims) == 1
i_dashes = None
i_centers = [] # Line indexes where content should be centered
n_header = 0
if show_name:
i_centers.append(n_header)
# Get column name (or 'None' if not set)
col_name = str(col.info.name)
if multidims:
col_name += ' [{0}]'.format(
','.join(str(n) for n in multidims))
n_header += 1
yield col_name
if show_unit:
i_centers.append(n_header)
n_header += 1
yield str(col.info.unit or '')
if show_dtype:
i_centers.append(n_header)
n_header += 1
try:
dtype = dtype_info_name(col.dtype)
except AttributeError:
dtype = 'object'
yield str(dtype)
if show_unit or show_name or show_dtype:
i_dashes = n_header
n_header += 1
yield '---'
max_lines -= n_header
n_print2 = max_lines // 2
n_rows = len(col)
# This block of code is responsible for producing the function that
# will format values for this column. The ``format_func`` function
# takes two args (col_format, val) and returns the string-formatted
# version. Some points to understand:
#
# - col_format could itself be the formatting function, so it will
# actually end up being called with itself as the first arg. In
# this case the function is expected to ignore its first arg.
#
# - auto_format_func is a function that gets called on the first
# column value that is being formatted. It then determines an
# appropriate formatting function given the actual value to be
# formatted. This might be deterministic or it might involve
# try/except. The latter allows for different string formatting
# options like %f or {:5.3f}. When auto_format_func is called it:
# 1. Caches the function in the _format_funcs dict so for subsequent
# values the right function is called right away.
# 2. Returns the formatted value.
#
# - possible_string_format_functions is a function that yields a
# succession of functions that might successfully format the
# value. There is a default, but Mixin methods can override this.
# See Quantity for an example.
#
# - get_auto_format_func() returns a wrapped version of auto_format_func
# with the column id and possible_string_format_functions as
# enclosed variables.
col_format = col.info.format or getattr(col.info, 'default_format',
None)
pssf = (getattr(col.info, 'possible_string_format_functions', None) or
_possible_string_format_functions)
auto_format_func = get_auto_format_func(col, pssf)
format_func = col.info._format_funcs.get(col_format, auto_format_func)
if len(col) > max_lines:
if show_length is None:
show_length = True
i0 = n_print2 - (1 if show_length else 0)
i1 = n_rows - n_print2 - max_lines % 2
indices = np.concatenate([np.arange(0, i0 + 1),
np.arange(i1 + 1, len(col))])
else:
i0 = -1
indices = np.arange(len(col))
def format_col_str(idx):
if multidims:
# Prevents columns like Column(data=[[(1,)],[(2,)]], name='a')
# with shape (n,1,...,1) from being printed as if there was
# more than one element in a row
if trivial_multidims:
return format_func(col_format, col[(idx,) + multidim0])
else:
left = format_func(col_format, col[(idx,) + multidim0])
right = format_func(col_format, col[(idx,) + multidim1])
return '{0} .. {1}'.format(left, right)
else:
return format_func(col_format, col[idx])
# Add formatted values if within bounds allowed by max_lines
for idx in indices:
if idx == i0:
yield '...'
else:
try:
yield format_col_str(idx)
except ValueError:
raise ValueError(
'Unable to parse format string "{0}" for entry "{1}" '
'in column "{2}"'.format(col_format, col[idx],
col.info.name))
outs['show_length'] = show_length
outs['n_header'] = n_header
outs['i_centers'] = i_centers
outs['i_dashes'] = i_dashes
def _pformat_table(self, table, max_lines=None, max_width=None,
show_name=True, show_unit=None, show_dtype=False,
html=False, tableid=None, tableclass=None, align=None):
"""Return a list of lines for the formatted string representation of
the table.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or None
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(table)
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
none
align : str or list or tuple
Left/right alignment of columns. Default is '>' (right) for all
columns. Other allowed values are '<', '^', and '0=' for left,
centered, and 0-padded, respectively. A list of strings can be
provided for alignment of tables with multiple columns.
Returns
-------
rows : list
Formatted table as a list of strings
outs : dict
Dict which is used to pass back additional values
defined within the iterator.
"""
# "Print" all the values into temporary lists by column for subsequent
# use and to determine the width
max_lines, max_width = self._get_pprint_size(max_lines, max_width)
cols = []
if show_unit is None:
show_unit = any(col.info.unit for col in table.columns.values())
# Coerce align into a correctly-sized list of alignments (if possible)
n_cols = len(table.columns)
if align is None or isinstance(align, str):
align = [align] * n_cols
elif isinstance(align, (list, tuple)):
if len(align) != n_cols:
raise ValueError('got {0} alignment values instead of '
'the number of columns ({1})'
.format(len(align), n_cols))
else:
raise TypeError('align keyword must be str or list or tuple (got {0})'
.format(type(align)))
for align_, col in zip(align, table.columns.values()):
lines, outs = self._pformat_col(col, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
align=align_)
if outs['show_length']:
lines = lines[:-1]
cols.append(lines)
if not cols:
return ['<No columns>'], {'show_length': False}
# Use the values for the last column since they are all the same
n_header = outs['n_header']
n_rows = len(cols[0])
outwidth = lambda cols: sum(len(c[0]) for c in cols) + len(cols) - 1
dots_col = ['...'] * n_rows
middle = len(cols) // 2
while outwidth(cols) > max_width:
if len(cols) == 1:
break
if len(cols) == 2:
cols[1] = dots_col
break
if cols[middle] is dots_col:
cols.pop(middle)
middle = len(cols) // 2
cols[middle] = dots_col
# Now "print" the (already-stringified) column values into a
# row-oriented list.
rows = []
if html:
from ..utils.xml.writer import xml_escape
if tableid is None:
tableid = 'table{id}'.format(id=id(table))
if tableclass is not None:
if isinstance(tableclass, list):
tableclass = ' '.join(tableclass)
rows.append('<table id="{tid}" class="{tcls}">'.format(
tid=tableid, tcls=tableclass))
else:
rows.append('<table id="{tid}">'.format(tid=tableid))
for i in range(n_rows):
# _pformat_col output has a header line '----' which is not needed here
if i == n_header - 1:
continue
td = 'th' if i < n_header else 'td'
vals = ('<{0}>{1}</{2}>'.format(td, xml_escape(col[i].strip()), td)
for col in cols)
row = ('<tr>' + ''.join(vals) + '</tr>')
if i < n_header:
row = ('<thead>' + row + '</thead>')
rows.append(row)
rows.append('</table>')
else:
for i in range(n_rows):
row = ' '.join(col[i] for col in cols)
rows.append(row)
return rows, outs
def _more_tabcol(self, tabcol, max_lines=None, max_width=None,
show_name=True, show_unit=None, show_dtype=False):
"""Interactive "more" of a table or column.
Parameters
----------
max_lines : int or None
Maximum number of rows to output
max_width : int or None
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
show_dtype : bool
Include a header row for column dtypes. Default is False.
"""
allowed_keys = 'f br<>qhpn'
# Count the header lines
n_header = 0
if show_name:
n_header += 1
if show_unit:
n_header += 1
if show_dtype:
n_header += 1
if show_name or show_unit or show_dtype:
n_header += 1
# Set up kwargs for pformat call. Only Table gets max_width.
kwargs = dict(max_lines=-1, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
if hasattr(tabcol, 'columns'): # tabcol is a table
kwargs['max_width'] = max_width
# If max_lines is None (=> query screen size) then increase by 2.
# This is because get_pprint_size leaves 6 extra lines so that in
# ipython you normally see the last input line.
max_lines1, max_width = self._get_pprint_size(max_lines, max_width)
if max_lines is None:
max_lines1 += 2
delta_lines = max_lines1 - n_header
# Set up a function to get a single character on any platform
inkey = Getch()
i0 = 0 # First table/column row to show
showlines = True
while True:
i1 = i0 + delta_lines # Last table/col row to show
if showlines: # Don't always show the table (e.g. after help)
try:
os.system('cls' if os.name == 'nt' else 'clear')
except Exception:
pass # No worries if clear screen call fails
lines = tabcol[i0:i1].pformat(**kwargs)
colors = ('red' if i < n_header else 'default'
for i in range(len(lines)))
for color, line in zip(colors, lines):
color_print(line, color)
showlines = True
print()
print("-- f, <space>, b, r, p, n, <, >, q h (help) --", end=' ')
# Get a valid key
while True:
try:
key = inkey().lower()
except Exception:
print("\n")
log.error('Console does not support getting a character'
' as required by more(). Use pprint() instead.')
return
if key in allowed_keys:
break
print(key)
if key.lower() == 'q':
break
elif key == ' ' or key == 'f':
i0 += delta_lines
elif key == 'b':
i0 = i0 - delta_lines
elif key == 'r':
pass
elif key == '<':
i0 = 0
elif key == '>':
i0 = len(tabcol)
elif key == 'p':
i0 -= 1
elif key == 'n':
i0 += 1
elif key == 'h':
showlines = False
print("""
Browsing keys:
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help""", end=' ')
if i0 < 0:
i0 = 0
if i0 >= len(tabcol) - delta_lines:
i0 = len(tabcol) - delta_lines
print("\n")
|
dccdaf50f189b8ef4960a13da0c420ecb823c9ade93b979113304d5e81f650d0 | """
High-level table operations:
- join()
- setdiff()
- hstack()
- vstack()
- unique()
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import warnings
import collections
import itertools
from collections import OrderedDict, Counter
import numpy as np
from numpy import ma
from ..utils import metadata
from .column import Column
from . import _np_utils
from .np_utils import fix_column_name, TableMergeError
__all__ = ['join', 'setdiff', 'hstack', 'vstack', 'unique']
def _merge_table_meta(out, tables, metadata_conflicts='warn'):
out_meta = deepcopy(tables[0].meta)
for table in tables[1:]:
out_meta = metadata.merge(out_meta, table.meta, metadata_conflicts=metadata_conflicts)
out.meta.update(out_meta)
def _get_list_of_tables(tables):
"""
Check that ``tables`` is a Table or Row, or a sequence of Tables and Rows.
Returns the corresponding list of Tables, with any Rows converted to
one-row Tables.
"""
from .table import Table, Row
# Make sure we have a list of things
if not isinstance(tables, collections.Sequence):
tables = [tables]
# Make sure each thing is a Table or Row
if any(not isinstance(x, (Table, Row)) for x in tables) or len(tables) == 0:
raise TypeError('`tables` arg must be a Table or sequence of Tables or Rows')
# Convert any Rows to Tables
tables = [(x if isinstance(x, Table) else Table(x)) for x in tables]
return tables
def _get_out_class(objs):
"""
From a list of input objects ``objs`` get merged output object class.
This is just taken as the deepest subclass. This doesn't handle complicated
inheritance schemes.
"""
out_class = objs[0].__class__
for obj in objs[1:]:
if issubclass(obj.__class__, out_class):
out_class = obj.__class__
if any(not issubclass(out_class, obj.__class__) for obj in objs):
raise ValueError('unmergeable object classes {}'
.format([obj.__class__.__name__ for obj in objs]))
return out_class
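# Illustrative sketch: for inputs whose classes form a chain the deepest
# subclass wins, e.g. with astropy's MaskedColumn (a subclass of Column)
#     _get_out_class([Column([1]), MaskedColumn([2])])   # -> MaskedColumn
# while two classes with no subclass relationship raise ValueError.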
def join(left, right, keys=None, join_type='inner',
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'], metadata_conflicts='warn'):
"""
Perform a join of the left table with the right table on specified keys.
Parameters
----------
left : Table object or a value that will initialize a Table object
Left side table in the join
right : Table object or a value that will initialize a Table object
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right'), default is 'inner'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
from .table import Table
# Try converting inputs to Table as needed
if not isinstance(left, Table):
left = Table(left)
if not isinstance(right, Table):
right = Table(right)
col_name_map = OrderedDict()
out = _join(left, right, keys, join_type,
uniq_col_name, table_names, col_name_map, metadata_conflicts)
# Merge the column and table meta data. Table subclasses might override
# these methods for custom merge behavior.
_merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts)
return out
def setdiff(table1, table2, keys=None):
"""
Take a set difference of table rows.
The row set difference will contain all rows in ``table1`` that are not
present in ``table2``. If the keys parameter is not defined, all columns in
``table1`` will be included in the output table.
Parameters
----------
table1 : `~astropy.table.Table`
``table1`` is on the left side of the set difference.
table2 : `~astropy.table.Table`
``table2`` is on the right side of the set difference.
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns in ``table1``.
Returns
-------
diff_table : `~astropy.table.Table`
New table containing the set difference between tables. If the set
difference is none, an empty table will be returned.
Examples
--------
To get a set difference between two tables::
>>> from astropy.table import setdiff, Table
>>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b'))
>>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 c
4 d
9 f
>>> print(t2)
a b
--- ---
1 c
5 b
9 f
>>> print(setdiff(t1, t2))
a b
--- ---
4 d
>>> print(setdiff(t2, t1))
a b
--- ---
5 b
"""
if keys is None:
keys = table1.colnames
# Check that all keys are in table1 and table2
for tbl, tbl_str in ((table1,'table1'), (table2,'table2')):
diff_keys = np.setdiff1d(keys, tbl.colnames)
if len(diff_keys) != 0:
raise ValueError("The {} columns are missing from {}, cannot take "
"a set difference.".format(diff_keys, tbl_str))
# Make a light internal copy of both tables
t1 = table1.copy(copy_data=False)
t1.meta = {}
t1.keep_columns(keys)
t1['__index1__'] = np.arange(len(table1)) # Keep track of rows indices
# Make a light internal copy to avoid touching table2
t2 = table2.copy(copy_data=False)
t2.meta = {}
t2.keep_columns(keys)
# Dummy column to recover rows after join
t2['__index2__'] = np.zeros(len(t2), dtype=np.uint8) # dummy column
t12 = _join(t1, t2, join_type='left', keys=keys,
metadata_conflicts='silent')
# If t12 is masked then that means some rows were in table1 but not table2.
if t12.masked:
# Define bool mask of table1 rows not in table2
diff = t12['__index2__'].mask
# Get the row indices of table1 for those rows
idx = t12['__index1__'][diff]
# Select corresponding table1 rows straight from table1 to ensure
# correct table and column types.
t12_diff = table1[idx]
else:
t12_diff = table1[[]]
return t12_diff
def vstack(tables, join_type='outer', metadata_conflicts='warn'):
"""
Stack tables vertically (along rows)
A ``join_type`` of 'exact' means that the tables must all have exactly
the same column names (though the order can vary). If ``join_type``
is 'inner' then the intersection of common columns will be the output.
A value of 'outer' (default) means the output will have the union of
all columns, with table values being masked where no common values are
available.
Parameters
----------
tables : Table or list of Table objects
Table(s) to stack along rows (vertically) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables along rows do::
>>> from astropy.table import vstack, Table
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
a b
--- ---
5 7
6 8
>>> print(vstack([t1, t2]))
a b
--- ---
1 3
2 4
5 7
6 8
"""
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _vstack(tables, join_type, col_name_map, metadata_conflicts)
# Merge table metadata
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def hstack(tables, join_type='outer',
uniq_col_name='{col_name}_{table_name}', table_names=None,
metadata_conflicts='warn'):
"""
Stack tables along columns (horizontally)
A ``join_type`` of 'exact' means that the tables must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' (default)
means the output will have the union of all rows, with table values being
masked where no common values are available.
Parameters
----------
tables : List of Table objects
Tables to stack along columns (horizontally) with the current table
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
metadata_conflicts : str
How to proceed with metadata conflicts. This should be one of:
* ``'silent'``: silently pick the last conflicting meta-data value
* ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default)
* ``'error'``: raise an exception.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
Examples
--------
To stack two tables horizontally (along columns) do::
>>> from astropy.table import Table, hstack
>>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b'))
>>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd'))
>>> print(t1)
a b
--- ---
1 3
2 4
>>> print(t2)
c d
--- ---
5 7
6 8
>>> print(hstack([t1, t2]))
a b c d
--- --- --- ---
1 3 5 7
2 4 6 8
"""
tables = _get_list_of_tables(tables) # validates input
if len(tables) == 1:
return tables[0] # no point in stacking a single table
col_name_map = OrderedDict()
out = _hstack(tables, join_type, uniq_col_name, table_names,
col_name_map)
_merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts)
return out
def unique(input_table, keys=None, silent=False, keep='first'):
"""
Returns the unique rows of a table.
Parameters
----------
input_table : `~astropy.table.Table` object or a value that
will initialize a `~astropy.table.Table` object
keys : str or list of str
Name(s) of column(s) used to create unique rows.
Default is to use all columns.
keep : one of 'first', 'last' or 'none'
Whether to keep the first or last row for each set of
duplicates. If 'none', all rows that are duplicate are
removed, leaving only rows that are already unique in
the input.
Default is 'first'.
silent : boolean
If `True`, masked value column(s) are silently removed from
``keys``. If `False`, an exception is raised when ``keys``
contains masked value column(s).
Default is `False`.
Returns
-------
unique_table : `~astropy.table.Table` object
New table containing only the unique rows of ``input_table``.
Examples
--------
>>> from astropy.table import unique, Table
>>> import numpy as np
>>> table = Table(data=[[1,2,3,2,3,3],
... [2,3,4,5,4,6],
... [3,4,5,6,7,8]],
... names=['col1', 'col2', 'col3'],
... dtype=[np.int32, np.int32, np.int32])
>>> table
<Table length=6>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
2 5 6
3 4 7
3 6 8
>>> unique(table, keys='col1')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
3 4 5
>>> unique(table, keys=['col1'], keep='last')
<Table length=3>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 5 6
3 6 8
>>> unique(table, keys=['col1', 'col2'])
<Table length=5>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 4 5
3 6 8
>>> unique(table, keys=['col1', 'col2'], keep='none')
<Table length=4>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
2 3 4
2 5 6
3 6 8
>>> unique(table, keys=['col1'], keep='none')
<Table length=1>
col1 col2 col3
int32 int32 int32
----- ----- -----
1 2 3
"""
if keep not in ('first', 'last', 'none'):
raise ValueError("'keep' should be one of 'first', 'last', 'none'")
if isinstance(keys, str):
keys = [keys]
if keys is None:
keys = input_table.colnames
else:
if len(set(keys)) != len(keys):
raise ValueError("duplicate key names")
if input_table.masked:
nkeys = 0
for key in keys[:]:
if np.any(input_table[key].mask):
if not silent:
raise ValueError(
"cannot use columns with masked values as keys; "
"remove column '{0}' from keys and rerun "
"unique()".format(key))
del keys[keys.index(key)]
if len(keys) == 0:
raise ValueError("no column remained in ``keys``; "
"unique() cannot work with masked value "
"key columns")
grouped_table = input_table.group_by(keys)
indices = grouped_table.groups.indices
if keep == 'first':
indices = indices[:-1]
elif keep == 'last':
indices = indices[1:] - 1
else:
indices = indices[:-1][np.diff(indices) == 1]
return grouped_table[indices]
def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
table_names=None):
"""
Find the column names mapping when merging the list of tables
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names
will be present, while for the other non-key columns the value will be (col_name_0,
None, ..) or (None, col_name_1, ..) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.colnames:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.colnames for other in others):
out_name = uniq_col_name.format(table_name=table_name, col_name=name)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError('Merging column names resulted in duplicates: {0}. '
'Change uniq_col_name or table_names args to fix this.'
.format(repeated_names))
# Convert col_name_map to a regular dict with tuple (immutable) values
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
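# Illustrative sketch: merging tables with columns ('a', 'b') and ('a', 'c')
# on the common key 'a' produces a mapping of the form
#     {'a': ('a', 'a'), 'b': ('b', None), 'c': (None, 'c')}
# whereas a non-key column named 'b' in *both* tables would instead be
# renamed via uniq_col_name, e.g. to 'b_1' and 'b_2' in the output.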
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the superset of all dtypes in in_arrays
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{0}' columns have incompatible types: {1}"
.format(names[0], tme._incompat_types))
# Make sure all input shapes are the same
uniq_shapes = set(col.shape[1:] for col in in_cols)
if len(uniq_shapes) != 1:
raise TableMergeError('Key columns {0!r} have different shape'.format(names))
shape = uniq_shapes.pop()
out_descrs.append((fix_column_name(out_name), dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
try:
return metadata.common_dtype(cols)
except metadata.MergeConflictError as err:
tme = TableMergeError('Columns have incompatible types {0}'
.format(err._incompat_types))
tme._incompat_types = err._incompat_types
raise tme
def _join(left, right, keys=None, join_type='inner',
uniq_col_name='{col_name}_{table_name}',
table_names=['1', '2'],
col_name_map=None, metadata_conflicts='warn'):
"""
Perform a join of the left and right Tables on specified keys.
Parameters
----------
left : Table
Left side table in the join
right : Table
Right side table in the join
keys : str or list of str
Name(s) of column(s) used to match rows of left and right tables.
Default is to use all columns which are common to both tables.
join_type : str
Join type ('inner' | 'outer' | 'left' | 'right'), default is 'inner'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2'].
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
Returns
-------
joined_table : `~astropy.table.Table` object
New table containing the result of the join operation.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
if join_type not in ('inner', 'outer', 'left', 'right'):
raise ValueError("The 'join_type' argument should be in 'inner', "
"'outer', 'left' or 'right' (got '{0}' instead)".
format(join_type))
# If we have a single key, put it in a tuple
if keys is None:
keys = tuple(name for name in left.colnames if name in right.colnames)
if len(keys) == 0:
raise TableMergeError('No keys in common between left and right tables')
elif isinstance(keys, str):
keys = (keys,)
# Check the key columns
for arr, arr_label in ((left, 'Left'), (right, 'Right')):
for name in keys:
if name not in arr.colnames:
raise TableMergeError('{0} table does not have key column {1!r}'
.format(arr_label, name))
if hasattr(arr[name], 'mask') and np.any(arr[name].mask):
raise TableMergeError('{0} key column {1!r} has missing values'
.format(arr_label, name))
if not isinstance(arr[name], np.ndarray):
raise ValueError("non-ndarray column '{}' not allowed as a key column"
.format(name))
len_left, len_right = len(left), len(right)
if len_left == 0 or len_right == 0:
raise ValueError('input tables for join must both have at least one row')
# Joined array dtype as a list of descr (name, type_str, shape) tuples
col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names)
out_descrs = get_descrs([left, right], col_name_map)
# Make an array with just the key columns. This uses a temporary
# structured array for efficiency.
out_keys_dtype = [descr for descr in out_descrs if descr[0] in keys]
out_keys = np.empty(len_left + len_right, dtype=out_keys_dtype)
for key in keys:
out_keys[key][:len_left] = left[key]
out_keys[key][len_left:] = right[key]
idx_sort = out_keys.argsort(order=keys)
out_keys = out_keys[idx_sort]
# Get all keys
diffs = np.concatenate(([True], out_keys[1:] != out_keys[:-1], [True]))
idxs = np.flatnonzero(diffs)
# Main inner loop in Cython to compute the cartesian product
# indices for the given join type
int_join_type = {'inner': 0, 'outer': 1, 'left': 2, 'right': 3}[join_type]
masked, n_out, left_out, left_mask, right_out, right_mask = \
_np_utils.join_inner(idxs, idx_sort, len_left, int_join_type)
# If either of the inputs are masked then the output is masked
if left.masked or right.masked:
masked = True
masked = bool(masked)
out = _get_out_class([left, right])(masked=masked)
for out_name, dtype, shape in out_descrs:
left_name, right_name = col_name_map[out_name]
if left_name and right_name: # this is a key which comes from left and right
cols = [left[left_name], right[right_name]]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('join unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
out[out_name] = col_cls.info.new_like(cols, n_out, metadata_conflicts, out_name)
if issubclass(col_cls, Column):
out[out_name][:] = np.where(right_mask,
left[left_name].take(left_out),
right[right_name].take(right_out))
else:
# np.where does not work for mixin columns (e.g. Quantity) so
# use a slower workaround.
left_mask = ~right_mask
if np.any(left_mask):
out[out_name][left_mask] = left[left_name].take(left_out)
if np.any(right_mask):
out[out_name][right_mask] = right[right_name].take(right_out)
continue
elif left_name: # out_name came from the left table
name, array, array_out, array_mask = left_name, left, left_out, left_mask
elif right_name:
name, array, array_out, array_mask = right_name, right, right_out, right_mask
else:
raise TableMergeError('Unexpected column names (maybe one is ""?)')
# Finally add the joined column to the output table.
out[out_name] = array[name][array_out]
# If the output table is masked then set the output column masking
# accordingly. Check for columns that don't support a mask attribute.
if masked and np.any(array_mask):
# array_mask is 1-d corresponding to length of output column. We need
# make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..).
# Mixin columns might not have ndim attribute so use len(col.shape).
array_mask.shape = (out[out_name].shape[0],) + (1,) * (len(out[out_name].shape) - 1)
# Now broadcast to the correct final shape
array_mask = np.broadcast_to(array_mask, out[out_name].shape)
if array.masked:
array_mask = array_mask | array[name].mask[array_out]
try:
out[out_name][array_mask] = out[out_name].info.mask_val
except Exception: # Not clear how different classes will fail here
raise NotImplementedError(
"join requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, out[out_name].__class__.__name__))
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, collections.Mapping):
_col_name_map.update(col_name_map)
return out
def _vstack(arrays, join_type='outer', col_name_map=None, metadata_conflicts='warn'):
"""
Stack Tables vertically (by rows)
A ``join_type`` of 'exact' means that the arrays must all
have exactly the same column names (though the order can vary). If
``join_type`` is 'inner' then the intersection of common columns will
be the output. A value of 'outer' means the output will have the union of
all columns, with array values being masked where no common values are
available.
Parameters
----------
arrays : list of Tables
Tables to stack by rows (vertically)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
col_name_map : empty dict or None
If passed as a dict then it will be updated in-place with the
mapping of output to input column names.
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Input validation
if join_type not in ('inner', 'exact', 'outer'):
raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'")
# Trivial case of one input array
if len(arrays) == 1:
return arrays[0]
# Start by assuming an outer match where all names go to output
names = set(itertools.chain(*[arr.colnames for arr in arrays]))
col_name_map = get_col_name_map(arrays, names)
# For an 'exact' join every input array must contribute to every output
# column, i.e. all arrays must have the same set of column names
if join_type == 'exact':
for names in col_name_map.values():
if any(x is None for x in names):
raise TableMergeError('Inconsistent columns in input arrays '
"(use 'inner' or 'outer' join_type to "
"allow non-matching columns)")
join_type = 'outer'
# For an inner join, keep only columns where all input arrays have that column
if join_type == 'inner':
col_name_map = OrderedDict((name, in_names) for name, in_names in col_name_map.items()
if all(x is not None for x in in_names))
if len(col_name_map) == 0:
raise TableMergeError('Input arrays have no columns in common')
# If there are any output columns where one or more input arrays are missing
# then the output must be masked. If any input arrays are masked then
# output is masked.
masked = any(getattr(arr, 'masked', False) for arr in arrays)
for names in col_name_map.values():
if any(x is None for x in names):
masked = True
break
lens = [len(arr) for arr in arrays]
n_rows = sum(lens)
out = _get_out_class(arrays)(masked=masked)
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
col_cls = _get_out_class(cols)
if not hasattr(col_cls.info, 'new_like'):
raise NotImplementedError('vstack unavailable for mixin column type(s): {}'
.format(col_cls.__name__))
try:
out[out_name] = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name)
except metadata.MergeConflictError as err:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{0}' columns have incompatible types: {1}"
.format(out_name, err._incompat_types))
idx0 = 0
for name, array in zip(in_names, arrays):
idx1 = idx0 + len(array)
if name in array.colnames:
out[out_name][idx0:idx1] = array[name]
else:
try:
out[out_name][idx0:idx1] = out[out_name].info.mask_val
except Exception:
raise NotImplementedError(
"vstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, out[out_name].__class__.__name__))
idx0 = idx1
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, collections.Mapping):
_col_name_map.update(col_name_map)
return out
def _hstack(arrays, join_type='outer', uniq_col_name='{col_name}_{table_name}',
table_names=None, col_name_map=None):
"""
Stack tables horizontally (by columns)
A ``join_type`` of 'exact' means that the arrays must all
have exactly the same number of rows. If ``join_type`` is 'inner' then
the intersection of rows will be the output. A value of 'outer' means
the output will have the union of all rows, with array values being
masked where no common values are available.
Parameters
----------
arrays : List of tables
Tables to stack by columns (horizontally)
join_type : str
Join type ('inner' | 'exact' | 'outer'), default is 'outer'
uniq_col_name : str or None
String used to generate a unique output column name in case of a conflict.
The default is '{col_name}_{table_name}'.
table_names : list of str or None
Two-element list of table names used when generating unique output
column names. The default is ['1', '2', ..].
Returns
-------
stacked_table : `~astropy.table.Table` object
New table containing the stacked data from the input tables.
"""
# Store user-provided col_name_map until the end
_col_name_map = col_name_map
# Input validation
if join_type not in ('inner', 'exact', 'outer'):
raise ValueError("join_type arg must be either 'inner', 'exact' or 'outer'")
if table_names is None:
table_names = ['{0}'.format(ii + 1) for ii in range(len(arrays))]
if len(arrays) != len(table_names):
raise ValueError('Number of arrays must match number of table_names')
# Trivial case of one input arrays
if len(arrays) == 1:
return arrays[0]
col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)
# For an 'exact' join all input arrays must have the same length
arr_lens = [len(arr) for arr in arrays]
if join_type == 'exact':
if len(set(arr_lens)) > 1:
raise TableMergeError("Inconsistent number of rows in input arrays "
"(use 'inner' or 'outer' join_type to allow "
"non-matching rows)")
join_type = 'outer'
# For an inner join, keep only the common rows
if join_type == 'inner':
min_arr_len = min(arr_lens)
if len(set(arr_lens)) > 1:
arrays = [arr[:min_arr_len] for arr in arrays]
arr_lens = [min_arr_len for arr in arrays]
# If there are any output rows where one or more input arrays are missing
# then the output must be masked. If any input arrays are masked then
# output is masked.
masked = any(getattr(arr, 'masked', False) for arr in arrays) or len(set(arr_lens)) > 1
n_rows = max(arr_lens)
out = _get_out_class(arrays)(masked=masked)
for out_name, in_names in col_name_map.items():
for name, array, arr_len in zip(in_names, arrays, arr_lens):
if name is None:
continue
if n_rows > arr_len:
indices = np.arange(n_rows)
indices[arr_len:] = 0
out[out_name] = array[name][indices]
try:
out[out_name][arr_len:] = out[out_name].info.mask_val
except Exception:
raise NotImplementedError(
"hstack requires masking column '{}' but column"
" type {} does not support masking"
.format(out_name, out[out_name].__class__.__name__))
else:
out[out_name] = array[name][:n_rows]
# If col_name_map supplied as a dict input, then update.
if isinstance(_col_name_map, collections.Mapping):
_col_name_map.update(col_name_map)
return out
|
14cec9473b334dbbe97477d84e9f8d006f90c143452e8b763377da53654d734e | """
High-level operations for numpy structured arrays.
Some code and inspiration taken from numpy.lib.recfunctions.join_by().
Redistribution license restrictions apply.
"""
from itertools import chain
import collections
from collections import OrderedDict, Counter
import numpy as np
import numpy.ma as ma
from . import _np_utils
__all__ = ['TableMergeError']
class TableMergeError(ValueError):
pass
def get_col_name_map(arrays, common_names, uniq_col_name='{col_name}_{table_name}',
table_names=None):
"""
Find the column names mapping when merging the list of structured ndarrays
``arrays``. It is assumed that col names in ``common_names`` are to be
merged into a single column while the rest will be uniquely represented
in the output. The args ``uniq_col_name`` and ``table_names`` specify
how to rename columns in case of conflicts.
Returns a dict mapping each output column name to the input(s). This takes the form
{outname : (col_name_0, col_name_1, ...), ... }. For key columns all of input names
will be present, while for the other non-key columns the value will be (col_name_0,
None, ..) or (None, col_name_1, ..) etc.
"""
col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
col_name_list = []
if table_names is None:
table_names = [str(ii + 1) for ii in range(len(arrays))]
for idx, array in enumerate(arrays):
table_name = table_names[idx]
for name in array.dtype.names:
out_name = name
if name in common_names:
# If name is in the list of common_names then insert into
# the column name list, but just once.
if name not in col_name_list:
col_name_list.append(name)
else:
# If name is not one of the common column outputs, and it collides
# with the names in one of the other arrays, then rename
others = list(arrays)
others.pop(idx)
if any(name in other.dtype.names for other in others):
out_name = uniq_col_name.format(table_name=table_name, col_name=name)
col_name_list.append(out_name)
col_name_map[out_name][idx] = name
# Check for duplicate output column names
col_name_count = Counter(col_name_list)
repeated_names = [name for name, count in col_name_count.items() if count > 1]
if repeated_names:
raise TableMergeError('Merging column names resulted in duplicates: {0}. '
'Change uniq_col_name or table_names args to fix this.'
.format(repeated_names))
# Convert col_name_map to a regular dict with tuple (immutable) values
col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)
return col_name_map
def get_descrs(arrays, col_name_map):
"""
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.
Return a list of descrs for the output.
"""
out_descrs = []
for out_name, in_names in col_name_map.items():
# List of input arrays that contribute to this output column
in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None]
# List of names of the columns that contribute to this output column.
names = [name for name in in_names if name is not None]
# Output dtype is the superset of all dtypes in in_arrays
try:
dtype = common_dtype(in_cols)
except TableMergeError as tme:
# Beautify the error message when we are trying to merge columns with incompatible
# types by including the name of the columns that originated the error.
raise TableMergeError("The '{0}' columns have incompatible types: {1}"
.format(names[0], tme._incompat_types))
# Make sure all input shapes are the same
uniq_shapes = set(col.shape[1:] for col in in_cols)
if len(uniq_shapes) != 1:
            raise TableMergeError('Key columns {0!r} have different shape'.format(names))
shape = uniq_shapes.pop()
out_descrs.append((fix_column_name(out_name), dtype, shape))
return out_descrs
def common_dtype(cols):
"""
Use numpy to find the common dtype for a list of structured ndarray columns.
Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void
"""
np_types = (np.bool_, np.object_, np.number, np.character, np.void)
uniq_types = set(tuple(issubclass(col.dtype.type, np_type) for np_type in np_types)
for col in cols)
if len(uniq_types) > 1:
# Embed into the exception the actual list of incompatible types.
incompat_types = [col.dtype.name for col in cols]
tme = TableMergeError('Columns have incompatible types {0}'
.format(incompat_types))
tme._incompat_types = incompat_types
raise tme
arrs = [np.empty(1, dtype=col.dtype) for col in cols]
# For string-type arrays need to explicitly fill in non-zero
# values or the final arr_common = .. step is unpredictable.
for arr in arrs:
if arr.dtype.kind in ('S', 'U'):
arr[0] = '0' * arr.itemsize
arr_common = np.array([arr[0] for arr in arrs])
return arr_common.dtype.str
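# Illustrative sketch (editor's example, not part of the original module):
# common_dtype promotes compatible numeric columns to a single dtype string
# (e.g. int32 + float64 -> '<f8' on little-endian platforms) and raises
# TableMergeError for mixes of fundamentally different kinds such as
# float64 + unicode string.
#
#   >>> import numpy as np
#   >>> common_dtype([np.array([1, 2], dtype='i4'), np.array([1.5])])  # doctest: +SKIP
#   '<f8'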
def _check_for_sequence_of_structured_arrays(arrays):
err = '`arrays` arg must be a sequence (e.g. list) of structured arrays'
    if not isinstance(arrays, Sequence):
raise TypeError(err)
for array in arrays:
# Must be structured array
if not isinstance(array, np.ndarray) or array.dtype.names is None:
raise TypeError(err)
if len(arrays) == 0:
raise ValueError('`arrays` arg must include at least one array')
def fix_column_name(val):
"""
Fixes column names so that they are compatible with Numpy on
Python 2. Raises a ValueError exception if the column name
contains Unicode characters, which can not reasonably be used as a
column name.
"""
if val is not None:
try:
val = str(val)
except UnicodeEncodeError:
raise
return val
def recarray_fromrecords(rec_list):
"""
Partial replacement for `~numpy.core.records.fromrecords` which includes
a workaround for the bug with unicode arrays described at:
https://github.com/astropy/astropy/issues/3052
This should not serve as a full replacement for the original function;
this only does enough to fulfill the needs of the table module.
"""
# Note: This is just copying what Numpy does for converting arbitrary rows
# to column arrays in the recarray module; it could be there is a better
# way
nfields = len(rec_list[0])
obj = np.array(rec_list, dtype=object)
array_list = [np.array(obj[..., i].tolist()) for i in range(nfields)]
formats = []
for obj in array_list:
formats.append(obj.dtype.str)
formats = ','.join(formats)
return np.rec.fromarrays(array_list, formats=formats)
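# Illustrative usage sketch (editor's example, not part of the original module):
# build a record array from heterogeneous row tuples; the fields get the default
# names 'f0', 'f1', ... and platform-dependent dtypes.
#
#   >>> rec = recarray_fromrecords([(1, 'a', 2.5), (2, 'b', 3.5)])  # doctest: +SKIP
#   >>> rec.dtype.names
#   ('f0', 'f1', 'f2')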
|
9d7f2000bc50b5a112b275f8f0e598aae9d529ea2f1b078fdf3ae9b6b362781c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from distutils.extension import Extension
ROOT = os.path.relpath(os.path.dirname(__file__))
def get_extensions():
sources = ["_np_utils.pyx", "_column_mixins.pyx"]
include_dirs = ['numpy']
exts = [
Extension(name='astropy.table.' + os.path.splitext(source)[0],
sources=[os.path.join(ROOT, source)],
include_dirs=include_dirs)
for source in sources
]
return exts
|
e1bf51dd09c6d4d854b2d8ecb7f222e84d36584651750d4e9bfebe37698cf5bb | import textwrap
import copy
from collections import OrderedDict
__all__ = ['get_header_from_yaml', 'get_yaml_from_header', 'get_yaml_from_table']
class ColumnOrderList(list):
"""
List of tuples that sorts in a specific order that makes sense for
astropy table column attributes.
"""
def sort(self, *args, **kwargs):
super().sort()
column_keys = ['name', 'unit', 'datatype', 'format', 'description', 'meta']
in_dict = dict(self)
out_list = []
for key in column_keys:
if key in in_dict:
out_list.append((key, in_dict[key]))
for key, val in self:
if key not in column_keys:
out_list.append((key, val))
# Clear list in-place
del self[:]
self.extend(out_list)
class ColumnDict(dict):
"""
Specialized dict subclass to represent attributes of a Column
and return items() in a preferred order. This is only for use
in generating a YAML map representation that has a fixed order.
"""
def items(self):
"""
Return items as a ColumnOrderList, which sorts in the preferred
way for column attributes.
"""
return ColumnOrderList(super().items())
def _construct_odict(load, node):
"""
Construct OrderedDict from !!omap in yaml safe load.
Source: https://gist.github.com/weaver/317164
License: Unspecified
This is the same as SafeConstructor.construct_yaml_omap(),
except the data type is changed to OrderedDict() and setitem is
used instead of append in the loop
Examples
--------
::
>>> yaml.load(''' # doctest: +SKIP
... !!omap
... - foo: bar
... - mumble: quux
... - baz: gorp
... ''')
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''') # doctest: +SKIP
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
"""
import yaml
omap = OrderedDict()
yield omap
if not isinstance(node, yaml.SequenceNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
"expected a sequence, but found {}".format(node.id), node.start_mark)
for subnode in node.value:
if not isinstance(subnode, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
"expected a mapping of length 1, but found {}".format(subnode.id),
subnode.start_mark)
if len(subnode.value) != 1:
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
"expected a single mapping item, but found {} items".format(len(subnode.value)),
subnode.start_mark)
key_node, value_node = subnode.value[0]
key = load.construct_object(key_node)
value = load.construct_object(value_node)
omap[key] = value
def _repr_pairs(dump, tag, sequence, flow_style=None):
"""
This is the same code as BaseRepresenter.represent_sequence(),
but the value passed to dump.represent_data() in the loop is a
dictionary instead of a tuple.
Source: https://gist.github.com/weaver/317164
License: Unspecified
"""
import yaml
value = []
node = yaml.SequenceNode(tag, value, flow_style=flow_style)
if dump.alias_key is not None:
dump.represented_objects[dump.alias_key] = node
best_style = True
for (key, val) in sequence:
item = dump.represent_data({key: val})
if not (isinstance(item, yaml.ScalarNode) and not item.style):
best_style = False
value.append(item)
if flow_style is None:
if dump.default_flow_style is not None:
node.flow_style = dump.default_flow_style
else:
node.flow_style = best_style
return node
def _repr_odict(dumper, data):
"""
Represent OrderedDict in yaml dump.
Source: https://gist.github.com/weaver/317164
License: Unspecified
>>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP
'!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n'
>>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP
'!!omap [foo: bar, mumble: quux, baz: gorp]\\n'
"""
return _repr_pairs(dumper, u'tag:yaml.org,2002:omap', data.items())
def _repr_column_dict(dumper, data):
"""
Represent ColumnDict in yaml dump.
This is the same as an ordinary mapping except that the keys
are written in a fixed order that makes sense for astropy table
columns.
"""
return dumper.represent_mapping(u'tag:yaml.org,2002:map', data)
def _get_col_attributes(col):
"""
Extract information from a column (apart from the values) that is required
to fully serialize the column.
"""
attrs = ColumnDict()
attrs['name'] = col.info.name
type_name = col.info.dtype.type.__name__
if type_name.startswith(('bytes', 'str')):
type_name = 'string'
if type_name.endswith('_'):
type_name = type_name[:-1] # string_ and bool_ lose the final _ for ECSV
attrs['datatype'] = type_name
# Set the output attributes
for attr, nontrivial, xform in (('unit', lambda x: x is not None, str),
('format', lambda x: x is not None, None),
('description', lambda x: x is not None, None),
('meta', lambda x: x, None)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
attrs[attr] = xform(col_attr) if xform else col_attr
return attrs
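# Illustrative sketch (editor's example, not part of the original module): for a
# plain float64 Column named 'flux' carrying unit 'Jy' and a description, the
# returned ColumnDict would look roughly like
#   {'name': 'flux', 'datatype': 'float64', 'unit': 'Jy',
#    'description': 'measured flux density'}
# with the keys emitted in the fixed ColumnOrderList order when dumped to YAML.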
def get_yaml_from_table(table):
"""
Return lines with a YAML representation of header content from the ``table``.
Parameters
----------
table : `~astropy.table.Table` object
Table for which header content is output
Returns
-------
lines : list
List of text lines with YAML header content
"""
header = {'cols': list(table.columns.values())}
if table.meta:
header['meta'] = table.meta
return get_yaml_from_header(header)
def get_yaml_from_header(header):
"""
Return lines with a YAML representation of header content from a Table.
The ``header`` dict must contain these keys:
- 'cols' : list of table column objects (required)
- 'meta' : table 'meta' attribute (optional)
Other keys included in ``header`` will be serialized in the output YAML
representation.
Parameters
----------
header : dict
Table header content
Returns
-------
lines : list
List of text lines with YAML header content
"""
try:
import yaml
except ImportError:
raise ImportError('`import yaml` failed, PyYAML package is '
'required for serializing mixin columns')
from ..io.misc.yaml import AstropyDumper
class TableDumper(AstropyDumper):
"""
Custom Dumper that represents OrderedDict as an !!omap object.
"""
def represent_mapping(self, tag, mapping, flow_style=None):
"""
This is a combination of the Python 2 and 3 versions of this method
in the PyYAML library to allow the required key ordering via the
ColumnOrderList object. The Python 3 version insists on turning the
items() mapping into a list object and sorting, which results in
alphabetical order for the column keys.
"""
value = []
node = yaml.MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = mapping.items()
if hasattr(mapping, 'sort'):
mapping.sort()
else:
mapping = list(mapping)
try:
mapping = sorted(mapping)
except TypeError:
pass
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
TableDumper.add_representer(OrderedDict, _repr_odict)
TableDumper.add_representer(ColumnDict, _repr_column_dict)
header = copy.copy(header) # Don't overwrite original
header['datatype'] = [_get_col_attributes(col) for col in header['cols']]
del header['cols']
lines = yaml.dump(header, Dumper=TableDumper, width=130).splitlines()
return lines
class YamlParseError(Exception):
pass
def get_header_from_yaml(lines):
"""
Get a header dict from input ``lines`` which should be valid YAML. This
input will typically be created by get_yaml_from_header. The output is a
dictionary which describes all the table and column meta.
The get_cols() method in the io/ascii/ecsv.py file should be used as a
guide to using the information when constructing a table using this
header dict information.
Parameters
----------
lines : list
List of text lines with YAML header content
Returns
-------
header : dict
Dictionary describing table and column meta
"""
try:
import yaml
except ImportError:
raise ImportError('`import yaml` failed, PyYAML package '
'is required for serializing mixin columns')
from ..io.misc.yaml import AstropyLoader
class TableLoader(AstropyLoader):
"""
Custom Loader that constructs OrderedDict from an !!omap object.
This does nothing but provide a namespace for adding the
custom odict constructor.
"""
TableLoader.add_constructor(u'tag:yaml.org,2002:omap', _construct_odict)
# Now actually load the YAML data structure into `meta`
header_yaml = textwrap.dedent('\n'.join(lines))
try:
header = yaml.load(header_yaml, Loader=TableLoader)
except Exception as err:
raise YamlParseError(str(err))
return header
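# Illustrative round-trip sketch (editor's example, not part of the original
# module; assumes PyYAML is installed): the YAML header written by
# get_yaml_from_table can be parsed back into an equivalent header dict.
#
#   >>> from astropy.table import Table                       # doctest: +SKIP
#   >>> t = Table({'a': [1, 2]})
#   >>> lines = get_yaml_from_table(t)
#   >>> hdr = get_header_from_yaml(lines)
#   >>> [col['name'] for col in hdr['datatype']]
#   ['a']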
|
58edfe951fdde85d1f79bf0d7adbc0ce2d34d1e54a391ed0ad93d5661306cdb8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
def _searchsorted(array, val, side='left'):
'''
Call np.searchsorted or use a custom binary
search if necessary.
'''
if hasattr(array, 'searchsorted'):
return array.searchsorted(val, side=side)
# Python binary search
begin = 0
end = len(array)
while begin < end:
mid = (begin + end) // 2
if val > array[mid]:
begin = mid + 1
elif val < array[mid]:
end = mid
elif side == 'right':
begin = mid + 1
else:
end = mid
return begin
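# Illustrative sketch (editor's example, not part of the original module): the
# pure-Python branch mirrors np.searchsorted semantics for containers that lack
# a searchsorted method, such as plain lists.
#
#   >>> _searchsorted([1, 2, 2, 3], 2)                # leftmost insertion point
#   1
#   >>> _searchsorted([1, 2, 2, 3], 2, side='right')  # rightmost insertion point
#   3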
class SortedArray:
'''
Implements a sorted array container using
a list of numpy arrays.
Parameters
----------
data : Table
Sorted columns of the original table
row_index : Column object
Row numbers corresponding to data columns
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
def __init__(self, data, row_index, unique=False):
self.data = data
self.row_index = row_index
self.num_cols = len(getattr(data, 'colnames', []))
self.unique = unique
@property
def cols(self):
return self.data.columns.values()
def add(self, key, row):
'''
Add a new entry to the sorted array.
Parameters
----------
key : tuple
Column values at the given row
row : int
Row number
'''
pos = self.find_pos(key, row) # first >= key
if self.unique and 0 <= pos < len(self.row_index) and \
all(self.data[pos][i] == key[i] for i in range(len(key))):
# already exists
raise ValueError('Cannot add duplicate value "{0}" in a '
'unique index'.format(key))
self.data.insert_row(pos, key)
self.row_index = self.row_index.insert(pos, row)
def _get_key_slice(self, i, begin, end):
'''
Retrieve the ith slice of the sorted array
from begin to end.
'''
if i < self.num_cols:
return self.cols[i][begin:end]
else:
return self.row_index[begin:end]
def find_pos(self, key, data, exact=False):
'''
Return the index of the largest key in data greater than or
equal to the given key, data pair.
Parameters
----------
key : tuple
Column key
data : int
Row number
exact : bool
If True, return the index of the given key in data
or -1 if the key is not present.
'''
begin = 0
end = len(self.row_index)
num_cols = self.num_cols
if not self.unique:
# consider the row value as well
key = key + (data,)
num_cols += 1
# search through keys in lexicographic order
for i in range(num_cols):
key_slice = self._get_key_slice(i, begin, end)
t = _searchsorted(key_slice, key[i])
# t is the smallest index >= key[i]
if exact and (t == len(key_slice) or key_slice[t] != key[i]):
# no match
return -1
elif t == len(key_slice) or (t == 0 and len(key_slice) > 0 and
key[i] < key_slice[0]):
# too small or too large
return begin + t
end = begin + _searchsorted(key_slice, key[i], side='right')
begin += t
if begin >= len(self.row_index): # greater than all keys
return begin
return begin
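    # Illustrative sketch (editor's note, not part of the original class): with a
    # single sorted key column whose values are [1, 2, 2, 3] and row_index
    # [10, 11, 12, 13], a non-unique index evaluates find_pos((2,), 12) to 2,
    # because the row number 12 breaks the tie between the duplicate key value 2.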
def find(self, key):
'''
Find all rows matching the given key.
Parameters
----------
key : tuple
Column values
Returns
-------
matching_rows : list
List of rows matching the input key
'''
begin = 0
end = len(self.row_index)
# search through keys in lexicographic order
for i in range(self.num_cols):
key_slice = self._get_key_slice(i, begin, end)
t = _searchsorted(key_slice, key[i])
# t is the smallest index >= key[i]
if t == len(key_slice) or key_slice[t] != key[i]:
# no match
return []
elif t == 0 and len(key_slice) > 0 and key[i] < key_slice[0]:
# too small or too large
return []
end = begin + _searchsorted(key_slice, key[i], side='right')
begin += t
if begin >= len(self.row_index): # greater than all keys
return []
return self.row_index[begin:end]
def range(self, lower, upper, bounds):
'''
Find values in the given range.
Parameters
----------
lower : tuple
Lower search bound
upper : tuple
Upper search bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
lower_pos = self.find_pos(lower, 0)
upper_pos = self.find_pos(upper, 0)
if lower_pos == len(self.row_index):
return []
lower_bound = tuple([col[lower_pos] for col in self.cols])
if not bounds[0] and lower_bound == lower:
lower_pos += 1 # data[lower_pos] > lower
# data[lower_pos] >= lower
# data[upper_pos] >= upper
if upper_pos < len(self.row_index):
upper_bound = tuple([col[upper_pos] for col in self.cols])
if not bounds[1] and upper_bound == upper:
upper_pos -= 1 # data[upper_pos] < upper
elif upper_bound > upper:
upper_pos -= 1 # data[upper_pos] <= upper
return self.row_index[lower_pos:upper_pos + 1]
def remove(self, key, data):
'''
Remove the given entry from the sorted array.
Parameters
----------
key : tuple
Column values
data : int
Row number
Returns
-------
successful : bool
Whether the entry was successfully removed
'''
pos = self.find_pos(key, data, exact=True)
if pos == -1: # key not found
return False
self.data.remove_row(pos)
keep_mask = np.ones(len(self.row_index), dtype=bool)
keep_mask[pos] = False
self.row_index = self.row_index[keep_mask]
return True
def shift_left(self, row):
'''
Decrement all row numbers greater than the input row.
Parameters
----------
row : int
Input row number
'''
self.row_index[self.row_index > row] -= 1
def shift_right(self, row):
'''
Increment all row numbers greater than or equal to the input row.
Parameters
----------
row : int
Input row number
'''
self.row_index[self.row_index >= row] += 1
def replace_rows(self, row_map):
'''
Replace all rows with the values they map to in the
given dictionary. Any rows not present as keys in
the dictionary will have their entries deleted.
Parameters
----------
row_map : dict
Mapping of row numbers to new row numbers
'''
num_rows = len(row_map)
keep_rows = np.zeros(len(self.row_index), dtype=bool)
tagged = 0
for i, row in enumerate(self.row_index):
if row in row_map:
keep_rows[i] = True
tagged += 1
if tagged == num_rows:
break
self.data = self.data[keep_rows]
self.row_index = np.array(
[row_map[x] for x in self.row_index[keep_rows]])
def items(self):
'''
Retrieve all array items as a list of pairs of the form
[(key, [row 1, row 2, ...]), ...]
'''
array = []
last_key = None
for i, key in enumerate(zip(*self.data.columns.values())):
row = self.row_index[i]
if key == last_key:
array[-1][1].append(row)
else:
last_key = key
array.append((key, [row]))
return array
def sort(self):
'''
Make row order align with key order.
'''
self.row_index = np.arange(len(self.row_index))
def sorted_data(self):
'''
Return rows in sorted order.
'''
return self.row_index
def __getitem__(self, item):
'''
Return a sliced reference to this sorted array.
Parameters
----------
item : slice
Slice to use for referencing
'''
return SortedArray(self.data[item], self.row_index[item])
def __repr__(self):
t = self.data.copy()
t['rows'] = self.row_index
return str(t)
def __str__(self):
return repr(self)
|
19f7f0f4a3a84109418be6cca20be0efae782bfdc73ac802490e271328027d72 | from importlib import import_module
import re
from copy import deepcopy
from ..utils.data_info import MixinInfo
from .column import Column
from .table import Table, QTable, has_info_class
from ..units.quantity import QuantityInfo
__construct_mixin_classes = ('astropy.time.core.Time',
'astropy.time.core.TimeDelta',
'astropy.units.quantity.Quantity',
'astropy.coordinates.angles.Latitude',
'astropy.coordinates.angles.Longitude',
'astropy.coordinates.angles.Angle',
'astropy.coordinates.distances.Distance',
'astropy.coordinates.earth.EarthLocation',
'astropy.coordinates.sky_coordinate.SkyCoord',
'astropy.table.table.NdarrayMixin')
class SerializedColumn(dict):
"""
    Subclass of dict that is used in the representation to contain the name
(and possible other info) for a mixin attribute (either primary data or an
array-like attribute) that is serialized as a column in the table.
Normally contains the single key ``name`` with the name of the column in the
table.
"""
pass
def _represent_mixin_as_column(col, name, new_cols, mixin_cols,
exclude_classes=()):
"""Convert a mixin column to a plain columns or a set of mixin columns."""
# If not a mixin, or if class in ``exclude_classes`` tuple then
# treat as a normal column. Excluded sub-classes must be explicitly
# specified.
if not has_info_class(col, MixinInfo) or col.__class__ in exclude_classes:
new_cols.append(col)
return
# Subtlety here is handling mixin info attributes. The basic list of such
# attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'.
# - name: handled directly [DON'T store]
# - unit: DON'T store if this is a parent attribute
# - dtype: captured in plain Column if relevant [DON'T store]
# - format: possibly irrelevant but settable post-object creation [DO store]
# - description: DO store
# - meta: DO store
info = {}
for attr, nontrivial, xform in (('unit', lambda x: x not in (None, ''), str),
('format', lambda x: x is not None, None),
('description', lambda x: x is not None, None),
('meta', lambda x: x, None)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = xform(col_attr) if xform else col_attr
obj_attrs = col.info._represent_as_dict()
ordered_keys = col.info._represent_as_dict_attrs
data_attrs = [key for key in ordered_keys if key in obj_attrs and
getattr(obj_attrs[key], 'shape', ())[:1] == col.shape[:1]]
for data_attr in data_attrs:
data = obj_attrs[data_attr]
if len(data_attrs) == 1 and not has_info_class(data, MixinInfo):
# For one non-mixin attribute, we need only one serialized column.
# We can store info there, and keep the column name as is.
new_cols.append(Column(data, name=name, **info))
obj_attrs[data_attr] = SerializedColumn({'name': name})
# Remove attributes that are already on the serialized column.
for attr in info:
if attr in obj_attrs:
del obj_attrs[attr]
else:
# New column name combines the old name and attribute
# (e.g. skycoord.ra, skycoord.dec).
new_name = name + '.' + data_attr
# TODO masking, MaskedColumn
if not has_info_class(data, MixinInfo):
new_cols.append(Column(data, name=new_name))
obj_attrs[data_attr] = SerializedColumn({'name': new_name})
else:
# recurse. This will define obj_attrs[new_name].
_represent_mixin_as_column(data, new_name, new_cols, obj_attrs)
obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name))
# Strip out from info any attributes defined by the parent
for attr in col.info.attrs_from_parent:
if attr in info:
del info[attr]
if info:
obj_attrs['__info__'] = info
# Store the fully qualified class name
obj_attrs['__class__'] = col.__module__ + '.' + col.__class__.__name__
mixin_cols[name] = obj_attrs
def _represent_mixins_as_columns(tbl, exclude_classes=()):
"""
Convert any mixin columns to plain Column or MaskedColumn and
return a new table. Exclude any mixin columns in ``exclude_classes``,
which must be a tuple of classes.
"""
if not tbl.has_mixin_columns:
return tbl
mixin_cols = {}
new_cols = []
for col in tbl.itercols():
_represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols,
exclude_classes=exclude_classes)
meta = deepcopy(tbl.meta)
meta['__serialized_columns__'] = mixin_cols
out = Table(new_cols, meta=meta, copy=False)
return out
def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info):
cls_full_name = obj_attrs.pop('__class__')
# If this is a supported class then import the class and run
# the _construct_from_col method. Prevent accidentally running
# untrusted code by only importing known astropy classes.
if cls_full_name not in __construct_mixin_classes:
raise ValueError('unsupported class for construct {}'.format(cls_full_name))
mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups()
module = import_module(mod_name)
cls = getattr(module, cls_name)
for attr, value in info.items():
if attr in cls.info.attrs_from_parent:
obj_attrs[attr] = value
mixin = cls.info._construct_from_dict(obj_attrs)
for attr, value in info.items():
if attr not in obj_attrs:
setattr(mixin.info, attr, value)
return mixin
def _construct_mixin_from_columns(new_name, obj_attrs, out):
data_attrs_map = {}
for name, val in obj_attrs.items():
if isinstance(val, SerializedColumn):
if 'name' in val:
data_attrs_map[val['name']] = name
else:
_construct_mixin_from_columns(name, val, out)
data_attrs_map[name] = name
for name in data_attrs_map.values():
del obj_attrs[name]
# Get the index where to add new column
idx = min(out.colnames.index(name) for name in data_attrs_map)
# Name is the column name in the table (e.g. "coord.ra") and
# data_attr is the object attribute name (e.g. "ra"). A different
# example would be a formatted time object that would have (e.g.)
# "time_col" and "value", respectively.
for name, data_attr in data_attrs_map.items():
col = out[name]
obj_attrs[data_attr] = col
del out[name]
info = obj_attrs.pop('__info__', {})
if len(data_attrs_map) == 1:
# col is the first and only serialized column; in that case, use info
# stored on the column.
for attr, nontrivial in (('unit', lambda x: x not in (None, '')),
('format', lambda x: x is not None),
('description', lambda x: x is not None),
('meta', lambda x: x)):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
info[attr] = col_attr
info['name'] = new_name
col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info)
out.add_column(col, index=idx)
def _construct_mixins_from_columns(tbl):
if '__serialized_columns__' not in tbl.meta:
return tbl
# Don't know final output class but assume QTable so no columns get
# downgraded.
out = QTable(tbl, copy=False)
mixin_cols = out.meta.pop('__serialized_columns__')
for new_name, obj_attrs in mixin_cols.items():
_construct_mixin_from_columns(new_name, obj_attrs, out)
# If no quantity subclasses are in the output then output as Table.
# For instance ascii.read(file, format='ecsv') doesn't specify an
# output class and should return the minimal table class that
# represents the table file.
has_quantities = any(isinstance(col.info, QuantityInfo)
for col in out.itercols())
if not has_quantities:
out = Table(out, copy=False)
return out
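# Illustrative round-trip sketch (editor's example, not part of the original
# module): serialize a Quantity mixin column to a plain column plus metadata and
# reconstruct it.
#
#   >>> from astropy.table import QTable                      # doctest: +SKIP
#   >>> import astropy.units as u
#   >>> t = QTable({'v': [1., 2.] * u.km / u.s})
#   >>> plain = _represent_mixins_as_columns(t)        # Quantity -> Column + meta
#   >>> round_trip = _construct_mixins_from_columns(plain)
#   >>> round_trip['v'].unit
#   Unit("km / s")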
|
63b78bd566590e4e295d3d90a7dab36091ff6b69a9c64ba935f242c79be8d4d0 | # -*- coding: utf-8 -*-
ascii_coded = 'Ò♙♙♙♙♙♙♙♙♌♐♐♌♙♙♙♙♙♙♌♌♙♙Ò♙♙♙♙♙♙♙♘♐♐♐♈♙♙♙♙♙♌♐♐♐♔Ò♙♙♌♈♙♙♌♐♈♈♙♙♙♙♙♙♙♙♈♐♐♙Ò♙♐♙♙♙♐♐♙♙♙♙♙♙♙♙♙♙♙♙♙♙♙Ò♐♔♙♙♘♐♐♙♙♌♐♐♔♙♙♌♌♌♙♙♙♌Ò♐♐♙♙♘♐♐♌♙♈♐♈♙♙♙♈♐♐♙♙♘♔Ò♐♐♌♙♘♐♐♐♌♌♙♙♌♌♌♙♈♈♙♌♐♐Ò♘♐♐♐♌♐♐♐♐♐♐♌♙♈♙♌♐♐♐♐♐♔Ò♘♐♐♐♐♐♐♐♐♐♐♐♐♈♈♐♐♐♐♐♐♙Ò♙♘♐♐♐♐♈♐♐♐♐♐♐♙♙♐♐♐♐♐♙♙Ò♙♙♙♈♈♈♙♙♐♐♐♐♐♔♙♐♐♐♐♈♙♙Ò♙♙♙♙♙♙♙♙♙♈♈♐♐♐♙♈♈♈♙♙♙♙Ò'
ascii_uncoded = ''.join([chr(ord(c)-200) for c in ascii_coded])
url = 'https://media.giphy.com/media/e24Q8FKE2mxRS/giphy.gif'
message_coded = 'ĘĩĶĬĩĻ÷ĜĩĪĴĭèıĶļĭĺĩīļıķĶ'
message_uncoded = ''.join([chr(ord(c)-200) for c in message_coded])
try:
from IPython import display
html = display.Image(url=url)._repr_html_()
class HTMLWithBackup(display.HTML):
def __init__(self, data, backup_text):
super().__init__(data)
self.backup_text = backup_text
def __repr__(self):
if self.backup_text is None:
return super().__repr__()
else:
return self.backup_text
dhtml = HTMLWithBackup(html, ascii_uncoded)
display.display(dhtml)
except ImportError:
print(ascii_uncoded)
|
7fe5cb9d72ef83eb9128cfb3c983ebfdbee66a3ff71d0e53ac54d0f551847d57 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for table development, mostly creating useful
tables for testing.
"""
from itertools import cycle
import string
import numpy as np
from .table import Table, Column
from ..utils.data_info import ParentDtypeInfo
class TimingTables:
"""
Object which contains two tables and various other attributes that
are useful for timing and other API tests.
"""
def __init__(self, size=1000, masked=False):
self.masked = masked
# Initialize table
self.table = Table(masked=self.masked)
# Create column with mixed types
np.random.seed(12345)
self.table['i'] = np.arange(size)
self.table['a'] = np.random.random(size) # float
self.table['b'] = np.random.random(size) > 0.5 # bool
self.table['c'] = np.random.random((size, 10)) # 2d column
self.table['d'] = np.random.choice(np.array(list(string.ascii_letters)), size)
self.extra_row = {'a': 1.2, 'b': True, 'c': np.repeat(1, 10), 'd': 'Z'}
self.extra_column = np.random.randint(0, 100, size)
self.row_indices = np.where(self.table['a'] > 0.9)[0]
self.table_grouped = self.table.group_by('d')
# Another table for testing joining
self.other_table = Table(masked=self.masked)
self.other_table['i'] = np.arange(1, size, 3)
self.other_table['f'] = np.random.random()
self.other_table.sort('f')
# Another table for testing hstack
self.other_table_2 = Table(masked=self.masked)
self.other_table_2['g'] = np.random.random(size)
self.other_table_2['h'] = np.random.random((size, 10))
self.bool_mask = self.table['a'] > 0.6
def simple_table(size=3, cols=None, kinds='ifS', masked=False):
"""
Return a simple table for testing.
Example
--------
::
>>> from astropy.table.table_helpers import simple_table
>>> print(simple_table(3, 6, masked=True, kinds='ifOS'))
a b c d e f
--- --- -------- --- --- ---
-- 1.0 {'c': 2} -- 5 5.0
2 2.0 -- e 6 --
3 -- {'e': 4} f -- 7.0
Parameters
----------
size : int
Number of table rows
cols : int, optional
Number of table columns. Defaults to number of kinds.
kinds : str
String consisting of the column dtype.kinds. This string
will be cycled through to generate the column dtype.
The allowed values are 'i', 'f', 'S', 'O'.
Returns
-------
out : `Table`
New table with appropriate characteristics
"""
if cols is None:
cols = len(kinds)
if cols > 26:
raise ValueError("Max 26 columns in SimpleTable")
columns = []
names = [chr(ord('a') + ii) for ii in range(cols)]
letters = np.array([c for c in string.ascii_letters])
for jj, kind in zip(range(cols), cycle(kinds)):
if kind == 'i':
data = np.arange(1, size + 1, dtype=np.int64) + jj
elif kind == 'f':
data = np.arange(size, dtype=np.float64) + jj
elif kind == 'S':
indices = (np.arange(size) + jj) % len(letters)
data = letters[indices]
elif kind == 'O':
indices = (np.arange(size) + jj) % len(letters)
vals = letters[indices]
data = [{val: index} for val, index in zip(vals, indices)]
else:
raise ValueError('Unknown data kind')
columns.append(Column(data))
table = Table(columns, names=names, masked=masked)
if masked:
for ii, col in enumerate(table.columns.values()):
mask = np.array((np.arange(size) + ii) % 3, dtype=bool)
col.mask = ~mask
return table
def complex_table():
"""
Return a masked table from the io.votable test set that has a wide variety
of stressing types.
"""
from ..utils.data import get_pkg_data_filename
from ..io.votable.table import parse
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
votable = parse(get_pkg_data_filename('../io/votable/tests/data/regression.xml'),
pedantic=False)
first_table = votable.get_first_table()
table = first_table.to_table()
return table
class ArrayWrapper:
"""
Minimal mixin using a simple wrapper around a numpy array
"""
info = ParentDtypeInfo()
def __init__(self, data):
self.data = np.array(data)
if 'info' in getattr(data, '__dict__', ()):
self.info = data.info
def __getitem__(self, item):
if isinstance(item, (int, np.integer)):
out = self.data[item]
else:
out = self.__class__(self.data[item])
if 'info' in self.__dict__:
out.info = self.info
return out
def __setitem__(self, item, value):
self.data[item] = value
def __len__(self):
return len(self.data)
@property
def dtype(self):
return self.data.dtype
@property
def shape(self):
return self.data.shape
def __repr__(self):
return ("<{0} name='{1}' data={2}>"
.format(self.__class__.__name__, self.info.name, self.data))
|
9c45a19c8e74da0c26d242cf6c8ef504f1ad856a775983d550ebfcd84cc38210 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import math
import numpy as np
from .core import Kernel1D, Kernel2D, Kernel
from .utils import KernelSizeError
from ..modeling import models
from ..modeling.core import Fittable1DModel, Fittable2DModel
from ..utils.decorators import deprecated_renamed_argument
__all__ = ['Gaussian1DKernel', 'Gaussian2DKernel', 'CustomKernel',
'Box1DKernel', 'Box2DKernel', 'Tophat2DKernel',
'Trapezoid1DKernel', 'MexicanHat1DKernel', 'MexicanHat2DKernel',
'AiryDisk2DKernel', 'Moffat2DKernel', 'Model1DKernel',
'Model2DKernel', 'TrapezoidDisk2DKernel', 'Ring2DKernel']
def _round_up_to_odd_integer(value):
i = math.ceil(value)
if i % 2 == 0:
return i + 1
else:
return i
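# Illustrative note (editor's example, not part of the original module): kernel
# sizes are forced to be odd so the response is centered on a single pixel, e.g.
# _round_up_to_odd_integer(7.0) -> 7, _round_up_to_odd_integer(7.2) -> 9 and
# _round_up_to_odd_integer(8.0) -> 9.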
class Gaussian1DKernel(Kernel1D):
"""
1D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
Parameters
----------
stddev : number
Standard deviation of the Gaussian kernel.
x_size : odd int, optional
Size of the kernel array. Default = 8 * stddev
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin. Very slow.
factor : number, optional
Factor of oversampling. Default factor = 10. If the factor
is too large, evaluation can be very slow.
See Also
--------
Box1DKernel, Trapezoid1DKernel, MexicanHat1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian1DKernel
gauss_1D_kernel = Gaussian1DKernel(10)
plt.plot(gauss_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = False
def __init__(self, stddev, **kwargs):
self._model = models.Gaussian1D(1. / (np.sqrt(2 * np.pi) * stddev),
0, stddev)
self._default_size = _round_up_to_odd_integer(8 * stddev)
super().__init__(**kwargs)
self._truncation = np.abs(1. - self._array.sum())
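# Illustrative note (editor's example, not part of the original module): the
# ``truncation`` attribute records how much of the Gaussian falls outside the
# finite kernel array. With the default x_size of 8 * stddev it is small, e.g.
#   Gaussian1DKernel(2).truncation            # roughly of order 1e-4 or smaller
#   Gaussian1DKernel(2, x_size=5).truncation  # much larger: only +/- 1 stddev covered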
class Gaussian2DKernel(Kernel2D):
"""
2D Gaussian filter kernel.
The Gaussian filter is a filter with great smoothing properties. It is
isotropic and does not produce artifacts.
Parameters
----------
x_stddev : float
Standard deviation of the Gaussian in x before rotating by theta.
y_stddev : float
Standard deviation of the Gaussian in y before rotating by theta.
theta : float
Rotation angle in radians. The rotation angle increases
counterclockwise.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * stddev.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * stddev.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Gaussian2DKernel
gaussian_2D_kernel = Gaussian2DKernel(10)
plt.imshow(gaussian_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = False
@deprecated_renamed_argument('stddev', 'x_stddev', '3.0')
def __init__(self, x_stddev, y_stddev=None, theta=0.0, **kwargs):
if y_stddev is None:
y_stddev = x_stddev
self._model = models.Gaussian2D(1. / (2 * np.pi * x_stddev * y_stddev),
0, 0, x_stddev=x_stddev,
y_stddev=y_stddev, theta=theta)
self._default_size = _round_up_to_odd_integer(
8 * np.max([x_stddev, y_stddev]))
super().__init__(**kwargs)
self._truncation = np.abs(1. - self._array.sum())
class Box1DKernel(Kernel1D):
"""
1D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
    and can produce artifacts when applied repeatedly to the same data.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
    weighting the edge pixels with 1/2. E.g. a Box kernel with an effective
    smoothing of 4 pixels would have the following array: [0.5, 1, 1, 1, 0.5].
Parameters
----------
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'linear_interp' (default)
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian1DKernel, Trapezoid1DKernel, MexicanHat1DKernel
Examples
--------
Kernel response function:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box1DKernel
box_1D_kernel = Box1DKernel(9)
plt.plot(box_1D_kernel, drawstyle='steps')
plt.xlim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box1D(1. / width, 0, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs['mode'] = 'linear_interp'
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
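# Illustrative note (editor's example, not part of the original module): the
# 'linear_interp' mode lets even widths stay centered by half-weighting the edge
# pixels, e.g. Box1DKernel(4).array is approximately
# [0.125, 0.25, 0.25, 0.25, 0.125], which already sums to 1.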
class Box2DKernel(Kernel2D):
"""
2D Box filter kernel.
The Box filter or running mean is a smoothing filter. It is not isotropic
    and can produce artifacts when applied repeatedly to the same data.
By default the Box kernel uses the ``linear_interp`` discretization mode,
which allows non-shifting, even-sized kernels. This is achieved by
weighting the edge pixels with 1/2.
Parameters
----------
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center'
Discretize model by taking the value
at the center of the bin.
* 'linear_interp' (default)
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Tophat2DKernel, MexicanHat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Box2DKernel
box_2D_kernel = Box2DKernel(9)
plt.imshow(box_2D_kernel, interpolation='none', origin='lower',
vmin=0.0, vmax=0.015)
plt.xlim(-1, 9)
plt.ylim(-1, 9)
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_separable = True
_is_bool = True
def __init__(self, width, **kwargs):
self._model = models.Box2D(1. / width ** 2, 0, 0, width, width)
self._default_size = _round_up_to_odd_integer(width)
kwargs['mode'] = 'linear_interp'
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class Tophat2DKernel(Kernel2D):
"""
2D Tophat filter kernel.
The Tophat filter is an isotropic smoothing filter. It can produce
artifacts when applied repeatedly on the same data.
Parameters
----------
radius : int
Radius of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, MexicanHat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Tophat2DKernel
tophat_2D_kernel = Tophat2DKernel(40)
plt.imshow(tophat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius, **kwargs):
self._model = models.Disk2D(1. / (np.pi * radius ** 2), 0, 0, radius)
self._default_size = _round_up_to_odd_integer(2 * radius)
super().__init__(**kwargs)
self._truncation = 0
class Ring2DKernel(Kernel2D):
"""
2D Ring filter kernel.
The Ring filter kernel is the difference between two Tophat kernels of
different width. This kernel is useful for, e.g., background estimation.
Parameters
----------
radius_in : number
Inner radius of the ring kernel.
width : number
Width of the ring kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Ring2DKernel
ring_2D_kernel = Ring2DKernel(9, 8)
plt.imshow(ring_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
def __init__(self, radius_in, width, **kwargs):
radius_out = radius_in + width
self._model = models.Ring2D(1. / (np.pi * (radius_out ** 2 - radius_in ** 2)),
0, 0, radius_in, width)
self._default_size = _round_up_to_odd_integer(2 * radius_out)
super().__init__(**kwargs)
self._truncation = 0
class Trapezoid1DKernel(Kernel1D):
"""
1D trapezoid kernel.
Parameters
----------
width : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, MexicanHat1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Trapezoid1DKernel
trapezoid_1D_kernel = Trapezoid1DKernel(17, slope=0.2)
plt.plot(trapezoid_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('amplitude')
plt.xlim(-1, 28)
plt.show()
"""
_is_bool = False
def __init__(self, width, slope=1., **kwargs):
self._model = models.Trapezoid1D(1, 0, width, slope)
self._default_size = _round_up_to_odd_integer(width + 2. / slope)
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class TrapezoidDisk2DKernel(Kernel2D):
"""
2D trapezoid kernel.
Parameters
----------
radius : number
Width of the filter kernel, defined as the width of the constant part,
before it begins to slope down.
slope : number
Slope of the filter kernel's tails
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
Ring2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import TrapezoidDisk2DKernel
trapezoid_2D_kernel = TrapezoidDisk2DKernel(20, slope=0.2)
plt.imshow(trapezoid_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, slope=1., **kwargs):
self._model = models.TrapezoidDisk2D(1, 0, 0, radius, slope)
self._default_size = _round_up_to_odd_integer(2 * radius + 2. / slope)
super().__init__(**kwargs)
self._truncation = 0
self.normalize()
class MexicanHat1DKernel(Kernel1D):
"""
1D Mexican hat filter kernel.
The Mexican Hat, or inverted Gaussian-Laplace filter, is a
    bandpass filter. It smooths the data and removes slowly varying
    or constant structures (e.g. background). It is useful for peak or
    multi-scale detection.
    This kernel is derived from a normalized Gaussian function by
    computing the second derivative. This results in an amplitude
    at the kernel's center of 1. / (sqrt(2 * pi) * width ** 3). The
normalization is the same as for `scipy.ndimage.gaussian_laplace`,
except for a minus sign.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Box1DKernel, Gaussian1DKernel, Trapezoid1DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import MexicanHat1DKernel
mexicanhat_1D_kernel = MexicanHat1DKernel(10)
plt.plot(mexicanhat_1D_kernel, drawstyle='steps')
plt.xlabel('x [pixels]')
plt.ylabel('value')
plt.show()
"""
_is_bool = True
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.sqrt(2 * np.pi) * width ** 3)
self._model = models.MexicanHat1D(amplitude, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
self._truncation = np.abs(self._array.sum() / self._array.size)
class MexicanHat2DKernel(Kernel2D):
"""
2D Mexican hat filter kernel.
The Mexican Hat, or inverted Gaussian-Laplace filter, is a
    bandpass filter. It smooths the data and removes slowly varying
    or constant structures (e.g. background). It is useful for peak or
    multi-scale detection.
    This kernel is derived from a normalized Gaussian function by
    computing the second derivative. This results in an amplitude
    at the kernel's center of 1. / (pi * width ** 4). The normalization
is the same as for `scipy.ndimage.gaussian_laplace`, except
for a minus sign.
Parameters
----------
width : number
Width of the filter kernel, defined as the standard deviation
of the Gaussian function from which it is derived.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, Ring2DKernel,
TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import MexicanHat2DKernel
mexicanhat_2D_kernel = MexicanHat2DKernel(10)
plt.imshow(mexicanhat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, width, **kwargs):
amplitude = 1.0 / (np.pi * width ** 4)
self._model = models.MexicanHat2D(amplitude, 0, 0, width)
self._default_size = _round_up_to_odd_integer(8 * width)
super().__init__(**kwargs)
self._truncation = np.abs(self._array.sum() / self._array.size)
class AiryDisk2DKernel(Kernel2D):
"""
2D Airy disk kernel.
This kernel models the diffraction pattern of a circular aperture. This
kernel is normalized to a peak value of 1.
Parameters
----------
radius : float
The radius of the Airy disk kernel (radius of the first zero).
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * radius.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * radius.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel, Moffat2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import AiryDisk2DKernel
airydisk_2D_kernel = AiryDisk2DKernel(10)
plt.imshow(airydisk_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, radius, **kwargs):
self._model = models.AiryDisk2D(1, 0, 0, radius)
self._default_size = _round_up_to_odd_integer(8 * radius)
super().__init__(**kwargs)
self.normalize()
self._truncation = None
class Moffat2DKernel(Kernel2D):
"""
2D Moffat kernel.
    This kernel is a typical model for a seeing-limited PSF.
Parameters
----------
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
    x_size : odd int, optional
        Size in x direction of the kernel array. Default = 4 * FWHM,
        where the FWHM is derived from ``gamma`` and ``alpha``.
    y_size : odd int, optional
        Size in y direction of the kernel array. Default = 4 * FWHM.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
See Also
--------
Gaussian2DKernel, Box2DKernel, Tophat2DKernel, MexicanHat2DKernel,
Ring2DKernel, TrapezoidDisk2DKernel, AiryDisk2DKernel
Examples
--------
Kernel response:
.. plot::
:include-source:
import matplotlib.pyplot as plt
from astropy.convolution import Moffat2DKernel
moffat_2D_kernel = Moffat2DKernel(3, 2)
plt.imshow(moffat_2D_kernel, interpolation='none', origin='lower')
plt.xlabel('x [pixels]')
plt.ylabel('y [pixels]')
plt.colorbar()
plt.show()
"""
_is_bool = False
def __init__(self, gamma, alpha, **kwargs):
        # Moffat profile: f(r) = A * (1 + (r / gamma)**2)**(-alpha).
        # The amplitude giving unit integral is (alpha - 1) / (pi * gamma**2)
        # and the FWHM is 2 * gamma * sqrt(2**(1 / alpha) - 1).
        self._model = models.Moffat2D((alpha - 1.0) / (np.pi * gamma ** 2),
                                      0, 0, gamma, alpha)
        fwhm = 2.0 * gamma * (2.0 ** (1.0 / alpha) - 1.0) ** 0.5
self._default_size = _round_up_to_odd_integer(4.0 * fwhm)
super().__init__(**kwargs)
self.normalize()
self._truncation = None
class Model1DKernel(Kernel1D):
"""
Create kernel from 1D model.
The model has to be centered on x = 0.
Parameters
----------
model : `~astropy.modeling.Fittable1DModel`
Kernel response function model
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable1DModel`
See also
--------
Model2DKernel : Create kernel from `~astropy.modeling.Fittable2DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian1D model:
>>> from astropy.modeling.models import Gaussian1D
>>> from astropy.convolution.kernels import Model1DKernel
>>> gauss = Gaussian1D(1, 0, 2)
And create a custom one dimensional kernel from it:
>>> gauss_kernel = Model1DKernel(gauss, x_size=9)
This kernel can now be used like a usual Astropy kernel.
"""
_separable = False
_is_bool = False
def __init__(self, model, **kwargs):
if isinstance(model, Fittable1DModel):
self._model = model
else:
raise TypeError("Must be Fittable1DModel")
super().__init__(**kwargs)
class Model2DKernel(Kernel2D):
"""
Create kernel from 2D model.
The model has to be centered on x = 0 and y = 0.
Parameters
----------
model : `~astropy.modeling.Fittable2DModel`
Kernel response function model
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * width.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
Raises
------
TypeError
If model is not an instance of `~astropy.modeling.Fittable2DModel`
See also
--------
Model1DKernel : Create kernel from `~astropy.modeling.Fittable1DModel`
CustomKernel : Create kernel from list or array
Examples
--------
Define a Gaussian2D model:
>>> from astropy.modeling.models import Gaussian2D
>>> from astropy.convolution.kernels import Model2DKernel
>>> gauss = Gaussian2D(1, 0, 0, 2, 2)
And create a custom two dimensional kernel from it:
>>> gauss_kernel = Model2DKernel(gauss, x_size=9)
This kernel can now be used like a usual astropy kernel.
"""
_is_bool = False
_separable = False
def __init__(self, model, **kwargs):
self._separable = False
if isinstance(model, Fittable2DModel):
self._model = model
else:
raise TypeError("Must be Fittable2DModel")
super().__init__(**kwargs)
class PSFKernel(Kernel2D):
"""
Initialize filter kernel from astropy PSF instance.
"""
_separable = False
def __init__(self):
raise NotImplementedError('Not yet implemented')
class CustomKernel(Kernel):
"""
Create filter kernel from list or array.
Parameters
----------
array : list or array
Filter kernel array. Size must be odd.
Raises
------
TypeError
If array is not a list or array.
KernelSizeError
If array size is even.
See also
--------
Model2DKernel, Model1DKernel
Examples
--------
Define one dimensional array:
>>> from astropy.convolution.kernels import CustomKernel
>>> import numpy as np
>>> array = np.array([1, 2, 3, 2, 1])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
1
Define two dimensional array:
>>> array = np.array([[1, 1, 1], [1, 2, 1], [1, 1, 1]])
>>> kernel = CustomKernel(array)
>>> kernel.dimension
2
"""
def __init__(self, array):
self.array = array
super().__init__(self._array)
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
@array.setter
def array(self, array):
"""
Filter kernel array setter
"""
if isinstance(array, np.ndarray):
self._array = array.astype(np.float64)
elif isinstance(array, list):
self._array = np.array(array, dtype=np.float64)
else:
raise TypeError("Must be list or array.")
# Check if array is odd in all axes
odd = all(axes_size % 2 != 0 for axes_size in self.shape)
if not odd:
raise KernelSizeError("Kernel size must be odd in all axes.")
# Check if array is bool
ones = self._array == 1.
zeros = self._array == 0
self._is_bool = bool(np.all(np.logical_or(ones, zeros)))
self._truncation = 0.0
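# --- Illustrative usage sketch (not part of the original module) -------------
# Minimal example of ``CustomKernel``: build a kernel from a plain array and
# normalize it.  Assumes the public astropy.convolution API; guarded so it
# does not execute at import time.
if __name__ == '__main__':
    import numpy as np
    from astropy.convolution import CustomKernel

    kernel = CustomKernel(np.array([1., 2., 3., 2., 1.]))
    print(kernel.is_bool)         # False: values other than 0 and 1 occur
    kernel.normalize()            # rescale so that the kernel array sums to one
    print(kernel.array.sum())     # approximately 1.0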
|
87efd49eba4cf3a4f5823185d5e70b68f94d11280045d0afe22210543af9b45f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the convolution and filter functionalities of astropy.
A few conceptual notes:
A filter kernel is mainly characterized by its response function. In the 1D
case we speak of "impulse response function", in the 2D case we call it "point
spread function". This response function is given for every kernel by an
astropy `FittableModel`, which is evaluated on a grid to obtain a filter array,
which can then be applied to binned data.
The model is centered on the array and should have an amplitude such that the array
integrates to one by default.
Currently only symmetric 2D kernels are supported.
"""
import warnings
import copy
import numpy as np
from ..utils.exceptions import AstropyUserWarning
from .utils import (discretize_model, add_kernel_arrays_1D,
add_kernel_arrays_2D)
MAX_NORMALIZATION = 100
__all__ = ['Kernel', 'Kernel1D', 'Kernel2D', 'kernel_arithmetics']
class Kernel:
"""
Convolution kernel base class.
Parameters
----------
array : `~numpy.ndarray`
Kernel array.
"""
_separable = False
_is_bool = True
_model = None
def __init__(self, array):
self._array = np.asanyarray(array)
@property
def truncation(self):
"""
Deviation from the normalization to one.
"""
return self._truncation
@property
def is_bool(self):
"""
Indicates if kernel is bool.
If the kernel is bool the multiplication in the convolution could
be omitted, to increase the performance.
"""
return self._is_bool
@property
def model(self):
"""
Kernel response model.
"""
return self._model
@property
def dimension(self):
"""
Kernel dimension.
"""
return self.array.ndim
@property
def center(self):
"""
Index of the kernel center.
"""
return [axes_size // 2 for axes_size in self._array.shape]
def normalize(self, mode='integral'):
"""
Normalize the filter kernel.
Parameters
----------
mode : {'integral', 'peak'}
One of the following modes:
* 'integral' (default)
Kernel is normalized such that its integral = 1.
* 'peak'
Kernel is normalized such that its peak = 1.
"""
if mode == 'integral':
normalization = self._array.sum()
elif mode == 'peak':
normalization = self._array.max()
else:
raise ValueError("invalid mode, must be 'integral' or 'peak'")
# Warn the user for kernels that sum to zero
if normalization == 0:
warnings.warn('The kernel cannot be normalized because it '
'sums to zero.', AstropyUserWarning)
else:
np.divide(self._array, normalization, self._array)
self._kernel_sum = self._array.sum()
@property
def shape(self):
"""
Shape of the kernel array.
"""
return self._array.shape
@property
def separable(self):
"""
Indicates if the filter kernel is separable.
A 2D filter is separable, when its filter array can be written as the
outer product of two 1D arrays.
If a filter kernel is separable, higher dimension convolutions will be
performed by applying the 1D filter array consecutively on every dimension.
This is significantly faster than using a filter array of the same
dimension.
"""
return self._separable
@property
def array(self):
"""
Filter kernel array.
"""
return self._array
def __add__(self, kernel):
"""
Add two filter kernels.
"""
return kernel_arithmetics(self, kernel, 'add')
def __sub__(self, kernel):
"""
Subtract two filter kernels.
"""
return kernel_arithmetics(self, kernel, 'sub')
def __mul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __rmul__(self, value):
"""
Multiply kernel with number or convolve two kernels.
"""
return kernel_arithmetics(self, value, "mul")
def __array__(self):
"""
Array representation of the kernel.
"""
return self._array
def __array_wrap__(self, array, context=None):
"""
Wrapper for multiplication with numpy arrays.
"""
if type(context[0]) == np.ufunc:
return NotImplemented
else:
return array
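# --- Illustrative sketch (not part of the original module) -------------------
# The ``separable`` property above refers to 2D kernels whose array is the
# outer product of two 1D arrays; such kernels can be applied one axis at a
# time.  A numpy-only demonstration, guarded so it does not execute at import
# time.
if __name__ == '__main__':
    import numpy as np

    k1d = np.array([1., 2., 1.]) / 4.0        # normalized 1D kernel
    k2d = np.outer(k1d, k1d)                  # separable 2D kernel
    # The 2D array really is the outer product of the 1D array with itself,
    # which is exactly the separability condition described above.
    print(np.allclose(k2d, k1d[:, None] * k1d[None, :]))   # True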
class Kernel1D(Kernel):
"""
Base class for 1D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : odd int, optional
Size of the kernel array. Default = 8 * width.
array : `~numpy.ndarray`
Kernel array.
width : number
Width of the filter kernel.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by linearly interpolating
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, array=None, **kwargs):
# Initialize from model
if array is None:
if self._model is None:
raise TypeError("Must specify either array or model.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
array = discretize_model(self._model, x_range, **kwargs)
# Initialize from array
elif array is not None:
self._model = None
super().__init__(array)
class Kernel2D(Kernel):
"""
Base class for 2D filter kernels.
Parameters
----------
model : `~astropy.modeling.FittableModel`
Model to be evaluated.
x_size : odd int, optional
Size in x direction of the kernel array. Default = 8 * width.
y_size : odd int, optional
Size in y direction of the kernel array. Default = 8 * width.
array : `~numpy.ndarray`
Kernel array.
mode : str, optional
One of the following discretization modes:
* 'center' (default)
Discretize model by taking the value
at the center of the bin.
* 'linear_interp'
Discretize model by performing a bilinear interpolation
between the values at the corners of the bin.
* 'oversample'
Discretize model by taking the average
on an oversampled grid.
* 'integrate'
Discretize model by integrating the
model over the bin.
width : number
Width of the filter kernel.
factor : number, optional
Factor of oversampling. Default factor = 10.
"""
def __init__(self, model=None, x_size=None, y_size=None, array=None, **kwargs):
# Initialize from model
if array is None:
if self._model is None:
raise TypeError("Must specify either array or model.")
if x_size is None:
x_size = self._default_size
elif x_size != int(x_size):
raise TypeError("x_size should be an integer")
if y_size is None:
y_size = x_size
elif y_size != int(y_size):
raise TypeError("y_size should be an integer")
# Set ranges where to evaluate the model
if x_size % 2 == 0: # even kernel
x_range = (-(int(x_size)) // 2 + 0.5, (int(x_size)) // 2 + 0.5)
else: # odd kernel
x_range = (-(int(x_size) - 1) // 2, (int(x_size) - 1) // 2 + 1)
if y_size % 2 == 0: # even kernel
y_range = (-(int(y_size)) // 2 + 0.5, (int(y_size)) // 2 + 0.5)
else: # odd kernel
y_range = (-(int(y_size) - 1) // 2, (int(y_size) - 1) // 2 + 1)
array = discretize_model(self._model, x_range, y_range, **kwargs)
# Initialize from array
elif array is not None:
self._model = None
super().__init__(array)
def kernel_arithmetics(kernel, value, operation):
"""
Add, subtract or multiply two kernels.
Parameters
----------
kernel : `astropy.convolution.Kernel`
Kernel instance
value : kernel, float or int
Value to operate with
operation : {'add', 'sub', 'mul'}
One of the following operations:
* 'add'
Add two kernels
* 'sub'
Subtract two kernels
* 'mul'
Multiply kernel with number or convolve two kernels.
"""
# 1D kernels
if isinstance(kernel, Kernel1D) and isinstance(value, Kernel1D):
if operation == "add":
new_array = add_kernel_arrays_1D(kernel.array, value.array)
if operation == "sub":
new_array = add_kernel_arrays_1D(kernel.array, -value.array)
if operation == "mul":
raise Exception("Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead.")
new_kernel = Kernel1D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# 2D kernels
elif isinstance(kernel, Kernel2D) and isinstance(value, Kernel2D):
if operation == "add":
new_array = add_kernel_arrays_2D(kernel.array, value.array)
if operation == "sub":
new_array = add_kernel_arrays_2D(kernel.array, -value.array)
if operation == "mul":
raise Exception("Kernel operation not supported. Maybe you want "
"to use convolve(kernel1, kernel2) instead.")
new_kernel = Kernel2D(array=new_array)
new_kernel._separable = kernel._separable and value._separable
new_kernel._is_bool = kernel._is_bool or value._is_bool
# kernel and number
elif ((isinstance(kernel, Kernel1D) or isinstance(kernel, Kernel2D))
and np.isscalar(value)):
if operation == "mul":
new_kernel = copy.copy(kernel)
new_kernel._array *= value
else:
raise Exception("Kernel operation not supported.")
else:
raise Exception("Kernel operation not supported.")
return new_kernel
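# --- Illustrative usage sketch (not part of the original module) -------------
# Kernel arithmetic as implemented above: ``+`` and ``-`` dispatch to
# ``kernel_arithmetics`` and add the kernel arrays with their centers aligned,
# while multiplication by a scalar rescales the array.  Assumes the public
# astropy.convolution API; guarded so it does not execute at import time.
if __name__ == '__main__':
    import numpy as np
    from astropy.convolution import Kernel1D

    big = Kernel1D(array=np.array([1., 1., 1., 1., 1.]))
    small = Kernel1D(array=np.array([0., 1., 0.]))
    combined = big + small          # centers lie on top of each other
    print(combined.array)           # -> [1., 1., 2., 1., 1.]
    scaled = 2 * big                # scalar multiplication
    print(scaled.array)             # -> [2., 2., 2., 2., 2.]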
|
0ae4cf6946cebc016ed90451ed9294b185102f74aee0cf6d8dabc86e41bd4936 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .core import *
from .kernels import *
from .utils import discretize_model
try:
# Not guaranteed available at setup time
from .convolve import convolve, convolve_fft, interpolate_replace_nans, convolve_models
except ImportError:
if not _ASTROPY_SETUP_:
raise
|
8cd670f5b1439060f1f5a089ae40662748b0841fbc495d8049b31981c5c669c8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from ..modeling.core import FittableModel, custom_model
__all__ = ['discretize_model']
class DiscretizationError(Exception):
"""
Raised when discretization of models goes wrong.
"""
class KernelSizeError(Exception):
"""
Raised when the kernel size is even.
"""
def add_kernel_arrays_1D(array_1, array_2):
"""
Add two 1D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = array_1.size // 2
slice_ = slice(center - array_2.size // 2,
center + array_2.size // 2 + 1)
new_array[slice_] += array_2
return new_array
elif array_2.size > array_1.size:
new_array = array_2.copy()
center = array_2.size // 2
slice_ = slice(center - array_1.size // 2,
center + array_1.size // 2 + 1)
new_array[slice_] += array_1
return new_array
return array_2 + array_1
def add_kernel_arrays_2D(array_1, array_2):
"""
Add two 2D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = [axes_size // 2 for axes_size in array_1.shape]
slice_x = slice(center[1] - array_2.shape[1] // 2,
center[1] + array_2.shape[1] // 2 + 1)
slice_y = slice(center[0] - array_2.shape[0] // 2,
center[0] + array_2.shape[0] // 2 + 1)
new_array[slice_y, slice_x] += array_2
return new_array
elif array_2.size > array_1.size:
new_array = array_2.copy()
center = [axes_size // 2 for axes_size in array_2.shape]
slice_x = slice(center[1] - array_1.shape[1] // 2,
center[1] + array_1.shape[1] // 2 + 1)
slice_y = slice(center[0] - array_1.shape[0] // 2,
center[0] + array_1.shape[0] // 2 + 1)
new_array[slice_y, slice_x] += array_1
return new_array
return array_2 + array_1
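# --- Illustrative usage sketch (not part of the original module) -------------
# The two helpers above add kernel arrays of different size with their centers
# lying on top of each other.  Assumes this module is importable as
# astropy.convolution.utils; guarded so it does not execute at import time.
if __name__ == '__main__':
    import numpy as np
    from astropy.convolution.utils import (add_kernel_arrays_1D,
                                           add_kernel_arrays_2D)

    print(add_kernel_arrays_1D(np.ones(5), np.array([0., 1., 0.])))
    # -> [1., 1., 2., 1., 1.]
    print(add_kernel_arrays_2D(np.ones((5, 5)), np.ones((3, 3)))[2, 2])
    # -> 2.0: the small kernel was added onto the center of the big one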
def discretize_model(model, x_range, y_range=None, mode='center', factor=10):
"""
Function to evaluate analytical model functions on a grid.
So far the function can only deal with pixel coordinates.
Parameters
----------
model : `~astropy.modeling.FittableModel` or callable.
Analytic model function to be discretized. Callables, which are not an
instances of `~astropy.modeling.FittableModel` are passed to
`~astropy.modeling.custom_model` and then evaluated.
x_range : tuple
x range in which the model is evaluated. The difference between the
upper and lower limit must be a whole number, so that the output array
size is well defined.
y_range : tuple, optional
y range in which the model is evaluated. The difference between the
upper and lower limit must be a whole number, so that the output array
size is well defined. Necessary only for 2D models.
mode : str, optional
One of the following modes:
* ``'center'`` (default)
Discretize model by taking the value
at the center of the bin.
* ``'linear_interp'``
Discretize model by linearly interpolating
between the values at the corners of the bin.
For 2D models interpolation is bilinear.
* ``'oversample'``
Discretize model by taking the average
on an oversampled grid.
* ``'integrate'``
Discretize model by integrating the model
over the bin using `scipy.integrate.quad`.
Very slow.
factor : float or int
Factor of oversampling. Default = 10.
Returns
-------
array : `numpy.array`
Model value array
Notes
-----
The ``oversample`` mode makes it possible to conserve the integral on a subpixel
scale. Here is an example with a normalized Gaussian1D:
.. plot::
:include-source:
import matplotlib.pyplot as plt
import numpy as np
from astropy.modeling.models import Gaussian1D
from astropy.convolution.utils import discretize_model
gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5)
y_center = discretize_model(gauss_1D, (-2, 3), mode='center')
y_corner = discretize_model(gauss_1D, (-2, 3), mode='linear_interp')
y_oversample = discretize_model(gauss_1D, (-2, 3), mode='oversample')
plt.plot(y_center, label='center sum = {0:3f}'.format(y_center.sum()))
plt.plot(y_corner, label='linear_interp sum = {0:3f}'.format(y_corner.sum()))
plt.plot(y_oversample, label='oversample sum = {0:3f}'.format(y_oversample.sum()))
plt.xlabel('pixels')
plt.ylabel('value')
plt.legend()
plt.show()
"""
if not callable(model):
raise TypeError('Model must be callable.')
if not isinstance(model, FittableModel):
model = custom_model(model)()
ndim = model.n_inputs
if ndim > 2:
raise ValueError('discretize_model only supports 1-d and 2-d models.')
if not float(np.diff(x_range)).is_integer():
raise ValueError("The difference between the upper an lower limit of"
" 'x_range' must be a whole number.")
if y_range:
if not float(np.diff(y_range)).is_integer():
raise ValueError("The difference between the upper an lower limit of"
" 'y_range' must be a whole number.")
if ndim == 2 and y_range is None:
raise ValueError("y range not specified, but model is 2-d")
if ndim == 1 and y_range is not None:
raise ValueError("y range specified, but model is only 1-d.")
if mode == "center":
if ndim == 1:
return discretize_center_1D(model, x_range)
elif ndim == 2:
return discretize_center_2D(model, x_range, y_range)
elif mode == "linear_interp":
if ndim == 1:
return discretize_linear_1D(model, x_range)
if ndim == 2:
return discretize_bilinear_2D(model, x_range, y_range)
elif mode == "oversample":
if ndim == 1:
return discretize_oversample_1D(model, x_range, factor)
if ndim == 2:
return discretize_oversample_2D(model, x_range, y_range, factor)
elif mode == "integrate":
if ndim == 1:
return discretize_integrate_1D(model, x_range)
if ndim == 2:
return discretize_integrate_2D(model, x_range, y_range)
else:
raise DiscretizationError('Invalid mode.')
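# --- Illustrative usage sketch (not part of the original module) -------------
# ``discretize_model`` also accepts plain callables, which are wrapped with
# ``custom_model`` before evaluation (see the docstring above).  A minimal
# sketch with a hypothetical Gaussian-like function; guarded so it does not
# execute at import time.
if __name__ == '__main__':
    import numpy as np
    from astropy.convolution import discretize_model

    def gauss(x, amplitude=1.0):
        # simple unnormalized Gaussian with unit width
        return amplitude * np.exp(-x ** 2 / 2.0)

    values_center = discretize_model(gauss, (-3, 4), mode='center')
    values_oversample = discretize_model(gauss, (-3, 4), mode='oversample')
    print(values_center.shape, values_oversample.shape)   # (7,) (7,)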
def discretize_center_1D(model, x_range):
"""
Discretize model by taking the value at the center of the bin.
"""
x = np.arange(*x_range)
return model(x)
def discretize_center_2D(model, x_range, y_range):
"""
Discretize model by taking the value at the center of the pixel.
"""
x = np.arange(*x_range)
y = np.arange(*y_range)
x, y = np.meshgrid(x, y)
return model(x, y)
def discretize_linear_1D(model, x_range):
"""
Discretize model by performing a linear interpolation.
"""
# Evaluate model 0.5 pixel outside the boundaries
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
values_intermediate_grid = model(x)
return 0.5 * (values_intermediate_grid[1:] + values_intermediate_grid[:-1])
def discretize_bilinear_2D(model, x_range, y_range):
"""
Discretize model by performing a bilinear interpolation.
"""
# Evaluate model 0.5 pixel outside the boundaries
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
x, y = np.meshgrid(x, y)
values_intermediate_grid = model(x, y)
# Mean in y direction
values = 0.5 * (values_intermediate_grid[1:, :]
+ values_intermediate_grid[:-1, :])
# Mean in x direction
values = 0.5 * (values[:, 1:]
+ values[:, :-1])
return values
def discretize_oversample_1D(model, x_range, factor=10):
"""
Discretize model by taking the average on an oversampled grid.
"""
# Evaluate model on oversampled grid
x = np.arange(x_range[0] - 0.5 * (1 - 1 / factor),
x_range[1] + 0.5 * (1 + 1 / factor), 1. / factor)
values = model(x)
# Reshape and compute mean
values = np.reshape(values, (x.size // factor, factor))
return values.mean(axis=1)[:-1]
def discretize_oversample_2D(model, x_range, y_range, factor=10):
"""
Discretize model by taking the average on an oversampled grid.
"""
# Evaluate model on oversampled grid
x = np.arange(x_range[0] - 0.5 * (1 - 1 / factor),
x_range[1] + 0.5 * (1 + 1 / factor), 1. / factor)
y = np.arange(y_range[0] - 0.5 * (1 - 1 / factor),
y_range[1] + 0.5 * (1 + 1 / factor), 1. / factor)
x_grid, y_grid = np.meshgrid(x, y)
values = model(x_grid, y_grid)
# Reshape and compute mean
shape = (y.size // factor, factor, x.size // factor, factor)
values = np.reshape(values, shape)
return values.mean(axis=3).mean(axis=1)[:-1, :-1]
def discretize_integrate_1D(model, x_range):
"""
Discretize model by integrating numerically the model over the bin.
"""
from scipy.integrate import quad
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
values = np.array([])
# Integrate over all bins
for i in range(x.size - 1):
values = np.append(values, quad(model, x[i], x[i + 1])[0])
return values
def discretize_integrate_2D(model, x_range, y_range):
"""
Discretize model by integrating the model over the pixel.
"""
from scipy.integrate import dblquad
# Set up grid
x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
values = np.empty((y.size - 1, x.size - 1))
# Integrate over all pixels
for i in range(x.size - 1):
for j in range(y.size - 1):
values[j, i] = dblquad(lambda y, x: model(x, y), x[i], x[i + 1],
lambda x: y[j], lambda x: y[j + 1])[0]
return values
|
31e855b28433d25beb7f95dc99f65cb41a20cca0ac5d9a71b7a50662e45a2338 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from functools import partial
from .core import Kernel, Kernel1D, Kernel2D, MAX_NORMALIZATION
from ..utils.exceptions import AstropyUserWarning
from ..utils.console import human_file_size
from ..utils.decorators import deprecated_renamed_argument
from .. import units as u
from ..nddata import support_nddata
from ..modeling.core import _make_arithmetic_operator, BINARY_OPERATORS
from ..modeling.core import _CompoundModelMeta
# Disabling all doctests in this module until a better way of handling warnings
# in doctests can be determined
__doctest_skip__ = ['*']
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
@support_nddata(data='array')
def convolve(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True, mask=None,
preserve_nan=False, normalization_zero_tol=1e-8):
'''
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
array : `numpy.ndarray` or `~astropy.nddata.NDData`
The array to convolve. This should be a 1, 2, or 3-dimensional array
or a list or a set of nested lists representing a 1, 2, or
3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those for
the array, and the dimensions should be odd in all directions. If a
masked array, the masked values will be replaced by ``fill_value``.
boundary : str, optional
A flag indicating how to handle boundaries:
* `None`
Set the ``result`` values to zero where the kernel
extends beyond the edge of the array.
* 'fill'
Set values outside the array boundary to ``fill_value`` (default).
* 'wrap'
Periodic boundary that wraps to the other side of ``array``.
* 'extend'
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``
normalize_kernel : bool, optional
Whether to normalize the kernel to have a sum of one prior to
convolving
nan_treatment : 'interpolate', 'fill'
'interpolate' will result in renormalization of the kernel at each
position, ignoring pixels that are NaN in both the image and the
kernel.
'fill' will replace the NaN pixels with a fixed numerical value (default
zero, see ``fill_value``) prior to convolution.
Note that if the kernel has a sum equal to zero, NaN interpolation
is not possible and will raise an exception.
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
masked if it is masked in either ``mask`` *or* ``array.mask``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different from zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is 1e-8.
Returns
-------
result : `numpy.ndarray`
An array with the same dimensions as the input array,
convolved with kernel. The data type depends on the input
array type. If array is a floating point type, then the
return array keeps the same data type, otherwise the type
is ``numpy.float``.
Notes
-----
For masked arrays, masked values are treated as NaNs. The convolution
is always done at ``numpy.float`` precision.
'''
from .boundary_none import (convolve1d_boundary_none,
convolve2d_boundary_none,
convolve3d_boundary_none)
from .boundary_extend import (convolve1d_boundary_extend,
convolve2d_boundary_extend,
convolve3d_boundary_extend)
from .boundary_fill import (convolve1d_boundary_fill,
convolve2d_boundary_fill,
convolve3d_boundary_fill)
from .boundary_wrap import (convolve1d_boundary_wrap,
convolve2d_boundary_wrap,
convolve3d_boundary_wrap)
if boundary not in BOUNDARY_OPTIONS:
raise ValueError("Invalid boundary option: must be one of {0}"
.format(BOUNDARY_OPTIONS))
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# The cython routines all need float type inputs (so, a particular
# bit size, endianness, etc.). So we have to convert, which also
# has the effect of making copies so we don't modify the inputs.
# After this, the variables we work with will be array_internal, and
# kernel_internal. However -- we do want to keep track of what type
# the input array was so we can cast the result to that at the end
# if it's a floating point type. Don't bother with this for lists --
# just always push those as float.
# It is always necessary to make a copy of kernel (since it is modified),
# but, if we just so happen to be lucky enough to have the input array
# have exactly the desired type, we just alias to array_internal
# Check if kernel is kernel instance
if isinstance(kernel, Kernel):
# Check if array is also kernel instance, if so convolve and
# return new kernel instance
if isinstance(array, Kernel):
if isinstance(array, Kernel1D) and isinstance(kernel, Kernel1D):
new_array = convolve1d_boundary_fill(array.array, kernel.array,
0, True)
new_kernel = Kernel1D(array=new_array)
elif isinstance(array, Kernel2D) and isinstance(kernel, Kernel2D):
new_array = convolve2d_boundary_fill(array.array, kernel.array,
0, True)
new_kernel = Kernel2D(array=new_array)
else:
raise Exception("Can't convolve 1D and 2D kernel.")
new_kernel._separable = kernel._separable and array._separable
new_kernel._is_bool = False
return new_kernel
kernel = kernel.array
# Check that the arguments are lists or Numpy arrays
if isinstance(array, list):
array_internal = np.array(array, dtype=float)
array_dtype = array_internal.dtype
elif isinstance(array, np.ndarray):
# Note this won't copy if it doesn't have to -- which is okay
# because none of what follows modifies array_internal.
array_dtype = array.dtype
array_internal = array.astype(float, copy=False)
else:
raise TypeError("array should be a list or a Numpy array")
if isinstance(kernel, list):
kernel_internal = np.array(kernel, dtype=float)
elif isinstance(kernel, np.ndarray):
# Note this always makes a copy, since we will be modifying it
kernel_internal = kernel.astype(float)
else:
raise TypeError("kernel should be a list or a Numpy array")
# Check that the number of dimensions is compatible
if array_internal.ndim != kernel_internal.ndim:
raise Exception('array and kernel have differing number of '
'dimensions.')
# anything that's masked must be turned into NaNs for the interpolation.
# This requires copying the array_internal
array_internal_copied = False
if np.ma.is_masked(array):
array_internal = array_internal.filled(np.nan)
array_internal_copied = True
if mask is not None:
if not array_internal_copied:
array_internal = array_internal.copy()
array_internal_copied = True
# mask != 0 yields a bool mask for all ints/floats/bool
array_internal[mask != 0] = np.nan
if np.ma.is_masked(kernel):
# *kernel* doesn't support NaN interpolation, so instead we just fill it
kernel_internal = kernel.filled(fill_value)
# Mark the NaN values so we can replace them later if interpolate_nan is
# not set
if preserve_nan:
badvals = np.isnan(array_internal)
if nan_treatment == 'fill':
initially_nan = np.isnan(array_internal)
array_internal[initially_nan] = fill_value
# Because the Cython routines have to normalize the kernel on the fly, we
# explicitly normalize the kernel here, and then scale the image at the
# end if normalization was not requested.
kernel_sum = kernel_internal.sum()
kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)
if (kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero) and normalize_kernel:
raise Exception("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {0}"
.format(1. / MAX_NORMALIZATION))
if not kernel_sums_to_zero:
kernel_internal /= kernel_sum
else:
kernel_internal = kernel
renormalize_by_kernel = not kernel_sums_to_zero
if array_internal.ndim == 0:
raise Exception("cannot convolve 0-dimensional arrays")
elif array_internal.ndim == 1:
if boundary == 'extend':
result = convolve1d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary == 'fill':
result = convolve1d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel)
elif boundary == 'wrap':
result = convolve1d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary is None:
result = convolve1d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel)
elif array_internal.ndim == 2:
if boundary == 'extend':
result = convolve2d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif boundary == 'fill':
result = convolve2d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel,
)
elif boundary == 'wrap':
result = convolve2d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif boundary is None:
result = convolve2d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif array_internal.ndim == 3:
if boundary == 'extend':
result = convolve3d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary == 'fill':
result = convolve3d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel)
elif boundary == 'wrap':
result = convolve3d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary is None:
result = convolve3d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel)
else:
raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional '
'arrays at this time')
# If normalization was not requested, we need to scale the array (since
# the kernel is effectively normalized within the cython functions)
if not normalize_kernel and not kernel_sums_to_zero:
result *= kernel_sum
if preserve_nan:
result[badvals] = np.nan
if nan_treatment == 'fill':
array_internal[initially_nan] = np.nan
# Try to preserve the input type if it's a floating point type
if array_dtype.kind == 'f':
# Avoid making another copy if possible
try:
return result.astype(array_dtype, copy=False)
except TypeError:
return result.astype(array_dtype)
else:
return result
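# --- Illustrative usage sketch (not part of the original module) -------------
# NaN treatment in ``convolve``: with the default ``nan_treatment='interpolate'``
# NaN pixels are replaced by values interpolated through the kernel, while
# ``preserve_nan=True`` restores them afterwards.  Assumes the public
# astropy.convolution API; guarded so it does not execute at import time.
if __name__ == '__main__':
    import numpy as np
    from astropy.convolution import convolve

    data = np.array([1., np.nan, 3.])
    smoothed = convolve(data, np.array([1., 1., 1.]), boundary='extend')
    print(smoothed)       # no NaN holes; the middle value is interpolated
    kept = convolve(data, np.array([1., 1., 1.]), boundary='extend',
                    preserve_nan=True)
    print(kept)           # the originally-NaN pixel is NaN again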
@deprecated_renamed_argument('interpolate_nan', 'nan_treatment', 'v2.0.0')
@support_nddata(data='array')
def convolve_fft(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True,
normalization_zero_tol=1e-8,
preserve_nan=False, mask=None, crop=True, return_fft=False,
fft_pad=None, psf_pad=None, quiet=False,
min_wt=0.0, allow_huge=False,
fftn=np.fft.fftn, ifftn=np.fft.ifftn,
complex_dtype=complex):
"""
Convolve an ndarray with an nd-kernel. Returns a convolved image with
``shape = array.shape``. Assumes kernel is centered.
`convolve_fft` is very similar to `convolve` in that it replaces ``NaN``
values in the original image with interpolated values using the kernel as
an interpolation function. However, it also includes many additional
options specific to the implementation.
`convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:
* It can treat ``NaN`` values as zeros or interpolate over them.
* ``inf`` values are treated as ``NaN``
* (optionally) It pads to the nearest 2^n size to improve FFT speed.
* Its only valid ``mode`` is 'same' (i.e., the same shape array is returned)
* It lets you use your own fft, e.g.,
`pyFFTW <https://pypi.python.org/pypi/pyFFTW>`_ or
`pyFFTW3 <https://pypi.python.org/pypi/PyFFTW3/0.2.1>`_ , which can lead to
performance improvements, depending on your system configuration. pyFFTW3
is threaded, and therefore may yield significant performance benefits on
multi-core machines at the cost of greater memory requirements. Specify
the ``fftn`` and ``ifftn`` keywords to override the default, which is
`numpy.fft.fftn` and `numpy.fft.ifftn`.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric)
boundary : {'fill', 'wrap'}, optional
A flag indicating how to handle boundaries:
* 'fill': set values outside the array boundary to fill_value
(default)
* 'wrap': periodic boundary
The `None` and 'extend' parameters are not supported for FFT-based
convolution
fill_value : float, optional
The value to use outside the array when using boundary='fill'
nan_treatment : 'interpolate', 'fill'
``interpolate`` will result in renormalization of the kernel at each
position, ignoring pixels that are NaN in both the image and the
kernel. ``fill`` will replace the NaN pixels with a fixed
numerical value (default zero, see ``fill_value``) prior to
convolution. Note that if the kernel has a sum equal to zero, NaN
interpolation is not possible and will raise an exception.
normalize_kernel : function or boolean, optional
If specified, this is the function to divide kernel by to normalize it.
e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be:
``kernel = kernel / np.sum(kernel)``. If True, defaults to
``normalize_kernel = np.sum``.
normalization_zero_tol : float, optional
The absolute tolerance on whether the kernel is different from zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is 1e-8.
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
masked if it is masked in either ``mask`` *or* ``array.mask``.
Other Parameters
----------------
min_wt : float, optional
If ignoring ``NaN`` / zeros, force all grid points with a weight less than
this value to ``NaN`` (the weight of a grid point with *no* ignored
neighbors is 1.0).
If ``min_wt`` is zero, then all zero-weight points will be set to zero
instead of ``NaN`` (which they would be otherwise, because 1/0 = nan).
See the examples below
fft_pad : bool, optional
Default on. Zero-pad image to the nearest 2^n. With
``boundary='wrap'``, this will be disabled.
psf_pad : bool, optional
Zero-pad image to be at least the sum of the image sizes to avoid
edge-wrapping when smoothing. This is enabled by default with
``boundary='fill'``, but it can be overridden with a boolean option.
``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
crop : bool, optional
Default on. Return an image of the size of the larger of the input
image and the kernel.
If the image and kernel are asymmetric in opposite directions, will
return the largest image in both directions.
For example, if an input image has shape [100,3] but a kernel with shape
[6,6] is used, the output will be [100,6].
return_fft : bool, optional
Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is
``ifft(fft(image)*fft(kernel))``). Useful for making PSDs.
fftn, ifftn : functions, optional
The fft and inverse fft functions. Can be overridden to use your own
ffts, e.g. an fftw3 wrapper or scipy's fftn,
``fftn=scipy.fftpack.fftn``
complex_dtype : numpy.complex, optional
Which complex dtype to use. `numpy` has a range of options, from 64 to
256.
quiet : bool, optional
Silence warning message about NaN interpolation
allow_huge : bool, optional
Allow huge arrays in the FFT? If False, will raise an exception if the
array or kernel size is >1 GB
Raises
------
ValueError:
If the array is bigger than 1 GB after padding, will raise this exception
unless ``allow_huge`` is True
See Also
--------
convolve:
Convolve is a non-fft version of this code. It is more memory
efficient and for small kernels can be faster.
Returns
-------
default : ndarray
``array`` convolved with ``kernel``. If ``return_fft`` is set, returns
``fft(array) * fft(kernel)``. If crop is not set, returns the
image, but with the fft-padded size instead of the input size
Notes
-----
With ``psf_pad=True`` and a large PSF, the resulting data can become
very large and consume a lot of memory. See Issue
https://github.com/astropy/astropy/pull/4366 for further detail.
Examples
--------
>>> convolve_fft([1, 0, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, 0, 3], [0, 1, 0])
array([ 1., 0., 3.])
>>> convolve_fft([1, 2, 3], [1])
array([ 1., 2., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate')
...
array([ 1., 0., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate',
... min_wt=1e-8)
array([ 1., nan, 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate')
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True)
array([ 1., 2., 3.])
>>> import scipy.fftpack # optional - requires scipy
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True,
... fftn=scipy.fftpack.fft, ifftn=scipy.fftpack.ifft)
array([ 1., 2., 3.])
"""
# Checking copied from convolve.py - however, since FFTs have real &
# complex components, we change the types. Only the real part will be
# returned! Note that this always makes a copy.
# Check kernel is kernel instance
if isinstance(kernel, Kernel):
kernel = kernel.array
if isinstance(array, Kernel):
raise TypeError("Can't convolve two kernels with convolve_fft. "
"Use convolve instead.")
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# Convert array dtype to complex
# and ensure that list inputs become arrays
array = np.asarray(array, dtype=complex)
kernel = np.asarray(kernel, dtype=complex)
# Check that the number of dimensions is compatible
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of "
"dimensions")
arrayshape = array.shape
kernshape = kernel.shape
array_size_B = (np.product(arrayshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_B > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_B.to_value(u.byte))))
# mask catching - masks must be turned into NaNs for use later in the image
if np.ma.is_masked(array):
mamask = array.mask
array = np.array(array)
array[mamask] = np.nan
elif mask is not None:
# copying here because we have to mask it below. But no need to copy
# if mask is None because we won't modify it.
array = np.array(array)
if mask is not None:
# mask != 0 yields a bool mask for all ints/floats/bool
array[mask != 0] = np.nan
# the *kernel* doesn't support NaN interpolation, so instead we just fill it
if np.ma.is_masked(kernel):
kernel = kernel.filled(0)
# NaN and inf catching
nanmaskarray = np.isnan(array) | np.isinf(array)
array[nanmaskarray] = 0
nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
kernel[nanmaskkernel] = 0
if normalize_kernel is True:
if kernel.sum() < 1. / MAX_NORMALIZATION:
raise Exception("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {0}"
.format(1. / MAX_NORMALIZATION))
kernel_scale = kernel.sum()
normalized_kernel = kernel / kernel_scale
kernel_scale = 1 # if we want to normalize it, leave it normed!
elif normalize_kernel:
# try this. If a function is not passed, the code will just crash... I
# think type checking would be better but PEPs say otherwise...
kernel_scale = normalize_kernel(kernel)
normalized_kernel = kernel / kernel_scale
else:
kernel_scale = kernel.sum()
if np.abs(kernel_scale) < normalization_zero_tol:
if nan_treatment == 'interpolate':
raise ValueError('Cannot interpolate NaNs with an unnormalizable kernel')
else:
# the kernel's sum is near-zero, so it can't be scaled
kernel_scale = 1
normalized_kernel = kernel
else:
# the kernel is normalizable; we'll temporarily normalize it
# now and undo the normalization later.
normalized_kernel = kernel / kernel_scale
if boundary is None:
warnings.warn("The convolve_fft version of boundary=None is "
"equivalent to the convolve boundary='fill'. There is "
"no FFT equivalent to convolve's "
"zero-if-kernel-leaves-boundary", AstropyUserWarning)
if psf_pad is None:
psf_pad = True
if fft_pad is None:
fft_pad = True
elif boundary == 'fill':
# create a boundary region at least as large as the kernel
if psf_pad is False:
warnings.warn("psf_pad was set to {0}, which overrides the "
"boundary='fill' setting.".format(psf_pad),
AstropyUserWarning)
else:
psf_pad = True
if fft_pad is None:
# default is 'True' according to the docstring
fft_pad = True
elif boundary == 'wrap':
if psf_pad:
raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
psf_pad = False
if fft_pad:
raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
fft_pad = False
fill_value = 0 # force zero; it should not be used
elif boundary == 'extend':
raise NotImplementedError("The 'extend' option is not implemented "
"for fft-based convolution")
# find ideal size (power of 2) for fft.
# Can add shapes because they are tuples
if fft_pad: # default=True
if psf_pad: # default=False
# add the dimensions and then take the max (bigger)
fsize = 2 ** np.ceil(np.log2(
np.max(np.array(arrayshape) + np.array(kernshape))))
else:
# add the shape lists (max of a list of length 4) (smaller)
# also makes the shapes square
fsize = 2 ** np.ceil(np.log2(np.max(arrayshape + kernshape)))
newshape = np.array([fsize for ii in range(array.ndim)], dtype=int)
else:
if psf_pad:
# just add the biggest dimensions
newshape = np.array(arrayshape) + np.array(kernshape)
else:
newshape = np.array([np.max([imsh, kernsh])
for imsh, kernsh in zip(arrayshape, kernshape)])
# perform a second check after padding
array_size_C = (np.product(newshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_C > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_C)))
# For future reference, this can be used to predict "almost exactly"
# how much *additional* memory will be used.
# size * (array + kernel + kernelfft + arrayfft +
# (kernel*array)fft +
# optional(weight image + weight_fft + weight_ifft) +
# optional(returned_fft))
# total_memory_used_GB = (np.product(newshape)*np.dtype(complex_dtype).itemsize
# * (5 + 3*((interpolate_nan or ) and kernel_is_normalized))
# + (1 + (not return_fft)) *
# np.product(arrayshape)*np.dtype(complex_dtype).itemsize
# + np.product(arrayshape)*np.dtype(bool).itemsize
# + np.product(kernshape)*np.dtype(bool).itemsize)
# ) / 1024.**3
# separate each dimension by the padding size... this is to determine the
# appropriate slice size to get back to the input dimensions
arrayslices = []
kernslices = []
for ii, (newdimsize, arraydimsize, kerndimsize) in enumerate(zip(newshape, arrayshape, kernshape)):
center = newdimsize - (newdimsize + 1) // 2
arrayslices += [slice(center - arraydimsize // 2,
center + (arraydimsize + 1) // 2)]
kernslices += [slice(center - kerndimsize // 2,
center + (kerndimsize + 1) // 2)]
if not np.all(newshape == arrayshape):
if np.isfinite(fill_value):
bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value
else:
bigarray = np.zeros(newshape, dtype=complex_dtype)
bigarray[arrayslices] = array
else:
bigarray = array
if not np.all(newshape == kernshape):
bigkernel = np.zeros(newshape, dtype=complex_dtype)
bigkernel[kernslices] = normalized_kernel
else:
bigkernel = normalized_kernel
arrayfft = fftn(bigarray)
# need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
kernfft = fftn(np.fft.ifftshift(bigkernel))
fftmult = arrayfft * kernfft
interpolate_nan = (nan_treatment == 'interpolate')
if interpolate_nan:
if not np.isfinite(fill_value):
bigimwt = np.zeros(newshape, dtype=complex_dtype)
else:
bigimwt = np.ones(newshape, dtype=complex_dtype)
bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan
wtfft = fftn(bigimwt)
# You can only get to this point if kernel_is_normalized
wtfftmult = wtfft * kernfft
wtsm = ifftn(wtfftmult)
# need to re-zero weights outside of the image (if it is padded, we
# still don't weight those regions)
bigimwt[arrayslices] = wtsm.real[arrayslices]
# curiously, at the floating-point limit, can get slightly negative numbers
# they break the min_wt=0 "flag" and must therefore be removed
bigimwt[bigimwt < 0] = 0
else:
bigimwt = 1
if np.isnan(fftmult).any():
# this check should be unnecessary; call it an insanity check
raise ValueError("Encountered NaNs in convolve. This is disallowed.")
# restore NaNs in original image (they were modified inplace earlier)
# We don't have to worry about masked arrays - if input was masked, it was
# copied
array[nanmaskarray] = np.nan
kernel[nanmaskkernel] = np.nan
fftmult *= kernel_scale
if return_fft:
return fftmult
if interpolate_nan:
rifft = (ifftn(fftmult)) / bigimwt
if not np.isscalar(bigimwt):
rifft[bigimwt < min_wt] = np.nan
if min_wt == 0.0:
rifft[bigimwt == 0.0] = 0.0
else:
rifft = (ifftn(fftmult))
if preserve_nan:
rifft[arrayslices][nanmaskarray] = np.nan
if crop:
result = rifft[arrayslices].real
return result
else:
return rifft.real
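# --- Illustrative usage sketch (not part of the original module) -------------
# ``convolve_fft`` mirrors ``convolve`` but works in Fourier space and can
# optionally return the product of the FFTs instead of the convolved image.
# Assumes the public astropy.convolution API; guarded so it does not execute
# at import time.
if __name__ == '__main__':
    import numpy as np
    from astropy.convolution import convolve_fft

    image = np.zeros((32, 32))
    image[16, 16] = 1.0
    kernel = np.ones((3, 3)) / 9.0
    result = convolve_fft(image, kernel, boundary='fill', fill_value=0.)
    print(result.shape)   # (32, 32): cropped back to the input shape
    ft = convolve_fft(image, kernel, return_fft=True)
    print(ft.dtype)       # complex: fft(image) * fft(kernel) on the padded grid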
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs):
"""
Given a data set containing NaNs, replace the NaNs by interpolating from
neighboring data points with a given kernel.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric). The kernel
*must be normalizable* (i.e., its sum cannot be zero).
convolve : `convolve` or `convolve_fft`
One of the two convolution functions defined in this package.
Returns
-------
newarray : `numpy.ndarray`
A copy of the original array with NaN pixels replaced with their
interpolated counterparts
"""
if not np.any(np.isnan(array)):
return array.copy()
newarray = array.copy()
convolved = convolve(array, kernel, nan_treatment='interpolate',
normalize_kernel=True, **kwargs)
isnan = np.isnan(array)
newarray[isnan] = convolved[isnan]
return newarray
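# --- Illustrative usage sketch (not part of the original module) -------------
# Replacing NaNs by kernel interpolation with ``interpolate_replace_nans``.
# Assumes the public astropy.convolution API; guarded so it does not execute
# at import time.
if __name__ == '__main__':
    import numpy as np
    from astropy.convolution import interpolate_replace_nans

    data = np.arange(25, dtype=float).reshape(5, 5)
    data[2, 2] = np.nan
    fixed = interpolate_replace_nans(data, np.ones((3, 3)))
    print(np.isnan(fixed).any())   # False: the NaN pixel was filled in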
def convolve_models(model, kernel, mode='convolve_fft', **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
mode : str
Keyword representing which function to use for convolution.
* 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
* 'convolve' : use `~astropy.convolution.convolve`.
kwargs : dict
Keyword arguments to be passed either to `~astropy.convolution.convolve`
or `~astropy.convolution.convolve_fft` depending on ``mode``.
Returns
-------
default : CompoundModel
Convolved model
"""
if mode == 'convolve_fft':
BINARY_OPERATORS['convolve_fft'] = _make_arithmetic_operator(partial(convolve_fft, **kwargs))
elif mode == 'convolve':
BINARY_OPERATORS['convolve'] = _make_arithmetic_operator(partial(convolve, **kwargs))
else:
raise ValueError('Mode {} is not supported.'.format(mode))
return _CompoundModelMeta._from_operator(mode, model, kernel)
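# --- Illustrative usage sketch (not part of the original module) -------------
# ``convolve_models`` builds a compound model whose evaluation convolves the
# outputs of the two input models.  A minimal sketch using Gaussian1D from
# astropy.modeling; guarded so it does not execute at import time.
if __name__ == '__main__':
    import numpy as np
    from astropy.convolution import convolve_models
    from astropy.modeling.models import Gaussian1D

    g1 = Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0)
    g2 = Gaussian1D(amplitude=1.0, mean=0.0, stddev=2.0)
    convolved = convolve_models(g1, g2, mode='convolve_fft')
    x = np.linspace(-5, 5, 101)
    print(convolved(x).shape)      # (101,): evaluates like any other model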
|
3a31f66c637e99055a4f4565c425a6c9912405f7d7076e4c8c18f7d1a367cbf0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""This module contains classes and functions to standardize access to
configuration files for Astropy and affiliated packages.
.. note::
The configuration system makes use of the 'configobj' package, which stores
configuration in a text format like that used in the standard library
`ConfigParser`. More information and documentation for configobj can be
found at http://www.voidspace.org.uk/python/configobj.html.
"""
from contextlib import contextmanager
import hashlib
import io
from os import path
import re
from warnings import warn
from ..extern.configobj import configobj, validate
from ..utils.exceptions import AstropyWarning, AstropyDeprecationWarning
from ..utils import find_current_module
from ..utils.introspection import resolve_name
from ..utils.misc import InheritDocstrings
from .paths import get_config_dir
__all__ = ['InvalidConfigurationItemWarning',
'ConfigurationMissingWarning', 'get_config',
'reload_config', 'ConfigNamespace', 'ConfigItem']
class InvalidConfigurationItemWarning(AstropyWarning):
""" A Warning that is issued when the configuration value specified in the
astropy configuration file does not match the type expected for that
configuration value.
"""
class ConfigurationMissingWarning(AstropyWarning):
""" A Warning that is issued when the configuration directory cannot be
accessed (usually due to a permissions problem). If this warning appears,
configuration items will be set to their defaults rather than read from the
configuration file, and no configuration will persist across sessions.
"""
# these are not in __all__ because it's not intended that a user ever see them
class ConfigurationDefaultMissingError(ValueError):
""" An exception that is raised when the configuration defaults (which
should be generated at build-time) are missing.
"""
# this is used in astropy/__init__.py
class ConfigurationDefaultMissingWarning(AstropyWarning):
""" A warning that is issued when the configuration defaults (which
should be generated at build-time) are missing.
"""
class ConfigurationChangedWarning(AstropyWarning):
"""
A warning that the configuration options have changed.
"""
class _ConfigNamespaceMeta(type):
def __init__(cls, name, bases, dict):
if cls.__bases__[0] is object:
return
for key, val in dict.items():
if isinstance(val, ConfigItem):
val.name = key
class ConfigNamespace(metaclass=_ConfigNamespaceMeta):
"""
A namespace of configuration items. Each subpackage with
configuration items should define a subclass of this class,
containing `ConfigItem` instances as members.
For example::
class Conf(_config.ConfigNamespace):
unicode_output = _config.ConfigItem(
False,
'Use Unicode characters when outputting values, ...')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when ...',
aliases=['astropy.utils.console.USE_COLOR'])
conf = Conf()
"""
def set_temp(self, attr, value):
"""
Temporarily set a configuration value.
Parameters
----------
attr : str
Configuration item name
value : object
The value to set temporarily.
Examples
--------
>>> import astropy
>>> with astropy.conf.set_temp('use_color', False):
... pass
... # console output will not contain color
>>> # console output contains color again...
"""
if hasattr(self, attr):
return self.__class__.__dict__[attr].set_temp(value)
raise AttributeError("No configuration parameter '{0}'".format(attr))
def reload(self, attr=None):
"""
Reload a configuration item from the configuration file.
Parameters
----------
attr : str, optional
The name of the configuration parameter to reload. If not
provided, reload all configuration parameters.
"""
if attr is not None:
if hasattr(self, attr):
return self.__class__.__dict__[attr].reload()
raise AttributeError("No configuration parameter '{0}'".format(attr))
for item in self.__class__.__dict__.values():
if isinstance(item, ConfigItem):
item.reload()
def reset(self, attr=None):
"""
Reset a configuration item to its default.
Parameters
----------
attr : str, optional
The name of the configuration parameter to reset. If not
provided, reset all configuration parameters.
"""
if attr is not None:
if hasattr(self, attr):
prop = self.__class__.__dict__[attr]
prop.set(prop.defaultvalue)
return
raise AttributeError("No configuration parameter '{0}'".format(attr))
for item in self.__class__.__dict__.values():
if isinstance(item, ConfigItem):
item.set(item.defaultvalue)
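# --- Illustrative usage sketch (not part of the original module) -------------
# Working with an existing configuration namespace: ``set_temp`` temporarily
# overrides an item inside a with block and ``reset`` restores the default.
# Uses the ``astropy.conf`` namespace referenced in the docstrings above;
# guarded so it does not execute at import time.
if __name__ == '__main__':
    import astropy

    with astropy.conf.set_temp('use_color', False):
        pass                         # ANSI color output is disabled in here
    astropy.conf.reset('use_color')  # back to the default value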
class ConfigItem(metaclass=InheritDocstrings):
"""
A setting and associated value stored in a configuration file.
These objects should be created as members of
`ConfigNamespace` subclasses, for example::
class _Conf(config.ConfigNamespace):
unicode_output = config.ConfigItem(
False,
'Use Unicode characters when outputting values, and writing widgets '
'to the console.')
conf = _Conf()
Parameters
----------
defaultvalue : object, optional
The default value for this item. If this is a list of strings, this
item will be interpreted as an 'options' value - this item must be one
of those values, and the first in the list will be taken as the default
value.
description : str or None, optional
A description of this item (will be shown as a comment in the
configuration file)
cfgtype : str or None, optional
A type specifier like those used as the *values* of a particular key
in a ``configspec`` file of ``configobj``. If None, the type will be
inferred from the default value.
module : str or None, optional
The full module name that this item is associated with. The first
element (e.g. 'astropy' if this is 'astropy.config.configuration')
will be used to determine the name of the configuration file, while
the remaining items determine the section. If None, the package will be
inferred from the package within which this object's initializer is
called.
aliases : str, or list of str, optional
The deprecated location(s) of this configuration item. If the
config item is not found at the new location, it will be
searched for at all of the old locations.
Raises
------
RuntimeError
If ``module`` is `None`, but the module this item is created from
cannot be determined.
"""
# this is used to make validation faster so a Validator object doesn't
# have to be created every time
_validator = validate.Validator()
cfgtype = None
"""
A type specifier like those used as the *values* of a particular key in a
``configspec`` file of ``configobj``.
"""
def __init__(self, defaultvalue='', description=None, cfgtype=None,
module=None, aliases=None):
from ..utils import isiterable
if module is None:
module = find_current_module(2)
if module is None:
msg1 = 'Cannot automatically determine get_config module, '
msg2 = 'because it is not called from inside a valid module'
raise RuntimeError(msg1 + msg2)
else:
module = module.__name__
self.module = module
self.description = description
self.__doc__ = description
# now determine cfgtype if it is not given
if cfgtype is None:
if (isiterable(defaultvalue) and not
isinstance(defaultvalue, str)):
# it is an options list
dvstr = [str(v) for v in defaultvalue]
cfgtype = 'option(' + ', '.join(dvstr) + ')'
defaultvalue = dvstr[0]
elif isinstance(defaultvalue, bool):
cfgtype = 'boolean'
elif isinstance(defaultvalue, int):
cfgtype = 'integer'
elif isinstance(defaultvalue, float):
cfgtype = 'float'
elif isinstance(defaultvalue, str):
cfgtype = 'string'
defaultvalue = str(defaultvalue)
self.cfgtype = cfgtype
self._validate_val(defaultvalue)
self.defaultvalue = defaultvalue
if aliases is None:
self.aliases = []
elif isinstance(aliases, str):
self.aliases = [aliases]
else:
self.aliases = aliases
def __set__(self, obj, value):
return self.set(value)
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self()
def set(self, value):
"""
Sets the current value of this ``ConfigItem``.
This also updates the comments that give the description and type
information.
Parameters
----------
value
The value this item should be set to.
Raises
------
TypeError
If the provided ``value`` is not valid for this ``ConfigItem``.
"""
try:
value = self._validate_val(value)
except validate.ValidateError as e:
msg = 'Provided value for configuration item {0} not valid: {1}'
raise TypeError(msg.format(self.name, e.args[0]))
sec = get_config(self.module)
sec[self.name] = value
@contextmanager
def set_temp(self, value):
"""
Sets this item to a specified value only inside a with block.
Use as::
ITEM = ConfigItem('ITEM', 'default', 'description')
with ITEM.set_temp('newval'):
#... do something that wants ITEM's value to be 'newval' ...
print(ITEM)
# ITEM is now 'default' after the with block
Parameters
----------
value
The value to set this item to inside the with block.
"""
initval = self()
self.set(value)
try:
yield
finally:
self.set(initval)
def reload(self):
""" Reloads the value of this ``ConfigItem`` from the relevant
configuration file.
Returns
-------
val
The new value loaded from the configuration file.
"""
self.set(self.defaultvalue)
baseobj = get_config(self.module, True)
secname = baseobj.name
cobj = baseobj
# a ConfigObj's parent is itself, so walk up until we reach the root object
while cobj.parent is not cobj:
cobj = cobj.parent
newobj = configobj.ConfigObj(cobj.filename, interpolation=False)
if secname is not None:
if secname not in newobj:
return baseobj.get(self.name)
newobj = newobj[secname]
if self.name in newobj:
baseobj[self.name] = newobj[self.name]
return baseobj.get(self.name)
def __repr__(self):
out = '<{0}: name={1!r} value={2!r} at 0x{3:x}>'.format(
self.__class__.__name__, self.name, self(), id(self))
return out
def __str__(self):
out = '\n'.join(('{0}: {1}',
' cfgtype={2!r}',
' defaultvalue={3!r}',
' description={4!r}',
' module={5}',
' value={6!r}'))
out = out.format(self.__class__.__name__, self.name, self.cfgtype,
self.defaultvalue, self.description, self.module,
self())
return out
def __call__(self):
""" Returns the value of this ``ConfigItem``
Returns
-------
val
This item's value, with a type determined by the ``cfgtype``
attribute.
Raises
------
TypeError
If the configuration value as stored is not this item's type.
"""
def section_name(section):
if section == '':
return 'at the top-level'
else:
return 'in section [{0}]'.format(section)
options = []
sec = get_config(self.module)
if self.name in sec:
options.append((sec[self.name], self.module, self.name))
for alias in self.aliases:
module, name = alias.rsplit('.', 1)
sec = get_config(module)
if '.' in module:
filename, module = module.split('.', 1)
else:
filename = module
module = ''
if name in sec:
if '.' in self.module:
new_module = self.module.split('.', 1)[1]
else:
new_module = ''
warn(
"Config parameter '{0}' {1} of the file '{2}' "
"is deprecated. Use '{3}' {4} instead.".format(
name, section_name(module), get_config_filename(filename),
self.name, section_name(new_module)),
AstropyDeprecationWarning)
options.append((sec[name], module, name))
if len(options) == 0:
self.set(self.defaultvalue)
options.append((self.defaultvalue, None, None))
if len(options) > 1:
filename, sec = self.module.split('.', 1)
warn(
"Config parameter '{0}' {1} of the file '{2}' is "
"given by more than one alias ({3}). Using the first.".format(
self.name, section_name(sec), get_config_filename(filename),
', '.join([
'.'.join(x[1:3]) for x in options if x[1] is not None])),
AstropyDeprecationWarning)
val = options[0][0]
try:
return self._validate_val(val)
except validate.ValidateError as e:
raise TypeError('Configuration value not valid: ' + e.args[0])
def _validate_val(self, val):
""" Validates the provided value based on cfgtype and returns the
type-cast value
throws the underlying configobj exception if it fails
"""
# note that this will normally use the *class* attribute `_validator`,
# but if some arcane reason is needed for making a special one for an
# instance or sub-class, it will be used
return self._validator.check(self.cfgtype, val)
# this dictionary stores the master copy of the ConfigObj's for each
# root package
_cfgobjs = {}
def get_config_filename(packageormod=None):
"""
Get the filename of the config file associated with the given
package or module.
"""
cfg = get_config(packageormod)
while cfg.parent is not cfg:
cfg = cfg.parent
return cfg.filename
# This is used by testing to override the config file, so we can test
# with various config files that exercise different features of the
# config system.
_override_config_file = None
def get_config(packageormod=None, reload=False):
""" Gets the configuration object or section associated with a particular
package or module.
Parameters
----------
packageormod : str or None
The package for which to retrieve the configuration object. If a
string, it must be a valid package name, or if `None`, the package from
which this function is called will be used.
reload : bool, optional
Reload the file, even if we have it cached.
Returns
-------
cfgobj : ``configobj.ConfigObj`` or ``configobj.Section``
If the requested package is a base package, this will be the
``configobj.ConfigObj`` for that package, or if it is a subpackage or
module, it will return the relevant ``configobj.Section`` object.
Raises
------
RuntimeError
If ``packageormod`` is `None`, but the package this item is created
from cannot be determined.
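Examples
--------
An illustrative call (the package name ``mypkg`` is hypothetical)::
    from astropy.config.configuration import get_config
    root = get_config('mypkg')            # root ConfigObj for mypkg.cfg
    sec = get_config('mypkg.somemodule')  # Section named 'somemodule'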
"""
if packageormod is None:
packageormod = find_current_module(2)
if packageormod is None:
msg1 = 'Cannot automatically determine get_config module, '
msg2 = 'because it is not called from inside a valid module'
raise RuntimeError(msg1 + msg2)
else:
packageormod = packageormod.__name__
packageormodspl = packageormod.split('.')
rootname = packageormodspl[0]
secname = '.'.join(packageormodspl[1:])
cobj = _cfgobjs.get(rootname, None)
if cobj is None or reload:
if _ASTROPY_SETUP_:
# There's no reason to use anything but the default config
cobj = configobj.ConfigObj(interpolation=False)
else:
cfgfn = None
try:
# This feature is intended only for use by the unit tests
if _override_config_file is not None:
cfgfn = _override_config_file
else:
cfgfn = path.join(get_config_dir(), rootname + '.cfg')
cobj = configobj.ConfigObj(cfgfn, interpolation=False)
except OSError as e:
msg = ('Configuration defaults will be used due to ')
errstr = '' if len(e.args) < 1 else (':' + str(e.args[0]))
msg += e.__class__.__name__ + errstr
msg += ' on {0}'.format(cfgfn)
warn(ConfigurationMissingWarning(msg))
# This caches the object, so if the file becomes accessible, this
# function won't see it unless the module is reloaded
cobj = configobj.ConfigObj(interpolation=False)
_cfgobjs[rootname] = cobj
if secname: # not the root package
if secname not in cobj:
cobj[secname] = {}
return cobj[secname]
else:
return cobj
def reload_config(packageormod=None):
""" Reloads configuration settings from a configuration file for the root
package of the requested package/module.
This overwrites any changes that may have been made in `ConfigItem`
objects. This applies for any items that are based on this file, which
is determined by the *root* package of ``packageormod``
(e.g. ``'astropy.cfg'`` for the ``'astropy.config.configuration'``
module).
Parameters
----------
packageormod : str or None
The package or module name - see `get_config` for details.
"""
sec = get_config(packageormod, True)
# look for the section that is its own parent - that's the base object
while sec.parent is not sec:
sec = sec.parent
sec.reload()
def is_unedited_config_file(content, template_content=None):
"""
Determines if a config file can be safely replaced because it doesn't
actually contain any meaningful content.
To meet this criterion, the config file must be either:
- All comments or completely empty
- An exact match to a "legacy" version of the config file prior to
Astropy 0.4, when APE3 was implemented and the config file
contained commented-out values by default.
"""
# We want to calculate the md5sum using universal line endings, so
# that even if the files had their line endings converted to \r\n
# on Windows, this will still work.
content = content.encode('latin-1')
# The jquery_url setting, present in 0.3.2 and later only, is
# effectively auto-generated by the build system, so we need to
# ignore it in the md5sum calculation for 0.3.2.
content = re.sub(br'\njquery_url\s*=\s*[^\n]+', b'', content)
# First determine if the config file has any effective content
buffer = io.BytesIO(content)
buffer.seek(0)
raw_cfg = configobj.ConfigObj(buffer, interpolation=True)
for v in raw_cfg.values():
if len(v):
break
else:
return True
# Now determine if it matches the md5sum of a known, unedited
# config file.
known_configs = set([
'7d4b4f1120304b286d71f205975b1286', # v0.3.2
'5df7e409425e5bfe7ed041513fda3288', # v0.3
'8355f99a01b3bdfd8761ef45d5d8b7e5', # v0.2
'4ea5a84de146dc3fcea2a5b93735e634' # v0.2.1, v0.2.2, v0.2.3, v0.2.4, v0.2.5
])
md5 = hashlib.md5()
md5.update(content)
digest = md5.hexdigest()
return digest in known_configs
# this is not in __all__ because it's not intended that a user uses it
def update_default_config(pkg, default_cfg_dir_or_fn, version=None):
"""
Checks if the configuration file for the specified package exists,
and if not, copy over the default configuration. If the
configuration file looks like it has already been edited, we do
not write over it, but instead write a file alongside it named
``pkg.version.cfg`` as a "template" for the user.
Parameters
----------
pkg : str
The package to be updated.
default_cfg_dir_or_fn : str
The filename or directory name where the default configuration file is.
If a directory name, ``'pkg.cfg'`` will be used in that directory.
version : str, optional
The current version of the given package. If not provided, it will
be obtained from ``pkg.__version__``.
Returns
-------
updated : bool
If the profile was updated, `True`, otherwise `False`.
Raises
------
AttributeError
If the version number of the package could not be determined.
"""
if path.isdir(default_cfg_dir_or_fn):
default_cfgfn = path.join(default_cfg_dir_or_fn, pkg + '.cfg')
else:
default_cfgfn = default_cfg_dir_or_fn
if not path.isfile(default_cfgfn):
# There is no template configuration file, which basically
# means the affiliated package is not using the configuration
# system, so just return.
return False
cfgfn = get_config(pkg).filename
with open(default_cfgfn, 'rt', encoding='latin-1') as fr:
template_content = fr.read()
doupdate = False
if cfgfn is not None:
if path.exists(cfgfn):
with open(cfgfn, 'rt', encoding='latin-1') as fd:
content = fd.read()
identical = (content == template_content)
if not identical:
doupdate = is_unedited_config_file(
content, template_content)
elif path.exists(path.dirname(cfgfn)):
doupdate = True
identical = False
if version is None:
version = resolve_name(pkg, '__version__')
# Don't install template files for dev versions, or we'll end up
# spamming `~/.astropy/config`.
if 'dev' not in version and cfgfn is not None:
template_path = path.join(
get_config_dir(), '{0}.{1}.cfg'.format(pkg, version))
needs_template = not path.exists(template_path)
else:
needs_template = False
if doupdate or needs_template:
if needs_template:
with open(template_path, 'wt', encoding='latin-1') as fw:
fw.write(template_content)
# If we just installed a new template file and we can't
# update the main configuration file because it has user
# changes, display a warning.
if not identical and not doupdate:
warn(
"The configuration options in {0} {1} may have changed, "
"your configuration file was not updated in order to "
"preserve local changes. A new configuration template "
"has been saved to '{2}'.".format(
pkg, version, template_path),
ConfigurationChangedWarning)
if doupdate and not identical:
with open(cfgfn, 'wt', encoding='latin-1') as fw:
fw.write(template_content)
return True
return False
|
608f960f70d8f5adec0f2a2cb4aace60fd4bc1a37795e324a30958a84e2cd62a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" This module contains functions to determine where configuration and
data/cache files used by Astropy should be placed.
"""
from ..utils.decorators import wraps
import os
import shutil
import sys
__all__ = ['get_config_dir', 'get_cache_dir', 'set_temp_config',
'set_temp_cache']
def _find_home():
""" Locates and return the home directory (or best approximation) on this
system.
Raises
------
OSError
If the home directory cannot be located - usually means you are running
Astropy on some obscure platform that doesn't have standard home
directories.
"""
# First find the home directory - this is inspired by the scheme ipython
# uses to identify "home"
if os.name == 'posix':
# Linux, Unix, AIX, OS X
if 'HOME' in os.environ:
homedir = os.environ['HOME']
else:
raise OSError('Could not find unix home directory to search for '
'astropy config dir')
elif os.name == 'nt': # This is for all modern Windows (NT or after)
if 'MSYSTEM' in os.environ and os.environ.get('HOME'):
# Likely using an msys shell; use whatever it is using for its
# $HOME directory
homedir = os.environ['HOME']
# Next try for a network home
elif 'HOMESHARE' in os.environ:
homedir = os.environ['HOMESHARE']
# See if there's a local home
elif 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ:
homedir = os.path.join(os.environ['HOMEDRIVE'],
os.environ['HOMEPATH'])
# Maybe a user profile?
elif 'USERPROFILE' in os.environ:
homedir = os.path.join(os.environ['USERPROFILE'])
else:
try:
import winreg as wreg
shell_folders = r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders)
homedir = wreg.QueryValueEx(key, 'Personal')[0]
key.Close()
except Exception:
# As a final possible resort, see if HOME is present
if 'HOME' in os.environ:
homedir = os.environ['HOME']
else:
raise OSError('Could not find windows home directory to '
'search for astropy config dir')
else:
# for other platforms, try HOME, although it probably isn't there
if 'HOME' in os.environ:
homedir = os.environ['HOME']
else:
raise OSError('Could not find a home directory to search for '
'astropy config dir - are you on an unsupported '
'platform?')
return homedir
def get_config_dir(create=True):
"""
Determines the Astropy configuration directory name and creates the
directory if it doesn't exist.
This directory is typically ``$HOME/.astropy/config``, but if the
XDG_CONFIG_HOME environment variable is set and the
``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
configdir : str
The absolute path to the configuration directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_config, that overrides all
if set_temp_config._temp_path is not None:
xch = set_temp_config._temp_path
config_path = os.path.join(xch, 'astropy')
if not os.path.exists(config_path):
os.mkdir(config_path)
return os.path.abspath(config_path)
# first look for XDG_CONFIG_HOME
xch = os.environ.get('XDG_CONFIG_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, 'astropy')
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_astropy_dir('config', linkto))
def get_cache_dir():
"""
Determines the Astropy cache directory name and creates the directory if it
doesn't exist.
This directory is typically ``$HOME/.astropy/cache``, but if the
XDG_CACHE_HOME environment variable is set and the
``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
cachedir : str
The absolute path to the cache directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_cache, that overrides all
if set_temp_cache._temp_path is not None:
xch = set_temp_cache._temp_path
cache_path = os.path.join(xch, 'astropy')
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return os.path.abspath(cache_path)
# first look for XDG_CACHE_HOME
xch = os.environ.get('XDG_CACHE_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, 'astropy')
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_astropy_dir('cache', linkto))
class _SetTempPath:
_temp_path = None
_default_path_getter = None
def __init__(self, path=None, delete=False):
if path is not None:
path = os.path.abspath(path)
self._path = path
self._delete = delete
self._prev_path = self.__class__._temp_path
def __enter__(self):
self.__class__._temp_path = self._path
return self._default_path_getter()
def __exit__(self, *args):
self.__class__._temp_path = self._prev_path
if self._delete and self._path is not None:
shutil.rmtree(self._path)
def __call__(self, func):
"""Implements use as a decorator."""
@wraps(func)
def wrapper(*args, **kwargs):
with self:
func(*args, **kwargs)
return wrapper
class set_temp_config(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy config, primarily
for use with testing.
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the config path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy config
files, or create them if they do not already exist. If None, this
restores the config path to the user's default config path as returned
by `get_config_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
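Examples
--------
A usage sketch, as a context manager or as a decorator (the temporary
path shown is illustrative and must already exist)::
    from astropy.config.paths import set_temp_config
    with set_temp_config('/tmp/test_config'):
        ...  # config files are looked up under /tmp/test_config/astropy
    @set_temp_config('/tmp/test_config', delete=True)
    def test_something():
        ...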
"""
_default_path_getter = staticmethod(get_config_dir)
def __enter__(self):
# Special case for the config case, where we need to reset all the
# cached config objects
from .configuration import _cfgobjs
path = super().__enter__()
_cfgobjs.clear()
return path
def __exit__(self, *args):
from .configuration import _cfgobjs
super().__exit__(*args)
_cfgobjs.clear()
class set_temp_cache(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy download cache,
primarily for use with testing (though there may be other applications
for setting a different cache directory, for example to switch to a cache
dedicated to large files).
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the cache path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy cache
files, or create them if they do not already exist. If None, this
restores the cache path to the user's default cache path as returned
by `get_cache_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_cache_dir)
def _find_or_create_astropy_dir(dirnm, linkto):
innerdir = os.path.join(_find_home(), '.astropy')
maindir = os.path.join(_find_home(), '.astropy', dirnm)
if not os.path.exists(maindir):
# first create .astropy dir if needed
if not os.path.exists(innerdir):
try:
os.mkdir(innerdir)
except OSError:
if not os.path.isdir(innerdir):
raise
elif not os.path.isdir(innerdir):
msg = 'Intended Astropy directory {0} is actually a file.'
raise OSError(msg.format(innerdir))
try:
os.mkdir(maindir)
except OSError:
if not os.path.isdir(maindir):
raise
if (not sys.platform.startswith('win') and
linkto is not None and
not os.path.exists(linkto)):
os.symlink(maindir, linkto)
elif not os.path.isdir(maindir):
msg = 'Intended Astropy {0} directory {1} is actually a file.'
raise OSError(msg.format(dirnm, maindir))
return os.path.abspath(maindir)
|
2ed3175203f19f263f30f5d18c91914135da3d89acb86ab738b82d7d0f087fd2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
return {
str('astropy.config.tests'): ['data/*.cfg']
}
|
5ac70e74d82e46691e6ca7ee5a3768aa5260994e99df89a101d11e6830467b33 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers", are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy-to-extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
import numpy as np
from .utils import poly_map_domain, _combine_equivalency_dict
from ..units import Quantity
from ..utils.exceptions import AstropyUserWarning
from .optimizers import (SLSQP, Simplex)
from .statistic import (leastsquare)
# Check pkg_resources exists
try:
from pkg_resources import iter_entry_points
HAS_PKG = True
except ImportError:
HAS_PKG = False
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'FittingWithOutlierRemoval',
'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter']
# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]
from .optimizers import (DEFAULT_MAXITER, DEFAULT_EPS, DEFAULT_ACC)
class ModelsError(Exception):
"""Base class for model exceptions"""
class ModelLinearityError(ModelsError):
""" Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
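For example, a custom fitter can opt in to unit handling simply by
decorating its ``__call__`` (a sketch only; the fitting body is omitted)::
    class MyFitter(metaclass=_FitterMeta):
        @fitter_unit_support
        def __call__(self, model, x, y, z=None, **kwargs):
            ...  # work with plain (unitless) ndarrays here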
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop('equivalencies', None)
data_has_units = (isinstance(x, Quantity) or
isinstance(y, Quantity) or
isinstance(z, Quantity))
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(model.input_units['x'], equivalencies=input_units_equivalencies['x'])
if isinstance(y, Quantity) and z is not None:
y = y.to(model.input_units['y'], equivalencies=input_units_equivalencies['y'])
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
# input units (to make sure that initial guesses on the parameters
# are in the right unit system)
model = model.without_units_for_data(x=x, y=y, z=z)
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(y, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(x=x, y=y, z=z)
return model_new
else:
raise NotImplementedError("This model does not support being fit to data with units")
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
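Examples
--------
A minimal subclass pairs an optimizer with a statistic and implements
``__call__`` (a sketch; see `SLSQPLSQFitter` and `SimplexLSQFitter`
below for complete versions)::
    class MySimplexFitter(Fitter):
        def __init__(self):
            super().__init__(optimizer=Simplex, statistic=leastsquare)
        @fitter_unit_support
        def __call__(self, model, x, y, z=None, weights=None, **kwargs):
            model_copy = _validate_model(
                model, self._opt_method.supported_constraints)
            farg = (model_copy, weights) + _convert_input(x, y, z)
            p0, _ = _model_to_fit_params(model_copy)
            fitparams, self.fit_info = self._opt_method(
                self.objective_function, p0, farg, **kwargs)
            _fitter_to_model_params(model_copy, fitparams)
            return model_copy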
"""
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific for
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
_fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
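Examples
--------
An illustrative linear fit to synthetic data::
    import numpy as np
    from astropy.modeling import models, fitting
    x = np.linspace(0, 10, 50)
    y = 3.0 * x + 2.0 + np.random.normal(0., 0.1, x.size)
    fitter = fitting.LinearLSQFitter()
    fitted = fitter(models.Polynomial1D(degree=1), x, y)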
"""
supported_constraints = ['fixed']
supports_masked_input = True
def __init__(self):
self.fit_info = {'residuals': None,
'rank': None,
'singular_values': None,
'params': None
}
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, 'domain') and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, 'window') and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, 'x_domain') and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, 'y_domain') and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, 'x_window') and model.x_window is None:
model.x_window = [-1., 1.]
if hasattr(model, 'y_window') and model.y_window is None:
model.y_window = [-1., 1.]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like (optional)
Input coordinates.
If the dependent (``y`` or ``z``) co-ordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
co-ordinate grids differ.
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional and keyword-only argument
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError('Model is not linear in parameters, '
'linear fit methods should not be used.')
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
_, fitparam_indices = _model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(x, y, z, n_models=len(model_copy),
model_set_axis=model_copy.model_set_axis)
has_fixed = any(model_copy.fixed.values())
if has_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [idx for idx in
range(len(model_copy.param_names))
if idx not in fitparam_indices]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray([getattr(model_copy,
model_copy.param_names[idx]).value
for idx in fixparam_indices])
if len(farg) == 2:
x, y = farg
# map domain into window
if hasattr(model_copy, 'domain'):
x = self._map_domain_window(model_copy, x)
if has_fixed:
lhs = self._deriv_with_constraints(model_copy,
fitparam_indices,
x=x)
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices,
x=x)
else:
lhs = model_copy.fit_deriv(x, *model_copy.parameters)
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
# map domain into window
if hasattr(model_copy, 'x_domain'):
x, y = self._map_domain_window(model_copy, x, y)
if has_fixed:
lhs = self._deriv_with_constraints(model_copy,
fitparam_indices, x=x, y=y)
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices, x=x, y=y)
else:
lhs = model_copy.fit_deriv(x, y, *model_copy.parameters)
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
if z.ndim > 2:
# Basically this code here is making the assumption that if
# z has 3 dimensions it represents multiple models where
# the value of z is one plane per model. It's then
# flattening each plane and transposing so that the model
# axis is *last*. That's fine, but this could be
# generalized for other dimensionalities of z.
# TODO: See above comment
rhs = z.reshape((z.shape[0], -1)).T
else:
rhs = z.T
else:
rhs = z.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if lhs.ndim > 2:
raise ValueError('{0} gives unsupported >2D derivative matrix for '
'this x/y'.format(type(model_copy).__name__))
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if has_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input co-ordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(x) != len(weights):
raise ValueError("x and weights should have the same length")
if rhs.ndim == 2:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original dependent
# variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
if rcond is None:
rcond = len(x) * np.finfo(x.dtype).eps
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if len(model_copy) == 1 or not masked:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good],
rhs[good], rcond)
else:
# Where fitting multiple models with masked pixels, initialize an
# empty array of coefficients and populate it one model at a time.
# The shape matches the number of coefficients from the Vandermonde
# matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[-1:] + rhs.shape[-1:], dtype=rhs.dtype)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_rhs, model_lacoef in zip(rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask
model_lhs = lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs,
model_rhs, rcond)
model_lacoef[:] = t_coef.T
self.fit_info['residuals'] = resids
self.fit_info['rank'] = rank
self.fit_info['singular_values'] = sval
lacoef = (lacoef.T / scl).T
self.fit_info['params'] = lacoef
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if hasattr(model_copy, '_order') and rank != model_copy._order:
warnings.warn("The fit may be poorly conditioned\n",
AstropyUserWarning)
_fitter_to_model_params(model_copy, lacoef.flatten())
return model_copy
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a number of iterations ``niter``, outliers are removed
and fitting is performed for each iteration.
Parameters
----------
fitter : An Astropy fitter
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter.
outlier_func : function
A function for outlier removal.
niter : int (optional)
Number of iterations.
outlier_kwargs : dict (optional)
Keyword arguments for outlier_func.
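Examples
--------
A sketch combining a linear fit with sigma clipping (the data values are
made up)::
    import numpy as np
    from astropy.modeling import models, fitting
    from astropy.stats import sigma_clip
    x = np.linspace(0, 10, 50)
    y = 2.0 * x + 1.0
    y[10] += 100.0   # inject a gross outlier
    fit = fitting.FittingWithOutlierRemoval(
        fitting.LinearLSQFitter(), sigma_clip, niter=3, sigma=3.0)
    filtered_y, fitted_model = fit(models.Polynomial1D(degree=1), x, y)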
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
def __str__(self):
return ("Fitter: {0}\nOutlier function: {1}\nNum. of iterations: {2}" +
("\nOutlier func. args.: {3}"))\
.format(self.fitter.__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __repr__(self):
return ("{0}(fitter: {1}, outlier_func: {2}," +
" niter: {3}, outlier_kwargs: {4})")\
.format(self.__class__.__name__,
self.fitter.__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like (optional)
Data measurements (2D case).
weights : array-like (optional)
Weights to be passed to the fitter.
kwargs : dict (optional)
Keyword arguments to be passed to the fitter.
Returns
-------
filtered_data : numpy.ma.core.MaskedArray
Data used to perform the fitting after outlier removal.
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
"""
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
if z is None:
filtered_data = y
for n in range(self.niter):
filtered_data = self.outlier_func(filtered_data - fitted_model(x),
**self.outlier_kwargs)
filtered_data += fitted_model(x)
fitted_model = self.fitter(fitted_model,
x[~filtered_data.mask],
filtered_data.data[~filtered_data.mask],
**kwargs)
else:
filtered_data = z
for n in range(self.niter):
filtered_data = self.outlier_func(filtered_data - fitted_model(x, y),
**self.outlier_kwargs)
filtered_data += fitted_model(x, y)
fitted_model = self.fitter(fitted_model,
x[~filtered_data.mask],
y[~filtered_data.mask],
filtered_data.data[~filtered_data.mask],
**kwargs)
return filtered_data, fitted_model
class LevMarLSQFitter(metaclass=_FitterMeta):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
Additionally, one additional element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
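Examples
--------
An illustrative non-linear fit to synthetic data::
    import numpy as np
    from astropy.modeling import models, fitting
    x = np.linspace(-5., 5., 200)
    y = 3. * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
    init = models.Gaussian1D(amplitude=1., mean=0., stddev=1.)
    fitter = fitting.LevMarLSQFitter()
    fitted = fitter(init, x, y)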
"""
supported_constraints = ['fixed', 'tied', 'bounds']
"""
The constraint types supported by this fitter type.
"""
def __init__(self):
self.fit_info = {'nfev': None,
'fvec': None,
'fjac': None,
'ipvt': None,
'qtf': None,
'message': None,
'ierr': None,
'param_jac': None,
'param_cov': None}
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
_fitter_to_model_params(model, fps)
meas = args[-1]
if weights is None:
return np.ravel(model(*args[2: -1]) - meas)
else:
return np.ravel(weights * (model(*args[2: -1]) - meas))
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None,
maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS, estimate_jacobian=False):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional and keyword-only argument
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
from scipy import optimize
model_copy = _validate_model(model, self.supported_constraints)
farg = (model_copy, weights, ) + _convert_input(x, y, z)
if model_copy.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _ = _model_to_fit_params(model_copy)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function, init_values, args=farg, Dfun=dfunc,
col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
xtol=acc, full_output=True)
_fitter_to_model_params(model_copy, fitparams)
self.fit_info.update(dinfo)
self.fit_info['cov_x'] = cov_x
self.fit_info['message'] = mess
self.fit_info['ierr'] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
dof = len(y) - len(init_values)
self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
else:
self.fit_info['param_cov'] = None
return model_copy
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
_fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)])
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars],
True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
return [np.ravel(_) for _ in np.ravel(weights) * np.array(model.fit_deriv(x, *params))]
else:
if not model.col_fit_deriv:
return [np.ravel(_) for _ in (
np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
else:
return [np.ravel(_) for _ in (weights * np.array(model.fit_deriv(x, y, *params)))]
class SLSQPLSQFitter(Fitter):
"""
SLSQP optimization algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional and keyword-only argument
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
p0, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, p0, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array (optional)
input coordinates
weights : array (optional)
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional and keyword-only argument
List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model,
self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
p0, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, p0, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
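Examples
--------
A sketch fitting two Gaussians that share a common width; the data
arrays ``x1, y1, x2, y2`` are assumed to exist::
    from astropy.modeling import models
    from astropy.modeling.fitting import JointFitter
    g1 = models.Gaussian1D(amplitude=1.0, mean=0.0, stddev=0.5)
    g2 = models.Gaussian1D(amplitude=2.5, mean=1.0, stddev=0.5)
    jf = JointFitter([g1, g2], {g1: ['stddev'], g2: ['stddev']}, [0.5])
    jf(x1, y1, x2, y2)  # x/y coordinates for each model, in order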
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self._model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def _model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = [p.flatten() for p in model.parameters]
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]['slice']
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
the fitted parameters - result of one iteration of the
fitting algorithm
args : dict
tuple of measured and input coordinates
args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[:model.n_inputs + 1]
del lstsqargs[:model.n_inputs + 1]
# separate each model's separately fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError("Expected >1 models, {} given".format(
len(self.models)))
if len(self.jointparams.keys()) < 2:
raise TypeError("At least two parameters are expected, "
"but {} given".format(len(self.jointparams.keys())))
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError("{} parameter(s) provided but {} expected".format(
len(self.jointparams[j]), len(self.initvals)))
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError("Expected {} coordinates in args but {} provided"
.format(reduce(lambda x, y: x + 1 + y + 1,
self.modeldims), len(args)))
self.fitparams[:], _ = optimize.leastsq(self.objective_function,
self.fitparams, args=args)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1:
if z is None:
if y.shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets in the y array is expected to equal "
"the number of parameter sets")
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
else:
# Shape of z excluding model_set_axis
z_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1:]
if not (x.shape == y.shape == z_shape):
raise ValueError("x, y and z should have the same shape")
if z is None:
farg = (x, y)
else:
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def _fitter_to_model_params(model, fps):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
"""
_, fit_param_indices = _model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]['slice']
shape = param_metrics[name]['shape']
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset:offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None):
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
model.parameters[slice_] = values
offset += size
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
# better to change this at some point
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]['slice']
model.parameters[slice_] = value
def _model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model.parameters)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]['slice']
del params[slice_]
del fitparam_indices[idx]
return (np.array(params), fitparam_indices)
else:
return (model.parameters, fitparam_indices)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = 'Optimizer cannot handle {0} constraints.'
if (any(model.fixed.values()) and
'fixed' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('fixed parameter'))
if any(model.tied.values()) and 'tied' not in supported_constraints:
raise UnsupportedConstraintError(
message.format('tied parameter'))
if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
'bounds' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('bound parameter'))
if model.eqcons and 'eqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('equality'))
if model.ineqcons and 'ineqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn('Model is linear in parameters; '
'consider using linear fitting methods.',
AstropyUserWarning)
elif len(model) != 1:
        # for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit "
"one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
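# Illustrative sketch of constraint validation (hypothetical fitter supporting
# only the 'fixed' constraint; the exception text follows the message template
# in _validate_constraints above):
#
#     >>> from astropy.modeling import models
#     >>> g = models.Gaussian1D(1., 0., 1.)
#     >>> g.stddev.min = 0.
#     >>> _validate_model(g, ('fixed',))  # doctest: +SKIP
#     Traceback (most recent call last):
#     ...
#     UnsupportedConstraintError: Optimizer cannot handle bound parameter constraints.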
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
    This provides a means of inserting a fitting routine without requiring
    it to be merged into astropy's core.
Parameters
----------
entry_points : a list of `~pkg_resources.EntryPoint`
entry_points are objects which encapsulate
importable objects and are defined on the
installation of a package.
Notes
-----
    An explanation of entry points can be found `here <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(AstropyUserWarning('{type} error occurred in entry '
'point {name}.' .format(type=type(e).__name__, name=name)))
else:
if not inspect.isclass(entry_point):
warnings.warn(AstropyUserWarning(
'Modeling entry point {0} expected to be a '
'Class.' .format(name)))
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(AstropyUserWarning(
'Modeling entry point {0} expected to extend '
'astropy.modeling.Fitter' .format(name)))
# this is so fitting doesn't choke if pkg_resources doesn't exist
if HAS_PKG:
populate_entry_points(iter_entry_points(group='astropy.modeling', name=None))
|
be17316456660f161cda727d5ec15748a4da313a736b1ab725b882ed0073afc0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines two classes that deal with parameters.
It is unlikely users will need to work with these classes directly, unless they
define their own models.
"""
import functools
import numbers
import types
import operator
import numpy as np
from .. import units as u
from ..units import Quantity, UnitsError
from ..utils import isiterable, OrderedDescriptor
from .utils import array_repr_oneline
from .utils import get_inputs_and_params
__all__ = ['Parameter', 'InputParameterError', 'ParameterError']
class ParameterError(Exception):
"""Generic exception class for all exceptions pertaining to Parameters."""
class InputParameterError(ValueError, ParameterError):
"""Used for incorrect input parameter values and definitions."""
class ParameterDefinitionError(ParameterError):
"""Exception in declaration of class-level Parameters."""
def _tofloat(value):
"""Convert a parameter to float or float array"""
if isiterable(value):
try:
value = np.asanyarray(value, dtype=float)
except (TypeError, ValueError):
# catch arrays with strings or user errors like different
# types of parameters in a parameter set
raise InputParameterError(
"Parameter of {0} could not be converted to "
"float".format(type(value)))
elif isinstance(value, Quantity):
# Quantities are fine as is
pass
elif isinstance(value, np.ndarray):
# A scalar/dimensionless array
value = float(value.item())
    elif isinstance(value, bool):
        # Check for bool before generic numbers, since bool is a subclass of
        # int and would otherwise be silently converted to 1.0/0.0
        raise InputParameterError(
            "Expected parameter to be of numerical type, not boolean")
    elif isinstance(value, (numbers.Number, np.number)):
        value = float(value)
else:
raise InputParameterError(
"Don't know how to convert parameter of {0} to "
"float".format(type(value)))
return value
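# Illustrative examples of the conversions performed by _tofloat (values are
# hypothetical; scalar Quantities are passed through unchanged):
#
#     >>> _tofloat([1, 2])        # doctest: +SKIP
#     array([1., 2.])
#     >>> _tofloat(3)             # doctest: +SKIP
#     3.0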
# Helpers for implementing operator overloading on Parameter
def _binary_arithmetic_operation(op, reflected=False):
@functools.wraps(op)
def wrapper(self, val):
if self._model is None:
return NotImplemented
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
if reflected:
return op(val, self_value)
else:
return op(self_value, val)
return wrapper
def _binary_comparison_operation(op):
@functools.wraps(op)
def wrapper(self, val):
if self._model is None:
if op is operator.lt:
# Because OrderedDescriptor uses __lt__ to work, we need to
# call the super method, but only when not bound to an instance
# anyways
return super(self.__class__, self).__lt__(val)
else:
return NotImplemented
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value, val)
return wrapper
def _unary_arithmetic_operation(op):
@functools.wraps(op)
def wrapper(self):
if self._model is None:
return NotImplemented
if self.unit is not None:
self_value = Quantity(self.value, self.unit)
else:
self_value = self.value
return op(self_value)
return wrapper
class Parameter(OrderedDescriptor):
"""
Wraps individual parameters.
This class represents a model's parameter (in a somewhat broad sense). It
acts as both a descriptor that can be assigned to a class attribute to
describe the parameters accepted by an individual model (this is called an
"unbound parameter"), or it can act as a proxy for the parameter values on
an individual model instance (called a "bound parameter").
Parameter instances never store the actual value of the parameter directly.
    Rather, each instance of a model stores its own parameter values
in an array. A *bound* Parameter simply wraps the value in a Parameter
proxy which provides some additional information about the parameter such
as its constraints. In other words, this is a high-level interface to a
model's adjustable parameter values.
*Unbound* Parameters are not associated with any specific model instance,
and are merely used by model classes to determine the names of their
parameters and other information about each parameter such as their default
values and default constraints.
See :ref:`modeling-parameters` for more details.
Parameters
----------
name : str
parameter name
.. warning::
The fact that `Parameter` accepts ``name`` as an argument is an
implementation detail, and should not be used directly. When
defining a new `Model` class, parameter names are always
automatically defined by the class attribute they're assigned to.
description : str
parameter description
default : float or array
default value to use for this parameter
unit : `~astropy.units.Unit`
if specified, the parameter will be in these units, and when the
parameter is updated in future, it should be set to a
:class:`~astropy.units.Quantity` that has equivalent units.
getter : callable
a function that wraps the raw (internal) value of the parameter
        when returning the value through the parameter proxy (e.g. a
parameter may be stored internally as radians but returned to the
user as degrees)
setter : callable
a function that wraps any values assigned to this parameter; should
be the inverse of getter
fixed : bool
if True the parameter is not varied during fitting
tied : callable or False
if callable is supplied it provides a way to link the value of this
parameter to another parameter (or some other arbitrary function)
min : float
the lower bound of a parameter
max : float
the upper bound of a parameter
bounds : tuple
specify min and max as a single tuple--bounds may not be specified
simultaneously with min or max
model : `Model` instance
        binds the `Parameter` instance to a specific model upon
instantiation; this should only be used internally for creating bound
Parameters, and should not be used for `Parameter` descriptors defined
as class attributes
"""
constraints = ('fixed', 'tied', 'bounds')
"""
Types of constraints a parameter can have. Excludes 'min' and 'max'
which are just aliases for the first and second elements of the 'bounds'
constraint (which is represented as a 2-tuple).
"""
# Settings for OrderedDescriptor
_class_attribute_ = '_parameters_'
_name_attribute_ = '_name'
def __init__(self, name='', description='', default=None, unit=None,
getter=None, setter=None, fixed=False, tied=False, min=None,
max=None, bounds=None, model=None):
super().__init__()
self._name = name
self.__doc__ = self._description = description.strip()
# We only need to perform this check on unbound parameters
if model is None and isinstance(default, Quantity):
if unit is not None and not unit.is_equivalent(default.unit):
raise ParameterDefinitionError(
"parameter default {0} does not have units equivalent to "
"the required unit {1}".format(default, unit))
unit = default.unit
default = default.value
self._default = default
self._unit = unit
# NOTE: These are *default* constraints--on model instances constraints
# are taken from the model if set, otherwise the defaults set here are
# used
if bounds is not None:
if min is not None or max is not None:
raise ValueError(
                    'bounds may not be specified simultaneously with min '
                    'or max when instantiating Parameter {0}'.format(name))
else:
bounds = (min, max)
self._fixed = fixed
self._tied = tied
self._bounds = bounds
self._order = None
self._model = None
# The getter/setter functions take one or two arguments: The first
# argument is always the value itself (either the value returned or the
# value being set). The second argument is optional, but if present
# will contain a reference to the model object tied to a parameter (if
# it exists)
self._getter = self._create_value_wrapper(getter, None)
self._setter = self._create_value_wrapper(setter, None)
self._validator = None
# Only Parameters declared as class-level descriptors require
        # an ordering ID
if model is not None:
self._bind(model)
def __get__(self, obj, objtype):
if obj is None:
return self
# All of the Parameter.__init__ work should already have been done for
# the class-level descriptor; we can skip that stuff and just copy the
# existing __dict__ and then bind to the model instance
parameter = self.__class__.__new__(self.__class__)
parameter.__dict__.update(self.__dict__)
parameter._bind(obj)
return parameter
def __set__(self, obj, value):
value = _tofloat(value)
# Check that units are compatible with default or units already set
param_unit = obj._param_metrics[self.name]['orig_unit']
if param_unit is None:
if isinstance(value, Quantity):
obj._param_metrics[self.name]['orig_unit'] = value.unit
else:
if not isinstance(value, Quantity):
raise UnitsError("The '{0}' parameter should be given as a "
"Quantity because it was originally initialized "
"as a Quantity".format(self._name))
else:
# We need to make sure we update the unit because the units are
# then dropped from the value below.
obj._param_metrics[self.name]['orig_unit'] = value.unit
# Call the validator before the setter
if self._validator is not None:
self._validator(obj, value)
if self._setter is not None:
setter = self._create_value_wrapper(self._setter, obj)
if self.unit is not None:
value = setter(value * self.unit).value
else:
value = setter(value)
self._set_model_value(obj, value)
def __len__(self):
if self._model is None:
raise TypeError('Parameter definitions do not have a length.')
return len(self._model)
def __getitem__(self, key):
value = self.value
if len(self._model) == 1:
# Wrap the value in a list so that getitem can work for sensible
# indices like [0] and [-1]
value = [value]
return value[key]
def __setitem__(self, key, value):
# Get the existing value and check whether it even makes sense to
# apply this index
oldvalue = self.value
n_models = len(self._model)
# if n_models == 1:
# # Convert the single-dimension value to a list to allow some slices
# # that would be compatible with a length-1 array like [:] and [0:]
# oldvalue = [oldvalue]
if isinstance(key, slice):
if len(oldvalue[key]) == 0:
raise InputParameterError(
"Slice assignment outside the parameter dimensions for "
"'{0}'".format(self.name))
for idx, val in zip(range(*key.indices(len(self))), value):
self.__setitem__(idx, val)
else:
try:
oldvalue[key] = value
except IndexError:
raise InputParameterError(
"Input dimension {0} invalid for {1!r} parameter with "
"dimension {2}".format(key, self.name, n_models))
def __repr__(self):
args = "'{0}'".format(self._name)
if self._model is None:
if self._default is not None:
args += ', default={0}'.format(self._default)
else:
args += ', value={0}'.format(self.value)
if self.unit is not None:
args += ', unit={0}'.format(self.unit)
for cons in self.constraints:
val = getattr(self, cons)
if val not in (None, False, (None, None)):
# Maybe non-obvious, but False is the default for the fixed and
# tied constraints
args += ', {0}={1}'.format(cons, val)
return "{0}({1})".format(self.__class__.__name__, args)
@property
def name(self):
"""Parameter name"""
return self._name
@property
def default(self):
"""Parameter default value"""
if (self._model is None or self._default is None or
len(self._model) == 1):
return self._default
        # Otherwise the model we are providing for has more than one parameter
        # set, so ensure that the default is repeated the correct number of
# times along the model_set_axis if necessary
n_models = len(self._model)
model_set_axis = self._model._model_set_axis
default = self._default
new_shape = (np.shape(default) +
(1,) * (model_set_axis + 1 - np.ndim(default)))
default = np.reshape(default, new_shape)
# Now roll the new axis into its correct position if necessary
default = np.rollaxis(default, -1, model_set_axis)
# Finally repeat the last newly-added axis to match n_models
default = np.repeat(default, n_models, axis=-1)
# NOTE: Regardless of what order the last two steps are performed in,
# the resulting array will *look* the same, but only if the repeat is
# performed last will it result in a *contiguous* array
return default
@property
def value(self):
"""The unadorned value proxied by this parameter."""
if self._model is None:
raise AttributeError('Parameter definition does not have a value')
value = self._get_model_value(self._model)
if self._getter is None:
return value
else:
raw_unit = self._model._param_metrics[self.name]['raw_unit']
orig_unit = self._model._param_metrics[self.name]['orig_unit']
if raw_unit is not None:
return np.float64(self._getter(value, raw_unit, orig_unit).value)
else:
return self._getter(value)
@value.setter
def value(self, value):
if self._model is None:
raise AttributeError('Cannot set a value on a parameter '
'definition')
if self._setter is not None:
            value = self._setter(value)
if isinstance(value, Quantity):
raise TypeError("The .value property on parameters should be set to "
"unitless values, not Quantity objects. To set a "
"parameter to a quantity simply set the parameter "
"directly without using .value")
self._set_model_value(self._model, value)
@property
def unit(self):
"""
The unit attached to this parameter, if any.
On unbound parameters (i.e. parameters accessed through the
model class, rather than a model instance) this is the required/
default unit for the parameter.
"""
if self._model is None:
return self._unit
else:
# orig_unit may be undefined early on in model instantiation
return self._model._param_metrics[self.name].get('orig_unit',
self._unit)
@unit.setter
def unit(self, unit):
self._set_unit(unit)
def _set_unit(self, unit, force=False):
if self._model is None:
raise AttributeError('Cannot set unit on a parameter definition')
orig_unit = self._model._param_metrics[self.name]['orig_unit']
if force:
self._model._param_metrics[self.name]['orig_unit'] = unit
else:
if orig_unit is None:
raise ValueError('Cannot attach units to parameters that were '
'not initially specified with units')
else:
raise ValueError('Cannot change the unit attribute directly, '
'instead change the parameter to a new quantity')
@property
def quantity(self):
"""
This parameter, as a :class:`~astropy.units.Quantity` instance.
"""
if self.unit is not None:
return self.value * self.unit
else:
return None
@quantity.setter
def quantity(self, quantity):
if not isinstance(quantity, Quantity):
raise TypeError("The .quantity attribute should be set to a Quantity object")
self.value = quantity.value
self._set_unit(quantity.unit, force=True)
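    # Illustrative sketch of reading/assigning the quantity proxy on a bound
    # parameter (hypothetical model initialized with units):
    #
    #     >>> from astropy import units as u
    #     >>> from astropy.modeling import models
    #     >>> g = models.Gaussian1D(3 * u.Jy, 2 * u.um, 0.1 * u.um)
    #     >>> g.mean.quantity                  # doctest: +SKIP
    #     <Quantity 2.0 um>
    #     >>> g.mean.quantity = 2500. * u.nm   # doctest: +SKIP
    #     >>> g.mean.value                     # doctest: +SKIP
    #     2500.0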
@property
def shape(self):
"""The shape of this parameter's value array."""
if self._model is None:
raise AttributeError('Parameter definition does not have a '
'shape.')
shape = self._model._param_metrics[self._name]['shape']
if len(self._model) > 1:
# If we are dealing with a model *set* the shape is the shape of
# the parameter within a single model in the set
model_axis = self._model._model_set_axis
if model_axis < 0:
model_axis = len(shape) + model_axis
shape = shape[:model_axis] + shape[model_axis + 1:]
return shape
@property
def size(self):
"""The size of this parameter's value array."""
# TODO: Rather than using self.value this could be determined from the
# size of the parameter in _param_metrics
return np.size(self.value)
@property
def fixed(self):
"""
Boolean indicating if the parameter is kept fixed during fitting.
"""
if self._model is not None:
fixed = self._model._constraints['fixed']
return fixed.get(self._name, self._fixed)
else:
return self._fixed
@fixed.setter
def fixed(self, value):
"""Fix a parameter"""
if self._model is not None:
if not isinstance(value, bool):
raise TypeError("Fixed can be True or False")
self._model._constraints['fixed'][self._name] = value
else:
raise AttributeError("can't set attribute 'fixed' on Parameter "
"definition")
@property
def tied(self):
"""
Indicates that this parameter is linked to another one.
A callable which provides the relationship of the two parameters.
"""
if self._model is not None:
tied = self._model._constraints['tied']
return tied.get(self._name, self._tied)
else:
return self._tied
@tied.setter
def tied(self, value):
"""Tie a parameter"""
if self._model is not None:
if not callable(value) and value not in (False, None):
raise TypeError("Tied must be a callable")
self._model._constraints['tied'][self._name] = value
else:
raise AttributeError("can't set attribute 'tied' on Parameter "
"definition")
@property
def bounds(self):
"""The minimum and maximum values of a parameter as a tuple"""
if self._model is not None:
bounds = self._model._constraints['bounds']
return bounds.get(self._name, self._bounds)
else:
return self._bounds
@bounds.setter
def bounds(self, value):
"""Set the minimum and maximum values of a parameter from a tuple"""
if self._model is not None:
_min, _max = value
if _min is not None:
if not isinstance(_min, numbers.Number):
raise TypeError("Min value must be a number")
_min = float(_min)
if _max is not None:
if not isinstance(_max, numbers.Number):
raise TypeError("Max value must be a number")
_max = float(_max)
            bounds = self._model._constraints.setdefault('bounds', {})
            bounds[self._name] = (_min, _max)
else:
raise AttributeError("can't set attribute 'bounds' on Parameter "
"definition")
@property
def min(self):
"""A value used as a lower bound when fitting a parameter"""
return self.bounds[0]
@min.setter
def min(self, value):
"""Set a minimum value of a parameter"""
if self._model is not None:
self.bounds = (value, self.max)
else:
raise AttributeError("can't set attribute 'min' on Parameter "
"definition")
@property
def max(self):
"""A value used as an upper bound when fitting a parameter"""
return self.bounds[1]
@max.setter
def max(self, value):
"""Set a maximum value of a parameter."""
if self._model is not None:
self.bounds = (self.min, value)
else:
raise AttributeError("can't set attribute 'max' on Parameter "
"definition")
@property
def validator(self):
"""
Used as a decorator to set the validator method for a `Parameter`.
The validator method validates any value set for that parameter.
It takes two arguments--``self``, which refers to the `Model`
instance (remember, this is a method defined on a `Model`), and
the value being set for this parameter. The validator method's
return value is ignored, but it may raise an exception if the value
set on the parameter is invalid (typically an `InputParameterError`
should be raised, though this is not currently a requirement).
The decorator *returns* the `Parameter` instance that the validator
is set on, so the underlying validator method should have the same
name as the `Parameter` itself (think of this as analogous to
``property.setter``). For example::
>>> from astropy.modeling import Fittable1DModel
>>> class TestModel(Fittable1DModel):
... a = Parameter()
... b = Parameter()
...
... @a.validator
... def a(self, value):
... # Remember, the value can be an array
... if np.any(value < self.b):
... raise InputParameterError(
... "parameter 'a' must be greater than or equal "
... "to parameter 'b'")
...
... @staticmethod
... def evaluate(x, a, b):
... return a * x + b
...
>>> m = TestModel(a=1, b=2) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InputParameterError: parameter 'a' must be greater than or equal
to parameter 'b'
>>> m = TestModel(a=2, b=2)
>>> m.a = 0 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InputParameterError: parameter 'a' must be greater than or equal
to parameter 'b'
On bound parameters this property returns the validator method itself,
        as a bound method on the `Parameter`. This is often less useful, but
it allows validating a parameter value without setting that parameter::
>>> m.a.validator(42) # Passes
>>> m.a.validator(-42) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
InputParameterError: parameter 'a' must be greater than or equal
to parameter 'b'
"""
if self._model is None:
# For unbound parameters return the validator setter
def validator(func, self=self):
self._validator = func
return self
return validator
else:
# Return the validator method, bound to the Parameter instance with
# the name "validator"
def validator(self, value):
if self._validator is not None:
return self._validator(self._model, value)
return types.MethodType(validator, self)
def copy(self, name=None, description=None, default=None, unit=None,
getter=None, setter=None, fixed=False, tied=False, min=None,
max=None, bounds=None):
"""
Make a copy of this `Parameter`, overriding any of its core attributes
in the process (or an exact copy).
The arguments to this method are the same as those for the `Parameter`
initializer. This simply returns a new `Parameter` instance with any
or all of the attributes overridden, and so returns the equivalent of:
.. code:: python
Parameter(self.name, self.description, ...)
"""
kwargs = locals().copy()
del kwargs['self']
for key, value in kwargs.items():
if value is None:
                # Annoying special cases for min/max, which are just aliases for
# the components of bounds
if key in ('min', 'max'):
continue
else:
if hasattr(self, key):
value = getattr(self, key)
elif hasattr(self, '_' + key):
value = getattr(self, '_' + key)
kwargs[key] = value
return self.__class__(**kwargs)
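    # Illustrative sketch of copying an unbound Parameter while overriding some
    # of its attributes (hypothetical names and values):
    #
    #     >>> from astropy.modeling import Parameter
    #     >>> a = Parameter('a', default=1.0, min=0)
    #     >>> a.copy(name='b', default=2.0)    # doctest: +SKIP
    #     Parameter('b', default=2.0, bounds=(0, None))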
@property
def _raw_value(self):
"""
Currently for internal use only.
Like Parameter.value but does not pass the result through
Parameter.getter. By design this should only be used from bound
parameters.
        This will probably be removed or reworked at some point in the
process of rethinking how parameter values are stored/updated.
"""
return self._get_model_value(self._model)
def _bind(self, model):
"""
Bind the `Parameter` to a specific `Model` instance; don't use this
directly on *unbound* parameters, i.e. `Parameter` descriptors that
are defined in class bodies.
"""
self._model = model
self._getter = self._create_value_wrapper(self._getter, model)
self._setter = self._create_value_wrapper(self._setter, model)
# TODO: These methods should probably be moved to the Model class, since it
# has entirely to do with details of how the model stores parameters.
# Parameter should just act as a user front-end to this.
def _get_model_value(self, model):
"""
This method implements how to retrieve the value of this parameter from
the model instance. See also `Parameter._set_model_value`.
These methods take an explicit model argument rather than using
self._model so that they can be used from unbound `Parameter`
instances.
"""
if not hasattr(model, '_parameters'):
# The _parameters array hasn't been initialized yet; just translate
# this to an AttributeError
raise AttributeError(self._name)
# Use the _param_metrics to extract the parameter value from the
# _parameters array
param_metrics = model._param_metrics[self._name]
param_slice = param_metrics['slice']
param_shape = param_metrics['shape']
value = model._parameters[param_slice]
if param_shape:
value = value.reshape(param_shape)
else:
value = value[0]
return value
def _set_model_value(self, model, value):
"""
This method implements how to store the value of a parameter on the
model instance.
Currently there is only one storage mechanism (via the ._parameters
        array) but other mechanisms may be desirable, in which case really the
model class itself should dictate this and *not* `Parameter` itself.
"""
def _update_parameter_value(model, name, value):
# TODO: Maybe handle exception on invalid input shape
param_metrics = model._param_metrics[name]
param_slice = param_metrics['slice']
param_shape = param_metrics['shape']
param_size = np.prod(param_shape)
if np.size(value) != param_size:
raise InputParameterError(
"Input value for parameter {0!r} does not have {1} elements "
"as the current value does".format(name, param_size))
model._parameters[param_slice] = np.array(value).ravel()
_update_parameter_value(model, self._name, value)
if hasattr(model, "_param_map"):
submodel_ind, param_name = model._param_map[self._name]
if hasattr(model._submodels[submodel_ind], "_param_metrics"):
_update_parameter_value(model._submodels[submodel_ind], param_name, value)
@staticmethod
def _create_value_wrapper(wrapper, model):
"""Wraps a getter/setter function to support optionally passing in
a reference to the model object as the second argument.
If a model is tied to this parameter and its getter/setter supports
a second argument then this creates a partial function using the model
instance as the second argument.
"""
if isinstance(wrapper, np.ufunc):
if wrapper.nin != 1:
raise TypeError("A numpy.ufunc used for Parameter "
"getter/setter may only take one input "
"argument")
elif wrapper is None:
# Just allow non-wrappers to fall through silently, for convenience
return None
else:
inputs, params = get_inputs_and_params(wrapper)
nargs = len(inputs)
if nargs == 1:
pass
elif nargs == 2:
if model is not None:
# Don't make a partial function unless we're tied to a
# specific model instance
model_arg = inputs[1].name
wrapper = functools.partial(wrapper, **{model_arg: model})
else:
raise TypeError("Parameter getter/setter must be a function "
"of either one or two arguments")
return wrapper
def __array__(self, dtype=None):
# Make np.asarray(self) work a little more straightforwardly
arr = np.asarray(self.value, dtype=dtype)
if self.unit is not None:
arr = Quantity(arr, self.unit, copy=False)
return arr
def __bool__(self):
if self._model is None:
return True
else:
return bool(self.value)
__add__ = _binary_arithmetic_operation(operator.add)
__radd__ = _binary_arithmetic_operation(operator.add, reflected=True)
__sub__ = _binary_arithmetic_operation(operator.sub)
__rsub__ = _binary_arithmetic_operation(operator.sub, reflected=True)
__mul__ = _binary_arithmetic_operation(operator.mul)
__rmul__ = _binary_arithmetic_operation(operator.mul, reflected=True)
__pow__ = _binary_arithmetic_operation(operator.pow)
__rpow__ = _binary_arithmetic_operation(operator.pow, reflected=True)
__div__ = _binary_arithmetic_operation(operator.truediv)
__rdiv__ = _binary_arithmetic_operation(operator.truediv, reflected=True)
__truediv__ = _binary_arithmetic_operation(operator.truediv)
__rtruediv__ = _binary_arithmetic_operation(operator.truediv, reflected=True)
__eq__ = _binary_comparison_operation(operator.eq)
__ne__ = _binary_comparison_operation(operator.ne)
__lt__ = _binary_comparison_operation(operator.lt)
__gt__ = _binary_comparison_operation(operator.gt)
__le__ = _binary_comparison_operation(operator.le)
__ge__ = _binary_comparison_operation(operator.ge)
__neg__ = _unary_arithmetic_operation(operator.neg)
__abs__ = _unary_arithmetic_operation(operator.abs)
def param_repr_oneline(param):
"""
Like array_repr_oneline but works on `Parameter` objects and supports
rendering parameters with units like quantities.
"""
out = array_repr_oneline(param.value)
if param.unit is not None:
out = '{0} {1!s}'.format(out, param.unit)
return out
|
c66b04270b93ef12ee28868f7d91cc15de70e26ea6bca2dac8091bf90bf84456 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
import abc
import copy
import copyreg
import inspect
import functools
import operator
import sys
import types
import warnings
from collections import defaultdict, OrderedDict
from contextlib import suppress
from inspect import signature
from itertools import chain, islice
import numpy as np
from ..utils import indent, isinstancemethod, metadata
from ..table import Table
from ..units import Quantity, UnitsError, dimensionless_unscaled
from ..units.utils import quantity_asanyarray
from ..utils import (sharedmethod, find_current_module,
InheritDocstrings, OrderedDescriptorContainer,
check_broadcast, IncompatibleShapeError, isiterable)
from ..utils.codegen import make_function_with_signature
from ..utils.exceptions import AstropyDeprecationWarning
from .utils import (combine_labels, make_binary_operator_eval,
ExpressionTree, AliasDict, get_inputs_and_params,
_BoundingBox, _combine_equivalency_dict)
from ..nddata.utils import add_array, extract_array
from .parameters import Parameter, InputParameterError, param_repr_oneline
__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',
'custom_model', 'ModelDefinitionError']
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions"""
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
Any additional keyword arguments passed in are passed to
`_CompoundModelMeta._from_operator`.
"""
# Note: Originally this used functools.partial, but that won't work when
# used in the class definition of _CompoundModelMeta since
# _CompoundModelMeta has not been defined yet.
# Perform an arithmetic operation on two models.
return lambda left, right: _CompoundModelMeta._from_operator(oper,
left, right, **kwargs)
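# Illustrative sketch of the compound-model operators this helper enables
# (hypothetical models; '+' and '|' work on model classes as well as on model
# instances):
#
#     >>> from astropy.modeling import models
#     >>> TwoGaussians = models.Gaussian1D + models.Gaussian1D  # doctest: +SKIP
#     >>> pipeline = models.Shift(1) | models.Scale(2)          # doctest: +SKIP
#     >>> pipeline(3.)                                          # doctest: +SKIP
#     8.0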
class _ModelMeta(OrderedDescriptorContainer, InheritDocstrings, abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
_parameters_ = OrderedDict()
def __new__(mcls, name, bases, members):
# See the docstring for _is_dynamic above
if '_is_dynamic' not in members:
members['_is_dynamic'] = mcls._is_dynamic
return super().__new__(mcls, name, bases, members)
def __init__(cls, name, bases, members):
# Make sure OrderedDescriptorContainer gets to run before doing
# anything else
super().__init__(name, bases, members)
if cls._parameters_:
if hasattr(cls, '_param_names'):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(cls._parameters_)
else:
cls.param_names = tuple(cls._parameters_)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
cls._handle_special_methods(members)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
else:
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith('_abc_'):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ('__init__', '__call__'):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def n_inputs(cls):
return len(cls.inputs)
@property
def n_outputs(cls):
return len(cls.outputs)
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
internal implementation detail (i.e. begins with '_').
"""
return not (cls.__name__.startswith('_') or inspect.isabstract(cls))
def rename(cls, name):
"""
Creates a copy of this model class with a new name.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class '__main__.SkyRotation'>
Name: SkyRotation (Rotation2D)
Inputs: ('x', 'y')
Outputs: ('x', 'y')
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
new_cls = type(name, (cls,), {})
new_cls.__module__ = modname
if hasattr(cls, '__qualname__'):
if new_cls.__module__ == '__main__':
# __main__ is not added to a class's qualified name
new_cls.__qualname__ = name
else:
new_cls.__qualname__ = '{0}.{1}'.format(modname, name)
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get('inverse')
if inverse is None or cls.__bases__[0] is object:
            # The latter clause is to prevent the code below from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get('bounding_box')
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = _BoundingBox.validate(cls, bounding_box)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
# TODO: Maybe warn in the above case?
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = \
cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of _BoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
'The bounding_box method for {0} is not correctly '
'defined: If defined as a method all arguments to that '
'method (besides self) must be keyword arguments with '
'default values that can be used to compute a default '
'bounding box.'.format(cls.name))
kwargs.append((param.name, param.default))
__call__ = make_function_with_signature(__call__, ('self',), kwargs)
return type(str('_{0}BoundingBox'.format(cls.name)), (_BoundingBox,),
{'__call__': __call__})
def _handle_special_methods(cls, members):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, '__qualname__'):
wrapper.__qualname__ = '{0}.{1}'.format(
cls.__qualname__, wrapper.__name__)
if ('__call__' not in members and 'inputs' in members and
isinstance(members['inputs'], tuple)):
# Don't create a custom __call__ for classes that already have one
# explicitly defined (this includes the Model base class, and any
            # other classes that manually override __call__)
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
# When called, models can take two optional keyword arguments:
#
# * model_set_axis, which indicates (for multi-dimensional input)
# which axis is used to indicate different models
#
# * equivalencies, a dictionary of equivalencies to be applied to
# the input values, where each key should correspond to one of
# the inputs.
#
# The following code creates the __call__ function with these
# two keyword arguments.
inputs = members['inputs']
args = ('self',) + inputs
new_call = make_function_with_signature(
__call__, args, [('model_set_axis', None),
('with_bounding_box', False),
('fill_value', np.nan),
('equivalencies', None)])
# The following makes it look like __call__ was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if ('__init__' not in members and not inspect.isabstract(cls) and
cls._parameters_):
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional arguments
if all(p.default is not None for p in cls._parameters_.values()):
args = ('self',)
kwargs = []
for param_name in cls.param_names:
default = cls._parameters_[param_name].default
unit = cls._parameters_[param_name].unit
# If the unit was specified in the parameter but the default
# is not a Quantity, attach the unit to the default.
if unit is not None:
default = Quantity(default, unit, copy=False)
kwargs.append((param_name, default))
else:
args = ('self',) + cls.param_names
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs='kwargs')
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif (inspect.isabstract(base) or
base.__name__.startswith('_')):
break
bases.append(base.name)
if bases:
return '{0} ({1})'.format(cls.name, ' -> '.join(bases))
else:
return cls.name
try:
default_keywords = [
('Name', format_inheritance(cls)),
('Inputs', cls.inputs),
('Outputs', cls.outputs),
]
if cls.param_names:
default_keywords.append(('Fittable parameters',
cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append('{0}: {1}'.format(keyword, value))
return '\n'.join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
This class sets the constraints and other properties for all individual
parameters and performs parameter validation.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
sets. Also, ``model_set_axis=False`` can be used to tell that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
Dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list of length 2
giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
        `~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
List of functions of length n such that ``ieqcons[j](x0, *args) >=
        0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ('eqcons', 'ineqcons')
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
inputs = ()
"""The name(s) of the input variable(s) on which a model is evaluated."""
outputs = ()
"""The name(s) of the output(s) of the model."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# Enforce strict units on inputs to evaluate. If this is set to True, input
# values to evaluate have to be in the exact right units specified by
# input_units. In this case, if the input quantities are convertible to
# input_units, they are converted.
input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units.
# Only has an effect if input_units is defined.
input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the model
# inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
if meta is not None:
self.meta = meta
self._name = name
self._initialize_constraints(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
def __call__(self, *inputs, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
inputs, format_info = self.prepare_inputs(*inputs, **kwargs)
# Check whether any of the inputs are quantities
inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])
parameters = self._param_sets(raw=True, units=True)
with_bbox = kwargs.pop('with_bounding_box', False)
fill_value = kwargs.pop('fill_value', np.nan)
bbox = None
if with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if self.n_inputs > 1 and bbox is not None:
# bounding_box is in python order - convert it to the order of the inputs
bbox = bbox[::-1]
if bbox is None:
outputs = self.evaluate(*chain(inputs, parameters))
else:
if self.n_inputs == 1:
bbox = [bbox]
# indices where input is outside the bbox
# have a value of 1 in ``nan_ind``
nan_ind = np.zeros(inputs[0].shape, dtype=bool)
for ind, inp in enumerate(inputs):
# Pass an ``out`` array so that ``axis_ind`` is array for scalars as well.
axis_ind = np.zeros(inp.shape, dtype=bool)
axis_ind = np.logical_or(inp < bbox[ind][0], inp > bbox[ind][1], out=axis_ind)
nan_ind[axis_ind] = 1
# get an array with indices of valid inputs
valid_ind = np.logical_not(nan_ind).nonzero()
# inputs holds only inputs within the bbox
args = []
for input in inputs:
if not input.shape:
# shape is ()
if nan_ind:
outputs = [fill_value for a in args]
else:
args.append(input)
else:
args.append(input[valid_ind])
valid_result = self.evaluate(*chain(args, parameters))
if self.n_outputs == 1:
valid_result = [valid_result]
# combine the valid results with the ``fill_value`` values
# outside the bbox
result = [np.zeros(inputs[0].shape) + fill_value for i in range(len(valid_result))]
for ind, r in enumerate(valid_result):
if not result[ind].shape:
# shape is ()
result[ind] = r
else:
result[ind][valid_ind] = r
# format output
if self.n_outputs == 1:
outputs = np.asarray(result[0])
else:
outputs = [np.asarray(r) for r in result]
else:
outputs = self.evaluate(*chain(inputs, parameters))
if self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(format_info, *outputs, **kwargs)
# If input values were quantities, we use return_units to cast
# the return values to the units specified by return_units.
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple([Quantity(out, return_units[out_name], subok=True)
for out, out_name in zip(outputs, self.outputs)])
if self.n_outputs == 1:
return outputs[0]
else:
return outputs
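    # Illustrative sketch of evaluation with a bounding box: inputs that fall
    # outside the box are replaced by ``fill_value`` (hypothetical model and
    # values):
    #
    #     >>> import numpy as np
    #     >>> from astropy.modeling import models
    #     >>> g = models.Gaussian1D(1., 0., 1.)
    #     >>> g.bounding_box = (-2, 2)
    #     >>> g(np.array([-5., 0., 5.]), with_bounding_box=True)  # doctest: +SKIP
    #     array([nan,  1., nan])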
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def n_inputs(self):
"""
The number of inputs to this model.
Equivalent to ``len(model.inputs)``.
"""
return len(self.inputs)
@property
def n_outputs(self):
"""
The number of outputs from this model.
Equivalent to ``len(model.outputs)``.
"""
return len(self.outputs)
@property
def model_set_axis(self):
"""
The index of the model set axis--that is the axis of a parameter array
that pertains to which model a parameter value pertains to--as
specified when the model was initialized.
See the documentation on `Model Sets
<http://docs.astropy.org/en/stable/modeling/models.html#model-sets>`_
for more details.
"""
return self._model_set_axis
@property
def param_sets(self):
"""
Return parameters as a pset.
This is a list with one item per parameter set, which is an array of
that parameter's values across all parameter sets, with the last axis
associated with the parameter set.
"""
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
"parameters array: {0}".format(e))
@property
def fixed(self):
"""
A `dict` mapping parameter names to their fixed constraint.
"""
return self._constraints['fixed']
@property
def tied(self):
"""
A `dict` mapping parameter names to their tied constraint.
"""
return self._constraints['tied']
@property
def bounds(self):
"""
A `dict` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples.
"""
return self._constraints['bounds']
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._constraints['eqcons']
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._constraints['ineqcons']
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
        set with a manually-defined inverse, such as a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
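A short sketch (assuming ``Shift``, which defines an analytic inverse;
the repr shown is indicative):
>>> from astropy.modeling.models import Shift  # doctest: +SKIP
>>> Shift(2).inverse  # doctest: +SKIP
<Shift(offset=-2.)>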
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
return self._inverse()
raise NotImplementedError("An analytical inverse transform has not "
"been implemented for this model.")
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
"model to have no inverse.")
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
del self._user_inverse
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
@property
def bounding_box(self):
r"""
A `tuple` of length `n_inputs` defining the bounding box limits, or
`None` for no bounding box.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
manually to an array-like object of shape ``(model.n_inputs, 2)``. For
further usage, see :ref:`bounding-boxes`.
The limits are ordered according to the `numpy` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "astropy\modeling\core.py", line 980, in bounding_box
"No bounding box is defined for this model (note: the "
NotImplementedError: No bounding box is defined for this model (note:
the bounding box was explicitly disabled for this model; use `del
model.bounding_box` to restore the default bounding box, if one is
defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model).")
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError(
"No bounding box is defined for this model.")
elif isinstance(self._bounding_box, _BoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return self._bounding_box()
else:
# The only other allowed possibility is that it's a _BoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), _model=self)()
return self._bounding_box(bounding_box, _model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
elif (isinstance(self._bounding_box, type) and
issubclass(self._bounding_box, _BoundingBox)):
cls = self._bounding_box
else:
cls = _BoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def separable(self):
""" A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
else:
raise NotImplementedError(
'The "separable" property is not defined for '
'model {}'.format(self.__class__.__name__))
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
Return an instance of the model for which the parameter values have been
converted to the right units for the data, then the units have been
stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not necessarily
the units of the input data, but are derived from them. Model subclasses
that want fitting to work in the presence of quantities need to define a
_parameter_units_for_data_units method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
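A hedged sketch of the intended usage (assuming ``Gaussian1D``, whose
inputs/outputs are ``('x',)``/``('y',)``; the names and values are
purely illustrative):
>>> from astropy import units as u  # doctest: +SKIP
>>> from astropy.modeling.models import Gaussian1D  # doctest: +SKIP
>>> g = Gaussian1D(amplitude=3 * u.Jy, mean=1 * u.m,
...                stddev=0.5 * u.m)  # doctest: +SKIP
>>> g_plain = g.without_units_for_data(x=[1, 2] * u.m,
...                                    y=[3, 4] * u.Jy)  # doctest: +SKIP
>>> g_plain.amplitude.unit is None  # doctest: +SKIP
True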
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
return model
def with_units_from_data(self, **kwargs):
"""
Return an instance of the model which has units for which the parameter
values are compatible with the data units specified.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units of
the input data, but are derived from them. Model subclasses that want
fitting to work in the presence of quantities need to define a
_parameter_units_for_data_units method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly, hence
# the call to _set_unit.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
@property
def _has_units(self):
# Returns True if any of the parameters have units
for param in self.param_names:
if getattr(self, param).unit is not None:
return True
else:
return False
@property
def _supports_unit_fitting(self):
# If the model has a '_parameter_units_for_data_units' method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, '_parameter_units_for_data_units')
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
(i.e. a = y - bx).
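A small sketch based on the ``Shift`` case described above (treat this
as illustrative rather than normative):
>>> from astropy.modeling.models import Shift  # doctest: +SKIP
>>> Shift(3).sum_of_implicit_terms(10.)  # doctest: +SKIP
10.0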
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of the
returned array. If this is not provided (or None), the model will be
evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Raises
------
ValueError
If ``coords`` are not given and the `Model.bounding_box` of this
model is not set.
Examples
--------
:ref:`bounding-boxes`
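A minimal hedged sketch (assuming ``Gaussian1D``; the bounding box and
array size are arbitrary):
>>> import numpy as np  # doctest: +SKIP
>>> from astropy.modeling.models import Gaussian1D  # doctest: +SKIP
>>> g = Gaussian1D(amplitude=1., mean=25., stddev=2.)  # doctest: +SKIP
>>> g.bounding_box = (20, 30)  # doctest: +SKIP
>>> out = g.render(out=np.zeros(50))  # doctest: +SKIP
>>> out.shape  # doctest: +SKIP
(50,)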
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError('If no bounding_box is set, '
'coords or out must be input.')
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError('inconsistent shape of the output.')
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out, dtype=float)
if out.ndim != ndim:
raise ValueError('the array and model must have the same '
'number of dimensions.')
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos)
for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
'The `bounding_box` is larger than the input out in '
'one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
not be overridden since it will return the input units based on the
annotations.
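A small sketch (assuming ``Gaussian1D``, which reports its input unit
from the ``mean`` parameter when that parameter carries one; the output
shown is indicative):
>>> from astropy import units as u  # doctest: +SKIP
>>> from astropy.modeling.models import Gaussian1D  # doctest: +SKIP
>>> Gaussian1D(mean=3 * u.m, stddev=1 * u.m).input_units  # doctest: +SKIP
{'x': Unit("m")}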
"""
if hasattr(self, '_input_units'):
return self._input_units
elif hasattr(self.evaluate, '__annotations__'):
annotations = self.evaluate.__annotations__.copy()
annotations.pop('return', None)
if annotations:
# If annotations are not provided for all inputs this will raise a KeyError.
return dict((name, annotations[name]) for name in self.inputs)
else:
# None means any unit is accepted
return None
@input_units.setter
def input_units(self, input_units):
self._input_units = input_units
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the output
of evaluate should be in, and returns a dictionary mapping outputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, '_return_units'):
return self._return_units
elif hasattr(self.evaluate, '__annotations__'):
return self.evaluate.__annotations__.get('return', None)
else:
# None means any unit is accepted
return None
@return_units.setter
def return_units(self, return_units):
self._return_units = return_units
def prepare_inputs(self, *inputs, model_set_axis=None, equivalencies=None,
**kwargs):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
there is more than one parameter set. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
n_models = len(self)
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
_validate_input_shapes(inputs, self.inputs, n_models,
model_set_axis, self.standard_broadcasting)
# Check that the units are correct, if applicable
if self.input_units is not None:
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(self.inputs,
equivalencies,
self.input_units_equivalencies)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(input_unit, equivalencies=input_units_equivalencies[input_name]):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is because
# some equivalencies are non-linear, and we need to be
# sure that we evaluate the model in its own frame
# of reference. If input_units_strict is set, we also
# need to convert to the input units.
if len(input_units_equivalencies) > 0 or self.input_units_strict:
inputs[i] = inputs[i].to(input_unit, equivalencies=input_units_equivalencies[input_name])
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
raise UnitsError("Units of input '{0}', {1} ({2}), could not be "
"converted to required dimensionless "
"input".format(self.inputs[i],
inputs[i].unit,
inputs[i].unit.physical_type))
else:
raise UnitsError("Units of input '{0}', {1} ({2}), could not be "
"converted to required input units of "
"{3} ({4})".format(self.inputs[i],
inputs[i].unit,
inputs[i].unit.physical_type,
input_unit,
input_unit.physical_type))
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (not self.input_units_allow_dimensionless and
input_unit is not dimensionless_unscaled and input_unit is not None):
if np.any(inputs[i] != 0):
raise UnitsError("Units of input '{0}', (dimensionless), could not be "
"converted to required input units of "
"{1} ({2})".format(self.inputs[i], input_unit,
input_unit.physical_type))
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if n_models == 1:
return _prepare_inputs_single_model(self, params, inputs,
**kwargs)
else:
return _prepare_inputs_model_set(self, params, inputs, n_models,
model_set_axis, **kwargs)
def prepare_outputs(self, format_info, *outputs, **kwargs):
if len(self) == 1:
return _prepare_outputs_single_model(self, outputs, format_info)
else:
return _prepare_outputs_model_set(self, outputs, format_info)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return copy.deepcopy(self)
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
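For example (assuming ``Gaussian1D``):
>>> from astropy.modeling.models import Gaussian1D  # doctest: +SKIP
>>> Gaussian1D().rename('g1').name  # doctest: +SKIP
'g1'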
"""
new_model = self.copy()
new_model._name = name
return new_model
@sharedmethod
def n_submodels(self):
"""
Return the number of components in a single model, which is
obviously 1.
"""
return 1
# *** Internal methods ***
@sharedmethod
def _from_existing(self, existing, param_names):
"""
Creates a new instance of ``cls`` that shares its underlying parameter
values with an existing model instance given by ``existing``.
This is used primarily by compound models to return a view of an
individual component of a compound model. ``param_names`` should be
the names of the parameters in the *existing* model to use as the
parameters in this new model. Its length should equal the number of
parameters this model takes, so that it can map parameters on the
existing model to parameters on this model one-to-one.
"""
# Basically this is an alternative __init__
if isinstance(self, type):
# self is a class, not an instance
needs_initialization = True
dummy_args = (0,) * len(param_names)
self = self.__new__(self, *dummy_args)
else:
needs_initialization = False
self = self.copy()
aliases = dict(zip(self.param_names, param_names))
# This is basically an alternative _initialize_constraints
constraints = {}
for cons_type in self.parameter_constraints:
orig = existing._constraints[cons_type]
constraints[cons_type] = AliasDict(orig, aliases)
self._constraints = constraints
self._n_models = existing._n_models
self._model_set_axis = existing._model_set_axis
self._parameters = existing._parameters
self._param_metrics = defaultdict(dict)
for param_a, param_b in aliases.items():
# Take the param metrics info for the given parameters in the
# existing model, and hand them to the appropriate parameters in
# the new model
self._param_metrics[param_a] = existing._param_metrics[param_b]
if needs_initialization:
self.__init__(*dummy_args)
return self
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
if hasattr(self, '_constraints'):
# Skip constraint initialization if it has already been handled via
# an alternate initialization
return
self._constraints = {}
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
self._constraints[constraint] = values.copy()
# Update with default parameter constraints
for param_name in self.param_names:
param = getattr(self, param_name)
# Parameters don't have all constraint types
value = getattr(param, constraint)
if value is not None:
self._constraints[constraint][param_name] = value
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._constraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
if hasattr(self, '_parameters'):
# Skip parameter initialization if it has already been handled via
# an alternate initialization
return
n_models = kwargs.pop('n_models', None)
if not (n_models is None or
(isinstance(n_models, (int, np.integer)) and n_models >= 1)):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
"(got {0!r})".format(n_models))
model_set_axis = kwargs.pop('model_set_axis', None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (model_set_axis is False or
(isinstance(model_set_axis, int) and
not isinstance(model_set_axis, bool))):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
"model in a set of models (got {0!r}).".format(
model_set_axis))
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = {}
if len(args) > len(self.param_names):
raise TypeError(
"{0}.__init__() takes at most {1} positional arguments ({2} "
"given)".format(self.__class__.__name__, len(self.param_names),
len(args)))
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
# A value of None implies using the default value, if one exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
params[self.param_names[idx]] = quantity_asanyarray(arg, dtype=float)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
"{0}.__init__() got multiple values for parameter "
"{1!r}".format(self.__class__.__name__, param_name))
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
params[param_name] = quantity_asanyarray(value, dtype=float)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
'{0}.__init__() got an unrecognized parameter '
'{1!r}'.format(self.__class__.__name__, kwarg))
# Determine the number of model sets: If the model_set_axis is
# None then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name, value in params.items():
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension "
"at least {0} for model_set_axis={1} (the value "
"given for {2!r} is only {3}-dimensional)".format(
min_ndim, model_set_axis, name, param_ndim))
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
"Inconsistent dimensions for parameter {0!r} for "
"{1} model sets. The length of axis {2} must be the "
"same for all input parameter values".format(
name, n_models, model_set_axis))
self._check_param_broadcast(params, max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(params, None)
self._n_models = n_models
self._initialize_parameter_values(params)
def _initialize_parameter_values(self, params):
# self._param_metrics should have been initialized in
# self._initialize_parameters
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
unit = None
param_descr = getattr(self, name)
if params.get(name) is None:
default = param_descr.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(
"{0}.__init__() requires a value for parameter "
"{1!r}".format(self.__class__.__name__, name))
value = params[name] = default
unit = param_descr.unit
else:
value = params[name]
if isinstance(value, Quantity):
unit = value.unit
else:
unit = None
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]['slice'] = param_slice
param_metrics[name]['shape'] = param_shape
if unit is None and param_descr.unit is not None:
raise InputParameterError(
"{0}.__init__() requires a Quantity for parameter "
"{1!r}".format(self.__class__.__name__, name))
param_metrics[name]['orig_unit'] = unit
param_metrics[name]['raw_unit'] = None
if param_descr._setter is not None:
_val = param_descr._setter(value)
if isinstance(_val, Quantity):
param_metrics[name]['raw_unit'] = _val.unit
else:
param_metrics[name]['raw_unit'] = None
total_size += param_size
self._param_metrics = param_metrics
self._parameters = np.empty(total_size, dtype=np.float64)
# Now set the parameter values (this will also fill
# self._parameters)
# TODO: This is a bit ugly, but easier to deal with than how this was
# done previously. There's still lots of opportunity for refactoring
# though, in particular once we move the _get/set_model_value methods
# out of Parameter and into Model (renaming them
# _get/set_parameter_value)
for name, value in params.items():
# value here may be a Quantity object.
param_descr = getattr(self, name)
unit = param_descr.unit
value = np.array(value)
orig_unit = param_metrics[name]['orig_unit']
if param_descr._setter is not None:
if unit is not None:
value = np.asarray(param_descr._setter(value * orig_unit).value)
else:
value = param_descr._setter(value)
self._parameters[param_metrics[name]['slice']] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
for name in params:
param_descr = getattr(self, name)
param_descr.validator(param_descr.value)
def _check_param_broadcast(self, params, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
If model_set_axis is None this merely checks that the parameters
broadcast and returns an empty dict if so. This mode is only used for
single model sets.
"""
all_shapes = []
param_names = []
model_set_axis = self._model_set_axis
for name in self.param_names:
# Previously this just used iteritems(params), but we loop over all
# param_names instead just to ensure some determinism in the
# ordering behavior
if name not in params:
continue
value = params[name]
param_names.append(name)
# We've already checked that each parameter array is compatible in
# the model_set_axis dimension, but now we need to check the
# dimensions excluding that axis
# Split the array dimensions into the axes before model_set_axis
# and after model_set_axis
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
# array with shape (2,) is extended to (2, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (param_shape[:model_set_axis + 1] +
new_axes +
param_shape[model_set_axis + 1:])
self._param_metrics[name]['broadcast_shape'] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = param_names[shape_a_idx]
param_b = param_names[shape_b_idx]
raise InputParameterError(
"Parameter {0!r} of shape {1!r} cannot be broadcast with "
"parameter {2!r} of shape {3!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules.".format(param_a, shape_a,
param_b, shape_b))
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
whether or not to return the raw parameter values (i.e. the values that
are actually stored in the ._parameters array), as opposed to the values
displayed to users. In most cases these are one and the same but there
are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
param_metrics = self._param_metrics
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw:
value = param._raw_value
else:
value = param.value
broadcast_shape = param_metrics[name].get('broadcast_shape')
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and self._param_metrics[name]['raw_unit'] is not None:
unit = self._param_metrics[name]['raw_unit']
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
# TODO: Returning an array from this method may be entirely pointless
# for internal use--perhaps only the external param_sets method should
# return an array (and just for backwards compat--I would prefer to
# maybe deprecate that method)
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# TODO: I think this could be reworked to present model sets better
parts = [repr(a) for a in args]
parts.extend(
"{0}={1}".format(name,
param_repr_oneline(getattr(self, name)))
for name in self.param_names)
if self.name is not None:
parts.append('name={0!r}'.format(self.name))
for kwarg, value in kwargs.items():
# Skip keyword arguments that are still at their default value
if kwarg in defaults and defaults[kwarg] == value:
continue
parts.append('{0}={1!r}'.format(kwarg, value))
if len(self) > 1:
parts.append("n_models={0}".format(len(self)))
return '<{0}({1})>'.format(self.__class__.__name__, ', '.join(parts))
def _format_str(self, keywords=[]):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('Inputs', self.inputs),
('Outputs', self.outputs),
('Model set size', len(self))
]
parts = ['{0}: {1}'.format(keyword, value)
for keyword, value in default_keywords + keywords
if value is not None]
parts.append('Parameters:')
if len(self) == 1:
columns = [[getattr(self, name).value]
for name in self.param_names]
else:
columns = [getattr(self, name).value
for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return '\n'.join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
inputs = ('x', 'y')
outputs = ('z',)
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params),
f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: (f[0](inputs[:f[1]], params) +
g[0](inputs[f[1]:], params)),
f[1] + g[1], f[2] + g[2])
# TODO: Support a couple unary operators--at least negation?
BINARY_OPERATORS = {
'+': _make_arithmetic_operator(operator.add),
'-': _make_arithmetic_operator(operator.sub),
'*': _make_arithmetic_operator(operator.mul),
'/': _make_arithmetic_operator(operator.truediv),
'**': _make_arithmetic_operator(operator.pow),
'|': _composition_operator,
'&': _join_operator
}
_ORDER_OF_OPERATORS = [('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
class _CompoundModelMeta(_ModelMeta):
_tree = None
_submodels = None
_submodel_names = None
_nextid = 0
_param_names = None
# _param_map is a mapping of the compound model's generated param names to
# the parameters of submodels they are associated with. The values in this
mapping are (idx, name) tuples where idx is the index of the submodel this
# parameter is associated with, and name is the same parameter's name on
# the submodel
# In principle this will allow compound models to give entirely new names
# to parameters that don't have to be the same as their original names on
# the submodels, but right now that isn't taken advantage of
_param_map = None
_slice_offset = 0
# When taking slices of a compound model, this keeps track of how offset
# the first model in the slice is from the first model in the original
# compound model it was taken from
# This just inverts _param_map, swapping keys with values. This is also
# useful to have.
_param_map_inverse = None
_fittable = None
_evaluate = None
def __getitem__(cls, index):
index = cls._normalize_index(index)
if isinstance(index, (int, np.integer)):
return cls._get_submodels()[index]
else:
return cls._get_slice(index.start, index.stop)
def __getattr__(cls, attr):
# Make sure the _tree attribute is set; otherwise we are not looking up
# an attribute on a concrete compound model class and should just raise
# the AttributeError
if cls._tree is not None and attr in cls.param_names:
cls._init_param_descriptors()
return getattr(cls, attr)
raise AttributeError(attr)
def __repr__(cls):
if cls._tree is None:
# This case is mostly for debugging purposes
return cls._format_cls_repr()
expression = cls._format_expression()
components = cls._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return cls._format_cls_repr(keywords=keywords)
def __dir__(cls):
"""
Returns a list of attributes defined on a compound model, including
all of its parameters.
"""
basedir = super().__dir__()
if cls._tree is not None:
for name in cls.param_names:
basedir.append(name)
basedir.sort()
return basedir
def __reduce__(cls):
rv = super().__reduce__()
if isinstance(rv, tuple):
# Delete _evaluate from the members dict
with suppress(KeyError):
del rv[1][2]['_evaluate']
return rv
@property
def submodel_names(cls):
if cls._submodel_names is None:
seen = {}
names = []
for idx, submodel in enumerate(cls._get_submodels()):
name = str(submodel.name)
if name in seen:
names.append('{0}_{1}'.format(name, idx))
if seen[name] >= 0:
jdx = seen[name]
names[jdx] = '{0}_{1}'.format(names[jdx], jdx)
seen[name] = -1
else:
names.append(name)
seen[name] = idx
cls._submodel_names = tuple(names)
return cls._submodel_names
@property
def param_names(cls):
if cls._param_names is None:
cls._init_param_names()
return cls._param_names
@property
def fittable(cls):
if cls._fittable is None:
cls._fittable = all(m.fittable for m in cls._get_submodels())
return cls._fittable
# TODO: Maybe we could use make_function_with_signature for evaluate, but
# it's probably not worth it (and I'm not sure what the limit is on number
# of function arguments/local variables but we could break that limit for
# complicated compound models...)
def evaluate(cls, *args):
if cls._evaluate is None:
func = cls._tree.evaluate(BINARY_OPERATORS,
getter=cls._model_evaluate_getter)[0]
cls._evaluate = func
inputs = args[:cls.n_inputs]
params = iter(args[cls.n_inputs:])
result = cls._evaluate(inputs, params)
if cls.n_outputs == 1:
return result[0]
else:
return result
# TODO: This supports creating a new compound model from two existing
# compound models (or normal models) and a single operator. However, it
# ought also to be possible to create a new model from an *entire*
# expression, represented as a sequence of operators and their operands (or
# an existing ExpressionTree) and build that into a compound model without
# creating an intermediate _CompoundModel class for every single operator
# in the expression. This will prove to be a useful optimization in many
# cases
@classmethod
def _from_operator(mcls, operator, left, right, additional_members={}):
"""
Given a Python operator (represented by a string, such as ``'+'``
or ``'*'``, and two model classes or instances, return a new compound
model that evaluates the given operator on the outputs of the left and
right input models.
If either of the input models are a model *class* (i.e. a subclass of
`~astropy.modeling.Model`) then the returned model is a new subclass of
`~astropy.modeling.Model` that may be instantiated with any parameter
values. If both input models are *instances* of a model, a new class
is still created, but this method returns an *instance* of that class,
taking the parameter values from the parameters of the input model
instances.
If given, the ``additional_members`` `dict` may provide additional
class members that should be added to the generated
`~astropy.modeling.Model` subclass. Some members that are generated by
this method should not be provided by ``additional_members``. These
include ``_tree``, ``inputs``, ``outputs``, ``linear``,
``standard_broadcasting``, and ``__module__``. This is currently for
internal use only.
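A hedged sketch of the user-facing behaviour this method backs
(combining two model *instances*; the parameter names shown follow the
``<name>_<index>`` convention and are indicative):
>>> from astropy.modeling.models import Gaussian1D, Const1D  # doctest: +SKIP
>>> gc = Gaussian1D(1, 0, 1) + Const1D(2)  # doctest: +SKIP
>>> gc.param_names  # doctest: +SKIP
('amplitude_0', 'mean_0', 'stddev_0', 'amplitude_1')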
"""
# Note, currently this only supports binary operators, but could be
# easily extended to support unary operators (namely '-') if/when
# needed
children = []
for child in (left, right):
if isinstance(child, (_CompoundModelMeta, _CompoundModel)):
"""
Although the original child models were copied we make another
copy here to ensure that changes in this child compound model
parameters will not propagate to the reuslt, that is
cm1 = Gaussian1D(1, 5, .1) + Gaussian1D()
cm2 = cm1 | Scale()
cm1.amplitude_0 = 100
assert(cm2.amplitude_0 == 1)
"""
children.append(copy.deepcopy(child._tree))
elif isinstance(child, Model):
children.append(ExpressionTree(child.copy()))
else:
children.append(ExpressionTree(child))
tree = ExpressionTree(operator, left=children[0], right=children[1])
name = str('CompoundModel{0}'.format(_CompoundModelMeta._nextid))
_CompoundModelMeta._nextid += 1
mod = find_current_module(3)
if mod:
modname = mod.__name__
else:
modname = '__main__'
inputs, outputs = mcls._check_inputs_and_outputs(operator, left, right)
if operator in ('|', '+', '-'):
linear = left.linear and right.linear
else:
# Which is not to say it is *definitely* not linear but it would be
# trickier to determine
linear = False
standard_broadcasting = \
left.standard_broadcasting and right.standard_broadcasting
# Note: If any other members are added here, make sure to mention them
# in the docstring of this method.
# Copy to avoid mutating the caller's dict (or the shared default {})
members = dict(additional_members)
members.update({
'_tree': tree,
'_is_dynamic': True, # See docs for _ModelMeta._is_dynamic
'inputs': inputs,
'outputs': outputs,
'linear': linear,
'standard_broadcasting': standard_broadcasting,
'__module__': str(modname)})
new_cls = mcls(name, (_CompoundModel,), members)
if isinstance(left, Model) and isinstance(right, Model):
# Both models used in the operator were already instantiated models,
# not model *classes*. As such it's not particularly useful to return
# the class itself, but to instead produce a new instance:
instance = new_cls()
# Workaround for https://github.com/astropy/astropy/issues/3542
# TODO: Any effort to restructure the tree-like data structure for
# compound models should try to obviate this workaround--if
# intermediate compound models are stored in the tree as well then
# we can immediately check for custom inverses on sub-models when
# computing the inverse
instance._user_inverse = mcls._make_user_inverse(
operator, left, right)
if left._n_models == right._n_models:
instance._n_models = left._n_models
else:
raise ValueError('Model sets must have the same number of '
'components.')
return instance
# Otherwise return the new uninstantiated class itself
return new_cls
@classmethod
def _check_inputs_and_outputs(mcls, operator, left, right):
# TODO: These aren't the full rules for handling inputs and outputs, but
# this will handle most basic cases correctly
if operator == '|':
inputs = left.inputs
outputs = right.outputs
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
"Unsupported operands for |: {0} (n_inputs={1}, "
"n_outputs={2}) and {3} (n_inputs={4}, n_outputs={5}); "
"n_outputs for the left-hand model must match n_inputs "
"for the right-hand model.".format(
left.name, left.n_inputs, left.n_outputs, right.name,
right.n_inputs, right.n_outputs))
elif operator == '&':
inputs = combine_labels(left.inputs, right.inputs)
outputs = combine_labels(left.outputs, right.outputs)
else:
# Without loss of generality
inputs = left.inputs
outputs = left.outputs
if (left.n_inputs != right.n_inputs or
left.n_outputs != right.n_outputs):
raise ModelDefinitionError(
"Unsupported operands for {0}: {1} (n_inputs={2}, "
"n_outputs={3}) and {4} (n_inputs={5}, n_outputs={6}); "
"models must have the same n_inputs and the same "
"n_outputs for this operator".format(
operator, left.name, left.n_inputs, left.n_outputs,
right.name, right.n_inputs, right.n_outputs))
return inputs, outputs
@classmethod
def _make_user_inverse(mcls, operator, left, right):
"""
Generates an inverse `Model` for this `_CompoundModel` when either
model in the operation has a *custom inverse* that was manually
assigned by the user.
If either model has a custom inverse, and in particular if another
`_CompoundModel` has a custom inverse, then none of that model's
sub-models should be considered at all when computing the inverse.
So in that case we just compute the inverse ahead of time and set
it as the new compound model's custom inverse.
Note, this use case only applies when combining model instances,
since model classes don't currently have a notion of a "custom
inverse" (though it could probably be supported by overriding the
class's inverse property).
TODO: Consider fixing things so the aforementioned class-based case
works as well. However, for the present purposes this is good enough.
"""
if not (operator in ('&', '|') and
(left._user_inverse or right._user_inverse)):
# These are the only operators that support an inverse right now
return None
try:
left_inv = left.inverse
right_inv = right.inverse
except NotImplementedError:
# If either inverse is undefined then just return None; this
# means the normal _CompoundModel.inverse routine will fail
# naturally anyways, since it requires all sub-models to have
# an inverse defined
return None
if operator == '&':
return left_inv & right_inv
else:
return right_inv | left_inv
# TODO: Perhaps, just perhaps, the post-order (or ???-order) ordering of
# leaf nodes is something the ExpressionTree class itself could just know
def _get_submodels(cls):
# Would make this a lazyproperty but those don't currently work with
# type objects
if cls._submodels is not None:
return cls._submodels
submodels = [c.value for c in cls._tree.traverse_postorder()
if c.isleaf]
cls._submodels = submodels
return submodels
def _init_param_descriptors(cls):
"""
This routine sets up the names for all the parameters on a compound
model, including figuring out unique names for those parameters and
also mapping them back to their associated parameters of the underlying
submodels.
Setting this all up is costly, and only necessary for compound models
that a user will directly interact with. For example when building an
expression like::
>>> M = (Model1 + Model2) * Model3 # doctest: +SKIP
the user will generally never interact directly with the temporary
result of the subexpression ``(Model1 + Model2)``. So there's no need
to setup all the parameters for that temporary throwaway. Only once
the full expression is built and the user initializes or introspects
``M`` is it necessary to determine its full parameterization.
"""
# Accessing cls.param_names will implicitly call _init_param_names if
# needed and thus also set up the _param_map; I'm not crazy about that
# design but it stands for now
for param_name in cls.param_names:
submodel_idx, submodel_param = cls._param_map[param_name]
submodel = cls[submodel_idx]
orig_param = getattr(submodel, submodel_param, None)
if isinstance(submodel, Model):
# Take the parameter's default from the model's value for that
# parameter
default = orig_param.value
else:
default = orig_param.default
# Copy constraints
constraints = dict((key, getattr(orig_param, key))
for key in Model.parameter_constraints)
# Note: Parameter.copy() returns a new unbound Parameter, never
# a bound Parameter even if submodel is a Model instance (as
# opposed to a Model subclass)
new_param = orig_param.copy(name=param_name, default=default,
unit=orig_param.unit,
**constraints)
setattr(cls, param_name, new_param)
def _init_param_names(cls):
"""
This subroutine is solely for setting up the ``param_names`` attribute
itself.
See ``_init_param_descriptors`` for the full parameter setup.
"""
# Currently this skips over Model *instances* in the expression tree;
# basically these are treated as constants and do not add
# fittable/tunable parameters to the compound model.
# TODO: I'm not 100% happy with this design, and maybe we need some
# interface for distinguishing fittable/settable parameters with
# *constant* parameters (which would be distinct from parameters with
# fixed constraints since they're permanently locked in place). But I'm
# not sure if this is really the best way to treat the issue.
names = []
param_map = {}
# Start counting the suffix indices to put on parameter names from the
# slice_offset. Usually this will just be zero, but for compound
# models that were sliced from another compound model this may be > 0
param_suffix = cls._slice_offset
for idx, model in enumerate(cls._get_submodels()):
if not model.param_names:
# Skip models that don't have parameters in the numbering
# TODO: Reevaluate this if it turns out to be confusing, though
# parameter-less models are not very common in practice (there
# are a few projections that don't take parameters)
continue
for param_name in model.param_names:
# This is sort of heuristic, but we want to check that
# model.param_name *actually* returns a Parameter descriptor,
# and that the model isn't some inconsistent type that happens
# to have a param_names attribute but does not actually
# implement settable parameters.
# In the future we can probably remove this check, but this is
# here specifically to support the legacy compat
# _CompositeModel which can be considered a pathological case
# in the context of the new framework
# if not isinstance(getattr(model, param_name, None),
# Parameter):
# break
name = '{0}_{1}'.format(param_name, param_suffix + idx)
names.append(name)
param_map[name] = (idx, param_name)
cls._param_names = tuple(names)
cls._param_map = param_map
cls._param_map_inverse = dict((v, k) for k, v in param_map.items())
def _format_expression(cls):
# TODO: At some point might be useful to make a public version of this,
# albeit with more formatting options
return cls._tree.format_expression(OPERATOR_PRECEDENCE)
def _format_components(cls):
return '\n\n'.join('[{0}]: {1!r}'.format(idx, m)
for idx, m in enumerate(cls._get_submodels()))
def _normalize_index(cls, index):
"""
Converts an index given to __getitem__ to either an integer, or
a slice with integer start and stop values.
If the length of the slice is exactly 1 this converts the index to a
simple integer lookup.
Negative integers are converted to positive integers.
"""
def get_index_from_name(name):
try:
return cls.submodel_names.index(name)
except ValueError:
raise IndexError(
'Compound model {0} does not have a component named '
'{1}'.format(cls.name, name))
def check_for_negative_index(index):
if index < 0:
new_index = len(cls.submodel_names) + index
if new_index < 0:
# If still < 0 then this is an invalid index
raise IndexError(
"Model index {0} out of range.".format(index))
else:
index = new_index
return index
if isinstance(index, str):
return get_index_from_name(index)
elif isinstance(index, slice):
if index.step not in (1, None):
# In principle it could be but I can scarcely imagine a case
# where it would be useful. If someone can think of one then
# we can enable it.
raise ValueError(
"Step not supported for compound model slicing.")
start = index.start if index.start is not None else 0
stop = (index.stop
if index.stop is not None else len(cls.submodel_names))
if isinstance(start, (int, np.integer)):
start = check_for_negative_index(start)
if isinstance(stop, (int, np.integer)):
stop = check_for_negative_index(stop)
if isinstance(start, str):
start = get_index_from_name(start)
if isinstance(stop, str):
stop = get_index_from_name(stop) + 1
length = stop - start
if length == 1:
return start
elif length <= 0:
raise ValueError("Empty slice of a compound model.")
return slice(start, stop)
elif isinstance(index, (int, np.integer)):
if index >= len(cls.submodel_names):
raise IndexError(
"Model index {0} out of range.".format(index))
return check_for_negative_index(index)
raise TypeError(
'Submodels can be indexed either by their integer order or '
'their name (got {0!r}).'.format(index))
def _get_slice(cls, start, stop):
"""
Return a new model built from a sub-expression of the expression
represented by this model.
Right now this is highly inefficient, as it creates a new temporary
model for each operator that appears in the sub-expression. It would
be better if this just built a new expression tree, and the new model
instantiated directly from that tree.
Once tree -> model instantiation is possible this should be fixed to
use that instead.
"""
members = {'_slice_offset': cls._slice_offset + start}
operators = dict((oper, _model_oper(oper, additional_members=members))
for oper in BINARY_OPERATORS)
return cls._tree.evaluate(operators, start=start, stop=stop)
@staticmethod
def _model_evaluate_getter(idx, model):
n_params = len(model.param_names)
n_inputs = model.n_inputs
n_outputs = model.n_outputs
# There is currently an unfortunate inconsistency in some models, which
# requires them to be instantiated for their evaluate to work. I think
# that needs to be reconsidered and fixed somehow, but in the meantime
# we need to check for that case
if (not isinstance(model, Model) and
isinstancemethod(model, model.evaluate)):
if n_outputs == 1:
# Where previously model was a class, now make an instance
def f(inputs, params):
param_values = tuple(islice(params, n_params))
return (model(*param_values).evaluate(
*chain(inputs, param_values)),)
else:
def f(inputs, params):
param_values = tuple(islice(params, n_params))
return model(*param_values).evaluate(
*chain(inputs, param_values))
else:
evaluate = model.evaluate
if n_outputs == 1:
f = lambda inputs, params: \
(evaluate(*chain(inputs, islice(params, n_params))),)
else:
f = lambda inputs, params: \
evaluate(*chain(inputs, islice(params, n_params)))
return (f, n_inputs, n_outputs)
class _CompoundModel(Model, metaclass=_CompoundModelMeta):
fit_deriv = None
col_fit_deriv = False
_submodels = None
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return super()._format_str(keywords=keywords)
def __getattr__(self, attr):
# This __getattr__ is necessary, because _CompoundModelMeta creates
# Parameter descriptors *lazily*--they do not exist in the class
# __dict__ until one of them has been accessed.
# However, this is at odds with how Python looks up descriptors (see
# (https://docs.python.org/3/reference/datamodel.html#invoking-descriptors)
# which is to look directly in the class __dict__
# This workaround allows descriptors to work correctly when they are
# not initially found in the class __dict__
value = getattr(self.__class__, attr)
if hasattr(value, '__get__'):
# Object is a descriptor, so we should really return the result of
# its __get__
value = value.__get__(self, self.__class__)
return value
def __getitem__(self, index):
index = self.__class__._normalize_index(index)
model = self.__class__[index]
if isinstance(index, slice):
param_names = model.param_names
else:
param_map = self.__class__._param_map_inverse
param_names = tuple(param_map[index, name]
for name in model.param_names)
return model._from_existing(self, param_names)
@property
def submodel_names(self):
return self.__class__.submodel_names
@sharedmethod
def n_submodels(self):
return len(self.submodel_names)
@property
def param_names(self):
return self.__class__.param_names
@property
def fittable(self):
return self.__class__.fittable
@sharedmethod
def evaluate(self, *args):
return self.__class__.evaluate(*args)
# TODO: The way this works is highly inefficient--the inverse is created by
# making a new model for each operator in the compound model, which could
# potentially mean creating a large number of temporary throwaway model
# classes. This can definitely be optimized in the future by implementing
# a way to construct a single model class from an existing tree
@property
def inverse(self):
def _not_implemented(oper):
def _raise(x, y):
raise NotImplementedError(
"The inverse is not currently defined for compound "
"models created using the {0} operator.".format(oper))
return _raise
operators = dict((oper, _not_implemented(oper))
for oper in ('+', '-', '*', '/', '**'))
operators['&'] = operator.and_
# Reverse the order of compositions
operators['|'] = lambda x, y: operator.or_(y, x)
leaf_idx = -1
def getter(idx, model):
try:
# By indexing on self[] this will return an instance of the
# model, with all the appropriate parameters set, which is
# currently required to return an inverse
return self[idx].inverse
except NotImplementedError:
raise NotImplementedError(
"All models in a composite model must have an inverse "
"defined in order for the composite model to have an "
"inverse. {0!r} does not have an inverse.".format(model))
return self._tree.evaluate(operators, getter=getter)
@sharedmethod
def _get_submodels(self):
return self.__class__._get_submodels()
def _parameter_units_for_data_units(self, input_units, output_units):
units_for_data = {}
for imodel, model in enumerate(self._submodels):
units_for_data_sub = model._parameter_units_for_data_units(input_units, output_units)
for param_sub in units_for_data_sub:
param = self._param_map_inverse[(imodel, param_sub)]
units_for_data[param] = units_for_data_sub[param_sub]
return units_for_data
def deepcopy(self):
"""
Return a deep copy of a compound model.
"""
new_model = self.copy()
new_model._submodels = [model.deepcopy() for model in self._submodels]
return new_model
def custom_model(*args, fit_deriv=None, **kwargs):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
Parameters
----------
func : function
Function which defines the model. It should take N positional
arguments where ``N`` is the dimension of the model (the number of
independent variables in the model), and any number of keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25)
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if kwargs:
warnings.warn(
"Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
"{0} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any).".format(__name__))
def _custom_model_wrapper(func, fit_deriv=None):
"""
Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable "
"object")
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other "
"callable object")
model_name = func.__name__
inputs, params = get_inputs_and_params(func)
if (fit_deriv is not None and
len(fit_deriv.__defaults__) != len(params)):
raise ModelDefinitionError("derivative function should accept "
"same number of parameters as func.")
# TODO: Maybe have a clever scheme for default output name?
if inputs:
output_names = (inputs[0].name,)
else:
output_names = ('x',)
params = dict((param.name, Parameter(param.name, default=param.default))
for param in params)
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
members = {
'__module__': str(modname),
'__doc__': func.__doc__,
'inputs': tuple(x.name for x in inputs),
'outputs': output_names,
'evaluate': staticmethod(func),
}
if fit_deriv is not None:
members['fit_deriv'] = staticmethod(fit_deriv)
members.update(params)
return type(model_name, (FittableModel,), members)
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
Coordinate arrays mapping to ``arr``, such that
``arr[coords] == arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from ``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`bounding-boxes`
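A minimal sketch (hypothetical values; assumes a 2D model with a
``bounding_box`` set):
>>> import numpy as np
>>> from astropy.modeling.core import render_model
>>> from astropy.modeling.models import Gaussian2D
>>> psf = Gaussian2D(amplitude=1., x_mean=12., y_mean=12., x_stddev=1., y_stddev=1.)
>>> psf.bounding_box = ((7, 17), (7, 17))
>>> image = render_model(psf, arr=np.zeros((25, 25)))
>>> image.shape
(25, 25)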
"""
bbox = model.bounding_box
if (coords is None) & (arr is None) & (bbox is None):
raise ValueError('If no bounding_box is set, coords or arr must be input.')
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError('number of array dimensions inconsistent with '
'number of model inputs.')
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError('coordinate length inconsistent with the number '
'of model inputs.')
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError('coordinate shape inconsistent with the '
'array shape.')
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
# ensures the position is at the center pixel, which is important when using add_array
pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError('The `bounding_box` is larger than the input'
' arr in one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
def _prepare_inputs_single_model(model, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
# Ensure that array scalars are always upgraded to 1-D arrays for the
# sake of consistency with how parameters work. They will be cast back
# to scalars at the end
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if model.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if model.n_outputs > model.n_inputs:
if len(set(broadcasts)) > 1:
raise ValueError(
"For models with n_outputs > n_inputs, the combination of "
"all inputs and parameters must broadcast to the same shape, "
"which will be used as the shape of all outputs. In this "
"case some of the inputs had different shapes, so it is "
"ambiguous how to format outputs for this model. Try using "
"inputs that are all the same size and shape.")
else:
# Extend the broadcasts list to include shapes for all outputs
extra_outputs = model.n_outputs - model.n_inputs
if not broadcasts:
# If there were no inputs then the broadcasts list is empty; just
# add a None since there is no broadcasting of outputs and inputs
# necessary (see _prepare_outputs_single_model)
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
def _prepare_outputs_single_model(model, outputs, format_info):
broadcasts = format_info[0]
outputs = list(outputs)
for idx, output in enumerate(outputs):
broadcast_shape = broadcasts[idx]
if broadcast_shape is not None:
if not broadcast_shape:
# Shape is (), i.e. a scalar should be returned
outputs[idx] = np.asscalar(output)
else:
outputs[idx] = output.reshape(broadcast_shape)
return tuple(outputs)
def _prepare_inputs_model_set(model, params, inputs, n_models, model_set_axis,
**kwargs):
reshaped = []
pivots = []
for idx, _input in enumerate(inputs):
max_param_shape = ()
if n_models > 1 and model_set_axis is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (_input.shape[:model_set_axis] +
_input.shape[model_set_axis + 1:])
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(input_shape, param.shape)
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(param.shape) > len(max_param_shape):
max_param_shape = param.shape
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model.model_set_axis
else:
pivot = input_ndim - len(max_param_shape)
new_shape = (_input.shape[:pivot] + (1,) +
_input.shape[pivot:])
new_input = _input.reshape(new_shape)
else:
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = model.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (_input.shape[:pivot + 1] + new_axes +
_input.shape[pivot + 1:])
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis,
pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if model.n_inputs < model.n_outputs:
pivots.extend([model_set_axis] * (model.n_outputs - model.n_inputs))
return reshaped, (pivots,)
def _prepare_outputs_model_set(model, outputs, format_info):
pivots = format_info[0]
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model.model_set_axis:
outputs[idx] = np.rollaxis(output, pivot,
model.model_set_axis)
return tuple(outputs)
def _validate_input_shapes(inputs, argnames, n_models, model_set_axis,
validate_broadcasting):
"""
Perform basic validation of model inputs--that they are mutually
broadcastable and that they have the minimum dimensions for the given
model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = n_models > 1 and model_set_axis is not False
if not (validate_broadcasting or check_model_set_axis):
# Nothing else needed here
return
all_shapes = []
for idx, _input in enumerate(inputs):
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
"For model_set_axis={0}, all inputs must be at "
"least {1}-dimensional.".format(
model_set_axis, model_set_axis + 1))
elif input_shape[model_set_axis] != n_models:
raise ValueError(
"Input argument {0!r} does not have the correct "
"dimensions in model_set_axis={1} for a model set with "
"n_models={2}.".format(argnames[idx], model_set_axis,
n_models))
all_shapes.append(input_shape)
if not validate_broadcasting:
return
try:
input_broadcast = check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
arg_a = argnames[shape_a_idx]
arg_b = argnames[shape_b_idx]
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot "
"be broadcast with input {2!r} of shape {3!r}".format(
arg_a, shape_a, arg_b, shape_b))
return input_broadcast
copyreg.pickle(_ModelMeta, _ModelMeta.__reduce__)
copyreg.pickle(_CompoundModelMeta, _CompoundModelMeta.__reduce__)
|
cd555bcec03ec1b3b139ade2664745a66c4e4948c7bf9253ebb46d8abe526664 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Creates a common namespace for all pre-defined models.
"""
from .core import custom_model # pylint: disable=W0611
from .mappings import *
from .projections import *
from .rotations import *
from .polynomial import *
from .functional_models import *
from .powerlaws import *
from .tabular import *
from .blackbody import BlackBody1D
"""
Attach a docstring explaining constraints to all models which support them.
Note: add new models to this list
"""
CONSTRAINTS_DOC = """
Other Parameters
----------------
fixed : dict
A dictionary ``{parameter_name: boolean}`` of parameters to not be
varied during fitting. True means the parameter is held fixed.
Alternatively the `~astropy.modeling.Parameter.fixed`
property of a parameter may be used.
tied : dict
A dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship. Alternatively the
`~astropy.modeling.Parameter.tied` property of a parameter
may be used.
bounds : dict
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list of length 2
giving the desired range for the parameter. Alternatively the
`~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` properties of a parameter
may be used.
eqcons : list
A list of functions of length ``n`` such that ``eqcons[j](x0,*args) ==
0.0`` in a successfully optimized problem.
ineqcons : list
A list of functions of length ``n`` such that ``ieqcons[j](x0,*args) >=
0.0`` in a successfully optimized problem.
"""
MODELS_WITH_CONSTRAINTS = [
AiryDisk2D, Moffat1D, Moffat2D, Box1D, Box2D,
Const1D, Const2D, Ellipse2D, Disk2D,
Gaussian1D, Gaussian2D,
Linear1D, Lorentz1D, MexicanHat1D, MexicanHat2D,
PowerLaw1D, Sersic1D, Sersic2D, Sine1D, Trapezoid1D, TrapezoidDisk2D,
Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D, Legendre2D, Legendre1D,
Polynomial1D, Polynomial2D, Voigt1D
]
for item in MODELS_WITH_CONSTRAINTS:
if isinstance(item.__doc__, str):
item.__doc__ += CONSTRAINTS_DOC
|
9fadff46cf02d5fb3ed3ad4f9fb967c54f2fea92b03c81666e4ec93df0eb0231 | """
Special models useful for complex compound models where control is needed over
which outputs from a source model are mapped to which inputs of a target model.
"""
from .core import FittableModel
__all__ = ['Mapping', 'Identity']
class Mapping(FittableModel):
"""
Allows inputs to be reordered, duplicated or dropped.
Parameters
----------
mapping : tuple
A tuple of integers representing indices of the inputs to this model
to return and in what order to return them. See
:ref:`compound-model-mappings` for more details.
n_inputs : int
Number of inputs; if `None` (default) then ``max(mapping) + 1`` is
used (i.e. the highest input index used in the mapping).
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Raises
------
TypeError
Raised when the number of inputs is less than ``max(mapping)``.
Examples
--------
>>> from astropy.modeling.models import Polynomial2D, Shift, Mapping
>>> poly1 = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)
>>> poly2 = Polynomial2D(1, c0_0=1, c1_0=2.4, c0_1=2.1)
>>> model = (Shift(1) & Shift(2)) | Mapping((0, 1, 0, 1)) | (poly1 & poly2)
>>> model(1, 2) # doctest: +FLOAT_CMP
(17.0, 14.2)
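A simpler sketch (illustrative values) showing duplication and dropping of
inputs:
>>> Mapping((0, 1, 0))(1, 2)  # doctest: +FLOAT_CMP
(1.0, 2.0, 1.0)
>>> Mapping((1,), n_inputs=2)(1, 2)  # doctest: +FLOAT_CMP
2.0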
"""
linear = True # FittableModel is non-linear by default
def __init__(self, mapping, n_inputs=None, name=None, meta=None):
if n_inputs is None:
self._inputs = tuple('x' + str(idx)
for idx in range(max(mapping) + 1))
else:
self._inputs = tuple('x' + str(idx)
for idx in range(n_inputs))
self._outputs = tuple('x' + str(idx) for idx in range(len(mapping)))
self._mapping = mapping
super().__init__(name=name, meta=meta)
@property
def inputs(self):
"""
The name(s) of the input variable(s) on which a model is evaluated.
"""
return self._inputs
@property
def outputs(self):
"""The name(s) of the output(s) of the model."""
return self._outputs
@property
def mapping(self):
"""Integers representing indices of the inputs."""
return self._mapping
def __repr__(self):
if self.name is None:
return '<Mapping({0})>'.format(self.mapping)
else:
return '<Mapping({0}, name={1})>'.format(self.mapping, self.name)
def evaluate(self, *args):
if len(args) != self.n_inputs:
name = self.name if self.name is not None else "Mapping"
raise TypeError('{0} expects {1} inputs; got {2}'.format(
name, self.n_inputs, len(args)))
result = tuple(args[idx] for idx in self._mapping)
if self.n_outputs == 1:
return result[0]
return result
@property
def inverse(self):
"""
A `Mapping` representing the inverse of the current mapping.
Raises
------
`NotImplementedError`
An inverse does not exist on mappings that drop some of their inputs
(there is then no way to reconstruct the inputs that were dropped).
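Examples
--------
A short illustration (no inputs are dropped, so the mapping is invertible):
>>> from astropy.modeling.models import Mapping
>>> Mapping((1, 0)).inverse.mapping
(1, 0)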
"""
try:
mapping = tuple(self.mapping.index(idx)
for idx in range(self.n_inputs))
except ValueError:
raise NotImplementedError(
"Mappings such as {0} that drop one or more of their inputs "
"are not invertible at this time.".format(self.mapping))
inv = self.__class__(mapping)
inv._inputs = self._outputs
inv._outputs = self._inputs
return inv
class Identity(Mapping):
"""
Returns inputs unchanged.
This class is useful in compound models when some of the inputs must be
passed unchanged to the next model.
Parameters
----------
n_inputs : int
Specifies the number of inputs this identity model accepts.
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict-like
Free-form metadata to associate with this model.
Examples
--------
Transform ``(x, y)`` by a shift in x, followed by scaling the two inputs::
>>> from astropy.modeling.models import (Polynomial1D, Shift, Scale,
... Identity)
>>> model = (Shift(1) & Identity(1)) | Scale(1.2) & Scale(2)
>>> model(1,1) # doctest: +FLOAT_CMP
(2.4, 2.0)
>>> model.inverse(2.4, 2) # doctest: +FLOAT_CMP
(1.0, 1.0)
"""
linear = True # FittableModel is non-linear by default
def __init__(self, n_inputs, name=None, meta=None):
mapping = tuple(range(n_inputs))
super().__init__(mapping, name=name, meta=meta)
def __repr__(self):
if self.name is None:
return '<Identity({0})>'.format(self.n_inputs)
else:
return '<Identity({0}, name={1})>'.format(self.n_inputs, self.name)
@property
def inverse(self):
"""
The inverse transformation.
In this case of `Identity`, ``self.inverse is self``.
"""
return self
|
3ebdbd6e608be84baae96e7a7fc3cf12837f0ccd4250027c217c45d77099728a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Model and functions related to blackbody radiation.
.. _blackbody-planck-law:
Blackbody Radiation
-------------------
Blackbody flux is calculated with the Planck law
(:ref:`Rybicki & Lightman 1979 <ref-rybicki1979>`):
.. math::
B_{\\lambda}(T) = \\frac{2 h c^{2} / \\lambda^{5}}{exp(h c / \\lambda k T) - 1}
B_{\\nu}(T) = \\frac{2 h \\nu^{3} / c^{2}}{exp(h \\nu / k T) - 1}
where the unit of :math:`B_{\\lambda}(T)` is
:math:`erg \\; s^{-1} cm^{-2} \\mathring{A}^{-1} sr^{-1}`, and
:math:`B_{\\nu}(T)` is :math:`erg \\; s^{-1} cm^{-2} Hz^{-1} sr^{-1}`.
:func:`~astropy.modeling.blackbody.blackbody_lambda` and
:func:`~astropy.modeling.blackbody.blackbody_nu` calculate the
blackbody flux for :math:`B_{\\lambda}(T)` and :math:`B_{\\nu}(T)`,
respectively.
For blackbody representation as a model, see :class:`BlackBody1D`.
.. _blackbody-examples:
Examples
^^^^^^^^
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.modeling.blackbody import blackbody_lambda, blackbody_nu
Calculate blackbody flux for 5000 K at 100 and 10000 Angstrom while suppressing
any Numpy warnings:
>>> wavelengths = [100, 10000] * u.AA
>>> temperature = 5000 * u.K
>>> with np.errstate(all='ignore'):
... flux_lam = blackbody_lambda(wavelengths, temperature)
... flux_nu = blackbody_nu(wavelengths, temperature)
>>> flux_lam # doctest: +FLOAT_CMP
<Quantity [ 1.27452545e-108, 7.10190526e+005] erg / (Angstrom cm2 s sr)>
>>> flux_nu # doctest: +FLOAT_CMP
<Quantity [ 4.25135927e-123, 2.36894060e-005] erg / (cm2 Hz s sr)>
Plot a blackbody spectrum for 5000 K:
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as const
from astropy import units as u
from astropy.modeling.blackbody import blackbody_lambda
temperature = 5000 * u.K
wavemax = (const.b_wien / temperature).to(u.AA) # Wien's displacement law
waveset = np.logspace(
0, np.log10(wavemax.value + 10 * wavemax.value), num=1000) * u.AA
with np.errstate(all='ignore'):
flux = blackbody_lambda(waveset, temperature)
fig, ax = plt.subplots(figsize=(8, 5))
ax.plot(waveset.value, flux.value)
ax.axvline(wavemax.value, ls='--')
ax.get_yaxis().get_major_formatter().set_powerlimits((0, 1))
ax.set_xlabel(r'$\\lambda$ ({0})'.format(waveset.unit))
ax.set_ylabel(r'$B_{\\lambda}(T)$')
ax.set_title('Blackbody, T = {0}'.format(temperature))
Note that an array of temperatures can also be given instead of a single
temperature. In this case, the Numpy broadcasting rules apply: for instance, if
the frequency and temperature have the same shape, the output will have this
shape too, while if the frequency is a 2-d array with shape ``(n, m)`` and the
temperature is an array with shape ``(m,)``, the output will have a shape
``(n, m)``.
See Also
^^^^^^^^
.. _ref-rybicki1979:
Rybicki, G. B., & Lightman, A. P. 1979, Radiative Processes in Astrophysics (New York, NY: Wiley)
"""
import warnings
from collections import OrderedDict
import numpy as np
from .core import Fittable1DModel
from .parameters import Parameter
from .. import constants as const
from .. import units as u
from ..utils.exceptions import AstropyUserWarning
__all__ = ['BlackBody1D', 'blackbody_nu', 'blackbody_lambda']
# Units
FNU = u.erg / (u.cm**2 * u.s * u.Hz)
FLAM = u.erg / (u.cm**2 * u.s * u.AA)
# Some platform implementations of expm1() are buggy and Numpy uses
# them anyway--the bug is that on certain large inputs it returns
# NaN instead of INF like it should (it should only return NaN on a
# NaN input).
# See https://github.com/astropy/astropy/issues/4171
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
_has_buggy_expm1 = np.isnan(np.expm1(1000)) or np.isnan(np.expm1(1e10))
class BlackBody1D(Fittable1DModel):
"""
One dimensional blackbody model.
Parameters
----------
temperature : :class:`~astropy.units.Quantity`
Blackbody temperature.
bolometric_flux : :class:`~astropy.units.Quantity`
The bolometric flux of the blackbody (i.e., the integral over the
spectral axis).
Notes
-----
Model formula:
.. math:: f(x) = \\pi B_{\\nu} f_{\\text{bolometric}} / (\\sigma T^{4})
Examples
--------
>>> from astropy.modeling import models
>>> from astropy import units as u
>>> bb = models.BlackBody1D()
>>> bb(6000 * u.AA) # doctest: +FLOAT_CMP
<Quantity 1.3585381201978953e-15 erg / (cm2 Hz s)>
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import BlackBody1D
from astropy.modeling.blackbody import FLAM
from astropy import units as u
from astropy.visualization import quantity_support
bb = BlackBody1D(temperature=5778*u.K)
wav = np.arange(1000, 110000) * u.AA
flux = bb(wav).to(FLAM, u.spectral_density(wav))
with quantity_support():
plt.figure()
plt.semilogx(wav, flux)
plt.axvline(bb.lambda_max.to(u.AA).value, ls='--')
plt.show()
"""
# We parametrize this model with a temperature and a bolometric flux. The
# bolometric flux is the integral of the model over the spectral axis. This
# is more useful than simply having an amplitude parameter.
temperature = Parameter(default=5000, min=0, unit=u.K)
bolometric_flux = Parameter(default=1, unit=u.erg / u.cm ** 2 / u.s)
# We allow values without units to be passed when evaluating the model, and
# in this case the input x values are assumed to be frequencies in Hz.
input_units_allow_dimensionless = True
# We enable the spectral equivalency by default for the spectral axis
input_units_equivalencies = {'x': u.spectral()}
def evaluate(self, x, temperature, bolometric_flux):
"""Evaluate the model.
Parameters
----------
x : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Frequency at which to compute the blackbody. If no units are given,
this defaults to Hz.
temperature : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Temperature of the blackbody. If no units are given, this defaults
to Kelvin.
bolometric_flux : float, `~numpy.ndarray`, or `~astropy.units.Quantity`
Desired integral for the blackbody.
Returns
-------
y : number or ndarray
Blackbody spectrum. The units are determined from the units of
``bolometric_flux``.
"""
# We need to make sure that we attach units to the temperature if it
# doesn't have any units. We do this because even though blackbody_nu
# can take temperature values without units, the / temperature ** 4
# factor needs units to be defined.
if isinstance(temperature, u.Quantity):
temperature = temperature.to(u.K, equivalencies=u.temperature())
else:
temperature = u.Quantity(temperature, u.K)
# We normalize the returned blackbody so that the integral would be
# unity, and we then multiply by the bolometric flux. A normalized
# blackbody has f_nu = pi * B_nu / (sigma * T^4), which is what we
# calculate here. We convert to 1/Hz to make sure the units are
# simplified as much as possible, then we multiply by the bolometric
# flux to get the normalization right.
fnu = ((np.pi * u.sr * blackbody_nu(x, temperature) /
const.sigma_sb / temperature ** 4).to(1 / u.Hz) *
bolometric_flux)
# If the bolometric_flux parameter has no unit, we should drop the /Hz
# and return a unitless value. This occurs for instance during fitting,
# since we drop the units temporarily.
if hasattr(bolometric_flux, 'unit'):
return fnu
else:
return fnu.value
@property
def input_units(self):
# The input units are those of the 'x' value, which should always be
# Hz. Because we do this, and because input_units_allow_dimensionless
# is set to True, dimensionless values are assumed to be in Hz.
return {'x': u.Hz}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('temperature', u.K),
('bolometric_flux', outputs_unit['y'] * u.Hz)])
@property
def lambda_max(self):
"""Peak wavelength when the curve is expressed as power density."""
return const.b_wien / self.temperature
def blackbody_nu(in_x, temperature):
"""Calculate blackbody flux per steradian, :math:`B_{\\nu}(T)`.
.. note::
Use `numpy.errstate` to suppress Numpy warnings, if desired.
.. warning::
Output values might contain ``nan`` and ``inf``.
Parameters
----------
in_x : number, array-like, or `~astropy.units.Quantity`
Frequency, wavelength, or wave number.
If not a Quantity, it is assumed to be in Hz.
temperature : number, array-like, or `~astropy.units.Quantity`
Blackbody temperature.
If not a Quantity, it is assumed to be in Kelvin.
Returns
-------
flux : `~astropy.units.Quantity`
Blackbody monochromatic flux in
:math:`erg \\; cm^{-2} s^{-1} Hz^{-1} sr^{-1}`.
Raises
------
ValueError
Invalid temperature.
ZeroDivisionError
Wavelength is zero (when converting to frequency).
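Examples
--------
A short sketch (ignoring possible Numpy warnings; values not shown):
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.modeling.blackbody import blackbody_nu
>>> with np.errstate(all='ignore'):
...     flux = blackbody_nu(5000 * u.AA, 6000 * u.K)
>>> print(flux.unit)
erg / (cm2 Hz s sr)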
"""
# Convert to units for calculations, also force double precision
with u.add_enabled_equivalencies(u.spectral() + u.temperature()):
freq = u.Quantity(in_x, u.Hz, dtype=np.float64)
temp = u.Quantity(temperature, u.K, dtype=np.float64)
# Check if input values are physically possible
if np.any(temp < 0):
raise ValueError('Temperature should be positive: {0}'.format(temp))
if not np.all(np.isfinite(freq)) or np.any(freq <= 0):
warnings.warn('Input contains invalid wavelength/frequency value(s)',
AstropyUserWarning)
log_boltz = const.h * freq / (const.k_B * temp)
boltzm1 = np.expm1(log_boltz)
if _has_buggy_expm1:
# Replace incorrect nan results with infs--any result of 'nan' is
# incorrect unless the input (in log_boltz) happened to be nan to begin
# with. (As noted in #4393 ideally this would be replaced by a version
# of expm1 that doesn't have this bug, rather than fixing incorrect
# results after the fact...)
boltzm1_nans = np.isnan(boltzm1)
if np.any(boltzm1_nans):
if boltzm1.isscalar and not np.isnan(log_boltz):
boltzm1 = np.inf
else:
boltzm1[np.where(~np.isnan(log_boltz) & boltzm1_nans)] = np.inf
# Calculate blackbody flux
bb_nu = (2.0 * const.h * freq ** 3 / (const.c ** 2 * boltzm1))
flux = bb_nu.to(FNU, u.spectral_density(freq))
return flux / u.sr # Add per steradian to output flux unit
def blackbody_lambda(in_x, temperature):
"""Like :func:`blackbody_nu` but for :math:`B_{\\lambda}(T)`.
Parameters
----------
in_x : number, array-like, or `~astropy.units.Quantity`
Frequency, wavelength, or wave number.
If not a Quantity, it is assumed to be in Angstrom.
temperature : number, array-like, or `~astropy.units.Quantity`
Blackbody temperature.
If not a Quantity, it is assumed to be in Kelvin.
Returns
-------
flux : `~astropy.units.Quantity`
Blackbody monochromatic flux in
:math:`erg \\; cm^{-2} s^{-1} \\mathring{A}^{-1} sr^{-1}`.
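Examples
--------
A short sketch (see also the module-level examples above; values not shown):
>>> import numpy as np
>>> from astropy import units as u
>>> from astropy.modeling.blackbody import blackbody_lambda
>>> with np.errstate(all='ignore'):
...     flux = blackbody_lambda([3000, 5000] * u.AA, 6000 * u.K)
>>> flux.shape
(2,)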
"""
if getattr(in_x, 'unit', None) is None:
in_x = u.Quantity(in_x, u.AA)
bb_nu = blackbody_nu(in_x, temperature) * u.sr # Remove sr for conversion
flux = bb_nu.to(FLAM, u.spectral_density(in_x))
return flux / u.sr # Add per steradian to output flux unit
|
82fe5a1cfc7144462f126d80c6745462501540bc79fb6c997e9ed96d5d8e6e7b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Optimization algorithms used in `~astropy.modeling.fitting`.
"""
import warnings
import abc
import numpy as np
from ..utils.exceptions import AstropyUserWarning
__all__ = ["Optimization", "SLSQP", "Simplex"]
# Maximum number of iterations
DEFAULT_MAXITER = 100
# Step for the forward difference approximation of the Jacobian
DEFAULT_EPS = np.sqrt(np.finfo(float).eps)
# Default requested accuracy
DEFAULT_ACC = 1e-07
DEFAULT_BOUNDS = (-10 ** 12, 10 ** 12)
class Optimization(metaclass=abc.ABCMeta):
"""
Base class for optimizers.
Parameters
----------
opt_method : callable
Implements optimization method
Notes
-----
The base Optimizer does not support any constraints by default; individual
optimizers should explicitly set this list to the specific constraints
they support.
"""
supported_constraints = []
def __init__(self, opt_method):
self._opt_method = opt_method
self._maxiter = DEFAULT_MAXITER
self._eps = DEFAULT_EPS
self._acc = DEFAULT_ACC
@property
def maxiter(self):
"""Maximum number of iterations"""
return self._maxiter
@maxiter.setter
def maxiter(self, val):
"""Set maxiter"""
self._maxiter = val
@property
def eps(self):
"""Step for the forward difference approximation of the Jacobian"""
return self._eps
@eps.setter
def eps(self, val):
"""Set eps value"""
self._eps = val
@property
def acc(self):
"""Requested accuracy"""
return self._acc
@acc.setter
def acc(self, val):
"""Set accuracy"""
self._acc = val
def __repr__(self):
fmt = "{0}()".format(self.__class__.__name__)
return fmt
@property
def opt_method(self):
return self._opt_method
@abc.abstractmethod
def __call__(self):
raise NotImplementedError("Subclasses should implement this method")
class SLSQP(Optimization):
"""
Sequential Least Squares Programming optimization algorithm.
The algorithm is described in [1]_. It supports tied and fixed
parameters, as well as bounded constraints. Uses
`scipy.optimize.fmin_slsqp`.
References
----------
.. [1] http://www.netlib.org/toms/733
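Examples
--------
A sketch of typical (indirect) use through the corresponding fitter
(assumes `~astropy.modeling.fitting.SLSQPLSQFitter`, which wraps this
class):
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> x = np.linspace(-5., 5., 100)
>>> y = 3 * np.exp(-0.5 * (x - 1.3) ** 2 / 0.8 ** 2)
>>> fitter = fitting.SLSQPLSQFitter()
>>> fitted = fitter(models.Gaussian1D(1., 0., 1.), x, y, verblevel=0)  # doctest: +SKIP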
"""
supported_constraints = ['bounds', 'eqcons', 'ineqcons', 'fixed', 'tied']
def __init__(self):
from scipy.optimize import fmin_slsqp
super().__init__(fmin_slsqp)
self.fit_info = {
'final_func_val': None,
'numiter': None,
'exit_mode': None,
'message': None
}
def __call__(self, objfunc, initval, fargs, **kwargs):
"""
Run the solver.
Parameters
----------
objfunc : callable
objective function
initval : iterable
initial guess for the parameter values
fargs : tuple
other arguments to be passed to the statistic function
kwargs : dict
other keyword arguments to be passed to the solver
"""
kwargs['iter'] = kwargs.pop('maxiter', self._maxiter)
if 'epsilon' not in kwargs:
kwargs['epsilon'] = self._eps
if 'acc' not in kwargs:
kwargs['acc'] = self._acc
# Get the verbosity level
disp = kwargs.pop('verblevel', None)
# set the values of constraints to match the requirements of fmin_slsqp
model = fargs[0]
pars = [getattr(model, name) for name in model.param_names]
bounds = [par.bounds for par in pars if not (par.fixed or par.tied)]
bounds = np.asarray(bounds)
for i in bounds:
if i[0] is None:
i[0] = DEFAULT_BOUNDS[0]
if i[1] is None:
i[1] = DEFAULT_BOUNDS[1]
# older versions of scipy require this array to be float
bounds = np.asarray(bounds, dtype=float)
eqcons = np.array(model.eqcons)
ineqcons = np.array(model.ineqcons)
fitparams, final_func_val, numiter, exit_mode, mess = self.opt_method(
objfunc, initval, args=fargs, full_output=True, disp=disp,
bounds=bounds, eqcons=eqcons, ieqcons=ineqcons,
**kwargs)
self.fit_info['final_func_val'] = final_func_val
self.fit_info['numiter'] = numiter
self.fit_info['exit_mode'] = exit_mode
self.fit_info['message'] = mess
if exit_mode != 0:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
return fitparams, self.fit_info
class Simplex(Optimization):
"""
Nelder-Mead (downhill simplex) algorithm.
This algorithm [1]_ only uses function values, not derivatives.
Uses `scipy.optimize.fmin`.
References
----------
.. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
"""
supported_constraints = ['bounds', 'fixed', 'tied']
def __init__(self):
from scipy.optimize import fmin as simplex
super().__init__(simplex)
self.fit_info = {
'final_func_val': None,
'numiter': None,
'exit_mode': None,
'num_function_calls': None
}
def __call__(self, objfunc, initval, fargs, **kwargs):
"""
Run the solver.
Parameters
----------
objfunc : callable
objective function
initval : iterable
initial guess for the parameter values
fargs : tuple
other arguments to be passed to the statistic function
kwargs : dict
other keyword arguments to be passed to the solver
"""
if 'maxiter' not in kwargs:
kwargs['maxiter'] = self._maxiter
if 'acc' in kwargs:
self._acc = kwargs['acc']
kwargs.pop('acc')
if 'xtol' in kwargs:
self._acc = kwargs['xtol']
kwargs.pop('xtol')
# Get the verbosity level
disp = kwargs.pop('verblevel', None)
fitparams, final_func_val, numiter, funcalls, exit_mode = self.opt_method(
objfunc, initval, args=fargs, xtol=self._acc, disp=disp,
full_output=True, **kwargs)
self.fit_info['final_func_val'] = final_func_val
self.fit_info['numiter'] = numiter
self.fit_info['exit_mode'] = exit_mode
self.fit_info['num_function_calls'] = funcalls
if self.fit_info['exit_mode'] == 1:
warnings.warn("The fit may be unsuccessful; "
"Maximum number of function evaluations reached.",
AstropyUserWarning)
if self.fit_info['exit_mode'] == 2:
warnings.warn("The fit may be unsuccessful; "
"Maximum number of iterations reached.",
AstropyUserWarning)
return fitparams, self.fit_info
|
f37129005be7b5868442fbbaf6843a376a59279b3a9968038c2029966e90a9b0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Implements rotations, including spherical rotations as defined in WCS Paper II
[1]_
`RotateNative2Celestial` and `RotateCelestial2Native` follow the convention in
WCS Paper II to rotate to/from a native sphere and the celestial sphere.
The implementation uses `EulerAngleRotation`. The model parameters are
three angles: the longitude (``lon``) and latitude (``lat``) of the fiducial point
in the celestial system (``CRVAL`` keywords in FITS), and the longitude of the celestial
pole in the native system (``lon_pole``). The Euler angles are ``lon+90``, ``90-lat``
and ``-(lon_pole-90)``.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import math
import numpy as np
from .core import Model
from .parameters import Parameter
from ..coordinates.matrix_utilities import rotation_matrix, matrix_product
from .. import units as u
from ..utils.decorators import deprecated
from .utils import _to_radian, _to_orig_unit
__all__ = ['RotateCelestial2Native', 'RotateNative2Celestial', 'Rotation2D',
'EulerAngleRotation']
class _EulerRotation:
"""
Base class which does the actual computation.
"""
_separable = False
def _create_matrix(self, phi, theta, psi, axes_order):
matrices = []
for angle, axis in zip([phi, theta, psi], axes_order):
if isinstance(angle, u.Quantity):
angle = angle.value
angle = np.asscalar(angle)
matrices.append(rotation_matrix(angle, axis, unit=u.rad))
result = matrix_product(*matrices[::-1])
return result
@staticmethod
def spherical2cartesian(alpha, delta):
alpha = np.deg2rad(alpha)
delta = np.deg2rad(delta)
x = np.cos(alpha) * np.cos(delta)
y = np.cos(delta) * np.sin(alpha)
z = np.sin(delta)
return np.array([x, y, z])
@staticmethod
def cartesian2spherical(x, y, z):
h = np.hypot(x, y)
alpha = np.rad2deg(np.arctan2(y, x))
delta = np.rad2deg(np.arctan2(z, h))
return alpha, delta
@deprecated(2.0)
@staticmethod
def rotation_matrix_from_angle(angle):
"""
Clockwise rotation matrix.
Parameters
----------
angle : float
Rotation angle in radians.
"""
return np.array([[math.cos(angle), math.sin(angle)],
[-math.sin(angle), math.cos(angle)]])
def evaluate(self, alpha, delta, phi, theta, psi, axes_order):
shape = None
if isinstance(alpha, np.ndarray) and alpha.ndim == 2:
alpha = alpha.flatten()
delta = delta.flatten()
shape = alpha.shape
inp = self.spherical2cartesian(alpha, delta)
matrix = self._create_matrix(phi, theta, psi, axes_order)
result = np.dot(matrix, inp)
a, b = self.cartesian2spherical(*result)
if shape is not None:
a.shape = shape
b.shape = shape
return a, b
input_units_strict = True
input_units_allow_dimensionless = True
@property
def input_units(self):
""" Input units. """
return {'alpha': u.deg, 'delta': u.deg}
@property
def return_units(self):
""" Output units. """
return {'alpha': u.deg, 'delta': u.deg}
class EulerAngleRotation(_EulerRotation, Model):
"""
Implements Euler angle intrinsic rotations.
Rotates one coordinate system into another (fixed) coordinate system.
All coordinate systems are right-handed. The sign of the angles is
determined by the right-hand rule.
Parameters
----------
phi, theta, psi : float or `~astropy.units.Quantity`
"proper" Euler angles in deg.
If floats, they should be in deg.
axes_order : str
A 3 character string, a combination of 'x', 'y' and 'z',
where each character denotes an axis in 3D space.
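Examples
--------
An illustrative sketch (a rotation by zero angles is the identity; output not shown):
>>> from astropy import units as u
>>> from astropy.modeling.models import EulerAngleRotation
>>> rot = EulerAngleRotation(0 * u.deg, 0 * u.deg, 0 * u.deg, axes_order='zxz')
>>> alpha, delta = rot(12 * u.deg, 23 * u.deg)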
"""
inputs = ('alpha', 'delta')
outputs = ('alpha', 'delta')
phi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
theta = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
psi = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, phi, theta, psi, axes_order, **kwargs):
self.axes = ['x', 'y', 'z']
if len(axes_order) != 3:
raise TypeError(
"Expected axes_order to be a character sequence of length 3,"
"got {0}".format(axes_order))
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError("Unrecognized axis label {0}; "
"should be one of {1} ".format(unrecognized, self.axes))
self.axes_order = axes_order
qs = [isinstance(par, u.Quantity) for par in [phi, theta, psi]]
if any(qs) and not all(qs):
raise TypeError("All parameters should be of the same type - float or Quantity.")
super().__init__(phi=phi, theta=theta, psi=psi, **kwargs)
@property
def inverse(self):
return self.__class__(phi=-self.psi,
theta=-self.theta,
psi=-self.phi,
axes_order=self.axes_order[::-1])
def evaluate(self, alpha, delta, phi, theta, psi):
a, b = super().evaluate(alpha, delta, phi, theta, psi, self.axes_order)
return a, b
class _SkyRotation(_EulerRotation, Model):
"""
Base class for RotateNative2Celestial and RotateCelestial2Native.
"""
lon = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
lat = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
lon_pole = Parameter(default=0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, lon, lat, lon_pole, **kwargs):
qs = [isinstance(par, u.Quantity) for par in [lon, lat, lon_pole]]
if any(qs) and not all(qs):
raise TypeError("All parameters should be of the same type - float or Quantity.")
super().__init__(lon, lat, lon_pole, **kwargs)
self.axes_order = 'zxz'
def _evaluate(self, phi, theta, lon, lat, lon_pole):
alpha, delta = super().evaluate(phi, theta, lon, lat, lon_pole,
self.axes_order)
mask = alpha < 0
if isinstance(mask, np.ndarray):
alpha[mask] += 360
elif mask:
# Scalar input: only wrap negative values into [0, 360).
alpha += 360
return alpha, delta
class RotateNative2Celestial(_SkyRotation):
"""
Transform from Native to Celestial Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity`
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity`
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity`
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg.
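Examples
--------
An illustrative sketch (parameter and input values are arbitrary; output not shown):
>>> from astropy.modeling.models import RotateNative2Celestial
>>> n2c = RotateNative2Celestial(5.6, -72.05, 180.)
>>> alpha, delta = n2c(0.1, 89.9)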
"""
#: Inputs are angles on the native sphere
inputs = ('phi_N', 'theta_N')
#: Outputs are angles on the celestial sphere
outputs = ('alpha_C', 'delta_C')
@property
def input_units(self):
""" Input units. """
return {'phi_N': u.deg, 'theta_N': u.deg}
@property
def return_units(self):
""" Output units. """
return {'alpha_C': u.deg, 'delta_C': u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
def evaluate(self, phi_N, theta_N, lon, lat, lon_pole):
"""
Parameters
----------
phi_N, theta_N : float (deg) or `~astropy.units.Quantity`
Angles in the Native coordinate system.
lon, lat, lon_pole : float (in deg) or `~astropy.units.Quantity`
Parameter values when the model was initialized.
Returns
-------
alpha_C, delta_C : float (deg) or `~astropy.units.Quantity`
Angles on the Celestial sphere.
"""
# The values are in radians since they have already been through the setter.
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = lon_pole - np.pi / 2
theta = - (np.pi / 2 - lat)
psi = -(np.pi / 2 + lon)
alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi)
return alpha_C, delta_C
@property
def inverse(self):
# convert to angles on the celestial sphere
return RotateCelestial2Native(self.lon, self.lat, self.lon_pole)
class RotateCelestial2Native(_SkyRotation):
"""
Transform from Celestial to Native Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity`
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity`
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity`
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they should be in units of deg.
"""
#: Inputs are angles on the celestial sphere
inputs = ('alpha_C', 'delta_C')
#: Outputs are angles on the native sphere
outputs = ('phi_N', 'theta_N')
@property
def input_units(self):
""" Input units. """
return {'alpha_C': u.deg, 'delta_C': u.deg}
@property
def return_units(self):
""" Output units. """
return {'phi_N': u.deg, 'theta_N': u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
def evaluate(self, alpha_C, delta_C, lon, lat, lon_pole):
"""
Parameters
----------
alpha_C, delta_C : float (deg) or `~astropy.units.Quantity`
Angles in the Celestial coordinate frame.
lon, lat, lon_pole : float (deg) or `~astropy.units.Quantity`
Parameter values when the model was initialized.
Returns
-------
phi_N, theta_N : float (deg) or `~astropy.units.Quantity`
Angles on the Native sphere.
"""
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = (np.pi / 2 + lon)
theta = (np.pi / 2 - lat)
psi = -(lon_pole - np.pi / 2)
phi_N, theta_N = super()._evaluate(alpha_C, delta_C, phi, theta, psi)
return phi_N, theta_N
@property
def inverse(self):
return RotateNative2Celestial(self.lon, self.lat, self.lon_pole)
class Rotation2D(Model):
"""
Perform a 2D rotation given an angle.
Positive angles represent a counter-clockwise rotation and vice-versa.
Parameters
----------
angle : float or `~astropy.units.Quantity`
Angle of rotation (if float it should be in deg).
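Examples
--------
A short sketch (rotating the point (1, 0) by 90 degrees; output not shown):
>>> from astropy.modeling.models import Rotation2D
>>> rot = Rotation2D(angle=90.)
>>> x, y = rot(1, 0)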
"""
inputs = ('x', 'y')
outputs = ('x', 'y')
_separable = False
angle = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
input_units_strict = True
input_units_allow_dimensionless = True
@property
def inverse(self):
"""Inverse rotation."""
return self.__class__(angle=-self.angle)
@classmethod
def evaluate(cls, x, y, angle):
"""
Rotate (x, y) about ``angle``.
Parameters
----------
x, y : ndarray-like
Input quantities
angle : float (deg) or `~astropy.units.Quantity`
Angle of rotation.
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
if isinstance(x, u.Quantity):
unit = x.unit
else:
unit = None
inarr = np.array([x.flatten(), y.flatten()])
if isinstance(angle, u.Quantity):
angle = angle.value
result = np.dot(cls._compute_matrix(angle), inarr)
x, y = result[0], result[1]
x.shape = y.shape = orig_shape
if unit is not None:
return u.Quantity(x, unit=unit), u.Quantity(y, unit=unit)
else:
return x, y
@staticmethod
def _compute_matrix(angle):
return np.array([[math.cos(angle), -math.sin(angle)],
[math.sin(angle), math.cos(angle)]],
dtype=np.float64)
|
49c587feb89d4da20c2e7735d206b168f98e4ab9949e42065655230b4af6f2af | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from os.path import join
from distutils.core import Extension
from distutils import log
from astropy_helpers import setup_helpers, utils
from astropy_helpers.version_helpers import get_pkg_version_module
wcs_setup_package = utils.import_file(join('astropy', 'wcs', 'setup_package.py'))
MODELING_ROOT = os.path.relpath(os.path.dirname(__file__))
MODELING_SRC = join(MODELING_ROOT, 'src')
SRC_FILES = [join(MODELING_SRC, 'projections.c.templ'),
__file__]
GEN_FILES = [join(MODELING_SRC, 'projections.c')]
# This defines the set of projection functions that we want to wrap.
# The key is the projection name, and the value is the number of
# parameters.
# (These are in the order that they appear in the WCS coordinate
# systems paper).
projections = {
'azp': 2,
'szp': 3,
'tan': 0,
'stg': 0,
'sin': 2,
'arc': 0,
'zea': 0,
'air': 1,
'cyp': 2,
'cea': 1,
'mer': 0,
'sfl': 0,
'par': 0,
'mol': 0,
'ait': 0,
'cop': 2,
'coe': 2,
'cod': 2,
'coo': 2,
'bon': 1,
'pco': 0,
'tsc': 0,
'csc': 0,
'qsc': 0,
'hpx': 2,
'xph': 0,
}
def pre_build_py_hook(cmd_obj):
preprocess_source()
def pre_build_ext_hook(cmd_obj):
preprocess_source()
def pre_sdist_hook(cmd_obj):
preprocess_source()
def preprocess_source():
# TODO: Move this to setup_helpers
# Generating the projections C source should only be done if needed. This
# also ensures that it is not done for any release tarball since those
# will include the generated projections.c.
if all(os.path.exists(filename) for filename in GEN_FILES):
# Determine modification times
src_mtime = max(os.path.getmtime(filename) for filename in SRC_FILES)
gen_mtime = min(os.path.getmtime(filename) for filename in GEN_FILES)
version = get_pkg_version_module('astropy')
if gen_mtime > src_mtime:
# If generated source is recent enough, don't update
return
elif version.release:
# or, if we're on a release, issue a warning, but go ahead and use
# the wrappers anyway
log.warn('WARNING: The autogenerated wrappers in '
'astropy.modeling._projections seem to be older '
'than the source templates used to create '
'them. Because this is a release version we will '
'use them anyway, but this might be a sign of '
'some sort of version mismatch or other '
'tampering. Or it might just mean you moved '
'some files around or otherwise accidentally '
'changed timestamps.')
return
# otherwise rebuild the autogenerated files
# If jinja2 isn't present, then print a warning and use existing files
try:
import jinja2 # pylint: disable=W0611
except ImportError:
log.warn("WARNING: jinja2 could not be imported, so the existing "
"modeling _projections.c file will be used")
return
from jinja2 import Environment, FileSystemLoader
# Prepare the jinja2 templating environment
env = Environment(loader=FileSystemLoader(MODELING_SRC))
c_in = env.get_template('projections.c.templ')
c_out = c_in.render(projections=projections)
with open(join(MODELING_SRC, 'projections.c'), 'w') as fd:
fd.write(c_out)
def get_package_data():
return {
'astropy.modeling.tests': ['data/*.fits', 'data/*.hdr',
'../../wcs/tests/maps/*.hdr']
}
def get_extensions():
wcslib_files = [ # List of wcslib files to compile
'prj.c',
'wcserr.c',
'wcsprintf.c',
'wcsutil.c'
]
wcslib_config_paths = [
join(MODELING_SRC, 'wcsconfig.h')
]
cfg = setup_helpers.DistutilsExtensionArgs()
wcs_setup_package.get_wcslib_cfg(cfg, wcslib_files, wcslib_config_paths)
cfg['include_dirs'].append(MODELING_SRC)
astropy_files = [ # List of astropy.modeling files to compile
'projections.c'
]
cfg['sources'].extend(join(MODELING_SRC, x) for x in astropy_files)
cfg['sources'] = [str(x) for x in cfg['sources']]
cfg = dict((str(key), val) for key, val in cfg.items())
return [Extension(str('astropy.modeling._projections'), **cfg)]
|
27ae6f39535c3c76b612a745a6d94047f3c6743283f470a17bfc46a28c39e12d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Functions to determine if a model is separable, i.e.
if the model outputs are independent.
It analyzes ``n_inputs``, ``n_outputs`` and the operators
in a compound model by stepping through the transforms
and creating a ``coord_matrix`` of shape (``n_outputs``, ``n_inputs``).
Each modeling operator is represented by a function which
takes two simple models (or two ``coord_matrix`` arrays) and
returns an array of shape (``n_outputs``, ``n_inputs``).
"""
import numpy as np
from .core import Model, _CompoundModel, ModelDefinitionError
from .mappings import Mapping
__all__ = ["is_separable"]
def is_separable(transform):
"""
A separability test for the outputs of a transform.
Parameters
----------
transform : `~astropy.modeling.core.Model`
A (compound) model.
Returns
-------
is_separable : ndarray
A boolean array with size ``transform.n_outputs`` where
each element indicates whether the output is independent
and the result of a separable transform.
Examples
--------
>>> from astropy.modeling.models import Shift, Scale, Rotation2D, Polynomial2D
>>> is_separable(Shift(1) & Shift(2) | Scale(1) & Scale(2))
array([ True, True]...)
>>> is_separable(Shift(1) & Shift(2) | Rotation2D(2))
array([False, False]...)
>>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]) | \
Polynomial2D(1) & Polynomial2D(2))
array([False, False]...)
>>> is_separable(Shift(1) & Shift(2) | Mapping([0, 1, 0, 1]))
array([ True, True, True, True]...)
"""
if transform.n_inputs == 1 and transform.n_outputs > 1:
is_separable = np.array([False] * transform.n_outputs).T
return is_separable
separable_matrix = _separable(transform)
is_separable = separable_matrix.sum(1)
is_separable = np.where(is_separable != 1, False, True)
return is_separable
def _compute_n_outputs(left, right):
"""
Compute the number of outputs of two models.
The two models are the left and right model to an operation in
the expression tree of a compound model.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is an array, it is the output of `_coord_matrix`.
"""
if isinstance(left, Model):
lnout = left.n_outputs
else:
lnout = left.shape[0]
if isinstance(right, Model):
rnout = right.n_outputs
else:
rnout = right.shape[0]
noutp = lnout + rnout
return noutp
def _arith_oper(left, right):
"""
Function corresponding to one of the arithmetic operators
['+', '-', '*', '/', '**'].
This always returns a nonseparable output.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
If input is an array, it is the output of `_coord_matrix`.
Returns
-------
result : ndarray
Result from this operation.
"""
# models have the same number of inputs and outputs
def _n_inputs_outputs(input):
if isinstance(input, Model):
n_outputs, n_inputs = input.n_outputs, input.n_inputs
else:
n_outputs, n_inputs = input.shape
return n_inputs, n_outputs
left_inputs, left_outputs = _n_inputs_outputs(left)
right_inputs, right_outputs = _n_inputs_outputs(right)
if left_inputs != right_inputs or left_outputs != right_outputs:
raise ModelDefinitionError(
"Unsupported operands for arithmetic operator: left (n_inputs={0}, "
"n_outputs={1}) and right (n_inputs={2}, n_outputs={3}); "
"models must have the same n_inputs and the same "
"n_outputs for this operator.".format(
left_inputs, left_outputs, right_inputs, right_outputs))
result = np.ones((left_outputs, left_inputs))
return result
def _coord_matrix(model, pos, noutp):
"""
Create an array representing inputs and outputs of a simple model.
The array has a shape (noutp, model.n_inputs).
Parameters
----------
model : `astropy.modeling.Model`
model
pos : str
Position of this model in the expression tree.
One of ['left', 'right'].
noutp : int
Number of outputs of the compound model of which the input model
is a left or right child.
"""
if isinstance(model, Mapping):
axes = []
for i in model.mapping:
axis = np.zeros((model.n_inputs,))
axis[i] = 1
axes.append(axis)
m = np.vstack(axes)
mat = np.zeros((noutp, model.n_inputs))
if pos == 'left':
mat[: model.n_outputs, :model.n_inputs] = m
else:
mat[-model.n_outputs:, -model.n_inputs:] = m
return mat
if not model.separable:
# this does not work for more than 2 coordinates
mat = np.zeros((noutp, model.n_inputs))
if pos == 'left':
mat[:model.n_outputs, : model.n_inputs] = 1
else:
mat[-model.n_outputs:, -model.n_inputs:] = 1
else:
mat = np.zeros((noutp, model.n_inputs))
for i in range(model.n_inputs):
mat[i, i] = 1
if pos == 'right':
mat = np.roll(mat, (noutp - model.n_outputs))
return mat
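# Illustrative sketch (not part of the astropy source): the shape of the
# matrices produced by ``_coord_matrix``.  A separable block is (a slice of)
# the identity matrix, while a non-separable block is all ones, i.e. every
# output depends on every input.  Assumes a square ``noutp == n_inputs`` case.
def _demo_coord_matrix(separable=True, noutp=2, n_inputs=2):
    mat = np.zeros((noutp, n_inputs))
    if separable:
        for i in range(n_inputs):
            mat[i, i] = 1   # each output depends only on its own input
    else:
        mat[:, :] = 1       # e.g. a rotation: outputs mix all inputs
    return mat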
def _cstack(left, right):
"""
Function corresponding to '&' operation.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
        If the input is an array, it is the output of ``_coord_matrix``.
Returns
-------
result : ndarray
Result from this operation.
"""
noutp = _compute_n_outputs(left, right)
if isinstance(left, Model):
cleft = _coord_matrix(left, 'left', noutp)
else:
cleft = np.zeros((noutp, left.shape[1]))
cleft[: left.shape[0], : left.shape[1]] = left
if isinstance(right, Model):
cright = _coord_matrix(right, 'right', noutp)
else:
cright = np.zeros((noutp, right.shape[1]))
        cright[-right.shape[0]:, -right.shape[1]:] = right
return np.hstack([cleft, cright])
def _cdot(left, right):
"""
Function corresponding to "|" operation.
Parameters
----------
left, right : `astropy.modeling.Model` or ndarray
        If the input is an array, it is the output of ``_coord_matrix``.
Returns
-------
result : ndarray
Result from this operation.
"""
left, right = right, left
def _n_inputs_outputs(input, position):
"""
Return ``n_inputs``, ``n_outputs`` for a model or coord_matrix.
"""
if isinstance(input, Model):
coords = _coord_matrix(input, position, input.n_outputs)
else:
coords = input
return coords
cleft = _n_inputs_outputs(left, 'left')
cright = _n_inputs_outputs(right, 'right')
try:
result = np.dot(cleft, cright)
except ValueError:
raise ModelDefinitionError(
'Models cannot be combined with the "|" operator; '
'left coord_matrix is {0}, right coord_matrix is {1}'.format(
cright, cleft))
return result
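# Illustrative sketch (not part of the astropy source): ``_cdot`` swaps its
# operands and reduces the "|" operator to a matrix product of coordinate
# matrices.  Composing a separable pair of shifts (identity matrix) with a
# rotation (all ones) therefore yields an all-ones, fully non-separable result.
def _demo_cdot():
    shifts = np.eye(2)                # Shift(1) & Shift(2)
    rotation = np.ones((2, 2))        # Rotation2D
    return np.dot(rotation, shifts)   # -> [[1., 1.], [1., 1.]]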
def _separable(transform):
"""
Calculate the separability of outputs.
Parameters
----------
transform : `astropy.modeling.Model`
A transform (usually a compound model).
Returns
-------
    is_separable : ndarray of dtype bool
        An array of shape (transform.n_outputs,) of boolean type.
        Each element represents the separability of the corresponding output.
"""
if isinstance(transform, _CompoundModel):
is_separable = transform._tree.evaluate(_operators)
elif isinstance(transform, Model):
is_separable = _coord_matrix(transform, 'left', transform.n_outputs)
return is_separable
# Maps modeling operators to functions that compute the relationship of
# input and output axes, represented as an array of 0s and 1s.
_operators = {'&': _cstack, '|': _cdot, '+': _arith_oper, '-': _arith_oper,
'*': _arith_oper, '/': _arith_oper, '**': _arith_oper}
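# Usage sketch (illustrative only): rows of the matrix returned by
# ``_separable`` correspond to outputs and columns to inputs, so a compound
# model built only from separable pieces gives an identity-like matrix while
# piping through a rotation couples everything:
#
#     >>> from astropy.modeling.models import Shift, Rotation2D
#     >>> _separable(Shift(1) & Shift(2))                    # doctest: +SKIP
#     array([[1., 0.],
#            [0., 1.]])
#     >>> _separable(Shift(1) & Shift(2) | Rotation2D(30))   # doctest: +SKIP
#     array([[1., 1.],
#            [1., 1.]])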
|
c8b8c18c672b907b8bb18ac0fd3468434593e3666bd3ec793cd7e0f27f1308ee | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides utility functions for the models package
"""
from collections import deque
from collections.abc import MutableMapping
from inspect import signature
import numpy as np
from ..utils import isiterable, check_broadcast
from ..utils.compat import NUMPY_LT_1_14
from .. import units as u
__all__ = ['ExpressionTree', 'AliasDict', 'check_broadcast',
'poly_map_domain', 'comb', 'ellipse_extent']
class ExpressionTree:
__slots__ = ['left', 'right', 'value']
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
# Two subtrees can't be the same *object* or else traverse_postorder
# breaks, so we just always copy the right subtree to subvert that.
if right is not None and left is right:
right = right.copy()
self.right = right
def __getstate__(self):
# For some reason the default pickle protocol on Python 2 does not just
# do this. On Python 3 it's not a problem.
return dict((slot, getattr(self, slot)) for slot in self.__slots__)
def __setstate__(self, state):
for slot, value in state.items():
setattr(self, slot, value)
@property
def isleaf(self):
return self.left is None and self.right is None
def traverse_preorder(self):
stack = deque([self])
while stack:
node = stack.pop()
yield node
if node.right is not None:
stack.append(node.right)
if node.left is not None:
stack.append(node.left)
def traverse_inorder(self):
stack = deque()
node = self
while stack or node is not None:
if node is not None:
stack.append(node)
node = node.left
else:
node = stack.pop()
yield node
node = node.right
    def traverse_postorder(self):
        stack = deque([self])
        last = None
        while stack:
            node = stack[-1]
            if last is None or node is last.left or node is last.right:
                # Descending: we reached ``node`` from its parent, so push a
                # child (left first) before visiting the node itself.
                if node.left is not None:
                    stack.append(node.left)
                elif node.right is not None:
                    stack.append(node.right)
            elif node.left is last and node.right is not None:
                # Finished the left subtree; descend into the right one.
                stack.append(node.right)
            else:
                # Both subtrees (if any) are done; visit the node.
                yield stack.pop()
            # ``last`` is updated on every iteration (not only after a yield)
            # so that leaf nodes fall through to the ``else`` branch above.
            last = node
def evaluate(self, operators, getter=None, start=0, stop=None):
"""Evaluate the expression represented by this tree.
``Operators`` should be a dictionary mapping operator names ('tensor',
'product', etc.) to a function that implements that operator for the
correct number of operands.
If given, ``getter`` is a function evaluated on each *leaf* node's
value before applying the operator between them. This could be used,
for example, to operate on an attribute of the node values rather than
directly on the node values. The ``getter`` is passed both the index
of the leaf (a count starting at 0 that is incremented after each leaf
is found) and the leaf node itself.
The ``start`` and ``stop`` arguments allow evaluating a sub-expression
within the expression tree.
TODO: Document this better.
"""
stack = deque()
if getter is None:
getter = lambda idx, value: value
if start is None:
start = 0
leaf_idx = 0
for node in self.traverse_postorder():
if node.isleaf:
# For a "tree" containing just a single operator at the root
# Also push the index of this leaf onto the stack, which will
# prove useful for evaluating subexpressions
stack.append((getter(leaf_idx, node.value), leaf_idx))
leaf_idx += 1
else:
operator = operators[node.value]
if len(stack) < 2:
# Skip this operator if there are not enough operands on
# the stack; this can happen if some operands were skipped
# when evaluating a sub-expression
continue
right = stack.pop()
left = stack.pop()
operands = []
for operand in (left, right):
# idx is the leaf index; -1 if not a leaf node
if operand[-1] == -1:
operands.append(operand)
else:
operand, idx = operand
if start <= idx and (stop is None or idx < stop):
operands.append((operand, idx))
if len(operands) == 2:
# evaluate the operator with the given operands and place
# the result on the stack (with -1 for the "leaf index"
                    # since this result is not a leaf node)
left, right = operands
stack.append((operator(left[0], right[0]), -1))
elif len(operands) == 0:
# Just push the left one back on the stack
# TODO: Explain and/or refactor this better
# This is here because even if both operands were "skipped"
# due to being outside the (start, stop) range, we've only
# skipped one operator. But there should be at least 2
# operators involving these operands, so we push the one
# from the left back onto the stack so that the next
# operator will be skipped as well. Should probably come
# up with an easier to follow way to write this algorithm
stack.append(left)
else:
# one or more of the operands was not included in the
# sub-expression slice, so don't evaluate the operator;
# instead place left over operands (if any) back on the
# stack for later use
stack.extend(operands)
return stack.pop()[0]
def copy(self):
# Hopefully this won't blow the stack for any practical case; if such a
# case arises that this won't work then I suppose we can find an
# iterative approach.
children = []
for child in (self.left, self.right):
if isinstance(child, ExpressionTree):
children.append(child.copy())
else:
children.append(child)
return self.__class__(self.value, left=children[0], right=children[1])
def format_expression(self, operator_precedence, format_leaf=None):
leaf_idx = 0
operands = deque()
if format_leaf is None:
format_leaf = lambda i, l: '[{0}]'.format(i)
for node in self.traverse_postorder():
if node.isleaf:
operands.append(format_leaf(leaf_idx, node))
leaf_idx += 1
continue
oper_order = operator_precedence[node.value]
right = operands.pop()
left = operands.pop()
if (node.left is not None and not node.left.isleaf and
operator_precedence[node.left.value] < oper_order):
left = '({0})'.format(left)
if (node.right is not None and not node.right.isleaf and
operator_precedence[node.right.value] < oper_order):
right = '({0})'.format(right)
operands.append(' '.join((left, node.value, right)))
return ''.join(operands)
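# Illustrative sketch (not part of the astropy source): evaluating a
# hand-built tree for the expression ``(2 + 3) * 4`` with a plain operator
# table.  In the modeling package the leaf values are models and the operator
# functions combine models rather than numbers.
def _demo_expression_tree():
    import operator
    tree = ExpressionTree(
        '*',
        left=ExpressionTree('+', left=ExpressionTree(2), right=ExpressionTree(3)),
        right=ExpressionTree(4))
    return tree.evaluate({'+': operator.add, '*': operator.mul})   # -> 20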
class AliasDict(MutableMapping):
"""
Creates a `dict` like object that wraps an existing `dict` or other
`MutableMapping`, along with a `dict` of *key aliases* that translate
between specific keys in this dict to different keys in the underlying
dict.
In other words, keys that do not have an associated alias are accessed and
stored like a normal `dict`. However, a key that has an alias is accessed
and stored to the "parent" dict via the alias.
Parameters
----------
parent : dict-like
        The parent `dict` that aliased keys are accessed from and stored to.
aliases : dict-like
Maps keys in this dict to their associated keys in the parent dict.
Examples
--------
>>> parent = {'a': 1, 'b': 2, 'c': 3}
>>> aliases = {'foo': 'a', 'bar': 'c'}
>>> alias_dict = AliasDict(parent, aliases)
>>> alias_dict['foo']
1
>>> alias_dict['bar']
3
Keys in the original parent dict are not visible if they were not
aliased::
>>> alias_dict['b']
Traceback (most recent call last):
...
KeyError: 'b'
Likewise, updates to aliased keys are reflected back in the parent dict::
>>> alias_dict['foo'] = 42
>>> alias_dict['foo']
42
>>> parent['a']
42
However, updates/insertions to keys that are *not* aliased are not
reflected in the parent dict::
>>> alias_dict['qux'] = 99
>>> alias_dict['qux']
99
>>> 'qux' in parent
False
In particular, updates on the `AliasDict` to a key that is equal to
one of the aliased keys in the parent dict does *not* update the parent
dict. For example, ``alias_dict`` aliases ``'foo'`` to ``'a'``. But
assigning to a key ``'a'`` on the `AliasDict` does not impact the
parent::
>>> alias_dict['a'] = 'nope'
>>> alias_dict['a']
'nope'
>>> parent['a']
42
"""
_store_type = dict
"""
Subclasses may override this to use other mapping types as the underlying
storage, for example an `OrderedDict`. However, even in this case
additional work may be needed to get things like the ordering right.
"""
def __init__(self, parent, aliases):
self._parent = parent
self._store = self._store_type()
self._aliases = dict(aliases)
def __getitem__(self, key):
if key in self._aliases:
try:
return self._parent[self._aliases[key]]
except KeyError:
raise KeyError(key)
return self._store[key]
def __setitem__(self, key, value):
if key in self._aliases:
self._parent[self._aliases[key]] = value
else:
self._store[key] = value
def __delitem__(self, key):
if key in self._aliases:
try:
del self._parent[self._aliases[key]]
except KeyError:
raise KeyError(key)
else:
del self._store[key]
def __iter__(self):
"""
First iterates over keys from the parent dict (if the aliased keys are
present in the parent), followed by any keys in the local store.
"""
for key, alias in self._aliases.items():
if alias in self._parent:
yield key
for key in self._store:
yield key
def __len__(self):
# TODO:
# This could be done more efficiently, but at present the use case for
# it is narrow if non-existent.
return len(list(iter(self)))
def __repr__(self):
# repr() just like any other dict--this should look transparent
store_copy = self._store_type()
for key, alias in self._aliases.items():
if alias in self._parent:
store_copy[key] = self._parent[alias]
store_copy.update(self._store)
return repr(store_copy)
class _BoundingBox(tuple):
"""
Base class for models with custom bounding box templates (methods that
return an actual bounding box tuple given some adjustable parameters--see
for example `~astropy.modeling.models.Gaussian1D.bounding_box`).
On these classes the ``bounding_box`` property still returns a `tuple`
giving the default bounding box for that instance of the model. But that
tuple may also be a subclass of this class that is callable, and allows
a new tuple to be returned using a user-supplied value for any adjustable
parameters to the bounding box.
"""
_model = None
def __new__(cls, input_, _model=None):
self = super().__new__(cls, input_)
if _model is not None:
# Bind this _BoundingBox (most likely a subclass) to a Model
# instance so that its __call__ can access the model
self._model = _model
return self
def __call__(self, *args, **kwargs):
raise NotImplementedError(
"This bounding box is fixed by the model and does not have "
"adjustable parameters.")
@classmethod
def validate(cls, model, bounding_box):
"""
Validate a given bounding box sequence against the given model (which
may be either a subclass of `~astropy.modeling.Model` or an instance
        thereof), so long as the ``.inputs`` attribute is defined.
Currently this just checks that the bounding_box is either a 2-tuple
of lower and upper bounds for 1-D models, or an N-tuple of 2-tuples
for N-D models.
This also returns a normalized version of the bounding_box input to
ensure it is always an N-tuple (even for the 1-D case).
"""
nd = model.n_inputs
if nd == 1:
if (not isiterable(bounding_box)
or np.shape(bounding_box) not in ((2,), (1, 2))):
raise ValueError(
"Bounding box for {0} model must be a sequence of length "
"2 consisting of a lower and upper bound, or a 1-tuple "
"containing such a sequence as its sole element.".format(
model.name))
if len(bounding_box) == 1:
return cls((tuple(bounding_box[0]),))
else:
return cls(tuple(bounding_box))
else:
if (not isiterable(bounding_box)
or np.shape(bounding_box) != (nd, 2)):
raise ValueError(
"Bounding box for {0} model must be a sequence of length "
"{1} (the number of model inputs) consisting of pairs of "
"lower and upper bounds for those inputs on which to "
"evaluate the model.".format(model.name, nd))
return cls(tuple(bounds) for bounds in bounding_box)
def make_binary_operator_eval(oper, f, g):
"""
Given a binary operator (as a callable of two arguments) ``oper`` and
two callables ``f`` and ``g`` which accept the same arguments,
returns a *new* function that takes the same arguments as ``f`` and ``g``,
but passes the outputs of ``f`` and ``g`` in the given ``oper``.
``f`` and ``g`` are assumed to return tuples (which may be 1-tuples). The
    given operator is applied element-wise to the tuple outputs.
Example
-------
>>> from operator import add
>>> def prod(x, y):
... return (x * y,)
...
>>> sum_of_prod = make_binary_operator_eval(add, prod, prod)
>>> sum_of_prod(3, 5)
(30,)
"""
return lambda inputs, params: \
tuple(oper(x, y) for x, y in zip(f(inputs, params),
g(inputs, params)))
def poly_map_domain(oldx, domain, window):
"""
Map domain into window by shifting and scaling.
Parameters
----------
oldx : array
original coordinates
domain : list or tuple of length 2
function domain
window : list or tuple of length 2
range into which to map the domain
"""
domain = np.array(domain, dtype=np.float64)
window = np.array(window, dtype=np.float64)
scl = (window[1] - window[0]) / (domain[1] - domain[0])
off = (window[0] * domain[1] - window[1] * domain[0]) / (domain[1] - domain[0])
return off + scl * oldx
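# Worked example (illustrative): mapping the domain [1, 5] onto the window
# [-1, 1] gives scl = 0.5 and off = -1.5, so 1 -> -1, 3 -> 0 and 5 -> 1:
#
#     >>> poly_map_domain(np.array([1., 3., 5.]), [1, 5], [-1, 1])
#     array([-1.,  0.,  1.])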
def comb(N, k):
"""
The number of combinations of N things taken k at a time.
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
"""
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for j in range(min(k, N - k)):
val = (val * (N - j)) / (j + 1)
return val
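# Worked example (illustrative): the running product avoids large factorials,
# e.g. comb(5, 2) evaluates as (1 * 5) / 1 -> (5 * 4) / 2:
#
#     >>> comb(5, 2)
#     10.0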
def array_repr_oneline(array):
"""
Represents a multi-dimensional Numpy array flattened onto a single line.
"""
sep = ',' if NUMPY_LT_1_14 else ', '
r = np.array2string(array, separator=sep, suppress_small=True)
return ' '.join(l.strip() for l in r.splitlines())
def combine_labels(left, right):
"""
For use with the join operator &: Combine left input/output labels with
right input/output labels.
If none of the labels conflict then this just returns a sum of tuples.
    However, if *any* of the labels conflict, this appends '0' to the left-hand
    labels and '1' to the right-hand labels so there is no ambiguity.
"""
if set(left).intersection(right):
left = tuple(l + '0' for l in left)
right = tuple(r + '1' for r in right)
return left + right
def ellipse_extent(a, b, theta):
"""
Calculates the extent of a box encapsulating a rotated 2D ellipse.
Parameters
----------
a : float or `~astropy.units.Quantity`
Major axis.
b : float or `~astropy.units.Quantity`
Minor axis.
theta : float or `~astropy.units.Quantity`
Rotation angle. If given as a floating-point value, it is assumed to be
in radians.
Returns
-------
offsets : tuple
The absolute value of the offset distances from the ellipse center that
define its bounding box region, ``(dx, dy)``.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Ellipse2D
from astropy.modeling.utils import ellipse_extent, render_model
amplitude = 1
x0 = 50
y0 = 50
a = 30
b = 10
theta = np.pi/4
model = Ellipse2D(amplitude, x0, y0, a, b, theta)
dx, dy = ellipse_extent(a, b, theta)
limits = [x0 - dx, x0 + dx, y0 - dy, y0 + dy]
model.bounding_box = limits
image = render_model(model)
plt.imshow(image, cmap='binary', interpolation='nearest', alpha=.5,
extent = limits)
plt.show()
"""
t = np.arctan2(-b * np.tan(theta), a)
dx = a * np.cos(t) * np.cos(theta) - b * np.sin(t) * np.sin(theta)
t = np.arctan2(b, a * np.tan(theta))
dy = b * np.sin(t) * np.cos(theta) + a * np.cos(t) * np.sin(theta)
if isinstance(dx, u.Quantity) or isinstance(dy, u.Quantity):
return np.abs(u.Quantity([dx, dy]))
else:
return np.abs([dx, dy])
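# Worked example (illustrative): for an unrotated ellipse the extents are just
# the semi-axes, and a 90-degree rotation (approximately) swaps them:
#
#     >>> ellipse_extent(2.0, 1.0, 0.0)
#     array([2., 1.])
#     >>> np.allclose(ellipse_extent(2.0, 1.0, np.pi / 2), [1.0, 2.0])
#     True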
def get_inputs_and_params(func):
"""
Given a callable, determine the input variables and the
parameters.
Parameters
----------
func : callable
Returns
-------
inputs, params : tuple
Each entry is a list of inspect.Parameter objects
"""
sig = signature(func)
inputs = []
params = []
for param in sig.parameters.values():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("Signature must not have *args or **kwargs")
if param.default == param.empty:
inputs.append(param)
else:
params.append(param)
return inputs, params
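# Illustrative sketch (not part of the astropy source): arguments without a
# default become model inputs, arguments with a default become parameters.
def _demo_get_inputs_and_params():
    def line(x, slope=1.0, intercept=0.0):   # x has no default -> input
        return slope * x + intercept
    inputs, params = get_inputs_and_params(line)
    return [p.name for p in inputs], [p.name for p in params]
    # -> (['x'], ['slope', 'intercept'])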
def _parameter_with_unit(parameter, unit):
if parameter.unit is None:
return parameter.value * unit
else:
return parameter.quantity.to(unit)
def _parameter_without_unit(value, old_unit, new_unit):
if old_unit is None:
return value
else:
return value * old_unit.to(new_unit)
def _combine_equivalency_dict(keys, eq1=None, eq2=None):
# Given two dictionaries that give equivalencies for a set of keys, for
# example input value names, return a dictionary that includes all the
# equivalencies
eq = {}
for key in keys:
eq[key] = []
if eq1 is not None and key in eq1:
eq[key].extend(eq1[key])
if eq2 is not None and key in eq2:
eq[key].extend(eq2[key])
return eq
def _to_radian(value):
""" Convert ``value`` to radian. """
if isinstance(value, u.Quantity):
return value.to(u.rad)
else:
return np.deg2rad(value)
def _to_orig_unit(value, raw_unit=None, orig_unit=None):
""" Convert value with ``raw_unit`` to ``orig_unit``. """
if raw_unit is not None:
return (value * raw_unit).to(orig_unit)
else:
return np.rad2deg(value)
|
ec60aca1b59ad1e72bc4578b6067c1bed8b18be8aa1c3c8a60898d00d55e7404 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
from collections import OrderedDict
import numpy as np
from .core import (Fittable1DModel, Fittable2DModel,
ModelDefinitionError)
from .parameters import Parameter, InputParameterError
from .utils import ellipse_extent
from ..stats.funcs import gaussian_sigma_to_fwhm
from .. import units as u
from ..units import Quantity, UnitsError
__all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D',
'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D',
'Gaussian2D', 'Linear1D', 'Lorentz1D',
'MexicanHat1D', 'MexicanHat2D', 'RedshiftScaleFactor',
'Scale', 'Sersic1D', 'Sersic2D', 'Shift', 'Sine1D', 'Trapezoid1D',
'TrapezoidDisk2D', 'Ring2D', 'Voigt1D']
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
mean : float
Mean of the Gaussian.
stddev : float
Standard deviation of the Gaussian.
Notes
-----
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(default=1)
mean = Parameter(default=0)
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None))
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
(-11.0, 11.0)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
(-4.0, 4.0)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * gaussian_sigma_to_fwhm
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
else:
return {'x': self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('mean', inputs_unit['x']),
('stddev', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
x_mean : float
Mean of the Gaussian in x.
y_mean : float
Mean of the Gaussian in y.
x_stddev : float or None
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or None
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float, optional
Rotation angle in radians. The rotation angle increases
counterclockwise. Must be None if a covariance matrix (``cov_matrix``)
is provided. If no ``cov_matrix`` is given, ``None`` means the default
value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1)
x_mean = Parameter(default=0)
y_mean = Parameter(default=0)
x_stddev = Parameter(default=1)
y_stddev = Parameter(default=1)
theta = Parameter(default=0.0)
def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default,
y_mean=y_mean.default, x_stddev=None, y_stddev=None,
theta=None, cov_matrix=None, **kwargs):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError("Cannot specify both cov_matrix and "
"x/y_stddev/theta")
else:
                # Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
# TODO: Maybe it should be possible for the covariance matrix
# to be some (x, y, ..., z, 2, 2) array to be broadcast with
# other parameters of shape (x, y, ..., z)
# But that's maybe a special case to work out if/when needed
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault('bounds', {})
kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None))
kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * gaussian_sigma_to_fwhm
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * gaussian_sigma_to_fwhm
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
((-11.0, 11.0), (-5.5, 5.5))
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
((-4.0, 4.0), (-2.0, 2.0))
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx))
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
else:
return {'x': self.x_mean.unit,
'y': self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_mean', inputs_unit['x']),
('y_mean', inputs_unit['x']),
('x_stddev', inputs_unit['x']),
('y_stddev', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
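# Illustrative sketch (not part of the astropy source): how ``Gaussian2D``
# converts a covariance matrix into axis standard deviations and a rotation
# angle (see ``Gaussian2D.__init__`` above).  For a diagonal covariance
# matrix the angle is zero and the standard deviations are the square roots
# of the diagonal entries.
def _demo_cov_matrix_to_params(cov_matrix=((4.0, 0.0), (0.0, 1.0))):
    cov_matrix = np.array(cov_matrix)
    eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
    x_stddev, y_stddev = np.sqrt(eig_vals)
    y_vec = eig_vecs[:, 0]
    theta = np.arctan2(y_vec[1], y_vec[0])
    return x_stddev, y_stddev, theta   # -> (2.0, 1.0, 0.0) for the default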
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
"""
inputs = ('x',)
outputs = ('x',)
offset = Parameter(default=0)
linear = True
input_units_strict = True
input_units_allow_dimensionless = True
@property
def input_units(self):
if self.offset.unit is None:
return None
else:
return {'x': self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function"""
inv = self.copy()
inv.offset *= -1
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function"""
if isinstance(offset, u.Quantity):
return_unit = offset.unit
offset = offset.value
if isinstance(x, u.Quantity):
x = x.value
return (x + offset) * return_unit
else:
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model"""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter"""
d_offset = np.ones_like(x)
return [d_offset]
class Scale(Fittable1DModel):
"""
Multiply a model by a factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
"""
inputs = ('x',)
outputs = ('x',)
factor = Parameter(default=1)
linear = True
fittable = True
input_units_strict = True
input_units_allow_dimensionless = True
@property
def input_units(self):
if self.factor.unit is None:
return None
else:
return {'x': self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function"""
inv = self.copy()
inv.factor = 1 / self.factor
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function"""
if isinstance(factor, u.Quantity):
return_unit = factor.unit
factor = factor.value
if isinstance(x, u.Quantity):
return (x.value * factor) * return_unit
else:
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter"""
d_factor = x
return [d_factor]
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
"""
z = Parameter(description='redshift', default=0)
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function"""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative"""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model"""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
return inv
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
        Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1)
r_eff = Parameter(default=1)
n = Parameter(default=4)
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
            except ImportError:
raise ImportError('Sersic1D model requires scipy > 0.11.')
return (amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)))
@property
def input_units(self):
if self.r_eff.unit is None:
return None
else:
return {'x': self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('r_eff', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
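# Illustrative sketch (not part of the astropy source): the constant b_n used
# by ``Sersic1D`` solves Gamma(2n) = 2 gamma(b_n, 2n), i.e. it is where the
# regularized lower incomplete gamma function reaches one half.  Needs scipy.
def _demo_sersic_bn(n=4):
    from scipy.special import gammaincinv   # optional dependency
    return gammaincinv(2 * n, 0.5)           # b_4 is approximately 7.67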
class Sine1D(Fittable1DModel):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
amplitude = Parameter(default=1)
frequency = Parameter(default=1)
phase = Parameter(default=0)
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
# quantity-ness from argument in this case (another option would be to
# multiply by * u.rad but this would be slower overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (TWOPI * x * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
d_phase = (TWOPI * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def input_units(self):
if self.frequency.unit is None:
return None
else:
return {'x': 1. / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('frequency', inputs_unit['x'] ** -1),
('amplitude', outputs_unit['y'])])
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
"""
slope = Parameter(default=1)
intercept = Parameter(default=0)
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function"""
return slope * x + intercept
@staticmethod
def fit_deriv(x, slope, intercept):
"""One dimensional Line model derivative with respect to parameters"""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
new_slope = self.slope ** -1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
else:
return {'x': self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('intercept', outputs_unit['y']),
('slope', outputs_unit['y'] / inputs_unit['x'])])
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the straight line in X
slope_y : float
Slope of the straight line in Y
intercept : float
Z-intercept of the straight line
See Also
--------
Linear1D, Polynomial2D
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
"""
slope_x = Parameter(default=1)
slope_y = Parameter(default=1)
intercept = Parameter(default=0)
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function"""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model derivative with respect to parameters"""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
See Also
--------
Gaussian1D, Box1D, MexicanHat1D
Notes
-----
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
fwhm = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function"""
return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 +
(fwhm / 2.) ** 2))
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters"""
d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2)
d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) /
(fwhm ** 2 + (x - x_0) ** 2))
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('fwhm', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float
Position of the peak
amplitude_L : float
The Lorentzian amplitude
fwhm_L : float
The Lorentzian full width at half maximum
fwhm_G : float
The Gaussian full width at half maximum
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Algorithm for the computation taken from
McLean, A. B., Mitchell, C. E. J. & Swanston, D. M. Implementation of an
efficient analytical approximation to the Voigt function for photoemission
lineshape analysis. Journal of Electron Spectroscopy and Related Phenomena
69, 125-132 (1994)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0)
amplitude_L = Parameter(default=1)
fwhm_L = Parameter(default=2/np.pi)
fwhm_G = Parameter(default=np.log(2))
_abcd = np.array([
[-1.2150, -1.3509, -1.2150, -1.3509], # A
[1.2359, 0.3786, -1.2359, -0.3786], # B
[-0.3085, 0.5906, -0.3085, 0.5906], # C
[0.0210, -1.1858, -0.0210, 1.1858]]) # D
@classmethod
def evaluate(cls, x, x_0, amplitude_L, fwhm_L, fwhm_G):
A, B, C, D = cls._abcd
sqrt_ln2 = np.sqrt(np.log(2))
X = (x - x_0) * 2 * sqrt_ln2 / fwhm_G
X = np.atleast_1d(X)[..., np.newaxis]
Y = fwhm_L * sqrt_ln2 / fwhm_G
Y = np.atleast_1d(Y)[..., np.newaxis]
V = np.sum((C * (Y - A) + D * (X - B))/(((Y - A) ** 2 + (X - B) ** 2)), axis=-1)
return (fwhm_L * amplitude_L * np.sqrt(np.pi) * sqrt_ln2 / fwhm_G) * V
@classmethod
def fit_deriv(cls, x, x_0, amplitude_L, fwhm_L, fwhm_G):
A, B, C, D = cls._abcd
sqrt_ln2 = np.sqrt(np.log(2))
X = (x - x_0) * 2 * sqrt_ln2 / fwhm_G
X = np.atleast_1d(X)[:, np.newaxis]
Y = fwhm_L * sqrt_ln2 / fwhm_G
Y = np.atleast_1d(Y)[:, np.newaxis]
constant = fwhm_L * amplitude_L * np.sqrt(np.pi) * sqrt_ln2 / fwhm_G
alpha = C * (Y - A) + D * (X - B)
beta = (Y - A) ** 2 + (X - B) ** 2
V = np.sum((alpha / beta), axis=-1)
dVdx = np.sum((D/beta - 2 * (X - B) * alpha / np.square(beta)), axis=-1)
dVdy = np.sum((C/beta - 2 * (Y - A) * alpha / np.square(beta)), axis=-1)
dyda = [-constant * dVdx * 2 * sqrt_ln2 / fwhm_G,
constant * V / amplitude_L,
constant * (V / fwhm_L + dVdy * sqrt_ln2 / fwhm_G),
-constant * (V + (sqrt_ln2 / fwhm_G) * (2 * (x - x_0) * dVdx + fwhm_L * dVdy)) / fwhm_G]
return dyda
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('fwhm_L', inputs_unit['x']),
('fwhm_G', inputs_unit['x']),
('amplitude_L', outputs_unit['y'])])
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
else:
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters"""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('amplitude', outputs_unit['y'])])
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter(default=1)
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
else:
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('amplitude', outputs_unit['z'])])
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float
The rotation angle in radians of the semimajor axis. The
rotation angle increases counterclockwise from the positive x
axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red',
facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
a = Parameter(default=1)
b = Parameter(default=1)
theta = Parameter(default=0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.)
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('a', inputs_unit['x']),
('b', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
R_0 = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function"""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0 ** 2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return ((self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('R_0', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
r_in = Parameter(default=1)
width = Parameter(default=1)
def __init__(self, amplitude=amplitude.default, x_0=x_0.default,
y_0=y_0.default, r_in=r_in.default, width=width.default,
r_out=None, **kwargs):
# If outer radius explicitly given, it overrides default width.
if r_out is not None:
if width != self.width.default:
raise InputParameterError(
"Cannot specify both width and outer radius separately.")
width = r_out - r_in
elif width is None:
width = self.width.default
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width,
**kwargs)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('r_in', inputs_unit['x']),
('width', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Delta1D(Fittable1DModel):
"""One dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Delta2D(Fittable2DModel):
"""Two dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
width = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function"""
inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.)
result = np.select([inside], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@classmethod
def fit_deriv(cls, x, amplitude, x_0, width):
"""One dimensional Box model derivative with respect to parameters"""
d_amplitude = cls.evaluate(x, 1, x_0, width)
d_x_0 = np.zeros_like(x)
d_width = np.zeros_like(x)
return [d_amplitude, d_x_0, d_width]
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('width', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
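Examples
--------
A short evaluation sketch on a coordinate grid (illustrative values)::
import numpy as np
from astropy.modeling.models import Box2D
box = Box2D(amplitude=1, x_0=0, y_0=0, x_width=2, y_width=4)
x, y = np.meshgrid(np.linspace(-3, 3, 7), np.linspace(-3, 3, 7))
z = box(x, y)
# z equals the amplitude where |x| <= 1 and |y| <= 2, and 0 elsewhere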
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
x_width = Parameter(default=1)
y_width = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function"""
x_range = np.logical_and(x >= x_0 - x_width / 2.,
x <= x_0 + x_width / 2.)
y_range = np.logical_and(y >= y_0 - y_width / 2.,
y <= y_0 + y_width / 2.)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['y']),
('x_width', inputs_unit['x']),
('y_width', inputs_unit['y']),
('amplitude', outputs_unit['z'])])
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
width = Parameter(default=1)
slope = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function"""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.
x3 = x_0 + width / 2.
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('width', inputs_unit['x']),
('slope', outputs_unit['y'] / inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
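Examples
--------
A minimal evaluation sketch along a radial cut (illustrative values)::
import numpy as np
from astropy.modeling.models import TrapezoidDisk2D
t = TrapezoidDisk2D(amplitude=2, x_0=0, y_0=0, R_0=3, slope=1)
r = np.array([0.0, 3.0, 4.0, 6.0])
z = t(r, 0 * r)
# constant (= amplitude) for r <= R_0, then decreasing linearly with the
# given slope until it reaches 0 at r = R_0 + amplitude / slope = 5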
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
R_0 = Parameter(default=1)
slope = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function"""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('R_0', inputs_unit['x']),
('slope', outputs_unit['z'] / inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class MexicanHat1D(Fittable1DModel):
"""
One dimensional Mexican Hat model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Mexican hat
See Also
--------
MexicanHat2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import MexicanHat1D
plt.figure()
s1 = MexicanHat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
sigma = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Mexican Hat model function"""
xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('sigma', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class MexicanHat2D(Fittable2DModel):
"""
Two dimensional symmetric Mexican Hat model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Mexican hat
See Also
--------
MexicanHat1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
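Examples
--------
A minimal sketch evaluating the profile along the x axis (qualitative
behaviour only)::
import numpy as np
from astropy.modeling.models import MexicanHat2D
m = MexicanHat2D(amplitude=1, x_0=0, y_0=0, sigma=1)
x = np.linspace(-4, 4, 9)
z = m(x, 0 * x)
# z peaks at the amplitude for r = 0, crosses zero at r = sqrt(2) * sigma
# and approaches 0 from below for large r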
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
sigma = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Mexican Hat model function"""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2)
return amplitude * (1 - rr_ww) * np.exp(- rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('sigma', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
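Examples
--------
A small sketch relating the ``radius`` parameter to the 1.22 lambda / D
relation quoted above (numbers are illustrative; scipy is required)::
import numpy as np
from astropy.modeling.models import AiryDisk2D
lam = 550e-9  # wavelength in m
d = 0.1  # aperture diameter in m
radius = np.degrees(1.22 * lam / d)  # first-zero radius in degrees
airy = AiryDisk2D(amplitude=1, x_0=0, y_0=0, radius=radius)
r = np.array([0.0, radius])
z = airy(r, 0 * r)
# z[0] is the peak (= amplitude) and z[1] is ~0, the first zero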
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
radius = Parameter(default=1)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function"""
if cls._rz is None:
try:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
except ValueError:
raise ImportError('AiryDisk2D model requires scipy > 0.11.')
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('radius', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
gamma = Parameter(default=1)
alpha = Parameter(default=1)
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <http://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * self.gamma * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function"""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters"""
d_A = (1 + (x - x_0) ** 2 / gamma ** 2) ** (-alpha)
d_x_0 = (-amplitude * alpha * d_A * (-2 * x + 2 * x_0) /
(gamma ** 2 * (1 + (x - x_0) ** 2 / gamma ** 2)))
d_gamma = (2 * amplitude * alpha * d_A * (x - x_0) ** 2 /
(gamma ** 3 * (1 + (x - x_0) ** 2 / gamma ** 2)))
d_alpha = -amplitude * d_A * np.log(1 + (x - x_0) ** 2 / gamma ** 2)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('gamma', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
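Examples
--------
A minimal sketch relating ``gamma`` and ``alpha`` to the full width at
half maximum exposed by the ``fwhm`` property (illustrative values)::
from astropy.modeling.models import Moffat2D
m = Moffat2D(amplitude=1, x_0=0, y_0=0, gamma=2, alpha=3)
half_point = m(m.fwhm / 2, 0)
# half a FWHM away from the peak the profile drops to amplitude / 2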
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
gamma = Parameter(default=1)
alpha = Parameter(default=1)
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <http://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * self.gamma * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = (-amplitude * alpha * d_A * (-2 * x + 2 * x_0) /
(gamma ** 2 * (1 + rr_gg)))
d_y_0 = (-amplitude * alpha * d_A * (-2 * y + 2 * y_0) /
(gamma ** 2 * (1 + rr_gg)))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = 2 * amplitude * alpha * d_A * (rr_gg / (gamma * (1 + rr_gg)))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('gamma', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Central surface brightness, within r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float, optional
Rotation angle in radians, counterclockwise from
the positive x-axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2], update_ticks=True)
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1)
r_eff = Parameter(default=1)
n = Parameter(default=4)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
ellip = Parameter(default=0)
theta = Parameter(default=0)
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
except ValueError:
raise ImportError('Sersic2D model requires scipy > 0.11.')
bn = cls._gammaincinv(2. * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('r_eff', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
Implements projections--particularly sky projections defined in WCS Paper II
[1]_.
All angles are set and displayed in degrees, but internally the computations are
performed in radians. All functions expect inputs and outputs in degrees.
References
----------
.. [1] Calabretta, M.R., Greisen, E.W., 2002, A&A, 395, 1077 (Paper II)
"""
import abc
import numpy as np
from .core import Model
from .parameters import Parameter, InputParameterError
from .. import units as u
from . import _projections
from .utils import _to_radian, _to_orig_unit
projcodes = [
'AZP', 'SZP', 'TAN', 'STG', 'SIN', 'ARC', 'ZEA', 'AIR', 'CYP',
'CEA', 'CAR', 'MER', 'SFL', 'PAR', 'MOL', 'AIT', 'COP', 'COE',
'COD', 'COO', 'BON', 'PCO', 'TSC', 'CSC', 'QSC', 'HPX', 'XPH'
]
__all__ = ['Projection', 'Pix2SkyProjection', 'Sky2PixProjection',
'Zenithal', 'Cylindrical', 'PseudoCylindrical', 'Conic',
'PseudoConic', 'QuadCube', 'HEALPix',
'AffineTransformation2D',
'projcodes',
'Pix2Sky_ZenithalPerspective', 'Sky2Pix_ZenithalPerspective',
'Pix2Sky_SlantZenithalPerspective', 'Sky2Pix_SlantZenithalPerspective',
'Pix2Sky_Gnomonic', 'Sky2Pix_Gnomonic',
'Pix2Sky_Stereographic', 'Sky2Pix_Stereographic',
'Pix2Sky_SlantOrthographic', 'Sky2Pix_SlantOrthographic',
'Pix2Sky_ZenithalEquidistant', 'Sky2Pix_ZenithalEquidistant',
'Pix2Sky_ZenithalEqualArea', 'Sky2Pix_ZenithalEqualArea',
'Pix2Sky_Airy', 'Sky2Pix_Airy',
'Pix2Sky_CylindricalPerspective', 'Sky2Pix_CylindricalPerspective',
'Pix2Sky_CylindricalEqualArea', 'Sky2Pix_CylindricalEqualArea',
'Pix2Sky_PlateCarree', 'Sky2Pix_PlateCarree',
'Pix2Sky_Mercator', 'Sky2Pix_Mercator',
'Pix2Sky_SansonFlamsteed', 'Sky2Pix_SansonFlamsteed',
'Pix2Sky_Parabolic', 'Sky2Pix_Parabolic',
'Pix2Sky_Molleweide', 'Sky2Pix_Molleweide',
'Pix2Sky_HammerAitoff', 'Sky2Pix_HammerAitoff',
'Pix2Sky_ConicPerspective', 'Sky2Pix_ConicPerspective',
'Pix2Sky_ConicEqualArea', 'Sky2Pix_ConicEqualArea',
'Pix2Sky_ConicEquidistant', 'Sky2Pix_ConicEquidistant',
'Pix2Sky_ConicOrthomorphic', 'Sky2Pix_ConicOrthomorphic',
'Pix2Sky_BonneEqualArea', 'Sky2Pix_BonneEqualArea',
'Pix2Sky_Polyconic', 'Sky2Pix_Polyconic',
'Pix2Sky_TangentialSphericalCube', 'Sky2Pix_TangentialSphericalCube',
'Pix2Sky_COBEQuadSphericalCube', 'Sky2Pix_COBEQuadSphericalCube',
'Pix2Sky_QuadSphericalCube', 'Sky2Pix_QuadSphericalCube',
'Pix2Sky_HEALPix', 'Sky2Pix_HEALPix',
'Pix2Sky_HEALPixPolar', 'Sky2Pix_HEALPixPolar',
# The following are short FITS WCS aliases
'Pix2Sky_AZP', 'Sky2Pix_AZP',
'Pix2Sky_SZP', 'Sky2Pix_SZP',
'Pix2Sky_TAN', 'Sky2Pix_TAN',
'Pix2Sky_STG', 'Sky2Pix_STG',
'Pix2Sky_SIN', 'Sky2Pix_SIN',
'Pix2Sky_ARC', 'Sky2Pix_ARC',
'Pix2Sky_ZEA', 'Sky2Pix_ZEA',
'Pix2Sky_AIR', 'Sky2Pix_AIR',
'Pix2Sky_CYP', 'Sky2Pix_CYP',
'Pix2Sky_CEA', 'Sky2Pix_CEA',
'Pix2Sky_CAR', 'Sky2Pix_CAR',
'Pix2Sky_MER', 'Sky2Pix_MER',
'Pix2Sky_SFL', 'Sky2Pix_SFL',
'Pix2Sky_PAR', 'Sky2Pix_PAR',
'Pix2Sky_MOL', 'Sky2Pix_MOL',
'Pix2Sky_AIT', 'Sky2Pix_AIT',
'Pix2Sky_COP', 'Sky2Pix_COP',
'Pix2Sky_COE', 'Sky2Pix_COE',
'Pix2Sky_COD', 'Sky2Pix_COD',
'Pix2Sky_COO', 'Sky2Pix_COO',
'Pix2Sky_BON', 'Sky2Pix_BON',
'Pix2Sky_PCO', 'Sky2Pix_PCO',
'Pix2Sky_TSC', 'Sky2Pix_TSC',
'Pix2Sky_CSC', 'Sky2Pix_CSC',
'Pix2Sky_QSC', 'Sky2Pix_QSC',
'Pix2Sky_HPX', 'Sky2Pix_HPX',
'Pix2Sky_XPH', 'Sky2Pix_XPH'
]
class Projection(Model):
"""Base class for all sky projections."""
# Radius of the generating sphere.
# This sets the circumference to 360 deg so that arc length is measured in deg.
r0 = 180 * u.deg / np.pi
_separable = False
@property
@abc.abstractmethod
def inverse(self):
"""
Inverse projection--all projection models must provide an inverse.
"""
class Pix2SkyProjection(Projection):
"""Base class for all Pix2Sky projections."""
inputs = ('x', 'y')
outputs = ('phi', 'theta')
input_units_strict = True
input_units_allow_dimensionless = True
@property
def input_units(self):
return {'x': u.deg, 'y': u.deg}
@property
def return_units(self):
return {'phi': u.deg, 'theta': u.deg}
class Sky2PixProjection(Projection):
"""Base class for all Sky2Pix projections."""
inputs = ('phi', 'theta')
outputs = ('x', 'y')
input_units_strict = True
input_units_allow_dimensionless = True
@property
def input_units(self):
return {'phi': u.deg, 'theta': u.deg}
@property
def return_units(self):
return {'x': u.deg, 'y': u.deg}
class Zenithal(Projection):
r"""Base class for all Zenithal projections.
Zenithal (or azimuthal) projections map the sphere directly onto a
plane. All zenithal projections are specified by defining the
radius as a function of native latitude, :math:`R_\theta`.
The pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg(-y, x) \\
R_\theta &= \sqrt{x^2 + y^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin \phi \\
y &= R_\theta \cos \phi
"""
_separable = False
class Pix2Sky_ZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Zenithal perspective projection - pixel to sky.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
\phi &= \arg(-y \cos \gamma, x) \\
\theta &= \left\{\genfrac{}{}{0pt}{}{\psi - \omega}{\psi + \omega + 180^{\circ}}\right.
where:
.. math::
\psi &= \arg(\rho, 1) \\
\omega &= \sin^{-1}\left(\frac{\rho \mu}{\sqrt{\rho^2 + 1}}\right) \\
\rho &= \frac{R}{\frac{180^{\circ}}{\pi}(\mu + 1) + y \sin \gamma} \\
R &= \sqrt{x^2 + y^2 \cos^2 \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
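Examples
--------
A minimal round-trip sketch; inputs and outputs are in degrees and the
parameter values are illustrative::
from astropy.modeling import models
p2s = models.Pix2Sky_ZenithalPerspective(mu=2, gamma=30)
phi, theta = p2s(10, 20)
x, y = p2s.inverse(phi, theta)
# (x, y) recovers (10, 20) to within floating point error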
"""
mu = Parameter(default=0.0)
gamma = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
def __init__(self, mu=mu.default, gamma=gamma.default, **kwargs):
# units : mu - in spherical radii, gamma - in deg
# TODO: Support quantity objects here and in similar contexts
super().__init__(mu, gamma, **kwargs)
@mu.validator
def mu(self, value):
if np.any(value == -1):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1")
@property
def inverse(self):
return Sky2Pix_ZenithalPerspective(self.mu.value, self.gamma.value)
@classmethod
def evaluate(cls, x, y, mu, gamma):
return _projections.azpx2s(x, y, mu, _to_orig_unit(gamma))
Pix2Sky_AZP = Pix2Sky_ZenithalPerspective
class Sky2Pix_ZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Zenithal perspective projection - sky to pixel.
Corresponds to the ``AZP`` projection in FITS WCS.
.. math::
x &= R \sin \phi \\
y &= -R \sec \gamma \cos \theta
where:
.. math::
R = \frac{180^{\circ}}{\pi} \frac{(\mu + 1) \cos \theta}{(\mu + \sin \theta) + \cos \theta \cos \phi \tan \gamma}
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
gamma : float
Look angle γ in degrees. Default is 0°.
"""
mu = Parameter(default=0.0)
gamma = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
@mu.validator
def mu(self, value):
if np.any(value == -1):
raise InputParameterError(
"Zenithal perspective projection is not defined for mu = -1")
@property
def inverse(self):
return Pix2Sky_AZP(self.mu.value, self.gamma.value)
@classmethod
def evaluate(cls, phi, theta, mu, gamma):
return _projections.azps2x(
phi, theta, mu, _to_orig_unit(gamma))
Sky2Pix_AZP = Sky2Pix_ZenithalPerspective
class Pix2Sky_SlantZenithalPerspective(Pix2SkyProjection, Zenithal):
r"""
Slant zenithal perspective projection - pixel to sky.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
def _validate_mu(mu):
if np.asarray(mu == -1).any():
raise ValueError(
"Zenithal perspective projection is not defined for mu=-1")
return mu
mu = Parameter(default=0.0, setter=_validate_mu)
phi0 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
theta0 = Parameter(default=90.0, getter=_to_orig_unit, setter=_to_radian)
@property
def inverse(self):
return Sky2Pix_SlantZenithalPerspective(
self.mu.value, self.phi0.value, self.theta0.value)
@classmethod
def evaluate(cls, x, y, mu, phi0, theta0):
return _projections.szpx2s(
x, y, mu, _to_orig_unit(phi0), _to_orig_unit(theta0))
Pix2Sky_SZP = Pix2Sky_SlantZenithalPerspective
class Sky2Pix_SlantZenithalPerspective(Sky2PixProjection, Zenithal):
r"""
Slant zenithal perspective projection - sky to pixel.
Corresponds to the ``SZP`` projection in FITS WCS.
Parameters
----------
mu : float
Distance from point of projection to center of sphere
in spherical radii, μ. Default is 0.
phi0 : float
The longitude φ₀ of the reference point, in degrees. Default
is 0°.
theta0 : float
The latitude θ₀ of the reference point, in degrees. Default
is 90°.
"""
def _validate_mu(mu):
if np.asarray(mu == -1).any():
raise ValueError("Zenithal perspective projection is not defined for mu=-1")
return mu
mu = Parameter(default=0.0, setter=_validate_mu)
phi0 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
theta0 = Parameter(default=90.0, getter=_to_orig_unit, setter=_to_radian)
@property
def inverse(self):
return Pix2Sky_SlantZenithalPerspective(
self.mu.value, self.phi0.value, self.theta0.value)
@classmethod
def evaluate(cls, phi, theta, mu, phi0, theta0):
return _projections.szps2x(
phi, theta, mu, _to_orig_unit(phi0), _to_orig_unit(theta0))
Sky2Pix_SZP = Sky2Pix_SlantZenithalPerspective
class Pix2Sky_Gnomonic(Pix2SkyProjection, Zenithal):
r"""
Gnomonic projection - pixel to sky.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = \tan^{-1}\left(\frac{180^{\circ}}{\pi R_\theta}\right)
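Examples
--------
A short round-trip sketch (degrees in, degrees out)::
from astropy.modeling import models
tan_pix2sky = models.Pix2Sky_Gnomonic()  # also exported as Pix2Sky_TAN
phi, theta = tan_pix2sky(3.0, 4.0)
x, y = tan_pix2sky.inverse(phi, theta)
# (x, y) recovers (3.0, 4.0) up to floating point error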
"""
@property
def inverse(self):
return Sky2Pix_Gnomonic()
@classmethod
def evaluate(cls, x, y):
return _projections.tanx2s(x, y)
Pix2Sky_TAN = Pix2Sky_Gnomonic
class Sky2Pix_Gnomonic(Sky2PixProjection, Zenithal):
r"""
Gnomonic Projection - sky to pixel.
Corresponds to the ``TAN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cot \theta
"""
@property
def inverse(self):
return Pix2Sky_Gnomonic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.tans2x(phi, theta)
Sky2Pix_TAN = Sky2Pix_Gnomonic
class Pix2Sky_Stereographic(Pix2SkyProjection, Zenithal):
r"""
Stereographic Projection - pixel to sky.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^{\circ} - 2 \tan^{-1}\left(\frac{\pi R_\theta}{360^{\circ}}\right)
"""
@property
def inverse(self):
return Sky2Pix_Stereographic()
@classmethod
def evaluate(cls, x, y):
return _projections.stgx2s(x, y)
Pix2Sky_STG = Pix2Sky_Stereographic
class Sky2Pix_Stereographic(Sky2PixProjection, Zenithal):
r"""
Stereographic Projection - sky to pixel.
Corresponds to the ``STG`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\frac{2 \cos \theta}{1 + \sin \theta}
"""
@property
def inverse(self):
return Pix2Sky_Stereographic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.stgs2x(phi, theta)
Sky2Pix_STG = Sky2Pix_Stereographic
class Pix2Sky_SlantOrthographic(Pix2SkyProjection, Zenithal):
r"""
Slant orthographic projection - pixel to sky.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
\theta = \cos^{-1}\left(\frac{\pi}{180^{\circ}}R_\theta\right)
The parameters :math:`\xi` and :math:`\eta` are defined from the
reference point :math:`(\phi_c, \theta_c)` as:
.. math::
\xi &= \cot \theta_c \sin \phi_c \\
\eta &= - \cot \theta_c \cos \phi_c
Parameters
----------
xi : float
Obliqueness parameter, ξ. Default is 0.0.
eta : float
Obliqueness parameter, η. Default is 0.0.
"""
xi = Parameter(default=0.0)
eta = Parameter(default=0.0)
@property
def inverse(self):
return Sky2Pix_SlantOrthographic(self.xi.value, self.eta.value)
@classmethod
def evaluate(cls, x, y, xi, eta):
return _projections.sinx2s(x, y, xi, eta)
Pix2Sky_SIN = Pix2Sky_SlantOrthographic
class Sky2Pix_SlantOrthographic(Sky2PixProjection, Zenithal):
r"""
Slant orthographic projection - sky to pixel.
Corresponds to the ``SIN`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
The following transformation applies when :math:`\xi` and
:math:`\eta` are both zero.
.. math::
R_\theta = \frac{180^{\circ}}{\pi}\cos \theta
But more specifically are:
.. math::
x &= \frac{180^\circ}{\pi}[\cos \theta \sin \phi + \xi(1 - \sin \theta)] \\
y &= \frac{180^\circ}{\pi}[\cos \theta \cos \phi + \eta(1 - \sin \theta)]
"""
xi = Parameter(default=0.0)
eta = Parameter(default=0.0)
@property
def inverse(self):
return Pix2Sky_SlantOrthographic(self.xi.value, self.eta.value)
@classmethod
def evaluate(cls, phi, theta, xi, eta):
return _projections.sins2x(phi, theta, xi, eta)
Sky2Pix_SIN = Sky2Pix_SlantOrthographic
class Pix2Sky_ZenithalEquidistant(Pix2SkyProjection, Zenithal):
r"""
Zenithal equidistant projection - pixel to sky.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - R_\theta
"""
@property
def inverse(self):
return Sky2Pix_ZenithalEquidistant()
@classmethod
def evaluate(cls, x, y):
return _projections.arcx2s(x, y)
Pix2Sky_ARC = Pix2Sky_ZenithalEquidistant
class Sky2Pix_ZenithalEquidistant(Sky2PixProjection, Zenithal):
r"""
Zenithal equidistant projection - sky to pixel.
Corresponds to the ``ARC`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = 90^\circ - \theta
"""
@property
def inverse(self):
return Pix2Sky_ZenithalEquidistant()
@classmethod
def evaluate(cls, phi, theta):
return _projections.arcs2x(phi, theta)
Sky2Pix_ARC = Sky2Pix_ZenithalEquidistant
class Pix2Sky_ZenithalEqualArea(Pix2SkyProjection, Zenithal):
r"""
Zenithal equal area projection - pixel to sky.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
"""
@property
def inverse(self):
return Sky2Pix_ZenithalEqualArea()
@classmethod
def evaluate(cls, x, y):
return _projections.zeax2s(x, y)
Pix2Sky_ZEA = Pix2Sky_ZenithalEqualArea
class Sky2Pix_ZenithalEqualArea(Sky2PixProjection, Zenithal):
r"""
Zenithal equal area projection - sky to pixel.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta &= \frac{180^\circ}{\pi} \sqrt{2(1 - \sin\theta)} \\
&= \frac{360^\circ}{\pi} \sin\left(\frac{90^\circ - \theta}{2}\right)
"""
@property
def inverse(self):
return Pix2Sky_ZenithalEqualArea()
@classmethod
def evaluate(cls, phi, theta):
return _projections.zeas2x(phi, theta)
Sky2Pix_ZEA = Sky2Pix_ZenithalEqualArea
class Pix2Sky_Airy(Pix2SkyProjection, Zenithal):
r"""
Airy projection - pixel to sky.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = Parameter(default=90.0)
@property
def inverse(self):
return Sky2Pix_Airy(self.theta_b.value)
@classmethod
def evaluate(cls, x, y, theta_b):
return _projections.airx2s(x, y, theta_b)
Pix2Sky_AIR = Pix2Sky_Airy
class Sky2Pix_Airy(Sky2PixProjection, Zenithal):
r"""
Airy - sky to pixel.
Corresponds to the ``AIR`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
R_\theta = -2 \frac{180^\circ}{\pi}\left(\frac{\ln(\cos \xi)}{\tan \xi} + \frac{\ln(\cos \xi_b)}{\tan^2 \xi_b} \tan \xi \right)
where:
.. math::
\xi &= \frac{90^\circ - \theta}{2} \\
\xi_b &= \frac{90^\circ - \theta_b}{2}
Parameters
----------
theta_b : float
The latitude :math:`\theta_b` at which to minimize the error,
in degrees. Default is 90°.
"""
theta_b = Parameter(default=90.0)
@property
def inverse(self):
return Pix2Sky_Airy(self.theta_b.value)
@classmethod
def evaluate(cls, phi, theta, theta_b):
return _projections.airs2x(phi, theta, theta_b)
Sky2Pix_AIR = Sky2Pix_Airy
class Cylindrical(Projection):
r"""Base class for Cylindrical projections.
Cylindrical projections are so-named because the surface of
projection is a cylinder.
"""
_separable = True
class Pix2Sky_CylindricalPerspective(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical perspective - pixel to sky.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\lambda} \\
\theta &= \arg(1, \eta) + \sin^{-1}\left(\frac{\eta \mu}{\sqrt{\eta^2 + 1}}\right)
where:
.. math::
\eta = \frac{\pi}{180^{\circ}}\frac{y}{\mu + \lambda}
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
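Examples
--------
A round-trip sketch with the default parameter values (degrees in,
degrees out; input values are illustrative)::
from astropy.modeling import models
cyp = models.Pix2Sky_CylindricalPerspective(mu=1, lam=1)
phi, theta = cyp(5.0, 10.0)
x, y = cyp.inverse(phi, theta)
# (x, y) recovers (5.0, 10.0) to within floating point error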
"""
mu = Parameter(default=1.0)
lam = Parameter(default=1.0)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError(
"CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError(
"CYP projection is not defined for lambda = -mu")
@property
def inverse(self):
return Sky2Pix_CylindricalPerspective(self.mu.value, self.lam.value)
@classmethod
def evaluate(cls, x, y, mu, lam):
return _projections.cypx2s(x, y, mu, lam)
Pix2Sky_CYP = Pix2Sky_CylindricalPerspective
class Sky2Pix_CylindricalPerspective(Sky2PixProjection, Cylindrical):
r"""
Cylindrical Perspective - sky to pixel.
Corresponds to the ``CYP`` projection in FITS WCS.
.. math::
x &= \lambda \phi \\
y &= \frac{180^{\circ}}{\pi}\left(\frac{\mu + \lambda}{\mu + \cos \theta}\right)\sin \theta
Parameters
----------
mu : float
Distance from center of sphere in the direction opposite the
projected surface, in spherical radii, μ. Default is 1.
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
mu = Parameter(default=1.0)
lam = Parameter(default=1.0)
@mu.validator
def mu(self, value):
if np.any(value == -self.lam):
raise InputParameterError(
"CYP projection is not defined for mu = -lambda")
@lam.validator
def lam(self, value):
if np.any(value == -self.mu):
raise InputParameterError(
"CYP projection is not defined for lambda = -mu")
@property
def inverse(self):
return Pix2Sky_CylindricalPerspective(self.mu, self.lam)
@classmethod
def evaluate(cls, phi, theta, mu, lam):
return _projections.cyps2x(phi, theta, mu, lam)
Sky2Pix_CYP = Sky2Pix_CylindricalPerspective
class Pix2Sky_CylindricalEqualArea(Pix2SkyProjection, Cylindrical):
r"""
Cylindrical equal area projection - pixel to sky.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^{\circ}}\lambda y\right)
Parameters
----------
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = Parameter(default=1)
@property
def inverse(self):
return Sky2Pix_CylindricalEqualArea(self.lam)
@classmethod
def evaluate(cls, x, y, lam):
return _projections.ceax2s(x, y, lam)
Pix2Sky_CEA = Pix2Sky_CylindricalEqualArea
class Sky2Pix_CylindricalEqualArea(Sky2PixProjection, Cylindrical):
r"""
Cylindrical equal area projection - sky to pixel.
Corresponds to the ``CEA`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\frac{\sin \theta}{\lambda}
Parameters
----------
lam : float
Radius of the cylinder in spherical radii, λ. Default is 1.
"""
lam = Parameter(default=1)
@property
def inverse(self):
return Pix2Sky_CylindricalEqualArea(self.lam)
@classmethod
def evaluate(cls, phi, theta, lam):
return _projections.ceas2x(phi, theta, lam)
Sky2Pix_CEA = Sky2Pix_CylindricalEqualArea
class Pix2Sky_PlateCarree(Pix2SkyProjection, Cylindrical):
r"""
Plate carrée projection - pixel to sky.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= y
"""
@property
def inverse(self):
return Sky2Pix_PlateCarree()
@staticmethod
def evaluate(x, y):
# The intermediate variables are only used here for clarity
phi = np.array(x, copy=True)
theta = np.array(y, copy=True)
return phi, theta
Pix2Sky_CAR = Pix2Sky_PlateCarree
class Sky2Pix_PlateCarree(Sky2PixProjection, Cylindrical):
r"""
Plate carrée projection - sky to pixel.
Corresponds to the ``CAR`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \theta
"""
@property
def inverse(self):
return Pix2Sky_PlateCarree()
@staticmethod
def evaluate(phi, theta):
# The intermediate variables are only used here for clarity
x = np.array(phi, copy=True)
y = np.array(theta, copy=True)
return x, y
Sky2Pix_CAR = Sky2Pix_PlateCarree
class Pix2Sky_Mercator(Pix2SkyProjection, Cylindrical):
r"""
Mercator - pixel to sky.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
\phi &= x \\
\theta &= 2 \tan^{-1}\left(e^{y \pi / 180^{\circ}}\right)-90^{\circ}
"""
@property
def inverse(self):
return Sky2Pix_Mercator()
@classmethod
def evaluate(cls, x, y):
return _projections.merx2s(x, y)
Pix2Sky_MER = Pix2Sky_Mercator
class Sky2Pix_Mercator(Sky2PixProjection, Cylindrical):
r"""
Mercator - sky to pixel.
Corresponds to the ``MER`` projection in FITS WCS.
.. math::
x &= \phi \\
y &= \frac{180^{\circ}}{\pi}\ln \tan \left(\frac{90^{\circ} + \theta}{2}\right)
"""
@property
def inverse(self):
return Pix2Sky_Mercator()
@classmethod
def evaluate(cls, phi, theta):
return _projections.mers2x(phi, theta)
Sky2Pix_MER = Sky2Pix_Mercator
class PseudoCylindrical(Projection):
r"""Base class for pseudocylindrical projections.
Pseudocylindrical projections are like cylindrical projections
except the parallels of latitude are projected at diminishing
lengths toward the polar regions in order to reduce lateral
distortion there. Consequently, the meridians are curved.
"""
_separable = True
class Pix2Sky_SansonFlamsteed(Pix2SkyProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - pixel to sky.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
\phi &= \frac{x}{\cos y} \\
\theta &= y
"""
@property
def inverse(self):
return Sky2Pix_SansonFlamsteed()
@classmethod
def evaluate(cls, x, y):
return _projections.sflx2s(x, y)
Pix2Sky_SFL = Pix2Sky_SansonFlamsteed
class Sky2Pix_SansonFlamsteed(Sky2PixProjection, PseudoCylindrical):
r"""
Sanson-Flamsteed projection - sky to pixel.
Corresponds to the ``SFL`` projection in FITS WCS.
.. math::
x &= \phi \cos \theta \\
y &= \theta
"""
@property
def inverse(self):
return Pix2Sky_SansonFlamsteed()
@classmethod
def evaluate(cls, phi, theta):
return _projections.sfls2x(phi, theta)
Sky2Pix_SFL = Sky2Pix_SansonFlamsteed
class Pix2Sky_Parabolic(Pix2SkyProjection, PseudoCylindrical):
r"""
Parabolic projection - pixel to sky.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
\phi &= \frac{180^\circ}{\pi} \frac{x}{1 - 4(y / 180^\circ)^2} \\
\theta &= 3 \sin^{-1}\left(\frac{y}{180^\circ}\right)
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_Parabolic()
@classmethod
def evaluate(cls, x, y):
return _projections.parx2s(x, y)
Pix2Sky_PAR = Pix2Sky_Parabolic
class Sky2Pix_Parabolic(Sky2PixProjection, PseudoCylindrical):
r"""
Parabolic projection - sky to pixel.
Corresponds to the ``PAR`` projection in FITS WCS.
.. math::
x &= \phi \left(2\cos\frac{2\theta}{3} - 1\right) \\
y &= 180^\circ \sin \frac{\theta}{3}
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_Parabolic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.pars2x(phi, theta)
Sky2Pix_PAR = Sky2Pix_Parabolic
class Pix2Sky_Molleweide(Pix2SkyProjection, PseudoCylindrical):
r"""
Molleweide's projection - pixel to sky.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi x}{2 \sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}} \\
\theta &= \sin^{-1}\left(\frac{1}{90^\circ}\sin^{-1}\left(\frac{\pi}{180^\circ}\frac{y}{\sqrt{2}}\right) + \frac{y}{180^\circ}\sqrt{2 - \left(\frac{\pi}{180^\circ}y\right)^2}\right)
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_Molleweide()
@classmethod
def evaluate(cls, x, y):
return _projections.molx2s(x, y)
Pix2Sky_MOL = Pix2Sky_Molleweide
class Sky2Pix_Molleweide(Sky2PixProjection, PseudoCylindrical):
r"""
Molleweide's projection - sky to pixel.
Corresponds to the ``MOL`` projection in FITS WCS.
.. math::
x &= \frac{2 \sqrt{2}}{\pi} \phi \cos \gamma \\
y &= \sqrt{2} \frac{180^\circ}{\pi} \sin \gamma
where :math:`\gamma` is defined as the solution of the
transcendental equation:
.. math::
\sin \theta = \frac{\gamma}{90^\circ} + \frac{\sin 2 \gamma}{\pi}
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_Molleweide()
@classmethod
def evaluate(cls, phi, theta):
return _projections.mols2x(phi, theta)
Sky2Pix_MOL = Sky2Pix_Molleweide
class Pix2Sky_HammerAitoff(Pix2SkyProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - pixel to sky.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
\phi &= 2 \arg \left(2Z^2 - 1, \frac{\pi}{180^\circ} \frac{Z}{2}x\right) \\
\theta &= \sin^{-1}\left(\frac{\pi}{180^\circ}yZ\right)
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_HammerAitoff()
@classmethod
def evaluate(cls, x, y):
return _projections.aitx2s(x, y)
Pix2Sky_AIT = Pix2Sky_HammerAitoff
class Sky2Pix_HammerAitoff(Sky2PixProjection, PseudoCylindrical):
r"""
Hammer-Aitoff projection - sky to pixel.
Corresponds to the ``AIT`` projection in FITS WCS.
.. math::
x &= 2 \gamma \cos \theta \sin \frac{\phi}{2} \\
y &= \gamma \sin \theta
where:
.. math::
\gamma = \frac{180^\circ}{\pi} \sqrt{\frac{2}{1 + \cos \theta \cos(\phi / 2)}}
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_HammerAitoff()
@classmethod
def evaluate(cls, phi, theta):
return _projections.aits2x(phi, theta)
Sky2Pix_AIT = Sky2Pix_HammerAitoff
class Conic(Projection):
r"""Base class for conic projections.
In conic projections, the sphere is thought to be projected onto
the surface of a cone which is then opened out.
In a general sense, the pixel-to-sky transformation is defined as:
.. math::
\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right) / C \\
R_\theta &= \mathrm{sign} \theta_a \sqrt{x^2 + (Y_0 - y)^2}
and the inverse (sky-to-pixel) is defined as:
.. math::
x &= R_\theta \sin (C \phi) \\
y &= R_\theta \cos (C \phi) + Y_0
where :math:`C` is the "constant of the cone":
.. math::
C = \frac{180^\circ \cos \theta}{\pi R_\theta}
"""
sigma = Parameter(default=90.0, getter=_to_orig_unit, setter=_to_radian)
delta = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
_separable = False
class Pix2Sky_ConicPerspective(Pix2SkyProjection, Conic):
r"""
Colles' conic perspective projection - pixel to sky.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Sky2Pix_ConicPerspective(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.copx2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COP = Pix2Sky_ConicPerspective
class Sky2Pix_ConicPerspective(Sky2PixProjection, Conic):
r"""
Colles' conic perspective projection - sky to pixel.
Corresponds to the ``COP`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \sin \theta_a \\
R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\
Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicPerspective(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.cops2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COP = Sky2Pix_ConicPerspective
class Pix2Sky_ConicEqualArea(Pix2SkyProjection, Conic):
r"""
Albers' conic equal area projection - pixel to sky.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
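Examples
--------
A brief sketch of building the model from two standard parallels
theta_1 and theta_2, using the parametrization described above
(illustrative values)::
from astropy.modeling import models
theta1, theta2 = 50.0, 20.0
coe = models.Pix2Sky_ConicEqualArea(sigma=(theta1 + theta2) / 2,
delta=(theta1 - theta2) / 2)
phi, theta = coe(1.0, 2.0)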
"""
@property
def inverse(self):
return Sky2Pix_ConicEqualArea(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.coex2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COE = Pix2Sky_ConicEqualArea
class Sky2Pix_ConicEqualArea(Sky2PixProjection, Conic):
r"""
Albers' conic equal area projection - sky to pixel.
Corresponds to the ``COE`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \gamma / 2 \\
R_\theta &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin \theta} \\
Y_0 &= \frac{180^\circ}{\pi} \frac{2}{\gamma} \sqrt{1 + \sin \theta_1 \sin \theta_2 - \gamma \sin((\theta_1 + \theta_2)/2)}
where:
.. math::
\gamma = \sin \theta_1 + \sin \theta_2
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicEqualArea(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.coes2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COE = Sky2Pix_ConicEqualArea
class Pix2Sky_ConicEquidistant(Pix2SkyProjection, Conic):
r"""
Conic equidistant projection - pixel to sky.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Sky2Pix_ConicEquidistant(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.codx2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COD = Pix2Sky_ConicEquidistant
class Sky2Pix_ConicEquidistant(Sky2PixProjection, Conic):
r"""
Conic equidistant projection - sky to pixel.
Corresponds to the ``COD`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \frac{180^\circ}{\pi} \frac{\sin\theta_a\sin\eta}{\eta} \\
R_\theta &= \theta_a - \theta + \eta\cot\eta\cot\theta_a \\
Y_0 &= \eta\cot\eta\cot\theta_a
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicEquidistant(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.cods2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COD = Sky2Pix_ConicEquidistant
class Pix2Sky_ConicOrthomorphic(Pix2SkyProjection, Conic):
r"""
Conic orthomorphic projection - pixel to sky.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Sky2Pix_ConicOrthomorphic(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, x, y, sigma, delta):
return _projections.coox2s(x, y, _to_orig_unit(sigma), _to_orig_unit(delta))
Pix2Sky_COO = Pix2Sky_ConicOrthomorphic
class Sky2Pix_ConicOrthomorphic(Sky2PixProjection, Conic):
r"""
Conic orthomorphic projection - sky to pixel.
Corresponds to the ``COO`` projection in FITS WCS.
See `Conic` for a description of the entire equation.
The projection formulæ are:
.. math::
C &= \frac{\ln \left( \frac{\cos\theta_2}{\cos\theta_1} \right)}
{\ln \left[ \frac{\tan\left(\frac{90^\circ-\theta_2}{2}\right)}
{\tan\left(\frac{90^\circ-\theta_1}{2}\right)} \right] } \\
R_\theta &= \psi \left[ \tan \left( \frac{90^\circ - \theta}{2} \right) \right]^C \\
Y_0 &= \psi \left[ \tan \left( \frac{90^\circ - \theta_a}{2} \right) \right]^C
where:
.. math::
\psi = \frac{180^\circ}{\pi} \frac{\cos \theta}
{C\left[\tan\left(\frac{90^\circ-\theta}{2}\right)\right]^C}
Parameters
----------
sigma : float
:math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 90.
delta : float
:math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and
:math:`\theta_2` are the latitudes of the standard parallels,
in degrees. Default is 0.
"""
@property
def inverse(self):
return Pix2Sky_ConicOrthomorphic(self.sigma.value, self.delta.value)
@classmethod
def evaluate(cls, phi, theta, sigma, delta):
return _projections.coos2x(phi, theta,
_to_orig_unit(sigma), _to_orig_unit(delta))
Sky2Pix_COO = Sky2Pix_ConicOrthomorphic
class PseudoConic(Projection):
r"""Base class for pseudoconic projections.
Pseudoconics are a subclass of conics with concentric parallels.
"""
class Pix2Sky_BonneEqualArea(Pix2SkyProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - pixel to sky.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
\phi &= \frac{\pi}{180^\circ} A_\phi R_\theta / \cos \theta \\
\theta &= Y_0 - R_\theta
where:
.. math::
R_\theta &= \mathrm{sign} \theta_1 \sqrt{x^2 + (Y_0 - y)^2} \\
A_\phi &= \arg\left(\frac{Y_0 - y}{R_\theta}, \frac{x}{R_\theta}\right)
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
theta1 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
_separable = True
@property
def inverse(self):
return Sky2Pix_BonneEqualArea(self.theta1.value)
@classmethod
def evaluate(cls, x, y, theta1):
return _projections.bonx2s(x, y, _to_orig_unit(theta1))
Pix2Sky_BON = Pix2Sky_BonneEqualArea
class Sky2Pix_BonneEqualArea(Sky2PixProjection, PseudoConic):
r"""
Bonne's equal area pseudoconic projection - sky to pixel.
Corresponds to the ``BON`` projection in FITS WCS.
.. math::
x &= R_\theta \sin A_\phi \\
y &= -R_\theta \cos A_\phi + Y_0
where:
.. math::
A_\phi &= \frac{180^\circ}{\pi R_\theta} \phi \cos \theta \\
R_\theta &= Y_0 - \theta \\
Y_0 &= \frac{180^\circ}{\pi} \cot \theta_1 + \theta_1
Parameters
----------
theta1 : float
Bonne conformal latitude, in degrees.
"""
theta1 = Parameter(default=0.0, getter=_to_orig_unit, setter=_to_radian)
_separable = True
@property
def inverse(self):
return Pix2Sky_BonneEqualArea(self.theta1.value)
@classmethod
def evaluate(cls, phi, theta, theta1):
return _projections.bons2x(phi, theta,
_to_orig_unit(theta1))
Sky2Pix_BON = Sky2Pix_BonneEqualArea
class Pix2Sky_Polyconic(Pix2SkyProjection, PseudoConic):
r"""
Polyconic projection - pixel to sky.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_Polyconic()
@classmethod
def evaluate(cls, x, y):
return _projections.pcox2s(x, y)
Pix2Sky_PCO = Pix2Sky_Polyconic
class Sky2Pix_Polyconic(Sky2PixProjection, PseudoConic):
r"""
Polyconic projection - sky to pixel.
Corresponds to the ``PCO`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_Polyconic()
@classmethod
def evaluate(cls, phi, theta):
return _projections.pcos2x(phi, theta)
Sky2Pix_PCO = Sky2Pix_Polyconic
class QuadCube(Projection):
r"""Base class for quad cube projections.
Quadrilateralized spherical cube (quad-cube) projections belong to
the class of polyhedral projections in which the sphere is
projected onto the surface of an enclosing polyhedron.
The six faces of the quad-cube projections are numbered and laid
out as::
0
4 3 2 1 4 3 2
5
"""
class Pix2Sky_TangentialSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Tangential spherical cube projection - pixel to sky.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_TangentialSphericalCube()
@classmethod
def evaluate(cls, x, y):
return _projections.tscx2s(x, y)
Pix2Sky_TSC = Pix2Sky_TangentialSphericalCube
class Sky2Pix_TangentialSphericalCube(Sky2PixProjection, QuadCube):
r"""
Tangential spherical cube projection - sky to pixel.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_TangentialSphericalCube()
@classmethod
def evaluate(cls, phi, theta):
return _projections.tscs2x(phi, theta)
Sky2Pix_TSC = Sky2Pix_TangentialSphericalCube
class Pix2Sky_COBEQuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_COBEQuadSphericalCube()
@classmethod
def evaluate(cls, x, y):
return _projections.cscx2s(x, y)
Pix2Sky_CSC = Pix2Sky_COBEQuadSphericalCube
class Sky2Pix_COBEQuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_COBEQuadSphericalCube()
@classmethod
def evaluate(cls, phi, theta):
return _projections.cscs2x(phi, theta)
Sky2Pix_CSC = Sky2Pix_COBEQuadSphericalCube
class Pix2Sky_QuadSphericalCube(Pix2SkyProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_QuadSphericalCube()
@classmethod
def evaluate(cls, x, y):
return _projections.qscx2s(x, y)
Pix2Sky_QSC = Pix2Sky_QuadSphericalCube
class Sky2Pix_QuadSphericalCube(Sky2PixProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_QuadSphericalCube()
@classmethod
def evaluate(cls, phi, theta):
return _projections.qscs2x(phi, theta)
Sky2Pix_QSC = Sky2Pix_QuadSphericalCube
class HEALPix(Projection):
r"""Base class for HEALPix projections.
"""
class Pix2Sky_HEALPix(Pix2SkyProjection, HEALPix):
r"""
HEALPix - pixel to sky.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in the longitude direction.
X : float
The number of facets in the latitude direction.
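Examples
--------
A minimal usage sketch (the input values are arbitrary projection-plane
coordinates in degrees):
>>> from astropy.modeling.models import Pix2Sky_HEALPix
>>> model = Pix2Sky_HEALPix(H=4.0, X=3.0)
>>> phi, theta = model(-10.0, 10.0)   # native longitude and latitude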
"""
_separable = True
H = Parameter(default=4.0)
X = Parameter(default=3.0)
@property
def inverse(self):
return Sky2Pix_HEALPix(self.H.value, self.X.value)
@classmethod
def evaluate(cls, x, y, H, X):
return _projections.hpxx2s(x, y, H, X)
Pix2Sky_HPX = Pix2Sky_HEALPix
class Sky2Pix_HEALPix(Sky2PixProjection, HEALPix):
r"""
HEALPix projection - sky to pixel.
Corresponds to the ``HPX`` projection in FITS WCS.
Parameters
----------
H : float
The number of facets in the longitude direction.
X : float
The number of facets in the latitude direction.
"""
_separable = True
H = Parameter(default=4.0)
X = Parameter(default=3.0)
@property
def inverse(self):
return Pix2Sky_HEALPix(self.H.value, self.X.value)
@classmethod
def evaluate(cls, phi, theta, H, X):
return _projections.hpxs2x(phi, theta, H, X)
Sky2Pix_HPX = Sky2Pix_HEALPix
class Pix2Sky_HEALPixPolar(Pix2SkyProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - pixel to sky.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Sky2Pix_HEALPixPolar()
@classmethod
def evaluate(cls, x, y):
return _projections.xphx2s(x, y)
Pix2Sky_XPH = Pix2Sky_HEALPixPolar
class Sky2Pix_HEALPixPolar(Sky2PixProjection, HEALPix):
r"""
HEALPix polar, aka "butterfly" projection - sky to pixel.
Corresponds to the ``XPH`` projection in FITS WCS.
"""
_separable = False
@property
def inverse(self):
return Pix2Sky_HEALPixPolar()
@classmethod
def evaluate(cls, phi, theta):
return _projections.xphs2x(phi, theta)
Sky2Pix_XPH = Sky2Pix_HEALPixPolar
class AffineTransformation2D(Model):
"""
Perform an affine transformation in 2 dimensions.
Parameters
----------
matrix : array
A 2x2 matrix specifying the linear transformation to apply to the
inputs
translation : array
A 2D vector (given either as a 1D array of length 2 or a 2D array of
shape ``(1, 2)``) specifying a translation to apply to the inputs
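Examples
--------
A minimal illustrative sketch (matrix and translation values are
arbitrary); the point (1, 2) maps to (2*1 + 1, 2*2 - 1) = (3, 3):
>>> import numpy as np
>>> from astropy.modeling.models import AffineTransformation2D
>>> model = AffineTransformation2D(matrix=[[2.0, 0.0], [0.0, 2.0]],
...                                translation=[1.0, -1.0])
>>> x, y = model(np.array([1.0]), np.array([2.0]))   # -> [3.], [3.]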
"""
inputs = ('x', 'y')
outputs = ('x', 'y')
standard_broadcasting = False
_separable = False
matrix = Parameter(default=[[1.0, 0.0], [0.0, 1.0]])
translation = Parameter(default=[0.0, 0.0])
@matrix.validator
def matrix(self, value):
"""Validates that the input matrix is a 2x2 2D array."""
if np.shape(value) != (2, 2):
raise InputParameterError(
"Expected transformation matrix to be a 2x2 array")
@translation.validator
def translation(self, value):
"""
Validates that the translation vector is a 2D vector.  This allows
either a "row" vector of shape ``(2,)`` or a 2D array of shape
``(1, 2)``; a true column vector of shape ``(2, 1)`` is rejected.
"""
if not ((np.ndim(value) == 1 and np.shape(value) == (2,)) or
(np.ndim(value) == 2 and np.shape(value) == (1, 2))):
raise InputParameterError(
"Expected translation vector to be a 2 element row or column "
"vector array")
@property
def inverse(self):
"""
Inverse transformation.
Raises `~astropy.modeling.InputParameterError` if the transformation cannot be inverted.
"""
det = np.linalg.det(self.matrix.value)
if det == 0:
raise InputParameterError(
"Transformation matrix is singular; {0} model does not "
"have an inverse".format(self.__class__.__name__))
matrix = np.linalg.inv(self.matrix.value)
if self.matrix.unit is not None:
matrix = matrix * self.matrix.unit
# If matrix has unit then translation has unit, so no need to assign it.
translation = -np.dot(matrix, self.translation.value)
return self.__class__(matrix=matrix, translation=translation)
@classmethod
def evaluate(cls, x, y, matrix, translation):
"""
Apply the transformation to a set of 2D Cartesian coordinates given as
two lists--one for the x coordinates and one for the y coordinates--or a
single coordinate pair.
Parameters
----------
x, y : array, float
x and y coordinates
"""
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
shape = x.shape or (1,)
inarr = np.vstack([x.flatten(), y.flatten(), np.ones(x.size)])
if inarr.shape[0] != 3 or inarr.ndim != 2:
raise ValueError("Incompatible input shapes")
augmented_matrix = cls._create_augmented_matrix(matrix, translation)
result = np.dot(augmented_matrix, inarr)
x, y = result[0], result[1]
x.shape = y.shape = shape
return x, y
@staticmethod
def _create_augmented_matrix(matrix, translation):
unit = None
if any([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]):
if not all([hasattr(translation, 'unit'), hasattr(matrix, 'unit')]):
raise ValueError("To use AffineTransformation with quantities, "
"both matrix and unit need to be quantities.")
unit = translation.unit
# matrix should have the same units as translation
if not (matrix.unit / translation.unit) == u.dimensionless_unscaled:
raise ValueError("matrix and translation must have the same units.")
augmented_matrix = np.empty((3, 3), dtype=float)
augmented_matrix[0:2, 0:2] = matrix
augmented_matrix[0:2, 2:].flat = translation
augmented_matrix[2] = [0, 0, 1]
if unit is not None:
return augmented_matrix * unit
else:
return augmented_matrix
@property
def input_units(self):
if self.translation.unit is None and self.matrix.unit is None:
return None
elif self.translation.unit is not None:
return {'x': self.translation.unit,
'y': self.translation.unit
}
else:
return {'x': self.matrix.unit,
'y': self.matrix.unit
}
|
9a90ca5eabae74e6c1b92f532ea899b8b0d54605008bbff7ac3ed1b3cbf60b73 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains models representing polynomials and polynomial series.
"""
from collections import OrderedDict
import numpy as np
from .core import FittableModel, Model
from .functional_models import Shift
from .parameters import Parameter
from .utils import poly_map_domain, comb
from ..utils import indent, check_broadcast
from ..units import Quantity
__all__ = [
'Chebyshev1D', 'Chebyshev2D', 'Hermite1D', 'Hermite2D',
'InverseSIP', 'Legendre1D', 'Legendre2D', 'Polynomial1D',
'Polynomial2D', 'SIP', 'OrthoPolynomialBase',
'PolynomialModel'
]
class PolynomialBase(FittableModel):
"""
Base class for all polynomial-like models with an arbitrary number of
parameters in the form of coefficients.
In this case Parameter instances are returned through the class's
``__getattr__`` rather than through class descriptors.
"""
# Default _param_names list; this will be filled in by the implementation's
# __init__
_param_names = ()
linear = True
col_fit_deriv = False
@property
def param_names(self):
"""Coefficient names generated based on the model's polynomial degree
and number of dimensions.
Subclasses should implement this to return parameter names in the
desired format.
On most `Model` classes this is a class attribute, but for polynomial
models it is an instance attribute since each polynomial model instance
can have different parameters depending on the degree of the polynomial
and the number of dimensions, for example.
"""
return self._param_names
def __getattr__(self, attr):
if self._param_names and attr in self._param_names:
return Parameter(attr, default=0.0, model=self)
raise AttributeError(attr)
def __setattr__(self, attr, value):
# TODO: Support a means of specifying default values for coefficients
# Check for self._ndim first--if it hasn't been defined then the
# instance hasn't been initialized yet and self.param_names probably
# won't work.
# This has to vaguely duplicate the functionality of
# Parameter.__set__.
# TODO: I wonder if there might be a way around that though...
if attr[0] != '_' and self._param_names and attr in self._param_names:
param = Parameter(attr, default=0.0, model=self)
# This is a little hackish, but we can actually reuse the
# Parameter.__set__ method here
param.__set__(self, value)
else:
super().__setattr__(attr, value)
class PolynomialModel(PolynomialBase):
"""
Base class for polynomial models.
Its main purpose is to determine how many coefficients are needed
based on the polynomial order and dimension and to provide their
default values, names and ordering.
"""
def __init__(self, degree, n_models=None, model_set_axis=None,
name=None, meta=None, **params):
self._degree = degree
self._order = self.get_num_coeff(self.n_inputs)
self._param_names = self._generate_coeff_names(self.n_inputs)
super().__init__(
n_models=n_models, model_set_axis=model_set_axis, name=name,
meta=meta, **params)
def __repr__(self):
return self._format_repr([self.degree])
def __str__(self):
return self._format_str([('Degree', self.degree)])
@property
def degree(self):
"""Degree of polynomial."""
return self._degree
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one parameter set
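For example, a 2D polynomial of degree 2 has ``2 * 2 + comb(2, 2) + 1 = 6``
coefficients (``c0_0``, ``c1_0``, ``c2_0``, ``c0_1``, ``c0_2`` and ``c1_1``).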
"""
if self.degree < 0:
raise ValueError("Degree of polynomial must be positive or null")
# deg+1 is used to account for the difference between iraf using
# degree and numpy using exact degree
if ndim != 1:
nmixed = comb(self.degree, ndim)
else:
nmixed = 0
numc = self.degree * ndim + nmixed + 1
return numc
def _invlex(self):
c = []
lencoeff = self.degree + 1
for i in range(lencoeff):
for j in range(lencoeff):
if i + j <= self.degree:
c.append((j, i))
return c[::-1]
def _generate_coeff_names(self, ndim):
names = []
if ndim == 1:
for n in range(self._order):
names.append('c{0}'.format(n))
else:
for i in range(self.degree + 1):
names.append('c{0}_{1}'.format(i, 0))
for i in range(1, self.degree + 1):
names.append('c{0}_{1}'.format(0, i))
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j < self.degree + 1:
names.append('c{0}_{1}'.format(i, j))
return tuple(names)
class OrthoPolynomialBase(PolynomialBase):
"""
This is a base class for the 2D Chebyshev and Legendre models.
The polynomials implemented here require a maximum degree in x and y.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : list or None, optional
domain of the x independent variable
x_window : list or None, optional
range of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
{keyword: value} pairs, representing {parameter_name: value}
"""
inputs = ('x', 'y')
outputs = ('z',)
def __init__(self, x_degree, y_degree, x_domain=None, x_window=None,
y_domain=None, y_window=None, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
# TODO: Perhaps some of these other parameters should be properties?
# TODO: An awful lot of the functionality in this method is still
# shared by PolynomialModel; perhaps some of it can be generalized in
# PolynomialBase
self.x_degree = x_degree
self.y_degree = y_degree
self._order = self.get_num_coeff()
self.x_domain = x_domain
self.y_domain = y_domain
self.x_window = x_window
self.y_window = y_window
self._param_names = self._generate_coeff_names()
super().__init__(
n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def __repr__(self):
return self._format_repr([self.x_degree, self.y_degree])
def __str__(self):
return self._format_str(
[('X-Degree', self.x_degree),
('Y-Degree', self.y_degree)])
def get_num_coeff(self):
"""
Determine how many coefficients are needed
Returns
-------
numc : int
number of coefficients
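For example, ``x_degree=2`` and ``y_degree=3`` give
``(2 + 1) * (3 + 1) = 12`` coefficients.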
"""
return (self.x_degree + 1) * (self.y_degree + 1)
def _invlex(self):
# TODO: This is a very slow way to do this; fix it and related methods
# like _alpha
c = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
c.append((i, j))
return np.array(c[::-1])
def invlex_coeff(self, coeffs):
invlex_coeffs = []
xvar = np.arange(self.x_degree + 1)
yvar = np.arange(self.y_degree + 1)
for j in yvar:
for i in xvar:
name = 'c{0}_{1}'.format(i, j)
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return np.array(invlex_coeffs[::-1])
def _alpha(self):
invlexdeg = self._invlex()
invlexdeg[:, 1] = invlexdeg[:, 1] + self.x_degree + 1
nx = self.x_degree + 1
ny = self.y_degree + 1
alpha = np.zeros((ny * nx + 3, ny + nx))
for n in range(len(invlexdeg)):
alpha[n][invlexdeg[n]] = [1, 1]
alpha[-2, 0] = 1
alpha[-3, nx] = 1
return alpha
def imhorner(self, x, y, coeff):
_coeff = list(coeff)
_coeff.extend([0, 0, 0])
alpha = self._alpha()
r0 = _coeff[0]
nalpha = len(alpha)
karr = np.diff(alpha, axis=0)
kfunc = self._fcache(x, y)
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
nterms = x_terms + y_terms
for n in range(1, nterms + 1 + 3):
setattr(self, 'r' + str(n), 0.)
for n in range(1, nalpha):
k = karr[n - 1].nonzero()[0].max() + 1
rsum = 0
for i in range(1, k + 1):
rsum = rsum + getattr(self, 'r' + str(i))
val = kfunc[k - 1] * (r0 + rsum)
setattr(self, 'r' + str(k), val)
r0 = _coeff[n]
for i in range(1, k):
setattr(self, 'r' + str(i), 0.)
result = r0
for i in range(1, nterms + 1 + 3):
result = result + getattr(self, 'r' + str(i))
return result
def _generate_coeff_names(self):
names = []
for j in range(self.y_degree + 1):
for i in range(self.x_degree + 1):
names.append('c{0}_{1}'.format(i, j))
return tuple(names)
def _fcache(self, x, y):
# TODO: Write a docstring explaining the actual purpose of this method
"""To be implemented by subclasses"""
raise NotImplementedError("Subclasses should implement this")
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
return self.imhorner(x, y, invcoeff)
def prepare_inputs(self, x, y, **kwargs):
inputs, format_info = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
return (x, y), format_info
class Chebyshev1D(PolynomialModel):
r"""
Univariate Chebyshev series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x)
where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind.
Parameters
----------
degree : int
degree of the series
domain : list or None, optional
window : list or None, optional
If None, it is set to [-1,1]
Fitters will remap the domain to this window
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x - since the
coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with
units, 2x^2 and -1 would have incompatible units.
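Examples
--------
A minimal illustrative sketch (coefficient values are arbitrary); with
``c0=1`` and ``c2=1`` the series is ``T_0(x) + T_2(x) = 2x^2``:
>>> from astropy.modeling.models import Chebyshev1D
>>> model = Chebyshev1D(2, c0=1, c1=0, c2=1)
>>> value = model(2.0)   # 2 * 2**2 = 8.0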
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.domain = domain
self.window = window
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x2 - v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, format_info = \
super(PolynomialModel, self).prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), format_info
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
"""Evaluates the polynomial using Clenshaw's algorithm."""
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
x2 = 2 * x
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
c0 = coeffs[-i] - c1
c1 = tmp + c1 * x2
return c0 + c1 * x
class Hermite1D(PolynomialModel):
r"""
Univariate Hermite series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * H_{i}(x)
where ``H_i(x)`` is the corresponding Hermite polynomial ("Physicist's kind").
Parameters
----------
degree : int
degree of the series
domain : list or None, optional
window : list or None, optional
If None, it is set to [-1,1]
Fitters will remap the domain to this window
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Hermite polynomial (H2) is 4x^2-2, but if x was specified with units,
4x^2 and -2 would have incompatible units.
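Examples
--------
A minimal illustrative sketch (coefficient values are arbitrary); with
``c0=1`` and ``c2=1`` the series is ``H_0(x) + H_2(x) = 4x^2 - 1``:
>>> from astropy.modeling.models import Hermite1D
>>> model = Hermite1D(2, c0=1, c1=0, c2=1)
>>> value = model(1.0)   # 4 * 1**2 - 1 = 3.0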
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.domain = domain
self.window = window
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = 2 * x
for i in range(2, self.degree + 1):
v[i] = x2 * v[i - 1] - 2 * (i - 1) * v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, format_info = \
super(PolynomialModel, self).prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), format_info
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
x2 = x * 2
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
temp = c0
nd = nd - 1
c0 = coeffs[-i] - c1 * (2 * (nd - 1))
c1 = temp + c1 * x2
return c0 + c1 * x2
class Hermite2D(OrthoPolynomialBase):
r"""
Bivariate Hermite series.
It is defined as
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d}C_{nm} H_n(x) H_m(y)
where ``H_n(x)`` and ``H_m(y)`` are Hermite polynomials.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : list or None, optional
domain of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
x_window : list or None, optional
range of the x independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Hermite polynomials is a polynomial in x and/or y - since the
coefficients within each Hermite polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Hermite polynomial (H2) is 4x^2-2, but if x was
specified with units, 4x^2 and -2 would have incompatible units.
"""
_separable = False
def __init__(self, x_degree, y_degree, x_domain=None, x_window=[-1, 1],
y_domain=None, y_window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
x_window=x_window, y_window=y_window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def _fcache(self, x, y):
"""
Calculate the individual Hermite functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = 2 * x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = 2 * y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - 2 * (n - 1) * kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Hermite polynomials:
.. math::
H_{x_0}H_{y_0}, H_{x_1}H_{y_0}...H_{x_n}H_{y_0}...H_{x_n}H_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._hermderiv1d(x, self.x_degree + 1).T
y_deriv = self._hermderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _hermderiv1d(self, x, deg):
"""
Derivative of 1D Hermite series
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x2
for i in range(2, deg + 1):
d[i] = x2 * d[i - 1] - 2 * (i - 1) * d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre1D(PolynomialModel):
r"""
Univariate Legendre series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_i(x)`` is the corresponding Legendre polynomial.
Parameters
----------
degree : int
degree of the series
domain : list or None, optional
window : list or None, optional
If None, it is set to [-1,1]
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
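Examples
--------
A minimal illustrative sketch (coefficient values are arbitrary); with
``c0=1`` and ``c2=1`` the series is ``P_0(x) + P_2(x) = 1.5x^2 + 0.5``:
>>> from astropy.modeling.models import Legendre1D
>>> model = Legendre1D(2, c0=1, c1=0, c2=1)
>>> value = model(1.0)   # 1.5 * 1**2 + 0.5 = 2.0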
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
def __init__(self, degree, domain=None, window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.domain = domain
self.window = window
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def prepare_inputs(self, x, **kwargs):
inputs, format_info = \
super(PolynomialModel, self).prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), format_info
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = (v[i - 1] * x * (2 * i - 1) - v[i - 2] * (i - 1)) / i
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def clenshaw(x, coeffs):
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
nd = len(coeffs)
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
nd = nd - 1
c0 = coeffs[-i] - (c1 * (nd - 1)) / nd
c1 = tmp + (c1 * x * (2 * nd - 1)) / nd
return c0 + c1 * x
class Polynomial1D(PolynomialModel):
r"""
1D Polynomial model.
It is defined as:
.. math::
P = \sum_{i=0}^{i=n}C_{i} * x^{i}
Parameters
----------
degree : int
degree of the series
domain : list or None, optional
window : list or None, optional
If None, it is set to [-1,1]
Fitters will remap the domain to this window
**params : dict
keyword: value pairs, representing parameter_name: value
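Examples
--------
A minimal illustrative sketch (coefficient values are arbitrary):
>>> from astropy.modeling.models import Polynomial1D
>>> model = Polynomial1D(2, c0=1, c1=2, c2=3)   # 1 + 2x + 3x^2
>>> value = model(2.0)                          # 1 + 4 + 12 = 17.0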
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
def __init__(self, degree, domain=[-1, 1], window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.domain = domain
self.window = window
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def prepare_inputs(self, x, **kwargs):
inputs, format_info = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), format_info
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.horner(x, coeffs)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
v = np.empty((self.degree + 1,) + x.shape, dtype=float)
v[0] = 1
if self.degree > 0:
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x
return np.rollaxis(v, 0, v.ndim)
@staticmethod
def horner(x, coeffs):
if len(coeffs) == 1:
c0 = coeffs[-1] * np.ones_like(x, subok=False)
else:
c0 = coeffs[-1]
for i in range(2, len(coeffs) + 1):
c0 = coeffs[-i] + c0 * x
return c0
@property
def input_units(self):
if self.degree == 0 or self.c1.unit is None:
return None
else:
return {'x': self.c0.unit / self.c1.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = []
for i in range(self.degree + 1):
par = getattr(self, 'c{0}'.format(i))
mapping.append((par.name, outputs_unit['y'] / inputs_unit['x'] ** i))
return OrderedDict(mapping)
class Polynomial2D(PolynomialModel):
"""
2D Polynomial model.
Represents a general polynomial of degree n:
.. math::
P(x,y) = c_{00} + c_{10}x + ...+ c_{n0}x^n + c_{01}y + ...+ c_{0n}y^n
+ c_{11}xy + c_{12}xy^2 + ... + c_{1(n-1)}xy^{n-1}+ ... + c_{(n-1)1}x^{n-1}y
Parameters
----------
degree : int
highest power of the polynomial,
the number of terms is degree+1
x_domain : list or None, optional
domain of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
x_window : list or None, optional
range of the x independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
keyword: value pairs, representing parameter_name: value
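Examples
--------
A minimal illustrative sketch (coefficient values are arbitrary):
>>> from astropy.modeling.models import Polynomial2D
>>> model = Polynomial2D(1, c0_0=1, c1_0=2, c0_1=3)   # 1 + 2x + 3y
>>> value = model(2.0, 3.0)                           # 1 + 4 + 9 = 14.0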
"""
inputs = ('x', 'y')
outputs = ('z',)
_separable = False
def __init__(self, degree, x_domain=[-1, 1], y_domain=[-1, 1],
x_window=[-1, 1], y_window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
degree, n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
self.x_domain = x_domain
self.y_domain = y_domain
self.x_window = x_window
self.y_window = y_window
def prepare_inputs(self, x, y, **kwargs):
inputs, format_info = super().prepare_inputs(x, y, **kwargs)
x, y = inputs
if x.shape != y.shape:
raise ValueError("Expected input arrays to have the same shape")
return (x, y), format_info
def evaluate(self, x, y, *coeffs):
if self.x_domain is not None:
x = poly_map_domain(x, self.x_domain, self.x_window)
if self.y_domain is not None:
y = poly_map_domain(y, self.y_domain, self.y_window)
invcoeff = self.invlex_coeff(coeffs)
result = self.multivariate_horner(x, y, invcoeff)
# Special case for degree==0 to ensure that the shape of the output is
# still as expected by the broadcasting rules, even though the x and y
# inputs are not used in the evaluation
if self.degree == 0:
output_shape = check_broadcast(np.shape(coeffs[0]), x.shape)
if output_shape:
new_result = np.empty(output_shape)
new_result[:] = result
result = new_result
return result
def fit_deriv(self, x, y, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
y : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.ndim == 2:
x = x.flatten()
if y.ndim == 2:
y = y.flatten()
if x.size != y.size:
raise ValueError('Expected x and y to be of equal size')
designx = x[:, None] ** np.arange(self.degree + 1)
designy = y[:, None] ** np.arange(1, self.degree + 1)
designmixed = []
for i in range(1, self.degree):
for j in range(1, self.degree):
if i + j <= self.degree:
designmixed.append((x ** i) * (y ** j))
designmixed = np.array(designmixed).T
if designmixed.any():
v = np.hstack([designx, designy, designmixed])
else:
v = np.hstack([designx, designy])
return v
def invlex_coeff(self, coeffs):
invlex_coeffs = []
lencoeff = range(self.degree + 1)
for i in lencoeff:
for j in lencoeff:
if i + j <= self.degree:
name = 'c{0}_{1}'.format(j, i)
coeff = coeffs[self.param_names.index(name)]
invlex_coeffs.append(coeff)
return invlex_coeffs[::-1]
def multivariate_horner(self, x, y, coeffs):
"""
Multivariate Horner's scheme
Parameters
----------
x, y : array
coeffs : array of coefficients in inverse lexical order
"""
alpha = self._invlex()
r0 = coeffs[0]
r1 = r0 * 0.0
r2 = r0 * 0.0
karr = np.diff(alpha, axis=0)
for n in range(len(karr)):
if karr[n, 1] != 0:
r2 = y * (r0 + r1 + r2)
r1 = np.zeros_like(coeffs[0], subok=False)
else:
r1 = x * (r0 + r1)
r0 = coeffs[n + 1]
return r0 + r1 + r2
@property
def input_units(self):
if self.degree == 0 or (self.c1_0.unit is None and self.c0_1.unit is None):
return None
else:
return {'x': self.c0_0.unit / self.c1_0.unit,
'y': self.c0_0.unit / self.c0_1.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
mapping = []
for i in range(self.degree + 1):
for j in range(self.degree + 1):
if i + j > 2:
continue
par = getattr(self, 'c{0}_{1}'.format(i, j))
mapping.append((par.name, outputs_unit['z'] / inputs_unit['x'] ** i / inputs_unit['y'] ** j))
return OrderedDict(mapping)
class Chebyshev2D(OrthoPolynomialBase):
r"""
Bivariate Chebyshev series.
It is defined as
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d} C_{nm} T_n(x) T_m(y)
where ``T_n(x)`` and ``T_m(y)`` are Chebyshev polynomials of the first kind.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : list or None, optional
domain of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
x_window : list or None, optional
range of the x independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x and/or y - since
the coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x and/or y since the units would not be compatible. For
example, the third Chebyshev polynomial (T2) is 2x^2-1, but if x was
specified with units, 2x^2 and -1 would have incompatible units.
"""
_separable = False
def __init__(self, x_degree, y_degree, x_domain=None, x_window=[-1, 1],
y_domain=None, y_window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
x_window=x_window, y_window=y_window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def _fcache(self, x, y):
"""
Calculate the individual Chebyshev functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
for n in range(2, x_terms):
kfunc[n] = 2 * x * kfunc[n - 1] - kfunc[n - 2]
for n in range(x_terms + 2, x_terms + y_terms):
kfunc[n] = 2 * y * kfunc[n - 1] - kfunc[n - 2]
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Chebyshev polynomials:
.. math::
T_{x_0}T_{y_0}, T_{x_1}T_{y_0}...T_{x_n}T_{y_0}...T_{x_n}T_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._chebderiv1d(x, self.x_degree + 1).T
y_deriv = self._chebderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _chebderiv1d(self, x, deg):
"""
Derivative of 1D Chebyshev series
"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1, len(x)), dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
x2 = 2 * x
d[1] = x
for i in range(2, deg + 1):
d[i] = d[i - 1] * x2 - d[i - 2]
return np.rollaxis(d, 0, d.ndim)
class Legendre2D(OrthoPolynomialBase):
r"""
Bivariate Legendre series.
Defined as:
.. math:: P_{nm}(x,y) = \sum_{n,m=0}^{n=d,m=d} C_{nm} L_n(x) L_m(y)
where ``L_n(x)`` and ``L_m(y)`` are Legendre polynomials.
Parameters
----------
x_degree : int
degree in x
y_degree : int
degree in y
x_domain : list or None, optional
domain of the x independent variable
y_domain : list or None, optional
domain of the y independent variable
x_window : list or None, optional
range of the x independent variable
y_window : list or None, optional
range of the y independent variable
**params : dict
keyword: value pairs, representing parameter_name: value
Notes
-----
Model formula:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * L_{i}(x)
where ``L_{i}`` is the corresponding Legendre polynomial.
This model does not support the use of units/quantities, because each term
in the sum of Legendre polynomials is a polynomial in x - since the
coefficients within each Legendre polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Legendre polynomial (P2) is 1.5x^2-0.5, but if x was specified with
units, 1.5x^2 and -0.5 would have incompatible units.
"""
_separable = False
def __init__(self, x_degree, y_degree, x_domain=None, x_window=[-1, 1],
y_domain=None, y_window=[-1, 1], n_models=None,
model_set_axis=None, name=None, meta=None, **params):
super().__init__(
x_degree, y_degree, x_domain=x_domain, y_domain=y_domain,
x_window=x_window, y_window=y_window, n_models=n_models,
model_set_axis=model_set_axis, name=name, meta=meta, **params)
def _fcache(self, x, y):
"""
Calculate the individual Legendre functions once and store them in a
dictionary to be reused.
"""
x_terms = self.x_degree + 1
y_terms = self.y_degree + 1
kfunc = {}
kfunc[0] = np.ones(x.shape)
kfunc[1] = x.copy()
kfunc[x_terms] = np.ones(y.shape)
kfunc[x_terms + 1] = y.copy()
for n in range(2, x_terms):
kfunc[n] = (((2 * (n - 1) + 1) * x * kfunc[n - 1] -
(n - 1) * kfunc[n - 2]) / n)
for n in range(2, y_terms):
kfunc[n + x_terms] = ((2 * (n - 1) + 1) * y * kfunc[n + x_terms - 1] -
(n - 1) * kfunc[n + x_terms - 2]) / (n)
return kfunc
def fit_deriv(self, x, y, *params):
"""
Derivatives with respect to the coefficients.
This is an array with Legendre polynomials:
.. math::
L_{x_0}L_{y_0}, L_{x_1}L_{y_0}...L_{x_n}L_{y_0}...L_{x_n}L_{y_m}
Parameters
----------
x : ndarray
input
y : ndarray
input
params : throw away parameter
parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
if x.shape != y.shape:
raise ValueError("x and y must have the same shape")
x = x.flatten()
y = y.flatten()
x_deriv = self._legendderiv1d(x, self.x_degree + 1).T
y_deriv = self._legendderiv1d(y, self.y_degree + 1).T
ij = []
for i in range(self.y_degree + 1):
for j in range(self.x_degree + 1):
ij.append(x_deriv[j] * y_deriv[i])
v = np.array(ij)
return v.T
def _legendderiv1d(self, x, deg):
"""Derivative of 1D Legendre polynomial"""
x = np.array(x, dtype=float, copy=False, ndmin=1)
d = np.empty((deg + 1,) + x.shape, dtype=x.dtype)
d[0] = x * 0 + 1
if deg > 0:
d[1] = x
for i in range(2, deg + 1):
d[i] = (d[i - 1] * x * (2 * i - 1) - d[i - 2] * (i - 1)) / i
return np.rollaxis(d, 0, d.ndim)
class _SIP1D(PolynomialBase):
"""
This implements the Simple Imaging Polynomial Model (SIP) in 1D.
It's unlikely it will be used in 1D so this class is private
and SIP should be used instead.
"""
inputs = ('u', 'v')
outputs = ('w',)
_separable = False
def __init__(self, order, coeff_prefix, n_models=None,
model_set_axis=None, name=None, meta=None, **params):
self.order = order
self.coeff_prefix = coeff_prefix
self._param_names = self._generate_coeff_names(coeff_prefix)
super().__init__(n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta, **params)
def __repr__(self):
return self._format_repr(args=[self.order, self.coeff_prefix])
def __str__(self):
return self._format_str(
[('Order', self.order),
('Coeff. Prefix', self.coeff_prefix)])
def evaluate(self, x, y, *coeffs):
# TODO: Rewrite this so that it uses a simpler method of determining
# the matrix based on the number of given coefficients.
mcoef = self._coeff_matrix(self.coeff_prefix, coeffs)
return self._eval_sip(x, y, mcoef)
def get_num_coeff(self, ndim):
"""
Return the number of coefficients in one param set
"""
if self.order < 2 or self.order > 9:
raise ValueError("Degree of polynomial must be 2< deg < 9")
nmixed = comb(self.order, ndim)
# remove 3 terms because SIP deg >= 2
numc = self.order * ndim + nmixed - 2
return numc
def _generate_coeff_names(self, coeff_prefix):
names = []
for i in range(2, self.order + 1):
names.append('{0}_{1}_{2}'.format(coeff_prefix, i, 0))
for i in range(2, self.order + 1):
names.append('{0}_{1}_{2}'.format(coeff_prefix, 0, i))
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
names.append('{0}_{1}_{2}'.format(coeff_prefix, i, j))
return names
def _coeff_matrix(self, coeff_prefix, coeffs):
mat = np.zeros((self.order + 1, self.order + 1))
for i in range(2, self.order + 1):
attr = '{0}_{1}_{2}'.format(coeff_prefix, i, 0)
mat[i, 0] = coeffs[self.param_names.index(attr)]
for i in range(2, self.order + 1):
attr = '{0}_{1}_{2}'.format(coeff_prefix, 0, i)
mat[0, i] = coeffs[self.param_names.index(attr)]
for i in range(1, self.order):
for j in range(1, self.order):
if i + j < self.order + 1:
attr = '{0}_{1}_{2}'.format(coeff_prefix, i, j)
mat[i, j] = coeffs[self.param_names.index(attr)]
return mat
def _eval_sip(self, x, y, coef):
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
if self.coeff_prefix == 'A':
result = np.zeros(x.shape)
else:
result = np.zeros(y.shape)
for i in range(coef.shape[0]):
for j in range(coef.shape[1]):
if 1 < i + j < self.order + 1:
result = result + coef[i, j] * x ** i * y ** j
return result
class SIP(Model):
"""
Simple Imaging Polynomial (SIP) model.
The SIP convention is used to represent distortions in FITS image headers.
See [1]_ for a description of the SIP convention.
Parameters
----------
crpix : list or ndarray of length(2)
CRPIX values
a_order : int
SIP polynomial order for first axis
b_order : int
SIP order for second axis
a_coeff : dict
SIP coefficients for first axis
b_coeff : dict
SIP coefficients for the second axis
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
References
----------
.. [1] `David Shupe, et al, ADASS, ASP Conference Series, Vol. 347, 2005 <http://adsabs.harvard.edu/abs/2005ASPC..347..491S>`_
"""
inputs = ('u', 'v')
outputs = ('x', 'y')
_separable = False
def __init__(self, crpix, a_order, b_order, a_coeff={}, b_coeff={},
ap_order=None, bp_order=None, ap_coeff={}, bp_coeff={},
n_models=None, model_set_axis=None, name=None, meta=None):
self._crpix = crpix
self._a_order = a_order
self._b_order = b_order
self._a_coeff = a_coeff
self._b_coeff = b_coeff
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
self.shift_a = Shift(-crpix[0])
self.shift_b = Shift(-crpix[1])
self.sip1d_a = _SIP1D(a_order, coeff_prefix='A', n_models=n_models,
model_set_axis=model_set_axis, **a_coeff)
self.sip1d_b = _SIP1D(b_order, coeff_prefix='B', n_models=n_models,
model_set_axis=model_set_axis, **b_coeff)
super().__init__(n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta)
def __repr__(self):
return '<{0}({1!r})>'.format(self.__class__.__name__,
[self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b])
def __str__(self):
parts = ['Model: {0}'.format(self.__class__.__name__)]
for model in [self.shift_a, self.shift_b, self.sip1d_a, self.sip1d_b]:
parts.append(indent(str(model), width=4))
parts.append('')
return '\n'.join(parts)
@property
def inverse(self):
if (self._ap_order is not None and self._bp_order is not None):
return InverseSIP(self._ap_order, self._bp_order,
self._ap_coeff, self._bp_coeff)
else:
raise NotImplementedError("SIP inverse coefficients are not available.")
def evaluate(self, x, y):
u = self.shift_a.evaluate(x, *self.shift_a.param_sets)
v = self.shift_b.evaluate(y, *self.shift_b.param_sets)
f = self.sip1d_a.evaluate(u, v, *self.sip1d_a.param_sets)
g = self.sip1d_b.evaluate(u, v, *self.sip1d_b.param_sets)
return f, g
class InverseSIP(Model):
"""
Inverse Simple Imaging Polynomial
Parameters
----------
ap_order : int
order for the inverse transformation (AP coefficients)
bp_order : int
order for the inverse transformation (BP coefficients)
ap_coeff : dict
coefficients for the inverse transform
bp_coeff : dict
coefficients for the inverse transform
"""
inputs = ('x', 'y')
outputs = ('u', 'v')
_separable = False
def __init__(self, ap_order, bp_order, ap_coeff={}, bp_coeff={},
n_models=None, model_set_axis=None, name=None, meta=None):
self._ap_order = ap_order
self._bp_order = bp_order
self._ap_coeff = ap_coeff
self._bp_coeff = bp_coeff
# define the 0th term in order to use Polynomial2D
ap_coeff.setdefault('AP_0_0', 0)
bp_coeff.setdefault('BP_0_0', 0)
ap_coeff_params = dict((k.replace('AP_', 'c'), v)
for k, v in ap_coeff.items())
bp_coeff_params = dict((k.replace('BP_', 'c'), v)
for k, v in bp_coeff.items())
self.sip1d_ap = Polynomial2D(degree=ap_order,
model_set_axis=model_set_axis,
**ap_coeff_params)
self.sip1d_bp = Polynomial2D(degree=bp_order,
model_set_axis=model_set_axis,
**bp_coeff_params)
super().__init__(n_models=n_models, model_set_axis=model_set_axis,
name=name, meta=meta)
def __repr__(self):
return '<{0}({1!r})>'.format(self.__class__.__name__,
[self.sip1d_ap, self.sip1d_bp])
def __str__(self):
parts = ['Model: {0}'.format(self.__class__.__name__)]
for model in [self.sip1d_ap, self.sip1d_bp]:
parts.append(indent(str(model), width=4))
parts.append('')
return '\n'.join(parts)
def evaluate(self, x, y):
x1 = self.sip1d_ap.evaluate(x, y, *self.sip1d_ap.param_sets)
y1 = self.sip1d_bp.evaluate(x, y, *self.sip1d_bp.param_sets)
return x1, y1
|
00b7e4aa1e91a0180bf2b2538d2c2ac6c46e3bdbdfefaddef203c2bfc93f8bd3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tabular models.
Tabular models of any dimension can be created using `tabular_model`.
For convenience `Tabular1D` and `Tabular2D` are provided.
Examples
--------
>>> table = np.array([[ 3., 0., 0.],
... [ 0., 2., 0.],
... [ 0., 0., 0.]])
>>> points = ([1, 2, 3], [1, 2, 3])
>>> t2 = Tabular2D(points, lookup_table=table, bounds_error=False,
... fill_value=None, method='nearest')
"""
import abc
import numpy as np
from .core import Model
from .. import units as u
from ..utils import minversion
try:
import scipy
from scipy.interpolate import interpn
has_scipy = True
except ImportError:
has_scipy = False
has_scipy = has_scipy and minversion(scipy, "0.14")
__all__ = ['tabular_model', 'Tabular1D', 'Tabular2D']
__doctest_requires__ = {('tabular_model'): ['scipy']}
class _Tabular(Model):
"""
Returns an interpolated lookup table value.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ), optional
The points defining the regular grid in n dimensions.
lookup_table : array-like, shape (m1, ..., mn, ...)
The data on a regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then ``fill_value`` is used.
fill_value : float or `~astropy.units.Quantity`, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d". If Quantity is given, it will be converted to the unit of
``lookup_table``, if applicable.
Returns
-------
value : ndarray
Interpolated values at input coordinates.
Raises
------
ImportError
Scipy is not installed.
Notes
-----
Uses `scipy.interpolate.interpn`.
"""
linear = False
fittable = False
standard_broadcasting = False
outputs = ('y',)
@property
@abc.abstractmethod
def lookup_table(self):
pass
_is_dynamic = True
_id = 0
def __init__(self, points=None, lookup_table=None, method='linear',
bounds_error=True, fill_value=np.nan, **kwargs):
n_models = kwargs.get('n_models', 1)
if n_models > 1:
raise NotImplementedError('Only n_models=1 is supported.')
super().__init__(**kwargs)
if lookup_table is None:
raise ValueError('Must provide a lookup table.')
if not isinstance(lookup_table, u.Quantity):
lookup_table = np.asarray(lookup_table)
if self.lookup_table.ndim != lookup_table.ndim:
raise ValueError("lookup_table should be an array with "
"{0} dimensions.".format(self.lookup_table.ndim))
if points is None:
points = tuple(np.arange(x, dtype=float)
for x in lookup_table.shape)
else:
if lookup_table.ndim == 1 and not isinstance(points, tuple):
points = (points,)
npts = len(points)
if npts != lookup_table.ndim:
raise ValueError(
"Expected grid points in "
"{0} directions, got {1}.".format(lookup_table.ndim, npts))
if (npts > 1 and isinstance(points[0], u.Quantity) and
len(set([getattr(p, 'unit', None) for p in points])) > 1):
raise ValueError('points must all have the same unit.')
if isinstance(fill_value, u.Quantity):
if not isinstance(lookup_table, u.Quantity):
raise ValueError('fill value is in {0} but expected to be '
'unitless.'.format(fill_value.unit))
fill_value = fill_value.to(lookup_table.unit).value
self.points = points
self.lookup_table = lookup_table
self.bounds_error = bounds_error
self.method = method
self.fill_value = fill_value
def __repr__(self):
fmt = "<{0}(points={1}, lookup_table={2})>".format(
self.__class__.__name__, self.points, self.lookup_table)
return fmt
def __str__(self):
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('Inputs', self.inputs),
('Outputs', self.outputs),
('Parameters', ""),
(' points', self.points),
(' lookup_table', self.lookup_table),
(' method', self.method),
(' fill_value', self.fill_value),
(' bounds_error', self.bounds_error)
]
parts = ['{0}: {1}'.format(keyword, value)
for keyword, value in default_keywords
if value is not None]
return '\n'.join(parts)
@property
def input_units(self):
pts = self.points[0]
if not isinstance(pts, u.Quantity):
return None
else:
return dict([(x, pts.unit) for x in self.inputs])
@property
def return_units(self):
if not isinstance(self.lookup_table, u.Quantity):
return None
else:
return {'y': self.lookup_table.unit}
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits,
``(points_low, points_high)``.
Examples
--------
>>> from astropy.modeling.models import Tabular1D, Tabular2D
>>> t1 = Tabular1D(points=[1, 2, 3], lookup_table=[10, 20, 30])
>>> t1.bounding_box
(1, 3)
>>> t2 = Tabular2D(points=[[1, 2, 3], [2, 3, 4]],
... lookup_table=[[10, 20, 30], [20, 30, 40]])
>>> t2.bounding_box
((2, 4), (1, 3))
"""
bbox = [(min(p), max(p)) for p in self.points][::-1]
if len(bbox) == 1:
bbox = bbox[0]
return tuple(bbox)
def evaluate(self, *inputs):
"""
Return the interpolated values at the input coordinates.
Parameters
----------
inputs : list of scalars or ndarrays
Input coordinates. The number of inputs must be equal
to the dimensions of the lookup table.
"""
if isinstance(inputs, u.Quantity):
inputs = inputs.value
inputs = [inp.flatten() for inp in inputs[: self.n_inputs]]
inputs = np.array(inputs).T
if not has_scipy: # pragma: no cover
raise ImportError("This model requires scipy >= v0.14")
result = interpn(self.points, self.lookup_table, inputs,
method=self.method, bounds_error=self.bounds_error,
fill_value=self.fill_value)
# return_units not respected when points has no units
if (isinstance(self.lookup_table, u.Quantity) and
not isinstance(self.points[0], u.Quantity)):
result = result * self.lookup_table.unit
return result
def tabular_model(dim, name=None):
"""
Make a ``Tabular`` model where ``n_inputs`` is
based on the dimension of the lookup_table.
This model has to be further initialized and when evaluated
returns the interpolated values.
Parameters
----------
dim : int
Dimensions of the lookup table.
name : str
Name for the class.
Examples
--------
>>> table = np.array([[3., 0., 0.],
... [0., 2., 0.],
... [0., 0., 0.]])
>>> tab = tabular_model(2, name='Tabular2D')
>>> print(tab)
<class 'abc.Tabular2D'>
Name: Tabular2D
Inputs: ('x0', 'x1')
Outputs: ('y',)
>>> points = ([1, 2, 3], [1, 2, 3])
Setting ``fill_value`` to None allows extrapolation.
>>> m = tab(points, lookup_table=table, name='my_table',
... bounds_error=False, fill_value=None, method='nearest')
>>> xinterp = [0, 1, 1.5, 2.72, 3.14]
>>> m(xinterp, xinterp) # doctest: +FLOAT_CMP
array([3., 3., 3., 0., 0.])
"""
if dim < 1:
raise ValueError('Lookup table must have at least one dimension.')
table = np.zeros([2] * dim)
inputs = tuple('x{0}'.format(idx) for idx in range(table.ndim))
members = {'lookup_table': table, 'inputs': inputs}
if dim == 1:
members['_separable'] = True
else:
members['_separable'] = False
if name is None:
model_id = _Tabular._id
_Tabular._id += 1
name = 'Tabular{0}'.format(model_id)
return type(str(name), (_Tabular,), members)
Tabular1D = tabular_model(1, name='Tabular1D')
Tabular2D = tabular_model(2, name='Tabular2D')
_tab_docs = """
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then ``fill_value`` is used.
fill_value : float, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
value : ndarray
Interpolated values at input coordinates.
Raises
------
ImportError
Scipy is not installed.
Notes
-----
Uses `scipy.interpolate.interpn`.
"""
Tabular1D.__doc__ = """
Tabular model in 1D.
Returns an interpolated lookup table value.
Parameters
----------
points : array-like of float, of ndim=1
The points defining the regular grid in one dimension.
lookup_table : array-like, of ndim=1
The data in one dimension.
""" + _tab_docs
Tabular2D.__doc__ = """
Tabular model in 2D.
Returns an interpolated lookup table value.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1,), (m2,), optional
The points defining the regular grid in n dimensions.
lookup_table : array-like, shape (m1, m2)
The data on a regular grid in 2 dimensions.
""" + _tab_docs
|
715e1d31ff069e56d5200f432e64600c6e66698ca84b44a91a2272e58fb36196 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Statistic functions used in `~astropy.modeling.fitting`.
"""
import numpy as np
__all__ = ['leastsquare']
def leastsquare(measured_vals, updated_model, weights, x, y=None):
"""
Least square statistic with optional weights.
Parameters
----------
measured_vals : `~numpy.ndarray`
Measured data values.
updated_model : `~astropy.modeling.Model`
Model with parameters set by the current iteration of the optimizer.
weights : `~numpy.ndarray`
Array of weights to apply to each residual.
x : `~numpy.ndarray`
Independent variable "x" to evaluate the model on.
y : `~numpy.ndarray`, optional
Independent variable "y" to evaluate the model on, for 2D models.
Returns
-------
res : float
The sum of least squares.
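    Examples
    --------
    A minimal, purely illustrative call, using a plain callable in place of a
    fitted model (``leastsquare`` only requires that ``updated_model(x)``
    returns the model values):
    >>> import numpy as np
    >>> from astropy.modeling.statistic import leastsquare
    >>> x = np.array([1., 2., 3.])
    >>> measured = np.array([1., 2., 4.])
    >>> print(leastsquare(measured, lambda x: x, None, x))  # doctest: +FLOAT_CMP
    1.0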
"""
if y is None:
model_vals = updated_model(x)
else:
model_vals = updated_model(x, y)
if weights is None:
return np.sum((model_vals - measured_vals) ** 2)
else:
return np.sum((weights * (model_vals - measured_vals)) ** 2)
|
fbc852b8f0c7c95d6d62c34727f4a05ab6d8868aad5019a3b17871397592a9d8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Power law model variants
"""
from collections import OrderedDict
import numpy as np
from .core import Fittable1DModel
from .parameters import Parameter, InputParameterError
from ..units import Quantity
__all__ = ['PowerLaw1D', 'BrokenPowerLaw1D', 'SmoothlyBrokenPowerLaw1D',
'ExponentialCutoffPowerLaw1D', 'LogParabola1D']
class PowerLaw1D(Fittable1DModel):
"""
One dimensional power law model.
Parameters
----------
amplitude : float
Model amplitude at the reference point
x_0 : float
Reference point
alpha : float
Power law index
See Also
--------
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha}
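    Examples
    --------
    A quick numerical check of the formula above (with ``amplitude=1`` and
    ``x_0=1``, ``f(2) = 2**-2``):
    >>> from astropy.modeling.models import PowerLaw1D
    >>> pl = PowerLaw1D(amplitude=1, x_0=1, alpha=2)
    >>> print(pl(2.))  # doctest: +FLOAT_CMP
    0.25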
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=1)
alpha = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, alpha):
"""One dimensional power law model function"""
xx = x / x_0
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha):
"""One dimensional power law derivative with respect to parameters"""
xx = x / x_0
d_amplitude = xx ** (-alpha)
d_x_0 = amplitude * alpha * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
return [d_amplitude, d_x_0, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class BrokenPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with a break.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for x < x_break.
alpha_2 : float
Power law index for x > x_break.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1`
for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``):
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\
A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\
\\end{array}
\\right.
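    Examples
    --------
    A quick numerical check of the two branches of the formula above:
    >>> from astropy.modeling.models import BrokenPowerLaw1D
    >>> bpl = BrokenPowerLaw1D(amplitude=1, x_break=1, alpha_1=1, alpha_2=2)
    >>> print(bpl(0.5))  # doctest: +FLOAT_CMP
    2.0
    >>> print(bpl(2.))  # doctest: +FLOAT_CMP
    0.25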
"""
amplitude = Parameter(default=1)
x_break = Parameter(default=1)
alpha_1 = Parameter(default=1)
alpha_2 = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law model function"""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law derivative with respect to parameters"""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
d_amplitude = xx ** (-alpha)
d_x_break = amplitude * alpha * d_amplitude / x_break
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_alpha_1 = np.where(x < x_break, d_alpha, 0)
d_alpha_2 = np.where(x >= x_break, d_alpha, 0)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]
@property
def input_units(self):
if self.x_break.unit is None:
return None
else:
return {'x': self.x_break.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_break', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class SmoothlyBrokenPowerLaw1D(Fittable1DModel):
"""One dimensional smoothly broken power law model.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for ``x << x_break``.
alpha_2 : float
Power law index for ``x >> x_break``.
delta : float
Smoothness parameter.
See Also
--------
BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for
``x_break``, :math:`\\alpha_1` for ``alpha_1``,
:math:`\\alpha_2` for ``alpha_2`` and :math:`\\Delta` for
``delta``):
.. math::
f(x) = A \\left( \\frac{x}{x_b} \\right) ^ {-\\alpha_1}
\\left\\{
\\frac{1}{2}
\\left[
1 + \\left( \\frac{x}{x_b}\\right)^{1 / \\Delta}
\\right]
\\right\\}^{(\\alpha_1 - \\alpha_2) \\Delta}
The change of slope occurs between the values :math:`x_1`
and :math:`x_2` such that:
.. math::
\\log_{10} \\frac{x_2}{x_b} = \\log_{10} \\frac{x_b}{x_1}
\\sim \\Delta
At values :math:`x \\lesssim x_1` and :math:`x \\gtrsim x_2` the
model is approximately a simple power law with index
:math:`\\alpha_1` and :math:`\\alpha_2` respectively. The two
power laws are smoothly joined at values :math:`x_1 < x < x_2`,
hence the :math:`\\Delta` parameter sets the "smoothness" of the
slope change.
The ``delta`` parameter is bounded to values greater than 1e-3
(corresponding to :math:`x_2 / x_1 \\gtrsim 1.002`) to avoid
overflow errors.
The ``amplitude`` parameter is bounded to positive values since
this model is typically used to represent positive quantities.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models
x = np.logspace(0.7, 2.3, 500)
f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20,
alpha_1=-2, alpha_2=2)
plt.figure()
plt.title("amplitude=1, x_break=20, alpha_1=-2, alpha_2=2")
f.delta = 0.5
plt.loglog(x, f(x), '--', label='delta=0.5')
f.delta = 0.3
plt.loglog(x, f(x), '-.', label='delta=0.3')
f.delta = 0.1
plt.loglog(x, f(x), label='delta=0.1')
plt.axis([x.min(), x.max(), 0.1, 1.1])
plt.legend(loc='lower center')
plt.grid(True)
plt.show()
"""
amplitude = Parameter(default=1, min=0)
x_break = Parameter(default=1)
alpha_1 = Parameter(default=-2)
alpha_2 = Parameter(default=2)
delta = Parameter(default=1, min=1.e-3)
@amplitude.validator
def amplitude(self, value):
if np.any(value <= 0):
raise InputParameterError(
"amplitude parameter must be > 0")
@delta.validator
def delta(self, value):
if np.any(value < 0.001):
raise InputParameterError(
"delta parameter must be >= 0.001")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law model function"""
# Pre-calculate `x/x_b`
xx = x / x_break
# Initialize the return value
f = np.zeros_like(xx, subok=False)
if isinstance(amplitude, Quantity):
return_unit = amplitude.unit
amplitude = amplitude.value
else:
return_unit = None
# The quantity `t = (x / x_b)^(1 / delta)` can become quite
# large. To avoid overflow errors we will start by calculating
# its natural logarithm:
logt = np.log(xx) / delta
# When `t >> 1` or `t << 1` we don't actually need to compute
# the `t` value since the main formula (see docstring) can be
# significantly simplified by neglecting `1` or `t`
# respectively. In the following we will check whether `t` is
# much greater, much smaller, or comparable to 1 by comparing
# the `logt` value with an appropriate threshold.
threshold = 30 # corresponding to exp(30) ~ 1e13
i = logt > threshold
if (i.max()):
# In this case the main formula reduces to a simple power
# law with index `alpha_2`.
f[i] = amplitude * xx[i] ** (-alpha_2) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
i = logt < -threshold
if (i.max()):
# In this case the main formula reduces to a simple power
# law with index `alpha_1`.
f[i] = amplitude * xx[i] ** (-alpha_1) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
i = np.abs(logt) <= threshold
if (i.max()):
            # In this case the `t` value is "comparable" to 1, hence we
            # will evaluate the whole formula.
t = np.exp(logt[i])
r = (1. + t) / 2.
f[i] = amplitude * xx[i] ** (-alpha_1) \
* r ** ((alpha_1 - alpha_2) * delta)
if return_unit:
return Quantity(f, unit=return_unit, copy=False)
else:
return f
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law derivative with respect
to parameters"""
# Pre-calculate `x_b` and `x/x_b` and `logt` (see comments in
# SmoothlyBrokenPowerLaw1D.evaluate)
xx = x / x_break
logt = np.log(xx) / delta
# Initialize the return values
f = np.zeros_like(xx)
d_amplitude = np.zeros_like(xx)
d_x_break = np.zeros_like(xx)
d_alpha_1 = np.zeros_like(xx)
d_alpha_2 = np.zeros_like(xx)
d_delta = np.zeros_like(xx)
threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate)
i = logt > threshold
if (i.max()):
f[i] = amplitude * xx[i] ** (-alpha_2) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_2 / x_break
d_alpha_1[i] = f[i] * (-delta * np.log(2))
d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2))
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = logt < -threshold
if (i.max()):
f[i] = amplitude * xx[i] ** (-alpha_1) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_1 / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2))
d_alpha_2[i] = f[i] * delta * np.log(2)
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = np.abs(logt) <= threshold
if (i.max()):
t = np.exp(logt[i])
r = (1. + t) / 2.
f[i] = amplitude * xx[i] ** (-alpha_1) \
* r ** ((alpha_1 - alpha_2) * delta)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2. / r) / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r))
d_alpha_2[i] = f[i] * (-delta * np.log(r))
d_delta[i] = f[i] * (alpha_1 - alpha_2) \
* (np.log(r) - t / (1. + t) / delta * np.log(xx[i]))
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta]
@property
def input_units(self):
if self.x_break.unit is None:
return None
else:
return {'x': self.x_break.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_break', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class ExponentialCutoffPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with an exponential cutoff.
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
x_cutoff : float
Cutoff point
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha} \\exp (-x / x_{cutoff})
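    Examples
    --------
    A quick numerical check of the formula above
    (``f(2) = 2**-1 * exp(-2 / 2) = 0.5 / e``):
    >>> from astropy.modeling.models import ExponentialCutoffPowerLaw1D
    >>> ecpl = ExponentialCutoffPowerLaw1D(amplitude=1, x_0=1, alpha=1,
    ...                                    x_cutoff=2)
    >>> print(ecpl(2.))  # doctest: +FLOAT_CMP
    0.18393972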
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=1)
alpha = Parameter(default=1)
x_cutoff = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law model function"""
xx = x / x_0
return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law derivative with respect to parameters"""
xx = x / x_0
xc = x / x_cutoff
d_amplitude = xx ** (-alpha) * np.exp(-xc)
d_x_0 = alpha * amplitude * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('x_cutoff', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class LogParabola1D(Fittable1DModel):
"""
One dimensional log parabola model (sometimes called curved power law).
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
beta : float
Power law curvature
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha`` and :math:`\\beta` for ``beta``):
.. math:: f(x) = A \\left(\\frac{x}{x_{0}}\\right)^{- \\alpha - \\beta \\log{\\left (\\frac{x}{x_{0}} \\right )}}
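    Examples
    --------
    With ``beta = 0`` the curvature term vanishes and the model reduces to a
    simple power law, so ``f(2) = 2**-1``:
    >>> from astropy.modeling.models import LogParabola1D
    >>> lp = LogParabola1D(amplitude=1, x_0=1, alpha=1, beta=0)
    >>> print(lp(2.))  # doctest: +FLOAT_CMP
    0.5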
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=1)
alpha = Parameter(default=1)
beta = Parameter(default=0)
@staticmethod
def evaluate(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola model function"""
xx = x / x_0
exponent = -alpha - beta * np.log(xx)
return amplitude * xx ** exponent
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola derivative with respect to parameters"""
xx = x / x_0
log_xx = np.log(xx)
exponent = -alpha - beta * log_xx
d_amplitude = xx ** exponent
d_beta = -amplitude * d_amplitude * log_xx ** 2
d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)
d_alpha = -amplitude * d_amplitude * log_xx
return [d_amplitude, d_x_0, d_alpha, d_beta]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
|
39e241058122a1605440a3956bbcd3626214ab8b122556d4cc17339b70b984e0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import select
import socket
import threading
import time
import uuid
import warnings
import queue
import xmlrpc.client as xmlrpc
from urllib.parse import urlunparse
from .. import log
from .constants import SAMP_STATUS_OK
from .constants import __profile_version__
from .errors import SAMPWarning, SAMPHubError, SAMPProxyError
from .utils import internet_on, ServerProxyPool, _HubAsClient
from .lockfile_helpers import read_lockfile, create_lock_file
from .standard_profile import ThreadingXMLRPCServer
from .web_profile import WebProfileXMLRPCServer, web_profile_text_dialog
__all__ = ['SAMPHubServer', 'WebProfileDialog']
__doctest_skip__ = ['.', 'SAMPHubServer.*']
class SAMPHubServer:
"""
SAMP Hub Server.
Parameters
----------
secret : str, optional
        The secret code to use for the SAMP lockfile. If none is specified,
the :func:`uuid.uuid1` function is used to generate one.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
lockfile : str, optional
Custom lockfile name.
timeout : int, optional
Hub inactivity timeout. If ``timeout > 0`` then the Hub automatically
stops after an inactivity period longer than ``timeout`` seconds. By
default ``timeout`` is set to 0 (Hub never expires).
client_timeout : int, optional
        Client inactivity timeout. If ``client_timeout > 0`` then the Hub
        automatically unregisters clients that have been inactive for a
        period longer than ``client_timeout`` seconds. By default
        ``client_timeout`` is set to 0 (clients never expire).
mode : str, optional
        Defines the Hub running mode. If ``mode`` is ``'single'`` then the Hub
        runs using the standard ``.samp`` lock-file, with a single instance
        per user desktop session. Otherwise, if ``mode`` is ``'multiple'``,
        then the Hub runs using a non-standard lock-file of the form
        ``samp-hub-<UUID>`` placed in the ``.samp-1`` directory, where
        ``<UUID>`` is a unique UUID assigned to the hub.
label : str, optional
A string used to label the Hub with a human readable name. This string
is written in the lock-file assigned to the ``hub.label`` token.
web_profile : bool, optional
Enables or disables the Web Profile support.
web_profile_dialog : class, optional
Allows a class instance to be specified using ``web_profile_dialog``
to replace the terminal-based message with e.g. a GUI pop-up. Two
`queue.Queue` instances will be added to the instance as attributes
``queue_request`` and ``queue_result``. When a request is received via
the ``queue_request`` queue, the pop-up should be displayed, and a
value of `True` or `False` should be added to ``queue_result``
depending on whether the user accepted or refused the connection.
web_port : int, optional
The port to use for web SAMP. This should not be changed except for
testing purposes, since web SAMP should always use port 21012.
pool_size : int, optional
The number of socket connections opened to communicate with the
clients.
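    Examples
    --------
    A stand-alone hub can be started and stopped from a Python session (a
    minimal sketch; see ``start`` for the blocking/non-blocking behaviour):
    >>> from astropy.samp import SAMPHubServer
    >>> hub = SAMPHubServer()
    >>> hub.start()
    >>> hub.stop()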
"""
def __init__(self, secret=None, addr=None, port=0, lockfile=None,
timeout=0, client_timeout=0, mode='single', label="",
web_profile=True, web_profile_dialog=None, web_port=21012,
pool_size=20):
# Generate random ID for the hub
self._id = str(uuid.uuid1())
# General settings
self._is_running = False
self._customlockfilename = lockfile
self._lockfile = None
self._addr = addr
self._port = port
self._mode = mode
self._label = label
self._timeout = timeout
self._client_timeout = client_timeout
self._pool_size = pool_size
# Web profile specific attributes
self._web_profile = web_profile
self._web_profile_dialog = web_profile_dialog
self._web_port = web_port
self._web_profile_server = None
self._web_profile_callbacks = {}
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name,
self._port or 0)
except socket.error:
self._host_name = "127.0.0.1"
# Threading stuff
self._thread_lock = threading.Lock()
self._thread_run = None
self._thread_hub_timeout = None
self._thread_client_timeout = None
self._launched_threads = []
# Variables for timeout testing:
self._last_activity_time = None
self._client_activity_time = {}
# Hub message id counter, used to create hub msg ids
self._hub_msg_id_counter = 0
# Hub secret code
self._hub_secret_code_customized = secret
self._hub_secret = self._create_secret_code()
# Hub public id (as SAMP client)
self._hub_public_id = ""
# Client ids
# {private_key: (public_id, timestamp)}
self._private_keys = {}
# Metadata per client
# {private_key: metadata}
self._metadata = {}
# List of subscribed clients per MType
# {mtype: private_key list}
self._mtype2ids = {}
# List of subscribed MTypes per client
# {private_key: mtype list}
self._id2mtypes = {}
# List of XML-RPC addresses per client
# {public_id: (XML-RPC address, ServerProxyPool instance)}
self._xmlrpc_endpoints = {}
# Synchronous message id heap
self._sync_msg_ids_heap = {}
# Public ids counter
self._client_id_counter = -1
@property
def id(self):
"""
The unique hub ID.
"""
return self._id
def _register_standard_api(self, server):
# Standard Profile only operations
server.register_function(self._ping, 'samp.hub.ping')
server.register_function(self._set_xmlrpc_callback, 'samp.hub.setXmlrpcCallback')
# Standard API operations
server.register_function(self._register, 'samp.hub.register')
server.register_function(self._unregister, 'samp.hub.unregister')
server.register_function(self._declare_metadata, 'samp.hub.declareMetadata')
server.register_function(self._get_metadata, 'samp.hub.getMetadata')
server.register_function(self._declare_subscriptions, 'samp.hub.declareSubscriptions')
server.register_function(self._get_subscriptions, 'samp.hub.getSubscriptions')
server.register_function(self._get_registered_clients, 'samp.hub.getRegisteredClients')
server.register_function(self._get_subscribed_clients, 'samp.hub.getSubscribedClients')
server.register_function(self._notify, 'samp.hub.notify')
server.register_function(self._notify_all, 'samp.hub.notifyAll')
server.register_function(self._call, 'samp.hub.call')
server.register_function(self._call_all, 'samp.hub.callAll')
server.register_function(self._call_and_wait, 'samp.hub.callAndWait')
server.register_function(self._reply, 'samp.hub.reply')
def _register_web_profile_api(self, server):
# Web Profile methods like Standard Profile
server.register_function(self._ping, 'samp.webhub.ping')
server.register_function(self._unregister, 'samp.webhub.unregister')
server.register_function(self._declare_metadata, 'samp.webhub.declareMetadata')
server.register_function(self._get_metadata, 'samp.webhub.getMetadata')
server.register_function(self._declare_subscriptions, 'samp.webhub.declareSubscriptions')
server.register_function(self._get_subscriptions, 'samp.webhub.getSubscriptions')
server.register_function(self._get_registered_clients, 'samp.webhub.getRegisteredClients')
server.register_function(self._get_subscribed_clients, 'samp.webhub.getSubscribedClients')
server.register_function(self._notify, 'samp.webhub.notify')
server.register_function(self._notify_all, 'samp.webhub.notifyAll')
server.register_function(self._call, 'samp.webhub.call')
server.register_function(self._call_all, 'samp.webhub.callAll')
server.register_function(self._call_and_wait, 'samp.webhub.callAndWait')
server.register_function(self._reply, 'samp.webhub.reply')
# Methods particularly for Web Profile
server.register_function(self._web_profile_register, 'samp.webhub.register')
server.register_function(self._web_profile_allowReverseCallbacks, 'samp.webhub.allowReverseCallbacks')
server.register_function(self._web_profile_pullCallbacks, 'samp.webhub.pullCallbacks')
def _start_standard_server(self):
self._server = ThreadingXMLRPCServer(
(self._addr or self._host_name, self._port or 0),
log, logRequests=False, allow_none=True)
prot = 'http'
self._port = self._server.socket.getsockname()[1]
addr = "{0}:{1}".format(self._addr or self._host_name, self._port)
self._url = urlunparse((prot, addr, '', '', '', ''))
self._server.register_introspection_functions()
self._register_standard_api(self._server)
def _start_web_profile_server(self):
self._web_profile_requests_queue = queue.Queue(1)
self._web_profile_requests_result = queue.Queue(1)
self._web_profile_requests_semaphore = queue.Queue(1)
if self._web_profile_dialog is not None:
# TODO: Some sort of duck-typing on the web_profile_dialog object
self._web_profile_dialog.queue_request = \
self._web_profile_requests_queue
self._web_profile_dialog.queue_result = \
self._web_profile_requests_result
try:
self._web_profile_server = WebProfileXMLRPCServer(
('localhost', self._web_port), log, logRequests=False,
allow_none=True)
self._web_port = self._web_profile_server.socket.getsockname()[1]
self._web_profile_server.register_introspection_functions()
self._register_web_profile_api(self._web_profile_server)
log.info("Hub set to run with Web Profile support enabled.")
except socket.error:
log.warning("Port {0} already in use. Impossible to run the "
"Hub with Web Profile support.".format(self._web_port),
SAMPWarning)
self._web_profile = False
# Cleanup
self._web_profile_requests_queue = None
self._web_profile_requests_result = None
self._web_profile_requests_semaphore = None
def _launch_thread(self, group=None, target=None, name=None, args=None):
# Remove inactive threads
remove = []
for t in self._launched_threads:
if not t.is_alive():
remove.append(t)
for t in remove:
self._launched_threads.remove(t)
# Start new thread
t = threading.Thread(group=group, target=target, name=name, args=args)
t.start()
# Add to list of launched threads
self._launched_threads.append(t)
def _join_launched_threads(self, timeout=None):
for t in self._launched_threads:
t.join(timeout=timeout)
def _timeout_test_hub(self):
if self._timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.:
with self._thread_lock:
if self._last_activity_time is not None:
if now - self._last_activity_time >= self._timeout:
warnings.warn("Timeout expired, Hub is shutting down!",
SAMPWarning)
self.stop()
return
last = now
def _timeout_test_client(self):
if self._client_timeout == 0:
return
last = time.time()
while self._is_running:
time.sleep(0.05) # keep this small to check _is_running often
now = time.time()
if now - last > 1.:
                # iterate over a copy, since _unregister() below removes
                # entries from the dictionary
                for private_key in list(self._client_activity_time.keys()):
if (now - self._client_activity_time[private_key] > self._client_timeout
and private_key != self._hub_private_key):
warnings.warn(
"Client {} timeout expired!".format(private_key),
SAMPWarning)
self._notify_disconnection(private_key)
self._unregister(private_key)
last = now
def _hub_as_client_request_handler(self, method, args):
if method == 'samp.client.receiveCall':
return self._receive_call(*args)
elif method == 'samp.client.receiveNotification':
return self._receive_notification(*args)
elif method == 'samp.client.receiveResponse':
return self._receive_response(*args)
elif method == 'samp.app.ping':
return self._ping(*args)
def _setup_hub_as_client(self):
hub_metadata = {"samp.name": "Astropy SAMP Hub",
"samp.description.text": self._label,
"author.name": "The Astropy Collaboration",
"samp.documentation.url": "http://docs.astropy.org/en/stable/samp",
"samp.icon.url": self._url + "/samp/icon"}
result = self._register(self._hub_secret)
self._hub_public_id = result["samp.self-id"]
self._hub_private_key = result["samp.private-key"]
self._set_xmlrpc_callback(self._hub_private_key, self._url)
self._declare_metadata(self._hub_private_key, hub_metadata)
self._declare_subscriptions(self._hub_private_key,
{"samp.app.ping": {},
"x-samp.query.by-meta": {}})
def start(self, wait=False):
"""
Start the current SAMP Hub instance and create the lock file. Hub
start-up can be blocking or non blocking depending on the ``wait``
parameter.
Parameters
----------
wait : bool
            If `True` then the Hub process is joined with the caller, blocking
            the code flow. The `True` option is usually used to run a
            stand-alone Hub in an executable script. If `False` (default),
            then the Hub process runs in a separate thread. `False` is
            usually used in a Python shell.
"""
if self._is_running:
raise SAMPHubError("Hub is already running")
if self._lockfile is not None:
raise SAMPHubError("Hub is not running but lockfile is set")
if self._web_profile:
self._start_web_profile_server()
self._start_standard_server()
self._lockfile = create_lock_file(lockfilename=self._customlockfilename,
mode=self._mode, hub_id=self.id,
hub_params=self.params)
self._update_last_activity_time()
self._setup_hub_as_client()
self._start_threads()
log.info("Hub started")
if wait and self._is_running:
self._thread_run.join()
self._thread_run = None
@property
def params(self):
"""
        The hub parameters (which are written to the lock-file).
"""
params = {}
# Keys required by standard profile
params['samp.secret'] = self._hub_secret
params['samp.hub.xmlrpc.url'] = self._url
params['samp.profile.version'] = __profile_version__
# Custom keys
params['hub.id'] = self.id
params['hub.label'] = self._label or "Hub {0}".format(self.id)
return params
def _start_threads(self):
self._thread_run = threading.Thread(target=self._serve_forever)
self._thread_run.daemon = True
if self._timeout > 0:
self._thread_hub_timeout = threading.Thread(
target=self._timeout_test_hub,
name="Hub timeout test")
self._thread_hub_timeout.daemon = True
else:
self._thread_hub_timeout = None
if self._client_timeout > 0:
self._thread_client_timeout = threading.Thread(
target=self._timeout_test_client,
name="Client timeout test")
self._thread_client_timeout.daemon = True
else:
self._thread_client_timeout = None
self._is_running = True
self._thread_run.start()
if self._thread_hub_timeout is not None:
self._thread_hub_timeout.start()
if self._thread_client_timeout is not None:
self._thread_client_timeout.start()
def _create_secret_code(self):
if self._hub_secret_code_customized is not None:
return self._hub_secret_code_customized
else:
return str(uuid.uuid1())
def stop(self):
"""
Stop the current SAMP Hub instance and delete the lock file.
"""
if not self._is_running:
return
log.info("Hub is stopping...")
self._notify_shutdown()
self._is_running = False
if self._lockfile and os.path.isfile(self._lockfile):
lockfiledict = read_lockfile(self._lockfile)
if lockfiledict['samp.secret'] == self._hub_secret:
os.remove(self._lockfile)
self._lockfile = None
# Reset variables
# TODO: What happens if not all threads are stopped after timeout?
self._join_all_threads(timeout=10.)
self._hub_msg_id_counter = 0
self._hub_secret = self._create_secret_code()
self._hub_public_id = ""
self._metadata = {}
self._private_keys = {}
self._mtype2ids = {}
self._id2mtypes = {}
self._xmlrpc_endpoints = {}
self._last_activity_time = None
log.info("Hub stopped.")
def _join_all_threads(self, timeout=None):
# In some cases, ``stop`` may be called from some of the sub-threads,
# so we just need to make sure that we don't try and shut down the
# calling thread.
current_thread = threading.current_thread()
if self._thread_run is not current_thread:
self._thread_run.join(timeout=timeout)
if not self._thread_run.is_alive():
self._thread_run = None
if self._thread_hub_timeout is not None and self._thread_hub_timeout is not current_thread:
self._thread_hub_timeout.join(timeout=timeout)
if not self._thread_hub_timeout.is_alive():
self._thread_hub_timeout = None
if self._thread_client_timeout is not None and self._thread_client_timeout is not current_thread:
self._thread_client_timeout.join(timeout=timeout)
if not self._thread_client_timeout.is_alive():
self._thread_client_timeout = None
self._join_launched_threads(timeout=timeout)
@property
def is_running(self):
"""Return an information concerning the Hub running status.
Returns
-------
running : bool
Is the hub running?
"""
return self._is_running
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self._server.socket], [], [], 0.01)[0]
except OSError as exc:
warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self._server.handle_request()
if self._web_profile:
# We now check if there are any connection requests from the
# web profile, and if so, we initialize the pop-up.
if self._web_profile_dialog is None:
try:
request = self._web_profile_requests_queue.get_nowait()
except queue.Empty:
pass
else:
web_profile_text_dialog(request, self._web_profile_requests_result)
# We now check for requests over the web profile socket, and we
# also update the pop-up in case there are any changes.
try:
read_ready = select.select([self._web_profile_server.socket], [], [], 0.01)[0]
except OSError as exc:
warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self._web_profile_server.handle_request()
self._server.server_close()
if self._web_profile_server is not None:
self._web_profile_server.server_close()
def _notify_shutdown(self):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown")
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
self._notify_(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.shutdown",
"samp.params": {}})
def _notify_register(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.register",
"samp.params": {"id": public_id}})
def _notify_unregister(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.unregister",
"samp.params": {"id": public_id}})
def _notify_metadata(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
# if key != private_key:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.metadata",
"samp.params": {"id": public_id,
"metadata": self._metadata[private_key]}
})
def _notify_subscriptions(self, private_key):
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions")
for mtype in msubs:
if mtype in self._mtype2ids:
public_id = self._private_keys[private_key][0]
for key in self._mtype2ids[mtype]:
self._notify(self._hub_private_key,
self._private_keys[key][0],
{"samp.mtype": "samp.hub.event.subscriptions",
"samp.params": {"id": public_id,
"subscriptions": self._id2mtypes[private_key]}
})
def _notify_disconnection(self, private_key):
def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message):
endpoint.samp.client.receiveNotification(private_key, hub_public_id, message)
msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect")
public_id = self._private_keys[private_key][0]
endpoint = self._xmlrpc_endpoints[public_id][1]
for mtype in msubs:
if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]:
log.debug("notify disconnection to {}".format(public_id))
self._launch_thread(target=_xmlrpc_call_disconnect,
args=(endpoint, private_key,
self._hub_public_id,
{"samp.mtype": "samp.hub.disconnect",
"samp.params": {"reason": "Timeout expired!"}}))
def _ping(self):
self._update_last_activity_time()
log.debug("ping")
return "1"
def _query_by_metadata(self, key, value):
public_id_list = []
for private_id in self._metadata:
if key in self._metadata[private_id]:
if self._metadata[private_id][key] == value:
public_id_list.append(self._private_keys[private_id][0])
return public_id_list
def _set_xmlrpc_callback(self, private_key, xmlrpc_addr):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if private_key == self._hub_private_key:
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = \
(xmlrpc_addr, _HubAsClient(self._hub_as_client_request_handler))
return ""
# Dictionary stored with the public id
log.debug("set_xmlrpc_callback: {} {}".format(private_key,
xmlrpc_addr))
            server_proxy_pool = ServerProxyPool(self._pool_size,
xmlrpc.ServerProxy,
xmlrpc_addr, allow_none=1)
public_id = self._private_keys[private_key][0]
self._xmlrpc_endpoints[public_id] = (xmlrpc_addr,
server_proxy_pool)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _perform_standard_register(self):
with self._thread_lock:
private_key, public_id = self._get_new_ids()
self._private_keys[private_key] = (public_id, time.time())
self._update_last_activity_time(private_key)
self._notify_register(private_key)
log.debug("register: private-key = {} and self-id = {}"
.format(private_key, public_id))
return {"samp.self-id": public_id,
"samp.private-key": private_key,
"samp.hub-id": self._hub_public_id}
def _register(self, secret):
self._update_last_activity_time()
if secret == self._hub_secret:
return self._perform_standard_register()
else:
# return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""}
raise SAMPProxyError(7, "Bad secret code")
def _get_new_ids(self):
private_key = str(uuid.uuid1())
self._client_id_counter += 1
public_id = 'cli#hub'
if self._client_id_counter > 0:
public_id = "cli#{}".format(self._client_id_counter)
return private_key, public_id
def _unregister(self, private_key):
self._update_last_activity_time()
public_key = ""
self._notify_unregister(private_key)
with self._thread_lock:
if private_key in self._private_keys:
public_key = self._private_keys[private_key][0]
del self._private_keys[private_key]
else:
return ""
if private_key in self._metadata:
del self._metadata[private_key]
if private_key in self._id2mtypes:
del self._id2mtypes[private_key]
for mtype in self._mtype2ids.keys():
if private_key in self._mtype2ids[mtype]:
self._mtype2ids[mtype].remove(private_key)
if public_key in self._xmlrpc_endpoints:
del self._xmlrpc_endpoints[public_key]
if private_key in self._client_activity_time:
del self._client_activity_time[private_key]
if self._web_profile:
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
self._web_profile_server.remove_client(private_key)
log.debug("unregister {} ({})".format(public_key, private_key))
return ""
def _declare_metadata(self, private_key, metadata):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug("declare_metadata: private-key = {} metadata = {}"
.format(private_key, str(metadata)))
self._metadata[private_key] = metadata
self._notify_metadata(private_key)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _get_metadata(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
log.debug("get_metadata: private-key = {} client-id = {}"
.format(private_key, client_id))
if client_private_key is not None:
if client_private_key in self._metadata:
log.debug("--> metadata = {}"
.format(self._metadata[client_private_key]))
return self._metadata[client_private_key]
else:
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _declare_subscriptions(self, private_key, mtypes):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
log.debug("declare_subscriptions: private-key = {} mtypes = {}"
.format(private_key, str(mtypes)))
# remove subscription to previous mtypes
if private_key in self._id2mtypes:
prev_mtypes = self._id2mtypes[private_key]
for mtype in prev_mtypes:
try:
self._mtype2ids[mtype].remove(private_key)
except ValueError: # private_key is not in list
pass
self._id2mtypes[private_key] = copy.deepcopy(mtypes)
# remove duplicated MType for wildcard overwriting
original_mtypes = copy.deepcopy(mtypes)
for mtype in original_mtypes:
if mtype.endswith("*"):
for mtype2 in original_mtypes:
if mtype2.startswith(mtype[:-1]) and \
mtype2 != mtype:
if mtype2 in mtypes:
del(mtypes[mtype2])
log.debug("declare_subscriptions: subscriptions accepted from "
"{} => {}".format(private_key, str(mtypes)))
for mtype in mtypes:
if mtype in self._mtype2ids:
if private_key not in self._mtype2ids[mtype]:
self._mtype2ids[mtype].append(private_key)
else:
self._mtype2ids[mtype] = [private_key]
self._notify_subscriptions(private_key)
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _get_subscriptions(self, private_key, client_id):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
client_private_key = self._public_id_to_private_key(client_id)
if client_private_key is not None:
if client_private_key in self._id2mtypes:
log.debug("get_subscriptions: client-id = {} mtypes = {}"
.format(client_id,
str(self._id2mtypes[client_private_key])))
return self._id2mtypes[client_private_key]
else:
log.debug("get_subscriptions: client-id = {} mtypes = "
"missing".format(client_id))
return {}
else:
raise SAMPProxyError(6, "Invalid client ID")
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _get_registered_clients(self, private_key):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
reg_clients = []
for pkey in self._private_keys.keys():
if pkey != private_key:
reg_clients.append(self._private_keys[pkey][0])
log.debug("get_registered_clients: private_key = {} clients = {}"
.format(private_key, reg_clients))
return reg_clients
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _get_subscribed_clients(self, private_key, mtype):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
sub_clients = {}
for pkey in self._private_keys.keys():
if pkey != private_key and self._is_subscribed(pkey, mtype):
sub_clients[self._private_keys[pkey][0]] = {}
log.debug("get_subscribed_clients: private_key = {} mtype = {} "
"clients = {}".format(private_key, mtype, sub_clients))
return sub_clients
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
@staticmethod
def get_mtype_subtypes(mtype):
"""
Return a list containing all the possible wildcarded subtypes of MType.
Parameters
----------
mtype : str
MType to be parsed.
Returns
-------
types : list
List of subtypes
Examples
--------
>>> from astropy.samp import SAMPHubServer
>>> SAMPHubServer.get_mtype_subtypes("samp.app.ping")
['samp.app.ping', 'samp.app.*', 'samp.*', '*']
"""
subtypes = []
msubs = mtype.split(".")
indexes = list(range(len(msubs)))
indexes.reverse()
indexes.append(-1)
for i in indexes:
tmp_mtype = ".".join(msubs[:i + 1])
if tmp_mtype != mtype:
if tmp_mtype != "":
tmp_mtype = tmp_mtype + ".*"
else:
tmp_mtype = "*"
subtypes.append(tmp_mtype)
return subtypes
def _is_subscribed(self, private_key, mtype):
subscribed = False
msubs = SAMPHubServer.get_mtype_subtypes(mtype)
for msub in msubs:
if msub in self._mtype2ids:
if private_key in self._mtype2ids[msub]:
subscribed = True
return subscribed
def _notify(self, private_key, recipient_id, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if self._is_subscribed(self._public_id_to_private_key(recipient_id),
message["samp.mtype"]) is False:
raise SAMPProxyError(2, "Client {} not subscribed to MType {}"
.format(recipient_id, message["samp.mtype"]))
self._launch_thread(target=self._notify_, args=(private_key,
recipient_id,
message))
return {}
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _notify_(self, sender_private_key, recipient_public_id, message):
if sender_private_key not in self._private_keys:
return
sender_public_id = self._private_keys[sender_private_key][0]
try:
log.debug("notify {} from {} to {}".format(
message["samp.mtype"], sender_public_id,
recipient_public_id))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, message)
samp_method_name = "receiveNotification"
self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} notification from client {} to client {} "
"failed [{}]".format(message["samp.mtype"],
sender_public_id,
recipient_public_id, exc),
SAMPWarning)
def _notify_all(self, private_key, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing")
recipient_ids = self._notify_all_(private_key, message)
return recipient_ids
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _notify_all_(self, sender_private_key, message):
recipient_ids = []
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_recipient_id = self._private_keys[key][0]
recipient_ids.append(_recipient_id)
self._launch_thread(target=self._notify,
args=(sender_private_key,
_recipient_id, message)
)
return recipient_ids
def _call(self, private_key, recipient_id, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if self._is_subscribed(self._public_id_to_private_key(recipient_id),
message["samp.mtype"]) is False:
raise SAMPProxyError(2, "Client {} not subscribed to MType {}"
.format(recipient_id, message["samp.mtype"]))
public_id = self._private_keys[private_key][0]
msg_id = self._get_new_hub_msg_id(public_id, msg_tag)
self._launch_thread(target=self._call_, args=(private_key, public_id,
recipient_id, msg_id,
message))
return msg_id
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _call_(self, sender_private_key, sender_public_id,
recipient_public_id, msg_id, message):
if sender_private_key not in self._private_keys:
return
try:
log.debug("call {} from {} to {} ({})".format(
msg_id.split(";;")[0], sender_public_id,
recipient_public_id, message["samp.mtype"]))
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (sender_public_id, msg_id, message)
samp_methodName = "receiveCall"
self._retry_method(recipient_private_key, recipient_public_id, samp_methodName, arg_params)
except Exception as exc:
warnings.warn("{} call {} from client {} to client {} failed "
"[{},{}]".format(message["samp.mtype"],
msg_id.split(";;")[0],
sender_public_id,
recipient_public_id, type(exc), exc),
SAMPWarning)
def _call_all(self, private_key, msg_tag, message):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
if "samp.mtype" not in message:
raise SAMPProxyError(3, "samp.mtype keyword is missing in "
"message tagged as {}".format(msg_tag))
public_id = self._private_keys[private_key][0]
msg_id = self._call_all_(private_key, public_id, msg_tag, message)
return msg_id
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _call_all_(self, sender_private_key, sender_public_id, msg_tag,
message):
msg_id = {}
msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"])
for mtype in msubs:
if mtype in self._mtype2ids:
for key in self._mtype2ids[mtype]:
if key != sender_private_key:
_msg_id = self._get_new_hub_msg_id(sender_public_id,
msg_tag)
receiver_public_id = self._private_keys[key][0]
msg_id[receiver_public_id] = _msg_id
self._launch_thread(target=self._call_,
args=(sender_private_key,
sender_public_id,
receiver_public_id, _msg_id,
message))
return msg_id
def _call_and_wait(self, private_key, recipient_id, message, timeout):
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
timeout = int(timeout)
now = time.time()
response = {}
msg_id = self._call(private_key, recipient_id, "samp::sync::call",
message)
self._sync_msg_ids_heap[msg_id] = None
while self._is_running:
if 0 < timeout <= time.time() - now:
del(self._sync_msg_ids_heap[msg_id])
raise SAMPProxyError(1, "Timeout expired!")
if self._sync_msg_ids_heap[msg_id] is not None:
response = copy.deepcopy(self._sync_msg_ids_heap[msg_id])
del(self._sync_msg_ids_heap[msg_id])
break
time.sleep(0.01)
return response
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
def _reply(self, private_key, msg_id, response):
"""
The main method that gets called for replying. This starts up an
asynchronous reply thread and returns.
"""
self._update_last_activity_time(private_key)
if private_key in self._private_keys:
self._launch_thread(target=self._reply_, args=(private_key, msg_id,
response))
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return {}
def _reply_(self, responder_private_key, msg_id, response):
if responder_private_key not in self._private_keys or not msg_id:
return
responder_public_id = self._private_keys[responder_private_key][0]
counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split(";;", 3)
try:
log.debug("reply {} from {} to {}".format(
counter, responder_public_id, recipient_public_id))
if recipient_msg_tag == "samp::sync::call":
if msg_id in self._sync_msg_ids_heap.keys():
self._sync_msg_ids_heap[msg_id] = response
else:
recipient_private_key = self._public_id_to_private_key(recipient_public_id)
arg_params = (responder_public_id, recipient_msg_tag, response)
samp_method_name = "receiveResponse"
self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params)
except Exception as exc:
warnings.warn("{} reply from client {} to client {} failed [{}]"
.format(recipient_msg_tag, responder_public_id,
recipient_public_id, exc),
SAMPWarning)
def _retry_method(self, recipient_private_key, recipient_public_id, samp_method_name, arg_params):
"""
This method is used to retry a SAMP call several times.
Parameters
----------
recipient_private_key
The private key of the receiver of the call
        recipient_public_id
            The public ID of the receiver of the call
samp_method_name : str
The name of the SAMP method to call
arg_params : tuple
Any additional arguments to be passed to the SAMP method
"""
if recipient_private_key is None:
raise SAMPHubError("Invalid client ID")
from . import conf
for attempt in range(conf.n_retries):
if not self._is_running:
time.sleep(0.01)
continue
try:
if (self._web_profile and
recipient_private_key in self._web_profile_callbacks):
# Web Profile
callback = {"samp.methodName": samp_method_name,
"samp.params": arg_params}
self._web_profile_callbacks[recipient_private_key].put(callback)
else:
# Standard Profile
hub = self._xmlrpc_endpoints[recipient_public_id][1]
getattr(hub.samp.client, samp_method_name)(recipient_private_key, *arg_params)
except xmlrpc.Fault as exc:
log.debug("{} XML-RPC endpoint error (attempt {}): {}"
.format(recipient_public_id, attempt + 1,
exc.faultString))
time.sleep(0.01)
else:
return
# If we are here, then the above attempts failed
        error_message = (samp_method_name + " failed after "
                         + str(conf.n_retries) + " attempts")
raise SAMPHubError(error_message)
def _public_id_to_private_key(self, public_id):
for private_key in self._private_keys.keys():
if self._private_keys[private_key][0] == public_id:
return private_key
return None
def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id):
with self._thread_lock:
self._hub_msg_id_counter += 1
return "msg#{};;{};;{};;{}".format(self._hub_msg_id_counter,
self._hub_public_id,
sender_public_id, sender_msg_id)
def _update_last_activity_time(self, private_key=None):
with self._thread_lock:
self._last_activity_time = time.time()
if private_key is not None:
self._client_activity_time[private_key] = time.time()
def _receive_notification(self, private_key, sender_id, message):
return ""
def _receive_call(self, private_key, sender_id, msg_id, message):
if private_key == self._hub_private_key:
if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping":
self._reply(self._hub_private_key, msg_id,
{"samp.status": SAMP_STATUS_OK, "samp.result": {}})
elif ("samp.mtype" in message and
(message["samp.mtype"] == "x-samp.query.by-meta" or
message["samp.mtype"] == "samp.query.by-meta")):
ids_list = self._query_by_metadata(message["samp.params"]["key"],
message["samp.params"]["value"])
self._reply(self._hub_private_key, msg_id,
{"samp.status": SAMP_STATUS_OK,
"samp.result": {"ids": ids_list}})
return ""
else:
return ""
def _receive_response(self, private_key, responder_id, msg_tag, response):
return ""
def _web_profile_register(self, identity_info,
client_address=("unknown", 0),
origin="unknown"):
self._update_last_activity_time()
if not client_address[0] in ["localhost", "127.0.0.1"]:
raise SAMPProxyError(403, "Request of registration rejected "
"by the Hub.")
if not origin:
origin = "unknown"
if isinstance(identity_info, dict):
# an old version of the protocol provided just a string with the app name
if "samp.name" not in identity_info:
raise SAMPProxyError(403, "Request of registration rejected "
"by the Hub (application name not "
"provided).")
# Red semaphore for the other threads
self._web_profile_requests_semaphore.put("wait")
# Set the request to be displayed for the current thread
self._web_profile_requests_queue.put((identity_info, client_address,
origin))
# Get the popup dialogue response
response = self._web_profile_requests_result.get()
# OK, semaphore green
self._web_profile_requests_semaphore.get()
if response:
register_map = self._perform_standard_register()
translator_url = ("http://localhost:{}/translator/{}?ref="
.format(self._web_port, register_map["samp.private-key"]))
register_map["samp.url-translator"] = translator_url
self._web_profile_server.add_client(register_map["samp.private-key"])
return register_map
else:
raise SAMPProxyError(403, "Request of registration rejected by "
"the user.")
def _web_profile_allowReverseCallbacks(self, private_key, allow):
self._update_last_activity_time()
if private_key in self._private_keys:
if allow == "0":
if private_key in self._web_profile_callbacks:
del self._web_profile_callbacks[private_key]
else:
self._web_profile_callbacks[private_key] = queue.Queue()
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
return ""
def _web_profile_pullCallbacks(self, private_key, timeout_secs):
self._update_last_activity_time()
if private_key in self._private_keys:
callback = []
callback_queue = self._web_profile_callbacks[private_key]
try:
while self._is_running:
item_queued = callback_queue.get_nowait()
callback.append(item_queued)
except queue.Empty:
pass
return callback
else:
raise SAMPProxyError(5, "Private-key {} expired or invalid."
.format(private_key))
class WebProfileDialog:
"""
A base class to make writing Web Profile GUI consent dialogs
easier.
The concrete class must:
1) Poll ``handle_queue`` periodically, using the timer services
of the GUI's event loop. This function will call
``self.show_dialog`` when a request requires authorization.
``self.show_dialog`` will be given the arguments:
- ``samp_name``: The name of the application making the request.
- ``details``: A dictionary of details about the client
making the request.
- ``client``: A hostname, port pair containing the client
address.
- ``origin``: A string containing the origin of the
request.
2) Call ``consent`` or ``reject`` based on the user's response to
the dialog.
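    A minimal, purely illustrative subclass that accepts every request
    without prompting the user could look like::
        class AutoConsentDialog(WebProfileDialog):
            def poll(self):
                # to be called periodically from the GUI event loop
                self.handle_queue()
            def show_dialog(self, samp_name, details, client, origin):
                # a real dialog would ask the user; here we always accept
                self.consent()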
"""
def handle_queue(self):
try:
request = self.queue_request.get_nowait()
except queue.Empty: # queue is set but empty
pass
except AttributeError: # queue has not been set yet
pass
else:
if isinstance(request[0], str): # To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
self.show_dialog(samp_name, request[0], request[1], request[2])
def consent(self):
self.queue_result.put(True)
def reject(self):
self.queue_result.put(False)
|
1513a729a2ebb529b7bbd2d6938e632d23554e2d0f788d171fb949010f5c7b41 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TODO: this file should be refactored to use a more thread-safe and
# race-condition-safe lockfile mechanism.
import datetime
import os
import socket
import stat
import warnings
from contextlib import suppress
from urllib.parse import urlparse
import xmlrpc.client as xmlrpc
from ..config.paths import _find_home
from .. import log
from ..utils.data import get_readable_fileobj
from .errors import SAMPHubError, SAMPWarning
def read_lockfile(lockfilename):
"""
Read in the lockfile given by ``lockfilename`` into a dictionary.
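    A lock-file consists of ``#`` comment lines plus ``<token-name>=<value>``
    lines, for example (the values below are purely illustrative)::
        # SAMP lockfile written on 2018-01-01T00:00:00
        # Standard Profile required keys
        samp.secret=mysecret
        samp.hub.xmlrpc.url=http://127.0.0.1:40000
        samp.profile.version=1.3
    which this function returns as a plain ``{token-name: value}`` dictionary.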
"""
# lockfilename may be a local file or a remote URL, but
# get_readable_fileobj takes care of this.
lockfiledict = {}
with get_readable_fileobj(lockfilename) as f:
for line in f:
if not line.startswith("#"):
kw, val = line.split("=")
lockfiledict[kw.strip()] = val.strip()
return lockfiledict
def write_lockfile(lockfilename, lockfiledict):
lockfile = open(lockfilename, "w")
lockfile.close()
os.chmod(lockfilename, stat.S_IREAD + stat.S_IWRITE)
lockfile = open(lockfilename, "w")
now_iso = datetime.datetime.now().isoformat()
lockfile.write("# SAMP lockfile written on {}\n".format(now_iso))
lockfile.write("# Standard Profile required keys\n")
for key, value in lockfiledict.items():
lockfile.write("{0}={1}\n".format(key, value))
lockfile.close()
def create_lock_file(lockfilename=None, mode=None, hub_id=None,
hub_params=None):
# Remove lock-files of dead hubs
remove_garbage_lock_files()
lockfiledir = ""
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
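    # (an illustrative value would be e.g.
    #  SAMP_HUB=std-lockurl:file:///home/user/.samp
    #  i.e. the "std-lockurl:" prefix followed by a file:// URL to the lock-file)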
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the standard profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:"):]
lockfile_parsed = urlparse(lockfilename)
if lockfile_parsed[0] != 'file':
warnings.warn("Unable to start a Hub with lockfile {}. "
"Start-up process aborted.".format(lockfilename),
SAMPWarning)
return False
else:
lockfilename = lockfile_parsed[2]
else:
# If it is a fresh Hub instance
if lockfilename is None:
log.debug("Running mode: " + mode)
if mode == 'single':
lockfilename = os.path.join(_find_home(), ".samp")
else:
lockfiledir = os.path.join(_find_home(), ".samp-1")
# If missing create .samp-1 directory
try:
os.mkdir(lockfiledir)
except OSError:
pass # directory already exists
finally:
os.chmod(lockfiledir,
stat.S_IREAD + stat.S_IWRITE + stat.S_IEXEC)
lockfilename = os.path.join(lockfiledir,
"samp-hub-{}".format(hub_id))
else:
log.debug("Running mode: multiple")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
warnings.warn("Another SAMP Hub is already running. Start-up process "
"aborted.", SAMPWarning)
return False
log.debug("Lock-file: " + lockfilename)
write_lockfile(lockfilename, hub_params)
return lockfilename
def get_main_running_hub():
"""
Get either the hub given by the environment variable SAMP_HUB, or the one
given by the lockfile .samp in the user home directory.
"""
hubs = get_running_hubs()
if not hubs:
raise SAMPHubError("Unable to find a running SAMP Hub.")
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the standard profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:"):]
else:
raise SAMPHubError("SAMP Hub profile not supported.")
else:
lockfilename = os.path.join(_find_home(), ".samp")
return hubs[lockfilename]
def get_running_hubs():
"""
Return a dictionary containing the lock-file contents of all the currently
running hubs (single and/or multiple mode).
The dictionary format is:
``{<lock-file>: {<token-name>: <token-string>, ...}, ...}``
where ``{<lock-file>}`` is the lock-file name, ``{<token-name>}`` and
``{<token-string>}`` are the lock-file tokens (name and content).
Returns
-------
running_hubs : dict
Lock-file contents of all the currently running hubs.
"""
hubs = {}
lockfilename = ""
# HUB SINGLE INSTANCE MODE
# CHECK FOR SAMP_HUB ENVIRONMENT VARIABLE
if "SAMP_HUB" in os.environ:
        # For the time being, assume only the standard profile is supported.
if os.environ["SAMP_HUB"].startswith("std-lockurl:"):
lockfilename = os.environ["SAMP_HUB"][len("std-lockurl:"):]
else:
lockfilename = os.path.join(_find_home(), ".samp")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
hubs[lockfilename] = lockfiledict
# HUB MULTIPLE INSTANCE MODE
lockfiledir = ""
lockfiledir = os.path.join(_find_home(), ".samp-1")
if os.path.isdir(lockfiledir):
for filename in os.listdir(lockfiledir):
if filename.startswith('samp-hub'):
lockfilename = os.path.join(lockfiledir, filename)
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if hub_is_running:
hubs[lockfilename] = lockfiledict
return hubs
def check_running_hub(lockfilename):
"""
Test whether a hub identified by ``lockfilename`` is running or not.
Parameters
----------
lockfilename : str
Lock-file name (path + file name) of the Hub to be tested.
Returns
-------
is_running : bool
Whether the hub is running
hub_params : dict
If the hub is running this contains the parameters from the lockfile
"""
is_running = False
lockfiledict = {}
# Check whether a lockfile already exists
try:
lockfiledict = read_lockfile(lockfilename)
except OSError:
return is_running, lockfiledict
if "samp.hub.xmlrpc.url" in lockfiledict:
try:
proxy = xmlrpc.ServerProxy(lockfiledict["samp.hub.xmlrpc.url"]
.replace("\\", ""), allow_none=1)
proxy.samp.hub.ping()
is_running = True
except xmlrpc.ProtocolError:
# There is a protocol error (e.g. for authentication required),
# but the server is alive
is_running = True
except socket.error:
pass
return is_running, lockfiledict
def remove_garbage_lock_files():
lockfilename = ""
# HUB SINGLE INSTANCE MODE
lockfilename = os.path.join(_find_home(), ".samp")
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if not hub_is_running:
# If lockfilename belongs to a dead hub, then it is deleted
if os.path.isfile(lockfilename):
with suppress(OSError):
os.remove(lockfilename)
# HUB MULTIPLE INSTANCE MODE
lockfiledir = os.path.join(_find_home(), ".samp-1")
if os.path.isdir(lockfiledir):
for filename in os.listdir(lockfiledir):
if filename.startswith('samp-hub'):
lockfilename = os.path.join(lockfiledir, filename)
hub_is_running, lockfiledict = check_running_hub(lockfilename)
if not hub_is_running:
# If lockfilename belongs to a dead hub, then it is deleted
if os.path.isfile(lockfilename):
with suppress(OSError):
os.remove(lockfilename)
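# Illustrative sketch (not part of the original module) of how these helpers
# fit together: list the hubs advertised by lockfiles and check whether each
# one still responds. It assumes the Standard Profile lockfile locations used
# above; the printed token is the usual ``samp.hub.xmlrpc.url`` entry.
if __name__ == "__main__":
    for hub_lockfile, tokens in get_running_hubs().items():
        responding, _ = check_running_hub(hub_lockfile)
        print("Hub lockfile: {}".format(hub_lockfile))
        print("  responding: {}".format(responding))
        print("  XML-RPC URL: {}".format(tokens.get("samp.hub.xmlrpc.url")))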
|
ca15eb1658e3b872c90d62c8b2568d2374e204a06da87d8ada35f3d60e77b5b8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import time
import sys
import argparse
from .. import log, __version__
from .hub import SAMPHubServer
__all__ = ['hub_script']
def hub_script(timeout=0):
"""
This main function is executed by the ``samp_hub`` command line tool.
"""
parser = argparse.ArgumentParser(prog="samp_hub " + __version__)
parser.add_argument("-k", "--secret", dest="secret", metavar="CODE",
help="custom secret code.")
parser.add_argument("-d", "--addr", dest="addr", metavar="ADDR",
help="listening address (or IP).")
parser.add_argument("-p", "--port", dest="port", metavar="PORT", type=int,
help="listening port number.")
parser.add_argument("-f", "--lockfile", dest="lockfile", metavar="FILE",
help="custom lockfile.")
parser.add_argument("-w", "--no-web-profile", dest="web_profile", action="store_false",
help="run the Hub disabling the Web Profile.", default=True)
parser.add_argument("-P", "--pool-size", dest="pool_size", metavar="SIZE", type=int,
help="the socket connections pool size.", default=20)
    timeout_group = parser.add_argument_group(
        "Timeout group",
        "Special options to set up the Hub and client inactivity timeouts, "
        "that is, the time interval of inactivity after which the Hub shuts "
        "down or unregisters a client. A notification with the "
        "samp.hub.disconnect MType is sent to any client that is forcibly "
        "unregistered because its timeout expired.")
timeout_group.add_argument("-t", "--timeout", dest="timeout", metavar="SECONDS",
help="set the Hub inactivity timeout in SECONDS. By default it "
"is set to 0, that is the Hub never expires.", type=int, default=0)
timeout_group.add_argument("-c", "--client-timeout", dest="client_timeout", metavar="SECONDS",
help="set the client inactivity timeout in SECONDS. By default it "
"is set to 0, that is the client never expires.", type=int, default=0)
    log_group = parser.add_argument_group(
        "Logging options",
        "Additional options to customize the logging output. By default the "
        "SAMP Hub prints INFO level logging messages to the standard output "
        "and standard error streams. The options below can be used to change "
        "the logging level and to redirect the logging messages to a file.")
log_group.add_argument("-L", "--log-level", dest="loglevel", metavar="LEVEL",
help="set the Hub instance log level (OFF, ERROR, WARNING, INFO, DEBUG).",
type=str, choices=["OFF", "ERROR", "WARNING", "INFO", "DEBUG"], default='INFO')
log_group.add_argument("-O", "--log-output", dest="logout", metavar="FILE",
help="set the output file for the log messages.", default="")
    adv_group = parser.add_argument_group(
        "Advanced group",
        "Advanced options intended to facilitate administrative tasks and to "
        "allow non-standard Hub behaviors. In particular, the --label option "
        "assigns a value to the hub.label token, giving a name to the Hub "
        "instance. The --multi option starts the Hub in multi-instance mode, "
        "a non-standard behavior that allows multiple Hubs to run at the "
        "same time. Multi-instance hubs place their non-standard lock-files "
        "within the <home directory>/.samp-1 directory, naming them using "
        "the format samp-hub-<PID>-<ID>, where PID is the Hub process ID "
        "and ID is an internal integer ID.")
adv_group.add_argument("-l", "--label", dest="label", metavar="LABEL",
help="assign a LABEL to the Hub.", default="")
adv_group.add_argument("-m", "--multi", dest="mode",
help="run the Hub in multi-instance mode generating a custom "
"lockfile with a random name.",
action="store_const", const='multiple', default='single')
options = parser.parse_args()
try:
if options.loglevel in ("OFF", "ERROR", "WARNING", "DEBUG", "INFO"):
log.setLevel(options.loglevel)
if options.logout != "":
context = log.log_to_file(options.logout)
else:
class dummy_context:
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
context = dummy_context()
with context:
args = copy.deepcopy(options.__dict__)
            del args["loglevel"]
            del args["logout"]
hub = SAMPHubServer(**args)
hub.start(False)
if not timeout:
while hub.is_running:
time.sleep(0.01)
else:
time.sleep(timeout)
hub.stop()
except KeyboardInterrupt:
try:
hub.stop()
except NameError:
pass
except OSError as e:
print("[SAMP] Error: I/O error({0}): {1}".format(e.errno, e.strerror))
sys.exit(1)
except SystemExit:
pass
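# Illustrative sketch (not part of the original script): the SAMPHubServer
# driven by hub_script() can also be started programmatically. The disabled
# web profile and the one-second lifetime below are assumptions chosen only
# to keep the example short.
if __name__ == "__main__":
    demo_hub = SAMPHubServer(web_profile=False)
    demo_hub.start(False)   # non-blocking start, as in hub_script()
    time.sleep(1)           # the hub runs for one second in this demo
    demo_hub.stop()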
|
c692d281d305330486ada8b88459bdbe028ffe8c7a53fb242cff087fc4a158f4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines custom errors and exceptions used in `astropy.samp`.
"""
import xmlrpc.client as xmlrpc
from ..utils.exceptions import AstropyUserWarning
__all__ = ['SAMPWarning', 'SAMPHubError', 'SAMPClientError', 'SAMPProxyError']
class SAMPWarning(AstropyUserWarning):
"""
SAMP-specific Astropy warning class
"""
class SAMPHubError(Exception):
"""
SAMP Hub exception.
"""
class SAMPClientError(Exception):
"""
SAMP Client exceptions.
"""
class SAMPProxyError(xmlrpc.Fault):
"""
SAMP Proxy Hub exception
"""
|
30f5e1d868844e138c342963522851e4b3f928adc7d0ca0cdf59b2701f792035 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from urllib.parse import parse_qs
from urllib.request import urlopen
from ..utils.data import get_pkg_data_contents
from .standard_profile import (SAMPSimpleXMLRPCRequestHandler,
ThreadingXMLRPCServer)
__all__ = []
CROSS_DOMAIN = get_pkg_data_contents('data/crossdomain.xml')
CLIENT_ACCESS_POLICY = get_pkg_data_contents('data/clientaccesspolicy.xml')
class WebProfileRequestHandler(SAMPSimpleXMLRPCRequestHandler):
"""
Handler of XMLRPC requests performed through the Web Profile.
"""
def _send_CORS_header(self):
if self.headers.get('Origin') is not None:
method = self.headers.get('Access-Control-Request-Method')
if method and self.command == "OPTIONS":
# Preflight method
self.send_header('Content-Length', '0')
self.send_header('Access-Control-Allow-Origin',
self.headers.get('Origin'))
self.send_header('Access-Control-Allow-Methods', method)
self.send_header('Access-Control-Allow-Headers', 'Content-Type')
self.send_header('Access-Control-Allow-Credentials', 'true')
else:
# Simple method
self.send_header('Access-Control-Allow-Origin',
self.headers.get('Origin'))
self.send_header('Access-Control-Allow-Headers', 'Content-Type')
self.send_header('Access-Control-Allow-Credentials', 'true')
def end_headers(self):
self._send_CORS_header()
SAMPSimpleXMLRPCRequestHandler.end_headers(self)
def _serve_cross_domain_xml(self):
cross_domain = False
if self.path == "/crossdomain.xml":
# Adobe standard
response = CROSS_DOMAIN
self.send_response(200, 'OK')
self.send_header('Content-Type', 'text/x-cross-domain-policy')
self.send_header("Content-Length", "{0}".format(len(response)))
self.end_headers()
self.wfile.write(response.encode('utf-8'))
self.wfile.flush()
cross_domain = True
elif self.path == "/clientaccesspolicy.xml":
# Microsoft standard
response = CLIENT_ACCESS_POLICY
self.send_response(200, 'OK')
self.send_header('Content-Type', 'text/xml')
self.send_header("Content-Length", "{0}".format(len(response)))
self.end_headers()
self.wfile.write(response.encode('utf-8'))
self.wfile.flush()
cross_domain = True
return cross_domain
def do_POST(self):
if self._serve_cross_domain_xml():
return
return SAMPSimpleXMLRPCRequestHandler.do_POST(self)
def do_HEAD(self):
if not self.is_http_path_valid():
self.report_404()
return
if self._serve_cross_domain_xml():
return
def do_OPTIONS(self):
self.send_response(200, 'OK')
self.end_headers()
def do_GET(self):
if not self.is_http_path_valid():
self.report_404()
return
split_path = self.path.split('?')
if split_path[0] in ['/translator/{}'.format(clid) for clid in self.server.clients]:
# Request of a file proxying
urlpath = parse_qs(split_path[1])
try:
proxyfile = urlopen(urlpath["ref"][0])
self.send_response(200, 'OK')
self.end_headers()
self.wfile.write(proxyfile.read())
proxyfile.close()
except OSError:
self.report_404()
return
if self._serve_cross_domain_xml():
return
def is_http_path_valid(self):
valid_paths = (["/clientaccesspolicy.xml", "/crossdomain.xml"] +
['/translator/{}'.format(clid) for clid in self.server.clients])
return self.path.split('?')[0] in valid_paths
class WebProfileXMLRPCServer(ThreadingXMLRPCServer):
"""
XMLRPC server supporting the SAMP Web Profile.
"""
def __init__(self, addr, log=None, requestHandler=WebProfileRequestHandler,
logRequests=True, allow_none=True, encoding=None):
self.clients = []
ThreadingXMLRPCServer.__init__(self, addr, log, requestHandler,
logRequests, allow_none, encoding)
def add_client(self, client_id):
self.clients.append(client_id)
def remove_client(self, client_id):
try:
self.clients.remove(client_id)
except ValueError:
# No warning here because this method gets called for all clients,
# not just web clients, and we expect it to fail for non-web
# clients.
pass
def web_profile_text_dialog(request, queue):
samp_name = "unknown"
if isinstance(request[0], str):
# To support the old protocol version
samp_name = request[0]
else:
samp_name = request[0]["samp.name"]
text = \
"""A Web application which declares to be
Name: {}
Origin: {}
is requesting to be registered with the SAMP Hub.
Note that if you permit its registration, the application will
acquire all current user privileges, such as read/write access
to your files.
Do you give your consent? [yes|no]""".format(samp_name, request[2])
print(text)
answer = input(">>> ")
queue.put(answer.lower() in ["yes", "y"])
|
9d9475e638e9a55ef2f476ded6d6a1defda8a6d54604c5da57959ca024610ba1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .client import SAMPClient
from .hub_proxy import SAMPHubProxy
__all__ = ['SAMPIntegratedClient']
__doctest_skip__ = ['SAMPIntegratedClient.*']
class SAMPIntegratedClient:
"""
A Simple SAMP client.
    This class is meant to simplify client usage by providing a proxy class
    that merges the :class:`~astropy.samp.SAMPClient` and
    :class:`~astropy.samp.SAMPHubProxy` functionality into a
    simplified API.
Parameters
----------
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, name=None, description=None, metadata=None,
addr=None, port=0, callable=True):
self.hub = SAMPHubProxy()
self.client_arguments = {
'name': name,
'description': description,
'metadata': metadata,
'addr': addr,
'port': port,
'callable': callable,
}
"""
Collected arguments that should be passed on to the SAMPClient below.
        The SAMPClient used to be instantiated in __init__; however, this
        caused problems with disconnecting and reconnecting to the Hub.
        Storing the arguments here maintains backwards compatibility.
"""
self.client = None
"The client will be instantiated upon connect()."
# GENERAL
@property
def is_connected(self):
"""
        Whether the client is connected to a running SAMP Hub.
Returns
-------
is_connected : bool
True if the client is connected to a Hub, False otherwise.
"""
return self.hub.is_connected and self.client.is_running
def connect(self, hub=None, hub_params=None, pool_size=20):
"""
Connect with the current or specified SAMP Hub, start and register the
client.
Parameters
----------
hub : `~astropy.samp.SAMPHubServer`, optional
The hub to connect to.
hub_params : dict, optional
Optional dictionary containing the lock-file content of the Hub
with which to connect. This dictionary has the form
``{<token-name>: <token-string>, ...}``.
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self.hub.connect(hub, hub_params, pool_size)
# The client has to be instantiated here and not in __init__() because
# this allows disconnecting and reconnecting to the HUB. Nonetheless,
# the client_arguments are set in __init__() because the
# instantiation of the client used to happen there and this retains
# backwards compatibility.
self.client = SAMPClient(
self.hub,
**self.client_arguments
)
self.client.start()
self.client.register()
def disconnect(self):
"""
Unregister the client from the current SAMP Hub, stop the client and
disconnect from the Hub.
"""
if self.is_connected:
try:
self.client.unregister()
finally:
if self.client.is_running:
self.client.stop()
self.hub.disconnect()
# HUB
def ping(self):
"""
Proxy to ``ping`` SAMP Hub method (Standard Profile only).
"""
return self.hub.ping()
def declare_metadata(self, metadata):
"""
Proxy to ``declareMetadata`` SAMP Hub method.
"""
return self.client.declare_metadata(metadata)
def get_metadata(self, client_id):
"""
Proxy to ``getMetadata`` SAMP Hub method.
"""
return self.hub.get_metadata(self.get_private_key(), client_id)
def get_subscriptions(self, client_id):
"""
Proxy to ``getSubscriptions`` SAMP Hub method.
"""
return self.hub.get_subscriptions(self.get_private_key(), client_id)
def get_registered_clients(self):
"""
Proxy to ``getRegisteredClients`` SAMP Hub method.
This returns all the registered clients, excluding the current client.
"""
return self.hub.get_registered_clients(self.get_private_key())
def get_subscribed_clients(self, mtype):
"""
Proxy to ``getSubscribedClients`` SAMP Hub method.
"""
return self.hub.get_subscribed_clients(self.get_private_key(), mtype)
def _format_easy_msg(self, mtype, params):
msg = {}
if "extra_kws" in params:
extra = params["extra_kws"]
            del params["extra_kws"]
msg = {"samp.mtype": mtype, "samp.params": params}
msg.update(extra)
else:
msg = {"samp.mtype": mtype, "samp.params": params}
return msg
def notify(self, recipient_id, message):
"""
Proxy to ``notify`` SAMP Hub method.
"""
return self.hub.notify(self.get_private_key(), recipient_id, message)
def enotify(self, recipient_id, mtype, **params):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.notify`.
        This is a proxy to the ``notify`` method that allows the notification
        message to be sent in a simplified way.
        Note that the reserved ``extra_kws`` keyword is a dictionary whose
        entries are added to the message as extra keywords, in addition to
        the standard ``samp.mtype`` and ``samp.params``.
Parameters
----------
recipient_id : str
Recipient ID
mtype : str
the MType to be notified
params : dict or set of keywords
Variable keyword set which contains the list of parameters for the
specified MType.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient
>>> cli = SAMPIntegratedClient()
>>> ...
>>> cli.enotify("samp.msg.progress", msgid = "xyz", txt = "initialization",
... percent = "10", extra_kws = {"my.extra.info": "just an example"})
"""
return self.notify(recipient_id, self._format_easy_msg(mtype, params))
def notify_all(self, message):
"""
Proxy to ``notifyAll`` SAMP Hub method.
"""
return self.hub.notify_all(self.get_private_key(), message)
def enotify_all(self, mtype, **params):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.notify_all`.
        This is a proxy to the ``notifyAll`` method that allows the
        notification message to be sent in a simplified way.
        Note that the reserved ``extra_kws`` keyword is a dictionary whose
        entries are added to the message as extra keywords, in addition to
        the standard ``samp.mtype`` and ``samp.params``.
Parameters
----------
mtype : str
MType to be notified.
params : dict or set of keywords
Variable keyword set which contains the list of parameters for
the specified MType.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient
>>> cli = SAMPIntegratedClient()
>>> ...
>>> cli.enotify_all("samp.msg.progress", txt = "initialization",
... percent = "10",
... extra_kws = {"my.extra.info": "just an example"})
"""
return self.notify_all(self._format_easy_msg(mtype, params))
def call(self, recipient_id, msg_tag, message):
"""
Proxy to ``call`` SAMP Hub method.
"""
return self.hub.call(self.get_private_key(), recipient_id, msg_tag, message)
def ecall(self, recipient_id, msg_tag, mtype, **params):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.call`.
        This is a proxy to the ``call`` method that allows a call message to
        be sent in a simplified way.
        Note that the reserved ``extra_kws`` keyword is a dictionary whose
        entries are added to the message as extra keywords, in addition to
        the standard ``samp.mtype`` and ``samp.params``.
Parameters
----------
recipient_id : str
Recipient ID
msg_tag : str
Message tag to use
mtype : str
MType to be sent
        params : dict or set of keywords
Variable keyword set which contains the list of parameters for
the specified MType.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient
>>> cli = SAMPIntegratedClient()
>>> ...
>>> msgid = cli.ecall("abc", "xyz", "samp.msg.progress",
... txt = "initialization", percent = "10",
... extra_kws = {"my.extra.info": "just an example"})
"""
return self.call(recipient_id, msg_tag, self._format_easy_msg(mtype, params))
def call_all(self, msg_tag, message):
"""
Proxy to ``callAll`` SAMP Hub method.
"""
return self.hub.call_all(self.get_private_key(), msg_tag, message)
def ecall_all(self, msg_tag, mtype, **params):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.call_all`.
        This is a proxy to the ``callAll`` method that allows a call message
        to be sent to all clients in a simplified way.
        Note that the reserved ``extra_kws`` keyword is a dictionary whose
        entries are added to the message as extra keywords, in addition to
        the standard ``samp.mtype`` and ``samp.params``.
Parameters
----------
msg_tag : str
Message tag to use
mtype : str
MType to be sent
        params : dict or set of keywords
Variable keyword set which contains the list of parameters for
the specified MType.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient
>>> cli = SAMPIntegratedClient()
>>> ...
>>> msgid = cli.ecall_all("xyz", "samp.msg.progress",
... txt = "initialization", percent = "10",
... extra_kws = {"my.extra.info": "just an example"})
"""
        return self.call_all(msg_tag, self._format_easy_msg(mtype, params))
def call_and_wait(self, recipient_id, message, timeout):
"""
Proxy to ``callAndWait`` SAMP Hub method.
"""
return self.hub.call_and_wait(self.get_private_key(), recipient_id, message, timeout)
def ecall_and_wait(self, recipient_id, mtype, timeout, **params):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.call_and_wait`.
        This is a proxy to the ``callAndWait`` method that allows a call
        message to be sent in a simplified way.
        Note that the reserved ``extra_kws`` keyword is a dictionary whose
        entries are added to the message as extra keywords, in addition to
        the standard ``samp.mtype`` and ``samp.params``.
Parameters
----------
recipient_id : str
Recipient ID
mtype : str
MType to be sent
timeout : str
Call timeout in seconds
        params : dict or set of keywords
Variable keyword set which contains the list of parameters for
the specified MType.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient
>>> cli = SAMPIntegratedClient()
>>> ...
>>> cli.ecall_and_wait("xyz", "samp.msg.progress", "5",
... txt = "initialization", percent = "10",
... extra_kws = {"my.extra.info": "just an example"})
"""
return self.call_and_wait(recipient_id, self._format_easy_msg(mtype, params), timeout)
def reply(self, msg_id, response):
"""
Proxy to ``reply`` SAMP Hub method.
"""
return self.hub.reply(self.get_private_key(), msg_id, response)
def _format_easy_response(self, status, result, error):
msg = {"samp.status": status}
if result is not None:
msg.update({"samp.result": result})
if error is not None:
msg.update({"samp.error": error})
return msg
def ereply(self, msg_id, status, result=None, error=None):
"""
Easy to use version of :meth:`~astropy.samp.integrated_client.SAMPIntegratedClient.reply`.
        This is a proxy to the ``reply`` method that allows a reply message
        to be sent in a simplified way.
Parameters
----------
msg_id : str
Message ID to which reply.
status : str
Content of the ``samp.status`` response keyword.
result : dict
Content of the ``samp.result`` response keyword.
error : dict
Content of the ``samp.error`` response keyword.
Examples
--------
>>> from astropy.samp import SAMPIntegratedClient, SAMP_STATUS_ERROR
>>> cli = SAMPIntegratedClient()
>>> ...
>>> cli.ereply("abd", SAMP_STATUS_ERROR, result={},
... error={"samp.errortxt": "Test error message"})
"""
return self.reply(msg_id, self._format_easy_response(status, result, error))
# CLIENT
def receive_notification(self, private_key, sender_id, message):
return self.client.receive_notification(private_key, sender_id, message)
receive_notification.__doc__ = SAMPClient.receive_notification.__doc__
def receive_call(self, private_key, sender_id, msg_id, message):
return self.client.receive_call(private_key, sender_id, msg_id, message)
receive_call.__doc__ = SAMPClient.receive_call.__doc__
def receive_response(self, private_key, responder_id, msg_tag, response):
return self.client.receive_response(private_key, responder_id, msg_tag, response)
receive_response.__doc__ = SAMPClient.receive_response.__doc__
def bind_receive_message(self, mtype, function, declare=True, metadata=None):
        self.client.bind_receive_message(mtype, function, declare=declare,
                                         metadata=metadata)
bind_receive_message.__doc__ = SAMPClient.bind_receive_message.__doc__
def bind_receive_notification(self, mtype, function, declare=True, metadata=None):
self.client.bind_receive_notification(mtype, function, declare, metadata)
bind_receive_notification.__doc__ = SAMPClient.bind_receive_notification.__doc__
def bind_receive_call(self, mtype, function, declare=True, metadata=None):
self.client.bind_receive_call(mtype, function, declare, metadata)
bind_receive_call.__doc__ = SAMPClient.bind_receive_call.__doc__
def bind_receive_response(self, msg_tag, function):
self.client.bind_receive_response(msg_tag, function)
bind_receive_response.__doc__ = SAMPClient.bind_receive_response.__doc__
def unbind_receive_notification(self, mtype, declare=True):
self.client.unbind_receive_notification(mtype, declare)
unbind_receive_notification.__doc__ = SAMPClient.unbind_receive_notification.__doc__
def unbind_receive_call(self, mtype, declare=True):
self.client.unbind_receive_call(mtype, declare)
unbind_receive_call.__doc__ = SAMPClient.unbind_receive_call.__doc__
def unbind_receive_response(self, msg_tag):
self.client.unbind_receive_response(msg_tag)
unbind_receive_response.__doc__ = SAMPClient.unbind_receive_response.__doc__
def declare_subscriptions(self, subscriptions=None):
self.client.declare_subscriptions(subscriptions)
declare_subscriptions.__doc__ = SAMPClient.declare_subscriptions.__doc__
def get_private_key(self):
return self.client.get_private_key()
get_private_key.__doc__ = SAMPClient.get_private_key.__doc__
def get_public_id(self):
return self.client.get_public_id()
get_public_id.__doc__ = SAMPClient.get_public_id.__doc__
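# Compact usage sketch (not part of the original module). It assumes a SAMP
# Hub is already running and reachable through the standard lockfile; the
# client name and the progress parameters are illustrative values.
if __name__ == "__main__":
    cli = SAMPIntegratedClient(name="demo-client")
    cli.connect()   # register with the running hub
    try:
        cli.enotify_all("samp.msg.progress",
                        msgid="demo-1", txt="started", percent="0")
    finally:
        cli.disconnect()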
|
faca97c7571b8ba4671ac8fb7604af055e7c59239ff69b90a6806c0f02152016 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocol (SAMP)
<http://www.ivoa.net/documents/SAMP/>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from .. import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.samp` to use "
"the internet, if available.",
aliases=['astropy.samp.utils.use_internet'])
n_retries = _config.ConfigItem(10,
"How many times to retry communications when they fail")
conf = Conf()
|
305e16ee68c793e1ec16331107f881cc2743ff58d3855755bf5ccb7d9d73d3b9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import os
import select
import socket
import threading
import warnings
from urllib.parse import urlunparse
from .constants import SAMP_STATUS_OK, SAMP_STATUS_WARNING
from .hub import SAMPHubServer
from .errors import SAMPClientError, SAMPWarning
from .utils import internet_on, get_num_args
from .standard_profile import ThreadingXMLRPCServer
__all__ = ['SAMPClient']
class SAMPClient:
"""
Utility class which provides facilities to create and manage a SAMP
    compliant XML-RPC server that acts as a SAMP callable client application.
Parameters
----------
hub : :class:`~astropy.samp.SAMPHubProxy`
An instance of :class:`~astropy.samp.SAMPHubProxy` to be
used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
addr : str, optional
Listening address (or IP). This defaults to 127.0.0.1 if the internet
is not reachable, otherwise it defaults to the host name.
port : int, optional
Listening XML-RPC server socket port. If left set to 0 (the default),
the operating system will select a free port.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
# TODO: define what is meant by callable
def __init__(self, hub, name=None, description=None, metadata=None,
addr=None, port=0, callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._addr = addr
self._port = port
self._xmlrpcAddr = None
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}]}
self._response_bindings = {}
self._host_name = "127.0.0.1"
if internet_on():
try:
self._host_name = socket.getfqdn()
socket.getaddrinfo(self._addr or self._host_name, self._port or 0)
except socket.error:
self._host_name = "127.0.0.1"
self.hub = hub
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
self.client = ThreadingXMLRPCServer((self._addr or self._host_name,
self._port), logRequests=False, allow_none=True)
self.client.register_introspection_functions()
self.client.register_function(self.receive_notification, 'samp.client.receiveNotification')
self.client.register_function(self.receive_call, 'samp.client.receiveCall')
self.client.register_function(self.receive_response, 'samp.client.receiveResponse')
# If the port was set to zero, then the operating system has
# selected a free port. We now check what this port number is.
if self._port == 0:
self._port = self.client.socket.getsockname()[1]
protocol = 'http'
self._xmlrpcAddr = urlunparse((protocol,
'{0}:{1}'.format(self._addr or self._host_name,
self._port),
'', '', '', ''))
def start(self):
"""
Start the client in a separate thread (non-blocking).
This only has an effect if ``callable`` was set to `True` when
initializing the client.
"""
if self._callable:
self._is_running = True
self._run_client()
def stop(self, timeout=10.):
"""
Stop the client.
Parameters
----------
timeout : float
Timeout after which to give up if the client cannot be cleanly
shut down.
"""
# Setting _is_running to False causes the loop in _serve_forever to
# exit. The thread should then stop running. We wait for the thread to
# terminate until the timeout, then we continue anyway.
self._is_running = False
if self._callable and self._thread.is_alive():
self._thread.join(timeout)
if self._thread.is_alive():
raise SAMPClientError("Client was not shut down successfully "
"(timeout={0}s)".format(timeout))
@property
def is_running(self):
"""
Whether the client is currently running.
"""
return self._is_running
@property
def is_registered(self):
"""
Whether the client is currently registered.
"""
return self._is_registered
def _run_client(self):
if self._callable:
self._thread.start()
def _serve_forever(self):
while self._is_running:
try:
read_ready = select.select([self.client.socket], [], [], 0.1)[0]
except OSError as exc:
warnings.warn("Call to select in SAMPClient failed: {0}".format(exc),
SAMPWarning)
else:
if read_ready:
self.client.handle_request()
self.client.server_close()
def _ping(self, private_key, sender_id, msg_id, msg_mtype, msg_params,
message):
reply = {"samp.status": SAMP_STATUS_OK, "samp.result": {}}
self.hub.reply(private_key, msg_id, reply)
def _client_env_get(self, private_key, sender_id, msg_id, msg_mtype,
msg_params, message):
if msg_params["name"] in os.environ:
reply = {"samp.status": SAMP_STATUS_OK,
"samp.result": {"value": os.environ[msg_params["name"]]}}
else:
reply = {"samp.status": SAMP_STATUS_WARNING,
"samp.result": {"value": ""},
"samp.error": {"samp.errortxt":
"Environment variable not defined."}}
self.hub.reply(private_key, msg_id, reply)
def _handle_notification(self, private_key, sender_id, message):
if private_key == self.get_private_key() and "samp.mtype" in message:
msg_mtype = message["samp.mtype"]
del message["samp.mtype"]
msg_params = message["samp.params"]
del message["samp.params"]
msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype)
for mtype in msubs:
if mtype in self._notification_bindings:
bound_func = self._notification_bindings[mtype][0]
if get_num_args(bound_func) == 5:
bound_func(private_key, sender_id, msg_mtype,
msg_params, message)
else:
bound_func(private_key, sender_id, None, msg_mtype,
msg_params, message)
return ""
def receive_notification(self, private_key, sender_id, message):
"""
Standard callable client ``receive_notification`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_notification`
method is used to bind distinct operations to MTypes. In case of a
customized callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
sender_id : str
Sender public ID.
message : dict
Received message.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_notification(private_key, sender_id, message)
def _handle_call(self, private_key, sender_id, msg_id, message):
if private_key == self.get_private_key() and "samp.mtype" in message:
msg_mtype = message["samp.mtype"]
del message["samp.mtype"]
msg_params = message["samp.params"]
del message["samp.params"]
msubs = SAMPHubServer.get_mtype_subtypes(msg_mtype)
for mtype in msubs:
if mtype in self._call_bindings:
self._call_bindings[mtype][0](private_key, sender_id,
msg_id, msg_mtype,
msg_params, message)
return ""
def receive_call(self, private_key, sender_id, msg_id, message):
"""
Standard callable client ``receive_call`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_call` method is
used to bind distinct operations to MTypes. In case of a customized
callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
sender_id : str
Sender public ID.
msg_id : str
Message ID received.
message : dict
Received message.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_call(private_key, sender_id, msg_id, message)
def _handle_response(self, private_key, responder_id, msg_tag, response):
if (private_key == self.get_private_key() and
msg_tag in self._response_bindings):
self._response_bindings[msg_tag](private_key, responder_id,
msg_tag, response)
return ""
def receive_response(self, private_key, responder_id, msg_tag, response):
"""
Standard callable client ``receive_response`` method.
This method is automatically handled when the
:meth:`~astropy.samp.client.SAMPClient.bind_receive_response` method
is used to bind distinct operations to MTypes. In case of a customized
callable client implementation that inherits from the
:class:`~astropy.samp.SAMPClient` class this method should be
overwritten.
.. note:: When overwritten, this method must always return
a string result (even empty).
Parameters
----------
private_key : str
Client private key.
responder_id : str
Responder public ID.
msg_tag : str
Response message tag.
response : dict
Received response.
Returns
-------
confirmation : str
Any confirmation string.
"""
return self._handle_response(private_key, responder_id, msg_tag,
response)
def bind_receive_message(self, mtype, function, declare=True,
metadata=None):
"""
        Bind a specific MType to a function or class method, to be used for
        both calls and notifications.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, msg_id,
mtype, params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``msg_id`` is the Hub message-id (calls only,
        otherwise it is `None`), ``mtype`` is the message MType, ``params`` is the
message parameter set (content of ``"samp.params"``) and ``extra`` is a
dictionary containing any extra message map entry. The client is
automatically declared subscribed to the MType by default.
Parameters
----------
mtype : str
            MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
self.bind_receive_call(mtype, function, declare=declare,
metadata=metadata)
self.bind_receive_notification(mtype, function, declare=declare,
metadata=metadata)
def bind_receive_notification(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType notification to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, mtype,
params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``mtype`` is the message MType, ``params`` is
the notified message parameter set (content of ``"samp.params"``) and
``extra`` is a dictionary containing any extra message map entry. The
client is automatically declared subscribed to the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
if not metadata:
metadata = {}
self._notification_bindings[mtype] = [function, metadata]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def bind_receive_call(self, mtype, function, declare=True, metadata=None):
"""
Bind a specific MType call to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, sender_id, msg_id,
mtype, params, extra)
where ``private_key`` is the client private-key, ``sender_id`` is the
notification sender ID, ``msg_id`` is the Hub message-id, ``mtype`` is
the message MType, ``params`` is the message parameter set (content of
``"samp.params"``) and ``extra`` is a dictionary containing any extra
message map entry. The client is automatically declared subscribed to
the MType by default.
Parameters
----------
mtype : str
MType to be caught.
function : callable
Application function to be used when ``mtype`` is received.
declare : bool, optional
Specify whether the client must be automatically declared as
subscribed to the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
metadata : dict, optional
Dictionary containing additional metadata to declare associated
with the MType subscribed to (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
if not metadata:
metadata = {}
self._call_bindings[mtype] = [function, metadata]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def bind_receive_response(self, msg_tag, function):
"""
Bind a specific msg-tag response to a function or class method.
The function must be of the form::
def my_function_or_method(<self,> private_key, responder_id,
msg_tag, response)
where ``private_key`` is the client private-key, ``responder_id`` is
the message responder ID, ``msg_tag`` is the message-tag provided at
call time and ``response`` is the response received.
Parameters
----------
msg_tag : str
Message-tag to be caught.
function : callable
Application function to be used when ``msg_tag`` is received.
"""
if self._callable:
self._response_bindings[msg_tag] = function
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_notification(self, mtype, declare=True):
"""
Remove from the notifications binding table the specified MType and
unsubscribe the client from it (if required).
Parameters
----------
mtype : str
MType to be removed.
declare : bool
Specify whether the client must be automatically declared as
unsubscribed from the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
del self._notification_bindings[mtype]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_call(self, mtype, declare=True):
"""
Remove from the calls binding table the specified MType and unsubscribe
the client from it (if required).
Parameters
----------
mtype : str
MType to be removed.
declare : bool
Specify whether the client must be automatically declared as
unsubscribed from the MType (see also
:meth:`~astropy.samp.client.SAMPClient.declare_subscriptions`).
"""
if self._callable:
del self._call_bindings[mtype]
if declare:
self._declare_subscriptions()
else:
raise SAMPClientError("Client not callable.")
def unbind_receive_response(self, msg_tag):
"""
Remove from the responses binding table the specified message-tag.
Parameters
----------
msg_tag : str
Message-tag to be removed.
"""
if self._callable:
del self._response_bindings[msg_tag]
else:
raise SAMPClientError("Client not callable.")
def declare_subscriptions(self, subscriptions=None):
"""
Declares the MTypes the client wishes to subscribe to, implicitly
defined with the MType binding methods
:meth:`~astropy.samp.client.SAMPClient.bind_receive_notification`
and :meth:`~astropy.samp.client.SAMPClient.bind_receive_call`.
An optional ``subscriptions`` map can be added to the final map passed
to the :meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions`
method.
Parameters
----------
subscriptions : dict, optional
Dictionary containing the list of MTypes to subscribe to, with the
same format of the ``subscriptions`` map passed to the
:meth:`~astropy.samp.hub_proxy.SAMPHubProxy.declare_subscriptions`
method.
"""
if self._callable:
self._declare_subscriptions(subscriptions)
else:
raise SAMPClientError("Client not callable.")
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register(self.hub.lockfile["samp.secret"])
if result["samp.self-id"] == "":
raise SAMPClientError("Registration failed - "
"samp.self-id was not set by the hub.")
if result["samp.private-key"] == "":
raise SAMPClientError("Registration failed - "
"samp.private-key was not set by the hub.")
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._set_xmlrpc_callback()
self._declare_subscriptions()
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
else:
raise SAMPClientError("Unable to register to the SAMP Hub. "
"Hub proxy not connected.")
def unregister(self):
"""
Unregister the client from the SAMP Hub.
"""
if self.hub.is_connected:
self._is_registered = False
self.hub.unregister(self._private_key)
self._hub_id = None
self._public_id = None
self._private_key = None
else:
raise SAMPClientError("Unable to unregister from the SAMP Hub. "
"Hub proxy not connected.")
def _set_xmlrpc_callback(self):
if self.hub.is_connected and self._private_key is not None:
self.hub.set_xmlrpc_callback(self._private_key,
self._xmlrpcAddr)
def _declare_subscriptions(self, subscriptions=None):
if self.hub.is_connected and self._private_key is not None:
mtypes_dict = {}
# Collect notification mtypes and metadata
for mtype in self._notification_bindings.keys():
mtypes_dict[mtype] = copy.deepcopy(self._notification_bindings[mtype][1])
            # Collect call mtypes and metadata
for mtype in self._call_bindings.keys():
mtypes_dict[mtype] = copy.deepcopy(self._call_bindings[mtype][1])
# Add optional subscription map
if subscriptions:
mtypes_dict.update(copy.deepcopy(subscriptions))
self.hub.declare_subscriptions(self._private_key, mtypes_dict)
else:
raise SAMPClientError("Unable to declare subscriptions. Hub "
"unreachable or not connected or client "
"not registered.")
def declare_metadata(self, metadata=None):
"""
Declare the client application metadata supported.
Parameters
----------
metadata : dict, optional
Dictionary containing the client application metadata as defined in
the SAMP definition document. If omitted, then no metadata are
declared.
"""
if self.hub.is_connected and self._private_key is not None:
if metadata is not None:
self._metadata.update(metadata)
self.hub.declare_metadata(self._private_key, self._metadata)
else:
raise SAMPClientError("Unable to declare metadata. Hub "
"unreachable or not connected or client "
"not registered.")
def get_private_key(self):
"""
Return the client private key used for the Standard Profile
communications obtained at registration time (``samp.private-key``).
Returns
-------
key : str
Client private key.
"""
return self._private_key
def get_public_id(self):
"""
Return public client ID obtained at registration time
(``samp.self-id``).
Returns
-------
id : str
Client public ID.
"""
return self._public_id
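# Sketch (not part of the original module) of the lower-level API: a
# SAMPHubProxy provides the hub connection and a SAMPClient built on top of
# it receives notifications. A running hub is assumed; the MType and the
# handler below are illustrative.
if __name__ == "__main__":
    from astropy.samp import SAMPHubProxy  # absolute import for the demo only

    proxy = SAMPHubProxy()
    proxy.connect()   # uses the standard lockfile of the running hub
    listener = SAMPClient(proxy, name="demo-listener")
    listener.start()
    listener.register()

    def on_ping(private_key, sender_id, mtype, params, extra):
        # Called for every samp.app.ping notification received.
        print("ping from {}".format(sender_id))

    listener.bind_receive_notification("samp.app.ping", on_ping)
    # ... the application event loop would run here ...
    listener.unregister()
    listener.stop()
    proxy.disconnect()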
|
28511a0670ba5f8dbe6bb3f69e7ab40287a61655d78f03e9652ac73fa8cf4b2b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
def get_package_data():
return {
'astropy.samp': [os.path.join('data', '*')],
'astropy.samp.tests': [os.path.join('data', '*')]
}
|
656b29d8b13fa8b34968162a39e209afc8b52b377d34077c9abb42a226a6b5fa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Utility functions and classes
"""
import queue
import inspect
import traceback
from io import StringIO
import xmlrpc.client as xmlrpc
from urllib.request import urlopen
from .constants import SAMP_STATUS_OK, SAMP_STATUS_ERROR
from .errors import SAMPProxyError
def internet_on():
from . import conf
if not conf.use_internet:
return False
else:
try:
urlopen('http://google.com', timeout=1.)
return True
except Exception:
return False
__all__ = ["SAMPMsgReplierWrapper"]
__doctest_skip__ = ['.']
def getattr_recursive(variable, attribute):
"""
Get attributes recursively.
"""
if '.' in attribute:
top, remaining = attribute.split('.', 1)
return getattr_recursive(getattr(variable, top), remaining)
else:
return getattr(variable, attribute)
class _ServerProxyPoolMethod:
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, proxies, name):
self.__proxies = proxies
self.__name = name
def __getattr__(self, name):
return _ServerProxyPoolMethod(self.__proxies, "{}.{}".format(self.__name, name))
def __call__(self, *args, **kwrds):
proxy = self.__proxies.get()
function = getattr_recursive(proxy, self.__name)
try:
response = function(*args, **kwrds)
except xmlrpc.Fault as exc:
raise SAMPProxyError(exc.faultCode, exc.faultString)
finally:
self.__proxies.put(proxy)
return response
class ServerProxyPool:
"""
A thread-safe pool of `xmlrpc.ServerProxy` objects.
"""
def __init__(self, size, proxy_class, *args, **keywords):
self._proxies = queue.Queue(size)
for i in range(size):
self._proxies.put(proxy_class(*args, **keywords))
def __getattr__(self, name):
# magic method dispatcher
return _ServerProxyPoolMethod(self._proxies, name)
class SAMPMsgReplierWrapper:
"""
    Function decorator that automatically grabs errors and returned maps
    (if any) from a function bound to a SAMP call (or notify).
Parameters
----------
cli : :class:`~astropy.samp.SAMPIntegratedClient` or :class:`~astropy.samp.SAMPClient`
        The SAMP client instance that receives the call or notification.
"""
def __init__(self, cli):
self.cli = cli
def __call__(self, f):
def wrapped_f(*args):
if get_num_args(f) == 5 or args[2] is None: # notification
f(*args)
else: # call
try:
result = f(*args)
if result:
                        self.cli.hub.reply(self.cli.get_private_key(), args[2],
                                           {"samp.status": SAMP_STATUS_OK,
                                            "samp.result": result})
except Exception:
err = StringIO()
traceback.print_exc(file=err)
txt = err.getvalue()
self.cli.hub.reply(self.cli.get_private_key(), args[2],
{"samp.status": SAMP_STATUS_ERROR,
"samp.result": {"txt": txt}})
return wrapped_f
class _HubAsClient:
def __init__(self, handler):
self._handler = handler
def __getattr__(self, name):
# magic method dispatcher
return _HubAsClientMethod(self._handler, name)
class _HubAsClientMethod:
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _HubAsClientMethod(self.__send, "{}.{}".format(self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
def get_num_args(f):
"""
Find the number of arguments a function or method takes (excluding ``self``).
"""
if inspect.ismethod(f):
return f.__func__.__code__.co_argcount - 1
elif inspect.isfunction(f):
return f.__code__.co_argcount
else:
raise TypeError("f should be a function or a method")
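# Short sketch (not part of the original module) of SAMPMsgReplierWrapper in
# use. A running hub is assumed; the client name, the MType and the returned
# result map are illustrative values.
if __name__ == "__main__":
    from astropy.samp import SAMPIntegratedClient

    cli = SAMPIntegratedClient(name="echo-demo")
    cli.connect()

    @SAMPMsgReplierWrapper(cli)
    def echo(private_key, sender_id, msg_id, mtype, params, extra):
        # The returned map becomes ``samp.result``; exceptions raised here
        # are caught by the wrapper and replied with SAMP_STATUS_ERROR.
        return {"echoed": str(params)}

    cli.bind_receive_call("demo.echo", echo)
    # ... keep the client alive to serve calls, then cli.disconnect() ...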
|
48a9acf52f554461a5fe4f0a5c6883981292748b94fe0048ba53b6e5e30fdb07 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines constants used in `astropy.samp`.
"""
from ..utils.data import get_pkg_data_filename
__all__ = ['SAMP_STATUS_OK', 'SAMP_STATUS_WARNING', 'SAMP_STATUS_ERROR',
'SAFE_MTYPES', 'SAMP_ICON']
__profile_version__ = "1.3"
#: General constant for samp.ok status string
SAMP_STATUS_OK = "samp.ok"
#: General constant for samp.warning status string
SAMP_STATUS_WARNING = "samp.warning"
#: General constant for samp.error status string
SAMP_STATUS_ERROR = "samp.error"
SAFE_MTYPES = ["samp.app.*", "samp.msg.progress", "table.*", "image.*",
"coord.*", "spectrum.*", "bibcode.*", "voresource.*"]
with open(get_pkg_data_filename('data/astropy_icon.png'), 'rb') as f:
SAMP_ICON = f.read()
|
84dcb74a18b3548cf158fb06ba99ab1fd027a9f7ae738f3bf86603043c7244bb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import xmlrpc.client as xmlrpc
from .errors import SAMPHubError
from .utils import ServerProxyPool
from .lockfile_helpers import get_main_running_hub
__all__ = ['SAMPHubProxy']
class SAMPHubProxy:
"""
Proxy class to simplify the client interaction with a SAMP hub (via the
standard profile).
"""
def __init__(self):
self.proxy = None
        self._connected = False
        self.lockfile = {}
@property
def is_connected(self):
"""
Whether the hub proxy is currently connected to a hub.
"""
return self._connected
def connect(self, hub=None, hub_params=None, pool_size=20):
"""
Connect to the current SAMP Hub.
Parameters
----------
hub : `~astropy.samp.SAMPHubServer`, optional
The hub to connect to.
hub_params : dict, optional
Optional dictionary containing the lock-file content of the Hub
with which to connect. This dictionary has the form
``{<token-name>: <token-string>, ...}``.
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self._connected = False
self.lockfile = {}
if hub is not None and hub_params is not None:
raise ValueError("Cannot specify both hub and hub_params")
if hub_params is None:
if hub is not None:
if not hub.is_running:
raise SAMPHubError("Hub is not running")
else:
hub_params = hub.params
else:
hub_params = get_main_running_hub()
try:
url = hub_params["samp.hub.xmlrpc.url"].replace("\\", "")
self.proxy = ServerProxyPool(pool_size, xmlrpc.ServerProxy,
url, allow_none=1)
self.ping()
self.lockfile = copy.deepcopy(hub_params)
self._connected = True
except xmlrpc.ProtocolError as p:
# 401 Unauthorized
if p.errcode == 401:
raise SAMPHubError("Unauthorized access. Basic Authentication "
"required or failed.")
else:
raise SAMPHubError("Protocol Error {}: {}".format(p.errcode,
p.errmsg))
def disconnect(self):
"""
Disconnect from the current SAMP Hub.
"""
self.proxy = None
self._connected = False
self.lockfile = {}
def server_close(self):
self.proxy.server_close()
@property
def _samp_hub(self):
"""
Property to abstract away the path to the hub, which allows this class
to be used for other profiles.
"""
return self.proxy.samp.hub
def ping(self):
"""
Proxy to ``ping`` SAMP Hub method (Standard Profile only).
"""
return self._samp_hub.ping()
def set_xmlrpc_callback(self, private_key, xmlrpc_addr):
"""
Proxy to ``setXmlrpcCallback`` SAMP Hub method (Standard Profile only).
"""
return self._samp_hub.setXmlrpcCallback(private_key, xmlrpc_addr)
def register(self, secret):
"""
Proxy to ``register`` SAMP Hub method.
"""
return self._samp_hub.register(secret)
def unregister(self, private_key):
"""
Proxy to ``unregister`` SAMP Hub method.
"""
return self._samp_hub.unregister(private_key)
def declare_metadata(self, private_key, metadata):
"""
Proxy to ``declareMetadata`` SAMP Hub method.
"""
return self._samp_hub.declareMetadata(private_key, metadata)
def get_metadata(self, private_key, client_id):
"""
Proxy to ``getMetadata`` SAMP Hub method.
"""
return self._samp_hub.getMetadata(private_key, client_id)
def declare_subscriptions(self, private_key, subscriptions):
"""
Proxy to ``declareSubscriptions`` SAMP Hub method.
"""
return self._samp_hub.declareSubscriptions(private_key, subscriptions)
def get_subscriptions(self, private_key, client_id):
"""
Proxy to ``getSubscriptions`` SAMP Hub method.
"""
return self._samp_hub.getSubscriptions(private_key, client_id)
def get_registered_clients(self, private_key):
"""
Proxy to ``getRegisteredClients`` SAMP Hub method.
"""
return self._samp_hub.getRegisteredClients(private_key)
def get_subscribed_clients(self, private_key, mtype):
"""
Proxy to ``getSubscribedClients`` SAMP Hub method.
"""
return self._samp_hub.getSubscribedClients(private_key, mtype)
def notify(self, private_key, recipient_id, message):
"""
Proxy to ``notify`` SAMP Hub method.
"""
return self._samp_hub.notify(private_key, recipient_id, message)
def notify_all(self, private_key, message):
"""
Proxy to ``notifyAll`` SAMP Hub method.
"""
return self._samp_hub.notifyAll(private_key, message)
def call(self, private_key, recipient_id, msg_tag, message):
"""
Proxy to ``call`` SAMP Hub method.
"""
return self._samp_hub.call(private_key, recipient_id, msg_tag, message)
def call_all(self, private_key, msg_tag, message):
"""
Proxy to ``callAll`` SAMP Hub method.
"""
return self._samp_hub.callAll(private_key, msg_tag, message)
def call_and_wait(self, private_key, recipient_id, message, timeout):
"""
Proxy to ``callAndWait`` SAMP Hub method.
"""
return self._samp_hub.callAndWait(private_key, recipient_id, message,
timeout)
def reply(self, private_key, msg_id, response):
"""
Proxy to ``reply`` SAMP Hub method.
"""
return self._samp_hub.reply(private_key, msg_id, response)
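# Illustrative usage sketch (not part of the original module): driving the
# proxy directly against a running standard-profile hub. The lock-file key
# ("samp.secret") and registration key ("samp.private-key") follow the SAMP
# standard profile; treat the exact flow as an assumption, since the proxy is
# normally used through the higher-level SAMP client classes.
#
#     proxy = SAMPHubProxy()
#     proxy.connect()  # discover the hub via its lock file
#     reg_info = proxy.register(proxy.lockfile["samp.secret"])
#     private_key = reg_info["samp.private-key"]
#     proxy.declare_metadata(private_key, {"samp.name": "demo-client"})
#     proxy.unregister(private_key)
#     proxy.disconnect()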
|
82e1dcd4dcee323e9ac02d478901511cb42bc5b42a9c30297da5b8c2bbe4070a | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Sundry function and class decorators."""
import functools
import inspect
import textwrap
import types
import warnings
from inspect import signature
from .codegen import make_function_with_signature
from .exceptions import (AstropyDeprecationWarning, AstropyUserWarning,
AstropyPendingDeprecationWarning)
__all__ = ['classproperty', 'deprecated', 'deprecated_attribute',
'deprecated_renamed_argument', 'format_doc',
'lazyproperty', 'sharedmethod', 'wraps']
def deprecated(since, message='', name='', alternative='', pending=False,
obj_type=None):
"""
Used to mark a function or class as deprecated.
To mark an attribute as deprecated, use `deprecated_attribute`.
Parameters
    ----------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``func`` may be used for the name of the function,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. ``obj_type`` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function or class; if not provided
the name is automatically determined from the passed in
function or class, though this is useful in the case of
renamed functions, where the new function is just assigned to
the name of the deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object. The deprecation warning will
tell the user about this alternative if provided.
pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of an
        AstropyDeprecationWarning.
obj_type : str, optional
The type of this object, if the automatically determined one
needs to be overridden.
"""
method_types = (classmethod, staticmethod, types.MethodType)
def deprecate_doc(old_doc, message):
"""
Returns a given docstring with a deprecation message prepended
to it.
"""
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
new_doc = (('\n.. deprecated:: {since}'
'\n {message}\n\n'.format(
**{'since': since, 'message': message.strip()})) + old_doc)
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return new_doc
def get_function(func):
"""
Given a function or classmethod (or other function wrapper type), get
the function object.
"""
if isinstance(func, method_types):
func = func.__func__
return func
def deprecate_function(func, message):
"""
Returns a wrapped function that displays an
``AstropyDeprecationWarning`` when it is called.
"""
if isinstance(func, method_types):
func_wrapper = type(func)
else:
func_wrapper = lambda f: f
func = get_function(func)
def deprecated_func(*args, **kwargs):
if pending:
category = AstropyPendingDeprecationWarning
else:
category = AstropyDeprecationWarning
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
# If this is an extension function, we can't call
# functools.wraps on it, but we normally don't care.
# This crazy way to get the type of a wrapper descriptor is
# straight out of the Python 3.3 inspect module docs.
if type(func) is not type(str.__dict__['__add__']): # nopep8
deprecated_func = functools.wraps(func)(deprecated_func)
deprecated_func.__doc__ = deprecate_doc(
deprecated_func.__doc__, message)
return func_wrapper(deprecated_func)
def deprecate_class(cls, message):
"""
Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
if the class or any of the bases overrides ``__new__``) so it will give
a deprecation warning when an instance is created.
This won't work for extension classes because these can't be modified
in-place and the alternatives don't work in the general case:
- Using a new class that looks and behaves like the original doesn't
work because the __new__ method of extension types usually makes sure
that it's the same class or a subclass.
        - Subclassing the class and returning the subclass can lead to problems
with pickle and will look weird in the Sphinx docs.
"""
cls.__doc__ = deprecate_doc(cls.__doc__, message)
if cls.__new__ is object.__new__:
cls.__init__ = deprecate_function(get_function(cls.__init__), message)
else:
cls.__new__ = deprecate_function(get_function(cls.__new__), message)
return cls
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending):
if obj_type is None:
if isinstance(obj, type):
obj_type_name = 'class'
elif inspect.isfunction(obj):
obj_type_name = 'function'
elif inspect.ismethod(obj) or isinstance(obj, method_types):
obj_type_name = 'method'
else:
obj_type_name = 'object'
else:
obj_type_name = obj_type
if not name:
name = get_function(obj).__name__
altmessage = ''
if not message or type(message) is type(deprecate):
if pending:
message = ('The {func} {obj_type} will be deprecated in a '
'future version.')
else:
message = ('The {func} {obj_type} is deprecated and may '
'be removed in a future version.')
if alternative:
altmessage = '\n Use {} instead.'.format(alternative)
message = ((message.format(**{
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type_name})) +
altmessage)
if isinstance(obj, type):
return deprecate_class(obj, message)
else:
return deprecate_function(obj, message)
if type(message) is type(deprecate):
return deprecate(message)
return deprecate
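# Illustrative usage sketch (not part of the original module): the warning
# text is assembled from the ``since``/``alternative`` values documented above.
#
#     @deprecated('1.0', alternative='new_function')
#     def old_function():
#         ...
#
#     old_function()  # emits an AstropyDeprecationWarning naming new_function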
def deprecated_attribute(name, since, message=None, alternative=None,
pending=False):
"""
Used to mark a public attribute as deprecated. This creates a
property that will warn when the given attribute name is accessed.
To prevent the warning (i.e. for internal code), use the private
name for the attribute by prepending an underscore
(i.e. ``self._name``).
Parameters
----------
name : str
The name of the deprecated attribute.
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``name`` may be used for the name of the attribute,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function.
alternative : str, optional
An alternative attribute that the user may use in place of the
deprecated attribute. The deprecation warning will tell the
user about this alternative if provided.
pending : bool, optional
        If True, uses an AstropyPendingDeprecationWarning instead of an
        AstropyDeprecationWarning.
Examples
--------
::
class MyClass:
# Mark the old_name as deprecated
old_name = misc.deprecated_attribute('old_name', '0.1')
def method(self):
self._old_name = 42
"""
private_name = '_' + name
@deprecated(since, name=name, obj_type='attribute')
def get(self):
return getattr(self, private_name)
@deprecated(since, name=name, obj_type='attribute')
def set(self, val):
setattr(self, private_name, val)
@deprecated(since, name=name, obj_type='attribute')
def delete(self):
delattr(self, private_name)
return property(get, set, delete)
def deprecated_renamed_argument(old_name, new_name, since,
arg_in_kwargs=False, relax=False,
pending=False):
"""Deprecate a _renamed_ function argument.
The decorator assumes that the argument with the ``old_name`` was removed
from the function signature and the ``new_name`` replaced it at the
**same position** in the signature. If the ``old_name`` argument is
given when calling the decorated function the decorator will catch it and
issue a deprecation warning and pass it on as ``new_name`` argument.
Parameters
----------
old_name : str or list/tuple thereof
The old name of the argument.
new_name : str or list/tuple thereof
The new name of the argument.
since : str or number or list/tuple thereof
The release at which the old argument became deprecated.
arg_in_kwargs : bool or list/tuple thereof, optional
If the argument is not a named argument (for example it
was meant to be consumed by ``**kwargs``) set this to
        ``True``. Otherwise the decorator will raise a TypeError
if the ``new_name`` cannot be found in the signature of
the decorated function.
Default is ``False``.
relax : bool or list/tuple thereof, optional
If ``False`` a ``TypeError`` is raised if both ``new_name`` and
``old_name`` are given. If ``True`` the value for ``new_name`` is used
and a Warning is issued.
Default is ``False``.
pending : bool or list/tuple thereof, optional
If ``True`` this will hide the deprecation warning and ignore the
corresponding ``relax`` parameter value.
Default is ``False``.
Raises
------
TypeError
If the new argument name cannot be found in the function
signature and arg_in_kwargs was False or if it is used to
deprecate the name of the ``*args``-, ``**kwargs``-like arguments.
At runtime such an Error is raised if both the new_name
and old_name were specified when calling the function and
"relax=False".
Notes
-----
The decorator should be applied to a function where the **name**
    of an argument was changed but the function otherwise applies the same logic.
.. warning::
If ``old_name`` is a list or tuple the ``new_name`` and ``since`` must
also be a list or tuple with the same number of entries. ``relax`` and
        ``arg_in_kwargs`` can be a single bool (applied to all) or also a
list/tuple with the same number of entries like ``new_name``, etc.
Examples
--------
The deprecation warnings are not shown in the following examples.
To deprecate a positional or keyword argument::
>>> from astropy.utils.decorators import deprecated_renamed_argument
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0')
... def test(sigma):
... return sigma
>>> test(2)
2
>>> test(sigma=2)
2
>>> test(sig=2)
2
    To deprecate an argument caught inside ``**kwargs``, the
    ``arg_in_kwargs`` parameter has to be set::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0',
... arg_in_kwargs=True)
... def test(**kwargs):
... return kwargs['sigma']
>>> test(sigma=2)
2
>>> test(sig=2)
2
By default providing the new and old keyword will lead to an Exception. If
a Warning is desired set the ``relax`` argument::
>>> @deprecated_renamed_argument('sig', 'sigma', '1.0', relax=True)
... def test(sigma):
... return sigma
>>> test(sig=2)
2
It is also possible to replace multiple arguments. The ``old_name``,
``new_name`` and ``since`` have to be `tuple` or `list` and contain the
same number of entries::
>>> @deprecated_renamed_argument(['a', 'b'], ['alpha', 'beta'],
... ['1.0', 1.2])
... def test(alpha, beta):
... return alpha, beta
>>> test(a=2, b=3)
(2, 3)
In this case ``arg_in_kwargs`` and ``relax`` can be a single value (which
is applied to all renamed arguments) or must also be a `tuple` or `list`
with values for each of the arguments.
"""
cls_iter = (list, tuple)
if isinstance(old_name, cls_iter):
n = len(old_name)
# Assume that new_name and since are correct (tuple/list with the
# appropriate length) in the spirit of the "consenting adults". But the
# optional parameters may not be set, so if these are not iterables
# wrap them.
if not isinstance(arg_in_kwargs, cls_iter):
arg_in_kwargs = [arg_in_kwargs] * n
if not isinstance(relax, cls_iter):
relax = [relax] * n
if not isinstance(pending, cls_iter):
pending = [pending] * n
else:
# To allow a uniform approach later on, wrap all arguments in lists.
n = 1
old_name = [old_name]
new_name = [new_name]
since = [since]
arg_in_kwargs = [arg_in_kwargs]
relax = [relax]
pending = [pending]
def decorator(function):
# The named arguments of the function.
arguments = signature(function).parameters
keys = list(arguments.keys())
position = [None] * n
for i in range(n):
# Determine the position of the argument.
if new_name[i] in arguments:
param = arguments[new_name[i]]
# There are several possibilities now:
# 1.) Positional or keyword argument:
if param.kind == param.POSITIONAL_OR_KEYWORD:
position[i] = keys.index(new_name[i])
# 2.) Keyword only argument:
elif param.kind == param.KEYWORD_ONLY:
# These cannot be specified by position.
position[i] = None
# 3.) positional-only argument, varargs, varkwargs or some
# unknown type:
else:
raise TypeError('cannot replace argument "{0}" of kind '
'{1!r}.'.format(new_name[i], param.kind))
# In case the argument is not found in the list of arguments
            # the only remaining possibility is that it should be caught
# by some kind of **kwargs argument.
# This case has to be explicitly specified, otherwise throw
# an exception!
elif arg_in_kwargs[i]:
position[i] = None
else:
raise TypeError('"{}" was not specified in the function '
'signature. If it was meant to be part of '
'"**kwargs" then set "arg_in_kwargs" to "True"'
'.'.format(new_name[i]))
@functools.wraps(function)
def wrapper(*args, **kwargs):
for i in range(n):
# The only way to have oldkeyword inside the function is
# that it is passed as kwarg because the oldkeyword
# parameter was renamed to newkeyword.
if old_name[i] in kwargs:
value = kwargs.pop(old_name[i])
                    # Display the deprecation warning only when it's not
                    # pending.
if not pending[i]:
warnings.warn(
'"{0}" was deprecated in version {1} '
'and will be removed in a future version. '
'Use argument "{2}" instead.'
''.format(old_name[i], since[i], new_name[i]),
AstropyDeprecationWarning, stacklevel=2)
# Check if the newkeyword was given as well.
newarg_in_args = (position[i] is not None and
len(args) > position[i])
newarg_in_kwargs = new_name[i] in kwargs
if newarg_in_args or newarg_in_kwargs:
if not pending[i]:
                            # If both are given, print a Warning if relax is
                            # True or raise an Exception if relax is False.
if relax[i]:
warnings.warn(
'"{0}" and "{1}" keywords were set. '
'Using the value of "{1}".'
''.format(old_name[i], new_name[i]),
AstropyUserWarning)
else:
raise TypeError(
'cannot specify both "{}" and "{}"'
'.'.format(old_name[i], new_name[i]))
else:
# If the new argument isn't specified just pass the old
# one with the name of the new argument to the function
kwargs[new_name[i]] = value
return function(*args, **kwargs)
return wrapper
return decorator
# TODO: This can still be made to work for setters by implementing an
# accompanying metaclass that supports it; we just don't need that right this
# second
class classproperty(property):
"""
Similar to `property`, but allows class-level properties. That is,
a property whose getter is like a `classmethod`.
The wrapped method may explicitly use the `classmethod` decorator (which
    must come before this decorator), or the `classmethod` may be omitted
(it is implicit through use of this decorator).
.. note::
classproperty only works for *read-only* properties. It does not
currently allow writeable/deleteable properties, due to subtleties of how
Python descriptors work. In order to implement such properties on a class
a metaclass for that class must be implemented.
Parameters
----------
fget : callable
The function that computes the value of this property (in particular,
the function when this is used as a decorator) a la `property`.
doc : str, optional
The docstring for the property--by default inherited from the getter
function.
lazy : bool, optional
If True, caches the value returned by the first call to the getter
function, so that it is only called once (used for lazy evaluation
of an attribute). This is analogous to `lazyproperty`. The ``lazy``
argument can also be used when `classproperty` is used as a decorator
(see the third example below). When used in the decorator syntax this
*must* be passed in as a keyword argument.
Examples
--------
::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal + 1
...
>>> Foo.bar
2
>>> foo_instance = Foo()
>>> foo_instance.bar
2
>>> foo_instance._bar_internal = 2
>>> foo_instance.bar # Ignores instance attributes
2
As previously noted, a `classproperty` is limited to implementing
read-only attributes::
>>> class Foo:
... _bar_internal = 1
... @classproperty
... def bar(cls):
... return cls._bar_internal
... @bar.setter
... def bar(cls, value):
... cls._bar_internal = value
...
Traceback (most recent call last):
...
NotImplementedError: classproperty can only be read-only; use a
metaclass to implement modifiable class-level properties
When the ``lazy`` option is used, the getter is only called once::
>>> class Foo:
... @classproperty(lazy=True)
... def bar(cls):
... print("Performing complicated calculation")
... return 1
...
>>> Foo.bar
Performing complicated calculation
1
>>> Foo.bar
1
If a subclass inherits a lazy `classproperty` the property is still
re-evaluated for the subclass::
>>> class FooSub(Foo):
... pass
...
>>> FooSub.bar
Performing complicated calculation
1
>>> FooSub.bar
1
"""
def __new__(cls, fget=None, doc=None, lazy=False):
if fget is None:
# Being used as a decorator--return a wrapper that implements
# decorator syntax
def wrapper(func):
return cls(func, lazy=lazy)
return wrapper
return super().__new__(cls)
def __init__(self, fget, doc=None, lazy=False):
self._lazy = lazy
if lazy:
self._cache = {}
fget = self._wrap_fget(fget)
super().__init__(fget=fget, doc=doc)
# There is a buglet in Python where self.__doc__ doesn't
# get set properly on instances of property subclasses if
# the doc argument was used rather than taking the docstring
# from fget
# Related Python issue: https://bugs.python.org/issue24766
if doc is not None:
self.__doc__ = doc
def __get__(self, obj, objtype):
if self._lazy and objtype in self._cache:
return self._cache[objtype]
# The base property.__get__ will just return self here;
# instead we pass objtype through to the original wrapped
# function (which takes the class as its sole argument)
val = self.fget.__wrapped__(objtype)
if self._lazy:
self._cache[objtype] = val
return val
def getter(self, fget):
return super().getter(self._wrap_fget(fget))
def setter(self, fset):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties")
def deleter(self, fdel):
raise NotImplementedError(
"classproperty can only be read-only; use a metaclass to "
"implement modifiable class-level properties")
@staticmethod
def _wrap_fget(orig_fget):
if isinstance(orig_fget, classmethod):
orig_fget = orig_fget.__func__
# Using stock functools.wraps instead of the fancier version
# found later in this module, which is overkill for this purpose
@functools.wraps(orig_fget)
def fget(obj):
return orig_fget(obj.__class__)
return fget
class lazyproperty(property):
"""
Works similarly to property(), but computes the value only once.
    This essentially memoizes the value of the property by storing the result
of its computation in the ``__dict__`` of the object instance. This is
useful for computing the value of some property that should otherwise be
invariant. For example::
>>> class LazyTest:
... @lazyproperty
... def complicated_property(self):
... print('Computing the value for complicated_property...')
... return 42
...
>>> lt = LazyTest()
>>> lt.complicated_property
Computing the value for complicated_property...
42
>>> lt.complicated_property
42
As the example shows, the second time ``complicated_property`` is accessed,
the ``print`` statement is not executed. Only the return value from the
    first access of ``complicated_property`` is returned.
By default, a setter and deleter are used which simply overwrite and
delete, respectively, the value stored in ``__dict__``. Any user-specified
setter or deleter is executed before executing these default actions.
The one exception is that the default setter is not run if the user setter
already sets the new value in ``__dict__`` and returns that value and the
returned value is not ``None``.
Adapted from the recipe at
http://code.activestate.com/recipes/363602-lazy-property-evaluation
"""
def __init__(self, fget, fset=None, fdel=None, doc=None):
super().__init__(fget, fset, fdel, doc)
self._key = self.fget.__name__
def __get__(self, obj, owner=None):
try:
return obj.__dict__[self._key]
except KeyError:
val = self.fget(obj)
obj.__dict__[self._key] = val
return val
except AttributeError:
if obj is None:
return self
raise
def __set__(self, obj, val):
obj_dict = obj.__dict__
if self.fset:
ret = self.fset(obj, val)
if ret is not None and obj_dict.get(self._key) is ret:
# By returning the value set the setter signals that it took
# over setting the value in obj.__dict__; this mechanism allows
# it to override the input value
return
obj_dict[self._key] = val
def __delete__(self, obj):
if self.fdel:
self.fdel(obj)
if self._key in obj.__dict__:
del obj.__dict__[self._key]
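# Illustrative sketch (not part of the original module) of the setter
# override described in the lazyproperty docstring: a user setter that stores
# a transformed value in ``__dict__`` and returns that same object signals
# that the default "store the raw value" step should be skipped.  The class
# and attribute names here are made up for the example.
#
#     class Angle:
#         @lazyproperty
#         def degrees(self):
#             return 0.0
#
#         @degrees.setter
#         def degrees(self, value):
#             value = float(value) % 360.0
#             self.__dict__['degrees'] = value
#             return value  # non-None and identical to the cached entry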
class sharedmethod(classmethod):
"""
This is a method decorator that allows both an instancemethod and a
`classmethod` to share the same name.
When using `sharedmethod` on a method defined in a class's body, it
may be called on an instance, or on a class. In the former case it
behaves like a normal instance method (a reference to the instance is
automatically passed as the first ``self`` argument of the method)::
>>> class Example:
... @sharedmethod
... def identify(self, *args):
... print('self was', self)
... print('additional args were', args)
...
>>> ex = Example()
>>> ex.identify(1, 2)
self was <astropy.utils.decorators.Example object at 0x...>
additional args were (1, 2)
In the latter case, when the `sharedmethod` is called directly from a
class, it behaves like a `classmethod`::
>>> Example.identify(3, 4)
self was <class 'astropy.utils.decorators.Example'>
additional args were (3, 4)
This also supports a more advanced usage, where the `classmethod`
implementation can be written separately. If the class's *metaclass*
has a method of the same name as the `sharedmethod`, the version on
the metaclass is delegated to::
>>> class ExampleMeta(type):
... def identify(self):
... print('this implements the {0}.identify '
... 'classmethod'.format(self.__name__))
...
>>> class Example(metaclass=ExampleMeta):
... @sharedmethod
... def identify(self):
... print('this implements the instancemethod')
...
>>> Example().identify()
this implements the instancemethod
>>> Example.identify()
this implements the Example.identify classmethod
"""
def __get__(self, obj, objtype=None):
if obj is None:
mcls = type(objtype)
clsmeth = getattr(mcls, self.__func__.__name__, None)
if callable(clsmeth):
func = clsmeth
else:
func = self.__func__
return self._make_method(func, objtype)
else:
return self._make_method(self.__func__, obj)
@staticmethod
def _make_method(func, instance):
return types.MethodType(func, instance)
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES, exclude_args=()):
"""
An alternative to `functools.wraps` which also preserves the original
function's call signature by way of
`~astropy.utils.codegen.make_function_with_signature`.
This also adds an optional ``exclude_args`` argument. If given it should
be a sequence of argument names that should not be copied from the wrapped
function (either positional or keyword arguments).
The documentation for the original `functools.wraps` follows:
"""
wrapped_args = _get_function_args(wrapped, exclude_args=exclude_args)
def wrapper(func):
if '__name__' in assigned:
name = wrapped.__name__
else:
name = func.__name__
func = make_function_with_signature(func, name=name, **wrapped_args)
func = functools.update_wrapper(func, wrapped, assigned=assigned,
updated=updated)
return func
return wrapper
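# Illustrative usage sketch (not part of the original module): copying a
# signature while dropping one argument.  The function names are made up for
# the example.
#
#     def base(a, b, verbose=False):
#         ...
#
#     @wraps(base, exclude_args=('verbose',))
#     def derived(*args, **kwargs):
#         return base(*args, **kwargs)
#
#     # inspect.signature(derived) now reports (a, b) rather than
#     # (*args, **kwargs)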
if isinstance(wraps.__doc__, str) and functools.wraps.__doc__ is not None:
wraps.__doc__ += functools.wraps.__doc__
def _get_function_args_internal(func):
"""
Utility function for `wraps`.
Reads the argspec for the given function and converts it to arguments
for `make_function_with_signature`.
"""
argspec = inspect.getfullargspec(func)
if argspec.defaults:
args = argspec.args[:-len(argspec.defaults)]
        # Materialize as a list so keyword-only defaults can be appended below.
        kwargs = list(zip(argspec.args[len(args):], argspec.defaults))
else:
args = argspec.args
kwargs = []
if argspec.kwonlyargs:
kwargs.extend((argname, argspec.kwonlydefaults[argname])
for argname in argspec.kwonlyargs)
return {'args': args, 'kwargs': kwargs, 'varargs': argspec.varargs,
'varkwargs': argspec.varkw}
def _get_function_args(func, exclude_args=()):
all_args = _get_function_args_internal(func)
if exclude_args:
exclude_args = set(exclude_args)
for arg_type in ('args', 'kwargs'):
all_args[arg_type] = [arg for arg in all_args[arg_type]
if arg not in exclude_args]
for arg_type in ('varargs', 'varkwargs'):
if all_args[arg_type] in exclude_args:
all_args[arg_type] = None
return all_args
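# Illustrative sketch (not part of the original module) of the mapping these
# helpers produce, assuming a function like ``def f(a, b=1, *args, **kw)``:
#
#     _get_function_args(f)
#     # -> {'args': ['a'], 'kwargs': [('b', 1)],
#     #     'varargs': 'args', 'varkwargs': 'kw'}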
def format_doc(docstring, *args, **kwargs):
"""
Replaces the docstring of the decorated object and then formats it.
The formatting works like :meth:`str.format` and if the decorated object
already has a docstring this docstring can be included in the new
documentation if you use the ``{__doc__}`` placeholder.
Its primary use is for reusing a *long* docstring in multiple functions
when it is the same or only slightly different between them.
Parameters
----------
docstring : str or object or None
The docstring that will replace the docstring of the decorated
object. If it is an object like a function or class it will
take the docstring of this object. If it is a string it will use the
        string itself. One special case is if ``docstring`` is ``None``: then
        the decorated function's own docstring is used and formatted.
args :
passed to :meth:`str.format`.
kwargs :
passed to :meth:`str.format`. If the function has a (not empty)
docstring the original docstring is added to the kwargs with the
keyword ``'__doc__'``.
Raises
------
ValueError
If the ``docstring`` (or interpreted docstring if it was ``None``
or not a string) is empty.
IndexError, KeyError
If a placeholder in the (interpreted) ``docstring`` was not filled. see
:meth:`str.format` for more information.
Notes
-----
    Using this decorator allows, for example, Sphinx to parse the
correct docstring.
Examples
--------
Replacing the current docstring is very easy::
>>> from astropy.utils.decorators import format_doc
>>> @format_doc('''Perform num1 + num2''')
... def add(num1, num2):
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform num1 + num2
sometimes instead of replacing you only want to add to it::
>>> doc = '''
... {__doc__}
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... '''
>>> @format_doc(doc)
... def add(num1, num2):
... '''Perform addition.'''
... return num1+num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
in case one might want to format it further::
>>> doc = '''
... Perform {0}.
... Parameters
... ----------
... num1, num2 : Numbers
... Returns
... -------
... result: Number
... result of num1 {op} num2
... {__doc__}
... '''
>>> @format_doc(doc, 'addition', op='+')
... def add(num1, num2):
... return num1+num2
...
>>> @format_doc(doc, 'subtraction', op='-')
... def subtract(num1, num2):
... '''Notes: This one has additional notes.'''
... return num1-num2
...
>>> help(add) # doctest: +SKIP
Help on function add in module __main__:
<BLANKLINE>
add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
>>> help(subtract) # doctest: +SKIP
Help on function subtract in module __main__:
<BLANKLINE>
subtract(num1, num2)
Perform subtraction.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 - num2
Notes : This one has additional notes.
    These methods can be combined, and even taking the docstring from another
    object is possible by passing that object as the docstring. You just have
    to specify the object::
>>> @format_doc(add)
... def another_add(num1, num2):
... return num1 + num2
...
>>> help(another_add) # doctest: +SKIP
Help on function another_add in module __main__:
<BLANKLINE>
another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
But be aware that this decorator *only* formats the given docstring not
the strings passed as ``args`` or ``kwargs`` (not even the original
docstring)::
>>> @format_doc(doc, 'addition', op='+')
... def yet_another_add(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(yet_another_add) # doctest: +SKIP
Help on function yet_another_add in module __main__:
<BLANKLINE>
yet_another_add(num1, num2)
Perform addition.
Parameters
----------
num1, num2 : Numbers
Returns
-------
result : Number
result of num1 + num2
This one is good for {0}.
To work around it you could specify the docstring to be ``None``::
>>> @format_doc(None, 'addition')
... def last_add_i_swear(num1, num2):
... '''This one is good for {0}.'''
... return num1 + num2
...
>>> help(last_add_i_swear) # doctest: +SKIP
Help on function last_add_i_swear in module __main__:
<BLANKLINE>
last_add_i_swear(num1, num2)
This one is good for addition.
    Using ``None`` as the docstring allows you to use the decorator twice
    on an object, first to parse the new docstring and then to parse the
    original docstring or the ``args`` and ``kwargs``.
"""
def set_docstring(obj):
if docstring is None:
            # None means: use the object's __doc__
doc = obj.__doc__
# Delete documentation in this case so we don't end up with
# awkwardly self-inserted docs.
obj.__doc__ = None
elif isinstance(docstring, str):
# String: use the string that was given
doc = docstring
else:
# Something else: Use the __doc__ of this
doc = docstring.__doc__
if not doc:
# In case the docstring is empty it's probably not what was wanted.
            raise ValueError('docstring must be a non-empty string or an '
                             'object with a non-empty docstring.')
# If the original has a not-empty docstring append it to the format
# kwargs.
kwargs['__doc__'] = obj.__doc__ or ''
obj.__doc__ = doc.format(*args, **kwargs)
return obj
return set_docstring
|