hash | content |
---|---|
63a3dc5729503c5890bebcbbbedd943c2f6b2e9ecd8b401efb0f17e6b8ec1d5f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test separability of models.
"""
import numpy as np
# pylint: disable=invalid-name
import pytest
from numpy.testing import assert_allclose
from astropy.modeling import custom_model, models
from astropy.modeling.core import ModelDefinitionError
from astropy.modeling.models import Mapping
from astropy.modeling.separable import (
_arith_oper,
_cdot,
_coord_matrix,
_cstack,
is_separable,
separability_matrix,
)
sh1 = models.Shift(1, name="shift1")
sh2 = models.Shift(2, name="sh2")
scl1 = models.Scale(1, name="scl1")
scl2 = models.Scale(2, name="scl2")
map1 = Mapping((0, 1, 0, 1), name="map1")
map2 = Mapping((0, 0, 1), name="map2")
map3 = Mapping((0, 0), name="map3")
rot = models.Rotation2D(2, name="rotation")
p2 = models.Polynomial2D(1, name="p2")
p22 = models.Polynomial2D(2, name="p22")
p1 = models.Polynomial1D(1, name="p1")
cm_4d_expected = (
np.array([False, False, True, True]),
np.array(
[
[True, True, False, False],
[True, True, False, False],
[False, False, True, False],
[False, False, False, True],
]
),
)
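# Each compound_models entry maps a name to (model, expected), where expected
# is the (is_separable vector, separability matrix) pair checked by
# test_separable below; cm_4d_expected is shared by the three 4-output models
# cm8, cm9, and cm10.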
compound_models = {
"cm1": (
map3 & sh1 | rot & sh1 | sh1 & sh2 & sh1,
(
np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]),
),
),
"cm2": (
sh1 & sh2 | rot | map1 | p2 & p22,
(np.array([False, False]), np.array([[True, True], [True, True]])),
),
"cm3": (
map2 | rot & scl1,
(
np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]),
),
),
"cm4": (
sh1 & sh2 | map2 | rot & scl1,
(
np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]),
),
),
"cm5": (
map3 | sh1 & sh2 | scl1 & scl2,
(np.array([False, False]), np.array([[True], [True]])),
),
"cm7": (
map2 | p2 & sh1,
(np.array([False, True]), np.array([[True, False], [False, True]])),
),
"cm8": (rot & (sh1 & sh2), cm_4d_expected),
"cm9": (rot & sh1 & sh2, cm_4d_expected),
"cm10": ((rot & sh1) & sh2, cm_4d_expected),
"cm11": (
rot & sh1 & (scl1 & scl2),
(
np.array([False, False, True, True, True]),
np.array(
[
[True, True, False, False, False],
[True, True, False, False, False],
[False, False, True, False, False],
[False, False, False, True, False],
[False, False, False, False, True],
]
),
),
),
}
def test_coord_matrix():
c = _coord_matrix(p2, "left", 2)
assert_allclose(np.array([[1, 1], [0, 0]]), c)
c = _coord_matrix(p2, "right", 2)
assert_allclose(np.array([[0, 0], [1, 1]]), c)
c = _coord_matrix(p1, "left", 2)
assert_allclose(np.array([[1], [0]]), c)
c = _coord_matrix(p1, "left", 1)
assert_allclose(np.array([[1]]), c)
c = _coord_matrix(sh1, "left", 2)
assert_allclose(np.array([[1], [0]]), c)
c = _coord_matrix(sh1, "right", 2)
assert_allclose(np.array([[0], [1]]), c)
c = _coord_matrix(sh1, "right", 3)
assert_allclose(np.array([[0], [0], [1]]), c)
c = _coord_matrix(map3, "left", 2)
assert_allclose(np.array([[1], [1]]), c)
c = _coord_matrix(map3, "left", 3)
assert_allclose(np.array([[1], [1], [0]]), c)
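# Reading the cases above: _coord_matrix(model, pos, noutp) returns an
# (noutp, n_inputs) block marking which inputs each output depends on, with
# "left"/"right" selecting whether the model occupies the top or bottom rows
# of the combined matrix (semantics as inferred from these assertions).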
def test_cdot():
result = _cdot(sh1, scl1)
assert_allclose(result, np.array([[1]]))
result = _cdot(rot, p2)
assert_allclose(result, np.array([[2, 2]]))
result = _cdot(rot, rot)
assert_allclose(result, np.array([[2, 2], [2, 2]]))
result = _cdot(Mapping((0, 0)), rot)
assert_allclose(result, np.array([[2], [2]]))
with pytest.raises(
ModelDefinitionError,
match=r"Models cannot be combined with the \"|\" operator; .*",
):
_cdot(sh1, map1)
def test_cstack():
result = _cstack(sh1, scl1)
assert_allclose(result, np.array([[1, 0], [0, 1]]))
result = _cstack(sh1, rot)
assert_allclose(result, np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]]))
result = _cstack(rot, sh1)
assert_allclose(result, np.array([[1, 1, 0], [1, 1, 0], [0, 0, 1]]))
def test_arith_oper():
# Models as inputs
result = _arith_oper(sh1, scl1)
assert_allclose(result, np.array([[1]]))
result = _arith_oper(rot, rot)
assert_allclose(result, np.array([[1, 1], [1, 1]]))
# ndarray
result = _arith_oper(np.array([[1, 2], [3, 4]]), np.array([[1, 2], [3, 4]]))
assert_allclose(result, np.array([[1, 1], [1, 1]]))
# Error
with pytest.raises(
ModelDefinitionError, match=r"Unsupported operands for arithmetic operator: .*"
):
_arith_oper(sh1, map1)
@pytest.mark.parametrize(("compound_model", "result"), compound_models.values())
def test_separable(compound_model, result):
assert_allclose(is_separable(compound_model), result[0])
assert_allclose(separability_matrix(compound_model), result[1])
def test_custom_model_separable():
@custom_model
def model_a(x):
return x
assert model_a().separable
@custom_model
def model_c(x, y):
return x + y
assert not model_c().separable
assert np.all(separability_matrix(model_c()) == [True, True])
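# --- Added illustrative sketch (not part of the original suite) --------------
# separability_matrix(model) returns a boolean matrix whose [i, j] element is
# True when output i depends on input j; a purely parallel combination ("&")
# therefore stays block-diagonal, while Rotation2D couples its two inputs.
def test_separability_matrix_sketch():
    diag = separability_matrix(models.Shift(1) & models.Shift(2))
    assert np.all(diag == np.array([[True, False], [False, True]]))
    coupled = separability_matrix(models.Rotation2D(2))
    assert np.all(coupled)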
|
db445614d30daa9c81bdcde4e3a6db73336a30963ac9b409f788e1fb09c4bc44 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
# pylint: disable=invalid-name
import functools
import itertools
import unittest.mock as mk
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.core import FittableModel, Model
from astropy.modeling.parameters import (
InputParameterError,
Parameter,
_tofloat,
param_repr_oneline,
)
from astropy.utils.data import get_pkg_data_filename
from . import irafutil
def setter1(val):
return val
def setter2(val, model):
model.do_something(val)
return val * model.p
class SetterModel(FittableModel):
n_inputs = 2
n_outputs = 1
xc = Parameter(default=1, setter=setter1)
yc = Parameter(default=1, setter=setter2)
def do_something(self, v):
pass
def __init__(self, xc, yc, p):
self.p = p # p is a value intended to be used by the setter
super().__init__()
self.xc = xc
self.yc = yc
def evaluate(self, x, y, xc, yc):
return (x - xc) ** 2 + (y - yc) ** 2
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
class MockModel(FittableModel):
alpha = Parameter(name="alpha", default=42)
@staticmethod
def evaluate(*args):
pass
def test__tofloat():
# iterable
value = _tofloat([1, 2, 3])
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
value = _tofloat(np.array([1, 2, 3]))
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
MESSAGE = r"Parameter of .* could not be converted to float"
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat("test")
# quantity
assert _tofloat(1 * u.m) == 1 * u.m
# dimensions/scalar array
value = _tofloat(np.asanyarray(3))
assert isinstance(value, float)
assert value == 3
# A regular number
value = _tofloat(3)
assert isinstance(value, float)
assert value == 3
value = _tofloat(3.0)
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float64(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int64(3))
assert isinstance(value, float)
assert value == 3
# boolean
MESSAGE = r"Expected parameter to be of numerical type, not boolean"
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat(True)
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat(False)
# other
class Value:
pass
MESSAGE = r"Don't know how to convert parameter of .* to float"
with pytest.raises(InputParameterError, match=MESSAGE):
_tofloat(Value)
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
p = Parameter("alpha", default=1)
assert p.name == "alpha"
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = "beta"
assert p.fixed is False
p.fixed = True
assert p.fixed is True
assert p.tied is False
p.tied = lambda _: 0
p.tied = False
assert p.tied is False
assert p.min is None
p.min = 42
assert p.min == 42
p.min = None
assert p.min is None
assert p.max is None
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
par = Parameter("alpha", default=42)
num = 42.0
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par**val == num**val
assert val**par == val**num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
# Test inherited models
class M1(Model):
m1a = Parameter(default=1.0)
m1b = Parameter(default=5.0)
def evaluate():
pass
class M2(M1):
m2c = Parameter(default=11.0)
class M3(M2):
m3d = Parameter(default=20.0)
def test_parameter_inheritance():
mod = M3()
assert mod.m1a == 1.0
assert mod.m1b == 5.0
assert mod.m2c == 11.0
assert mod.m3d == 20.0
for key in ["m1a", "m1b", "m2c", "m3d"]:
assert key in mod.__dict__
assert mod.param_names == ("m1a", "m1b", "m2c", "m3d")
def test_param_metric():
mod = M3()
assert mod._param_metrics["m1a"]["slice"] == slice(0, 1)
assert mod._param_metrics["m1b"]["slice"] == slice(1, 2)
assert mod._param_metrics["m2c"]["slice"] == slice(2, 3)
assert mod._param_metrics["m3d"]["slice"] == slice(3, 4)
mod._parameters_to_array()
assert (mod._parameters == np.array([1.0, 5.0, 11.0, 20], dtype=np.float64)).all()
class TestParameters:
def setup_class(self):
"""
        Unit tests for parameters.

        Read an IRAF database file created by onedspec.identify. Use the
        information to create a 1D Chebyshev model and perform the same fit.
        Also create a Gaussian model.
"""
test_file = get_pkg_data_filename("data/idcompspec.fits")
        with open(test_file) as f:
            lines = f.read()
        reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields["order"])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30.0, 40.0, 50.0, 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3.0, 4.0, 5.0, 6.0, 7.0]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
Tests updating the parameters attribute when a model's
parameter (in this case coeff) is updated.
"""
self.model.parameters = [0, 0.0, 0.0, 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0.0, 0.0, 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
tpar = self.model.parameters
tpar[0] = 10.0
self.model.parameters = tpar
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
MESSAGE = (
r"Input parameter values not compatible with the model parameters array: .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
Tests raising an exception when attempting to update a model's
parameter (in this case coeff) with a sequence of the wrong size.
"""
MESSAGE = (
r"Value for parameter c0 does not match shape or size\nexpected by model .*"
r" vs .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
MESSAGE = (
r"Value for parameter amplitude does not match shape or size\nexpected by"
r" model .* vs .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
Test the fitter modifies model.parameters.
Uses an iraf example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
np.testing.assert_allclose(
new_model.parameters,
np.array(
[
4826.1066602783685,
952.8943813407858,
12.641236013982386,
-1.7910672553339604,
0.90252884366711317,
]
),
rtol=10 ** (-2),
)
    def test_polynomial1d(self):
d = {"c0": 11, "c1": 12, "c2": 13, "c3": 14}
p1 = models.Polynomial1D(3, **d)
np.testing.assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
np.testing.assert_equal(
p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
np.testing.assert_array_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
np.testing.assert_equal(
p1.parameters, [10.0, 10.0, 10.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
np.testing.assert_equal(
p1.parameters, [10.0, 10.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
np.testing.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {
"c0_0": [2, 3],
"c1_0": [1, 2],
"c2_0": [4, 5],
"c0_1": [1, 1],
"c0_2": [2, 2],
"c1_1": [5, 5],
}
p2 = models.Polynomial2D(2, **kw)
np.testing.assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5, 1, 1, 2, 2, 5, 5])
def test_shift_model_parameters1d(self):
sh1 = models.Shift(2)
sh1.offset = 3
assert sh1.offset == 3
assert sh1.offset.value == 3
def test_scale_model_parametersnd(self):
sc1 = models.Scale([2, 2])
sc1.factor = [3, 3]
assert np.all(sc1.factor == [3, 3])
np.testing.assert_array_equal(sc1.factor.value, [3, 3])
def test_bounds(self):
# Valid __init__
param = Parameter(bounds=(1, 2))
assert param.bounds == (1, 2)
param = Parameter(min=1, max=2)
assert param.bounds == (1, 2)
# Errors __init__
MESSAGE = r"bounds may not be specified simultaneously with min or max .*"
with pytest.raises(ValueError, match=MESSAGE):
Parameter(bounds=(1, 2), min=1, name="test")
with pytest.raises(ValueError, match=MESSAGE):
Parameter(bounds=(1, 2), max=2, name="test")
with pytest.raises(ValueError, match=MESSAGE):
Parameter(bounds=(1, 2), min=1, max=2, name="test")
# Setters
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.bounds == (None, None) == param._bounds
# Set errors
MESSAGE = "{} value must be a number or a Quantity"
with pytest.raises(TypeError, match=MESSAGE.format("Min")):
param.bounds = ("test", None)
with pytest.raises(TypeError, match=MESSAGE.format("Max")):
param.bounds = (None, "test")
# Set number
param.bounds = (1, 2)
assert param.bounds == (1, 2) == param._bounds
# Set Quantity
param.bounds = (1 * u.m, 2 * u.m)
assert param.bounds == (1, 2) == param._bounds
def test_modify_value(self):
param = Parameter(name="test", default=[1, 2, 3])
assert (param.value == [1, 2, 3]).all()
# Errors
MESSAGE = r"Slice assignment outside the parameter dimensions for 'test'"
with pytest.raises(InputParameterError, match=MESSAGE):
param[slice(0, 0)] = 2
MESSAGE = r"Input dimension 3 invalid for 'test' parameter with dimension 1"
with pytest.raises(InputParameterError, match=MESSAGE):
param[3] = np.array([5])
# assignment of a slice
param[slice(0, 2)] = [4, 5]
assert (param.value == [4, 5, 3]).all()
# assignment of a value
param[2] = 6
assert (param.value == [4, 5, 6]).all()
def test__set_unit(self):
param = Parameter(name="test", default=[1, 2, 3])
assert param.unit is None
# No force Error (no existing unit)
MESSAGE = r"Cannot attach units to parameters that were .*"
with pytest.raises(ValueError, match=MESSAGE):
param._set_unit(u.m)
# Force
param._set_unit(u.m, True)
assert param.unit == u.m
# Force magnitude unit (mag=False)
MESSAGE = r"This parameter does not support the magnitude units such as .*"
with pytest.raises(ValueError, match=MESSAGE):
param._set_unit(u.ABmag, True)
# Force magnitude unit (mag=True)
param._mag = True
param._set_unit(u.ABmag, True)
assert param._unit == u.ABmag
# No force Error (existing unit)
MESSAGE = r"Cannot change the unit attribute directly, instead change the .*"
with pytest.raises(ValueError, match=MESSAGE):
param._set_unit(u.K)
def test_quantity(self):
param = Parameter(name="test", default=[1, 2, 3])
assert param.unit is None
assert param.quantity is None
param = Parameter(name="test", default=[1, 2, 3], unit=u.m)
assert param.unit == u.m
assert (param.quantity == np.array([1, 2, 3]) * u.m).all()
def test_shape(self):
# Array like
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.shape == (4,)
# Reshape error
MESSAGE = r"cannot reshape array of size 4 into shape .*"
with pytest.raises(ValueError, match=MESSAGE):
param.shape = (5,)
# Reshape success
param.shape = (2, 2)
assert param.shape == (2, 2)
assert (param.value == [[1, 2], [3, 4]]).all()
# Scalar
param = Parameter(name="test", default=1)
assert param.shape == ()
# Reshape error
MESSAGE = r"Cannot assign this shape to a scalar quantity"
with pytest.raises(ValueError, match=MESSAGE):
param.shape = (5,)
param.shape = (1,)
# single value
param = Parameter(name="test", default=np.array([1]))
assert param.shape == (1,)
# Reshape error
with pytest.raises(ValueError, match=MESSAGE):
param.shape = (5,)
param.shape = ()
def test_size(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.size == 4
param = Parameter(name="test", default=[1])
assert param.size == 1
param = Parameter(name="test", default=1)
assert param.size == 1
def test_std(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.std is None
assert param._std is None
param.std = 5
assert param.std == 5 == param._std
def test_fixed(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.fixed is False
assert param._fixed is False
# Set error
MESSAGE = r"Value must be boolean"
with pytest.raises(ValueError, match=MESSAGE):
param.fixed = 3
# Set
param.fixed = True
assert param.fixed is True
assert param._fixed is True
def test_tied(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.tied is False
assert param._tied is False
# Set error
MESSAGE = r"Tied must be a callable or set to False or None"
with pytest.raises(TypeError, match=MESSAGE):
param.tied = mk.NonCallableMagicMock()
# Set None
param.tied = None
assert param.tied is None
assert param._tied is None
# Set False
param.tied = False
assert param.tied is False
assert param._tied is False
# Set other
tied = mk.MagicMock()
param.tied = tied
assert param.tied == tied == param._tied
def test_validator(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param._validator is None
valid = mk.MagicMock()
param.validator(valid)
assert param._validator == valid
MESSAGE = r"This decorator method expects a callable.*"
with pytest.raises(ValueError, match=MESSAGE):
param.validator(mk.NonCallableMagicMock())
def test_validate(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param._validator is None
assert param.model is None
# Run without validator
param.validate(mk.MagicMock())
# Run with validator but no Model
validator = mk.MagicMock()
param.validator(validator)
assert param._validator == validator
param.validate(mk.MagicMock())
assert validator.call_args_list == []
# Full validate
param._model = mk.MagicMock()
value = mk.MagicMock()
param.validate(value)
assert validator.call_args_list == [mk.call(param._model, value)]
def test_copy(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
copy_param = param.copy()
assert (param == copy_param).all()
assert id(param) != id(copy_param)
def test_model(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param.model is None
assert param._model is None
assert param._model_required is False
assert (param._value == [1, 2, 3, 4]).all()
setter = mk.MagicMock()
getter = mk.MagicMock()
param._setter = setter
param._getter = getter
# No Model Required
param._value = [5, 6, 7, 8]
model0 = mk.MagicMock()
setter0 = mk.MagicMock()
getter0 = mk.MagicMock()
with mk.patch.object(
Parameter, "_create_value_wrapper", side_effect=[setter0, getter0]
) as mkCreate:
param.model = model0
assert param.model == model0 == param._model
assert param._setter == setter0
assert param._getter == getter0
assert mkCreate.call_args_list == [
mk.call(setter, model0),
mk.call(getter, model0),
]
assert param._value == [5, 6, 7, 8]
param._setter = setter
param._getter = getter
# Model required
param._model_required = True
model1 = mk.MagicMock()
setter1 = mk.MagicMock()
getter1 = mk.MagicMock()
setter1.return_value = [9, 10, 11, 12]
getter1.return_value = [9, 10, 11, 12]
with mk.patch.object(
Parameter, "_create_value_wrapper", side_effect=[setter1, getter1]
) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1),
]
assert (param.value == [9, 10, 11, 12]).all()
param._setter = setter
param._getter = getter
param._default = None
with mk.patch.object(
Parameter, "_create_value_wrapper", side_effect=[setter1, getter1]
) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1),
]
assert param._value is None
def test_raw_value(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
# Normal case
assert (param._raw_value == param.value).all()
# Bad setter
param._setter = True
param._internal_value = 4
assert param._raw_value == 4
def test__create_value_wrapper(self):
param = Parameter(name="test", default=[1, 2, 3, 4])
# Bad ufunc
MESSAGE = r"A numpy.ufunc used for Parameter getter/setter .*"
with pytest.raises(TypeError, match=MESSAGE):
param._create_value_wrapper(np.add, mk.MagicMock())
# Good ufunc
assert param._create_value_wrapper(np.negative, mk.MagicMock()) == np.negative
# None
assert param._create_value_wrapper(None, mk.MagicMock()) is None
# wrapper with one argument
def wrapper1(a):
pass
assert param._create_value_wrapper(wrapper1, mk.MagicMock()) == wrapper1
        # wrapper with two arguments
def wrapper2(a, b):
pass
# model is None
assert param._model_required is False
assert param._create_value_wrapper(wrapper2, None) == wrapper2
assert param._model_required is True
# model is not None
param._model_required = False
model = mk.MagicMock()
with mk.patch.object(functools, "partial", autospec=True) as mkPartial:
assert (
param._create_value_wrapper(wrapper2, model) == mkPartial.return_value
)
# wrapper with more than 2 arguments
def wrapper3(a, b, c):
pass
MESSAGE = r"Parameter getter/setter must be a function .*"
with pytest.raises(TypeError, match=MESSAGE):
param._create_value_wrapper(wrapper3, mk.MagicMock())
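        # (Summary of the arity contract exercised above: 1-argument wrappers
        # pass through unchanged; 2-argument wrappers are either recorded as
        # model-required when model is None, or bound to the model, apparently
        # via functools.partial; any other signature raises TypeError.)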
def test_bool(self):
# single value is true
param = Parameter(name="test", default=1)
assert param.value == 1
assert np.all(param)
if param:
assert True
else:
assert False
# single value is false
param = Parameter(name="test", default=0)
assert param.value == 0
assert not np.all(param)
if param:
assert False
else:
assert True
# vector value all true
param = Parameter(name="test", default=[1, 2, 3, 4])
assert np.all(param.value == [1, 2, 3, 4])
assert np.all(param)
if param:
assert True
else:
assert False
# vector value at least one false
param = Parameter(name="test", default=[1, 2, 0, 3, 4])
assert np.all(param.value == [1, 2, 0, 3, 4])
assert not np.all(param)
if param:
assert False
else:
assert True
def test_param_repr_oneline(self):
# Single value no units
param = Parameter(name="test", default=1)
assert param_repr_oneline(param) == "1."
# Vector value no units
param = Parameter(name="test", default=[1, 2, 3, 4])
assert param_repr_oneline(param) == "[1., 2., 3., 4.]"
# Single value units
param = Parameter(name="test", default=1 * u.m)
assert param_repr_oneline(param) == "1. m"
# Vector value units
param = Parameter(name="test", default=[1, 2, 3, 4] * u.m)
assert param_repr_oneline(param) == "[1., 2., 3., 4.] m"
class TestMultipleParameterSets:
def setup_class(self):
self.x1 = np.arange(1, 10, 0.1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D(
[12, 10], [3.5, 5.2], stddev=[0.4, 0.7], n_models=2
)
def test_change_par(self):
"""
Test that a change to one parameter as a set propagates to param_sets.
"""
self.gmodel.amplitude = [1, 10]
np.testing.assert_almost_equal(
self.gmodel.param_sets,
np.array(
[
[1.0, 10],
[3.5, 5.2],
[0.4, 0.7],
]
),
)
        assert np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
Test that a change to one single parameter in a set propagates to
param_sets.
"""
self.gmodel.amplitude[0] = 11
np.testing.assert_almost_equal(
self.gmodel.param_sets,
np.array(
[
[11.0, 10],
[3.5, 5.2],
[0.4, 0.7],
]
),
)
        assert np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
np.testing.assert_almost_equal(self.gmodel.amplitude.value, [13.0, 10.0])
np.testing.assert_almost_equal(self.gmodel.mean.value, [9.0, 5.2])
class TestParameterInitialization:
"""
    This suite of tests checks most if not all cases of instantiating a model
with parameters of different shapes/sizes and with different numbers of
parameter sets.
"""
def test_single_model_scalar_parameters(self):
t = TParModel(10, 1)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[10], [1]])
assert np.all(t.parameters == [10, 1])
assert t.coeff.shape == ()
assert t.e.shape == ()
def test_single_model_scalar_and_array_parameters(self):
t = TParModel(10, [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert len(t.param_sets) == 2
assert np.all(t.param_sets[0] == [10])
assert np.all(t.param_sets[1] == [[1, 2]])
assert np.all(t.parameters == [10, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_single_model_1d_array_parameters(self):
t = TParModel([10, 20], [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
def test_single_model_1d_array_different_length_parameters(self):
MESSAGE = (
r"Parameter .* of shape .* cannot be broadcast with parameter .* of"
r" shape .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
# Not broadcastable
TParModel([1, 2], [3, 4, 5])
def test_single_model_2d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(
t.param_sets
== [
[[[10, 20], [30, 40]]],
[[[1, 2], [3, 4]]],
]
)
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_single_model_2d_non_square_parameters(self):
coeff = np.array(
[
[10, 20],
[30, 40],
[50, 60],
]
)
e = np.array([[1, 2], [3, 4], [5, 6]])
t = TParModel(coeff, e)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(
t.param_sets
== [
[[[10, 20], [30, 40], [50, 60]]],
[[[1, 2], [3, 4], [5, 6]]],
]
)
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3, 4, 5, 6])
assert t.coeff.shape == (3, 2)
assert t.e.shape == (3, 2)
t2 = TParModel(coeff.T, e.T)
assert len(t2) == 1
assert t2.model_set_axis is False
assert np.all(
t2.param_sets
== [
[[[10, 30, 50], [20, 40, 60]]],
[[[1, 3, 5], [2, 4, 6]]],
]
)
assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60, 1, 3, 5, 2, 4, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
# Not broadcastable
MESSAGE = (
r"Parameter .* of shape .* cannot be broadcast with parameter .* of"
r" shape .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff, e.T)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff.T, e)
def test_single_model_2d_broadcastable_parameters(self):
t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
assert len(t) == 1
assert t.model_set_axis is False
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(
t.param_sets[0]
== [
[[10, 20, 30], [40, 50, 60]],
]
)
assert np.all(t.param_sets[1] == [[1, 2, 3]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])
@pytest.mark.parametrize(
("p1", "p2"),
[
(1, 2),
(1, [2, 3]),
([1, 2], 3),
([1, 2, 3], [4, 5]),
([1, 2], [3, 4, 5]),
],
)
def test_two_model_incorrect_scalar_parameters(self, p1, p2):
with pytest.raises(InputParameterError, match=r".*"):
TParModel(p1, p2, n_models=2)
@pytest.mark.parametrize(
"kwargs",
[
{"n_models": 2},
{"model_set_axis": 0},
{"n_models": 2, "model_set_axis": 0},
],
)
def test_two_model_scalar_parameters(self, kwargs):
t = TParModel([10, 20], [1, 2], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[10, 20], [1, 2]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
@pytest.mark.parametrize(
"kwargs",
[
{"n_models": 2},
{"model_set_axis": 0},
{"n_models": 2, "model_set_axis": 0},
],
)
def test_two_model_scalar_and_array_parameters(self, kwargs):
t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[10], [20]])
assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
assert t.coeff.shape == (2,)
assert t.e.shape == (2, 2)
def test_two_model_1d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(
t.param_sets
== [
[[10, 20], [30, 40]],
[[1, 2], [3, 4]],
]
)
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
t2 = TParModel([[10, 20, 30], [40, 50, 60]], [[1, 2, 3], [4, 5, 6]], n_models=2)
assert len(t2) == 2
assert t2.model_set_axis == 0
assert np.all(
t2.param_sets
== [
[[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]],
]
)
assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3, 4, 5, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
def test_two_model_mixed_dimension_array_parameters(self):
MESSAGE = (
r"Parameter .* of shape .* cannot be broadcast with parameter .* of"
r" shape .*"
)
with pytest.raises(InputParameterError, match=MESSAGE):
# Can't broadcast different array shapes
TParModel(
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[9, 10, 11], [12, 13, 14]],
n_models=2,
)
t = TParModel(
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]], [[1, 2], [3, 4]], n_models=2
)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]], [[50, 60], [70, 80]]])
assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
def test_two_model_2d_array_parameters(self):
t = TParModel(
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
n_models=2,
)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(
t.param_sets
== [
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
]
)
assert np.all(
t.parameters == [10, 20, 30, 40, 50, 60, 70, 80, 1, 2, 3, 4, 5, 6, 7, 8]
)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2, 2)
def test_two_model_nonzero_model_set_axis(self):
# An example where the model set axis is the *last* axis of the
# parameter arrays
coeff = np.array([[[10, 20, 30], [30, 40, 50]], [[50, 60, 70], [70, 80, 90]]])
coeff = np.rollaxis(coeff, 0, 3)
e = np.array([[1, 2, 3], [3, 4, 5]])
e = np.rollaxis(e, 0, 2)
t = TParModel(coeff, e, n_models=2, model_set_axis=-1)
assert len(t) == 2
assert t.model_set_axis == -1
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(
t.param_sets[0]
== [
[[10, 50], [20, 60], [30, 70]],
[[30, 70], [40, 80], [50, 90]],
]
)
assert np.all(t.param_sets[1] == [[[1, 3], [2, 4], [3, 5]]])
assert np.all(
t.parameters
== [10, 50, 20, 60, 30, 70, 30, 70, 40, 80, 50, 90, 1, 3, 2, 4, 3, 5]
)
assert t.coeff.shape == (2, 3, 2) # note change in api
assert t.e.shape == (3, 2) # note change in api
def test_wrong_number_of_params(self):
MESSAGE = r"Inconsistent dimensions for parameter .* for 2 model sets.*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)
def test_wrong_number_of_params2(self):
MESSAGE = r"All parameter values must be arrays of dimension at .*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)
def test_array_parameter1(self):
MESSAGE = r"All parameter values must be arrays of dimension at .*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)
def test_array_parameter2(self):
MESSAGE = r"Inconsistent dimensions for parameter .* for 2 model sets.*"
with pytest.raises(InputParameterError, match=MESSAGE):
TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11), model_set_axis=0)
def test_array_parameter4(self):
"""
Test multiple parameter model with array-valued parameters of the same
size as the number of parameter sets.
"""
t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
assert len(t4) == 1
assert t4.coeff.shape == (2, 2)
assert t4.e.shape == (2,)
assert np.issubdtype(t4.param_sets.dtype, np.object_)
assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
assert np.all(t4.param_sets[1] == [5, 6])
def test_non_broadcasting_parameters():
"""
    Test that a model with 3 parameters that do not all mutually broadcast
    is rejected, regardless of the order in which the parameters are
    given.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
MESSAGE = (
r"Parameter '.*' of shape .* cannot be broadcast with parameter '.*' of"
r" shape .*"
)
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError, match=MESSAGE):
TestModel(*args)
def test_setter():
pars = np.random.rand(20).reshape((10, 2))
model = SetterModel(xc=-1, yc=3, p=np.pi)
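    # (Added note: assigning yc=3 routes through setter2, which multiplies the
    # value by model.p (= pi), so evaluate() receives yc = 3 * pi; xc uses the
    # identity setter1 and stays at -1.  Hence the expected residuals below.)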
for x, y in pars:
np.testing.assert_almost_equal(model(x, y), (x + 1) ** 2 + (y - np.pi * 3) ** 2)
|
2f10840ad29a76c41916cedd2eec3b395b728040f73bc6f498a58d7ceab31fd1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for physical functions."""
# pylint: disable=no-member, invalid-name
import numpy as np
import pytest
from astropy import cosmology
from astropy import units as u
from astropy.modeling.fitting import (
DogBoxLSQFitter,
LevMarLSQFitter,
LMLSQFitter,
TRFLSQFitter,
)
from astropy.modeling.physical_models import NFW, BlackBody
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
__doctest_skip__ = ["*"]
fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
# BlackBody tests
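# (Note: 2726.85 deg_C is exactly 3000 K, so both parametrizations below must
# yield the same spectral radiance.)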
@pytest.mark.parametrize("temperature", (3000 * u.K, 2726.85 * u.deg_C))
def test_blackbody_evaluate(temperature):
b = BlackBody(temperature=temperature, scale=1.0)
assert_quantity_allclose(b(1.4 * u.micron), 486787299458.15656 * u.MJy / u.sr)
assert_quantity_allclose(b(214.13747 * u.THz), 486787299458.15656 * u.MJy / u.sr)
def test_blackbody_wiens_law():
b = BlackBody(293.0 * u.K)
assert_quantity_allclose(b.lambda_max, 9.890006672986939 * u.micron)
assert_quantity_allclose(b.nu_max, 17.22525080856469 * u.THz)
def test_blackbody_stefan_boltzmann_law():
b = BlackBody(293.0 * u.K)
assert_quantity_allclose(b.bolometric_flux, 133.02471751812573 * u.W / (u.m * u.m))
def test_blackbody_input_units():
SLAM = u.erg / (u.cm**2 * u.s * u.AA * u.sr)
SNU = u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
b_lam = BlackBody(3000 * u.K, scale=1 * SLAM)
assert b_lam.input_units["x"] == u.AA
b_nu = BlackBody(3000 * u.K, scale=1 * SNU)
assert b_nu.input_units["x"] == u.Hz
def test_blackbody_return_units():
# return of evaluate has no units when temperature has no units
b = BlackBody(1000.0 * u.K, scale=1.0)
assert not isinstance(b.evaluate(1.0 * u.micron, 1000.0, 1.0), u.Quantity)
# return has "standard" units when scale has no units
b = BlackBody(1000.0 * u.K, scale=1.0)
assert isinstance(b(1.0 * u.micron), u.Quantity)
assert b(1.0 * u.micron).unit == u.erg / (u.cm**2 * u.s * u.Hz * u.sr)
# return has scale units when scale has units
b = BlackBody(1000.0 * u.K, scale=1.0 * u.MJy / u.sr)
assert isinstance(b(1.0 * u.micron), u.Quantity)
assert b(1.0 * u.micron).unit == u.MJy / u.sr
# scale has units but evaluate scale has no units
assert_quantity_allclose(
b.evaluate(1.0 * u.micron, 1000.0 * u.K, 4.0), 89668184.86321202 * u.MJy / u.sr
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_blackbody_fit(fitter):
fitter = fitter()
    if isinstance(fitter, (TRFLSQFitter, DogBoxLSQFitter)):
rtol = 0.54
atol = 1e-15
else:
rtol = 1e-7
atol = 0
b = BlackBody(3000 * u.K, scale=5e-17 * u.Jy / u.sr)
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy / u.sr
b_fit = fitter(b, wav, fnu, maxiter=1000)
assert_quantity_allclose(b_fit.temperature, 2840.7438355865065 * u.K, rtol=rtol)
assert_quantity_allclose(b_fit.scale, 5.803783292762381e-17, atol=atol)
def test_blackbody_overflow():
"""Test Planck function with overflow."""
photlam = u.photon / (u.cm**2 * u.s * u.AA)
wave = [0.0, 1000.0, 100000.0, 1e55] # Angstrom
temp = 10000.0 # Kelvin
bb = BlackBody(temperature=temp * u.K, scale=1.0)
with pytest.warns(
AstropyUserWarning,
match=r"Input contains invalid wavelength/frequency value\(s\)",
):
with np.errstate(all="ignore"):
bb_lam = bb(wave) * u.sr
flux = bb_lam.to(photlam, u.spectral_density(wave * u.AA)) / u.sr
# First element is NaN, last element is very small, others normal
assert np.isnan(flux[0])
with np.errstate(all="ignore"):
assert np.log10(flux[-1].value) < -134
np.testing.assert_allclose(
flux.value[1:-1], [0.00046368, 0.04636773], rtol=1e-3
) # 0.1% accuracy in PHOTLAM/sr
with np.errstate(all="ignore"):
flux = bb(1.0 * u.AA)
assert flux.value == 0
def test_blackbody_exceptions_and_warnings():
"""Test exceptions."""
# Negative temperature
with pytest.raises(
ValueError, match="Temperature should be positive: \\[-100.\\] K"
):
bb = BlackBody(-100 * u.K)
bb(1.0 * u.micron)
bb = BlackBody(5000 * u.K)
# Zero wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match="invalid") as w:
bb(0 * u.AA)
assert len(w) == 3 # 2 of these are RuntimeWarning from zero divide
# Negative wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match="invalid") as w:
bb(-1.0 * u.AA)
assert len(w) == 1
# Test that a non surface brightness convertible scale unit raises an error
with pytest.raises(
ValueError, match="scale units not dimensionless or in surface brightness: Jy"
):
bb = BlackBody(5000 * u.K, scale=1.0 * u.Jy)
def test_blackbody_array_temperature():
"""Regression test to make sure that the temperature can be an array."""
multibb = BlackBody([100, 200, 300] * u.K)
flux = multibb(1.2 * u.mm)
np.testing.assert_allclose(
flux.value, [1.804908e-12, 3.721328e-12, 5.638513e-12], rtol=1e-5
)
flux = multibb([2, 4, 6] * u.mm)
np.testing.assert_allclose(
flux.value, [6.657915e-13, 3.420677e-13, 2.291897e-13], rtol=1e-5
)
multibb = BlackBody(np.ones(4) * u.K)
flux = multibb(np.ones((3, 4)) * u.mm)
assert flux.shape == (3, 4)
def test_blackbody_dimensionless():
"""Test support for dimensionless (but not unscaled) units for scale"""
T = 3000 * u.K
r = 1e14 * u.cm
DL = 100 * u.Mpc
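    # (Added note: pi * (r / DL) ** 2 is the standard dilution factor for a
    # source of radius r observed from distance DL, which is dimensionless but
    # not unity.)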
scale = np.pi * (r / DL) ** 2
bb1 = BlackBody(temperature=T, scale=scale)
# even though we passed scale with units, we should be able to evaluate with unitless
bb1.evaluate(0.5, T.value, scale.to_value(u.dimensionless_unscaled))
bb2 = BlackBody(temperature=T, scale=scale.to_value(u.dimensionless_unscaled))
bb2.evaluate(0.5, T.value, scale.to_value(u.dimensionless_unscaled))
# bolometric flux for both cases should be equivalent
assert bb1.bolometric_flux == bb2.bolometric_flux
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_blackbody_dimensionless_fit():
T = 3000 * u.K
r = 1e14 * u.cm
DL = 100 * u.Mpc
scale = np.pi * (r / DL) ** 2
bb1 = BlackBody(temperature=T, scale=scale)
bb2 = BlackBody(temperature=T, scale=scale.to_value(u.dimensionless_unscaled))
fitter = LevMarLSQFitter()
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy / u.sr
bb1_fit = fitter(bb1, wav, fnu, maxiter=1000)
bb2_fit = fitter(bb2, wav, fnu, maxiter=1000)
assert bb1_fit.temperature == bb2_fit.temperature
@pytest.mark.parametrize("mass", (2.0000000000000e15 * u.M_sun, 3.976819741e45 * u.kg))
def test_NFW_evaluate(mass):
"""Evaluation, density, and radii validation of NFW model."""
# Test parameters
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
# Parsec tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200c(3.0 * u.Mpc),
(
3.709693508e12 * (u.solMass / u.Mpc**3),
7.376391187e42 * (u.kg / u.Mpc**3),
),
)
assert_quantity_allclose(
n200c.rho_scale, (7800150779863018.0 * (u.solMass / u.Mpc**3))
)
assert_quantity_allclose(n200c.r_s, (0.24684627641195428 * u.Mpc))
assert_quantity_allclose(n200c.r_virial, (2.0981933495016114 * u.Mpc))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200m(3.0 * u.Mpc),
(
3.626093406e12 * (u.solMass / u.Mpc**3),
7.210159921e42 * (u.kg / u.Mpc**3),
),
)
assert_quantity_allclose(
n200m.rho_scale, (5118547639858115.0 * (u.solMass / u.Mpc**3))
)
assert_quantity_allclose(n200m.r_s, (0.2840612517326848 * u.Mpc))
assert_quantity_allclose(n200m.r_virial, (2.414520639727821 * u.Mpc))
# Virial mass
massfactor = "virial"
nvir = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
nvir(3.0 * u.Mpc),
(
3.646475546e12 * (u.solMass / u.Mpc**3),
7.250687967e42 * (u.kg / u.Mpc**3),
),
)
assert_quantity_allclose(
nvir.rho_scale, (5649367524651067.0 * (u.solMass / u.Mpc**3))
)
assert_quantity_allclose(nvir.r_s, (0.2748701862303786 * u.Mpc))
assert_quantity_allclose(nvir.r_virial, (2.3363965829582183 * u.Mpc))
# kpc tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200c(3141 * u.kpc),
(
3254.373619264334 * (u.solMass / u.kpc**3),
6.471028627484543e33 * (u.kg / u.kpc**3),
),
)
assert_quantity_allclose(
n200c.rho_scale, (7800150.779863021 * (u.solMass / u.kpc**3))
)
assert_quantity_allclose(n200c.r_s, (246.84627641195425 * u.kpc))
assert_quantity_allclose(n200c.r_virial, (2098.193349501611 * u.kpc))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200m(3141 * u.kpc),
(
3184.0370866188623 * (u.solMass / u.kpc**3),
6.33117077170161e33 * (u.kg / u.kpc**3),
),
)
assert_quantity_allclose(
n200m.rho_scale, (5118547.639858116 * (u.solMass / u.kpc**3))
)
assert_quantity_allclose(n200m.r_s, (284.0612517326848 * u.kpc))
assert_quantity_allclose(n200m.r_virial, (2414.5206397278207 * u.kpc))
# Virial mass
massfactor = "virial"
nvir = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
nvir(3141 * u.kpc),
(
3201.1946851294997 * (u.solMass / u.kpc**3),
6.365287109937637e33 * (u.kg / u.kpc**3),
),
)
assert_quantity_allclose(
nvir.rho_scale, (5649367.5246510655 * (u.solMass / u.kpc**3))
)
assert_quantity_allclose(nvir.r_s, (274.87018623037864 * u.kpc))
assert_quantity_allclose(nvir.r_virial, (2336.3965829582185 * u.kpc))
# Meter tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200c(4.2e23 * u.m),
(
1.527649658673012e-57 * (u.solMass / u.m**3),
3.0375936602739256e-27 * (u.kg / u.m**3),
),
)
assert_quantity_allclose(
n200c.rho_scale, (2.654919529637763e-52 * (u.solMass / u.m**3))
)
assert_quantity_allclose(n200c.r_s, (7.616880211930209e21 * u.m))
assert_quantity_allclose(n200c.r_virial, (6.474348180140678e22 * u.m))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200m(4.2e23 * u.m),
(
1.5194778058079436e-57 * (u.solMass / u.m**3),
3.0213446673751314e-27 * (u.kg / u.m**3),
),
)
assert_quantity_allclose(
n200m.rho_scale, (1.742188385322371e-52 * (u.solMass / u.m**3))
)
assert_quantity_allclose(n200m.r_s, (8.76521436235054e21 * u.m))
assert_quantity_allclose(n200m.r_virial, (7.450432207997959e22 * u.m))
# Virial mass
massfactor = "virial"
nvir = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
nvir(4.2e23 * u.m),
(
1.5214899184117633e-57 * (u.solMass / u.m**3),
3.0253455719375224e-27 * (u.kg / u.m**3),
),
)
assert_quantity_allclose(
nvir.rho_scale, (1.922862338766335e-52 * (u.solMass / u.m**3))
)
assert_quantity_allclose(nvir.r_s, (8.481607714647913e21 * u.m))
assert_quantity_allclose(nvir.r_virial, (7.209366557450727e22 * u.m))
# Verify string input of overdensity type
# 200c Overdensity
massfactor = "200c"
n200c = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200c(3.0 * u.Mpc),
(
3.709693508e12 * (u.solMass / u.Mpc**3),
7.376391187e42 * (u.kg / u.Mpc**3),
),
)
# 200m Overdensity
massfactor = "200m"
n200m = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200m(3.0 * u.Mpc),
(
3.626093406e12 * (u.solMass / u.Mpc**3),
7.210159921e42 * (u.kg / u.Mpc**3),
),
)
# Virial mass
massfactor = "virial"
nvir = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
nvir(3.0 * u.Mpc),
(
3.646475546e12 * (u.solMass / u.Mpc**3),
7.250687967e42 * (u.kg / u.Mpc**3),
),
)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_NFW_fit(fitter):
"""Test linear fitting of NFW model."""
fitter = fitter()
if isinstance(fitter, DogBoxLSQFitter):
pytest.xfail("dogbox method is poor fitting method for NFW model")
# Fixed parameters
redshift = 0.63
cosmo = cosmology.Planck15
# Radial set
# fmt: off
r = np.array(
[
1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04
]
) * u.kpc
# fmt: on
# 200c Overdensity
massfactor = ("critical", 200)
# fmt: off
density_r = np.array(
[
1.77842761e+08, 9.75233623e+06, 2.93789626e+06, 1.90107238e+06,
1.30776878e+06, 7.01004140e+05, 4.20678479e+05, 1.57421880e+05,
7.54669701e+04, 2.56319769e+04, 6.21976562e+03, 3.96522424e+02,
7.39336808e+01
]
) * (u.solMass / u.kpc**3)
# fmt: on
n200c = NFW(
mass=1.8e15 * u.M_sun,
concentration=7.0,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
n200c.redshift.fixed = True
n_fit = fitter(n200c, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000e15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
# 200m Overdensity
massfactor = ("mean", 200)
# fmt: off
density_r = np.array(
[
1.35677282e+08, 7.95392979e+06, 2.50352599e+06, 1.64535870e+06,
1.14642248e+06, 6.26805453e+05, 3.81691731e+05, 1.46294819e+05,
7.11559560e+04, 2.45737796e+04, 6.05459585e+03, 3.92183991e+02,
7.34674416e+01
]
) * (u.solMass / u.kpc**3)
# fmt: on
n200m = NFW(
mass=1.8e15 * u.M_sun,
concentration=7.0,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
n200m.redshift.fixed = True
n_fit = fitter(n200m, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000e15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
# Virial mass
massfactor = ("virial", 200)
# fmt: off
density_r = np.array(
[
1.44573515e+08, 8.34873998e+06, 2.60137484e+06, 1.70348738e+06,
1.18337370e+06, 6.43994654e+05, 3.90800249e+05, 1.48930537e+05,
7.21856397e+04, 2.48289464e+04, 6.09477095e+03, 3.93248818e+02,
7.35821787e+01
]
) * (u.solMass / u.kpc**3)
# fmt: on
nvir = NFW(
mass=1.8e15 * u.M_sun,
concentration=7.0,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
nvir.redshift.fixed = True
n_fit = fitter(nvir, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000e15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
def test_NFW_circular_velocity():
"""Test circular velocity and radial validation of NFW model."""
# Test parameters
mass = 2.0000000000000e15 * u.M_sun
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
r_r = (
np.array([0.01, 0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.75, 1.0, 1.5, 2.5, 6.5, 11.5])
* u.Mpc
)
# 200c Overdensity tests
massfactor = ("critical", 200)
n200c = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
# fmt: off
circ_v_200c = np.array(
[
702.45487454, 1812.4138346, 2150.50929296, 2231.5802568,
2283.96950242, 2338.45989696, 2355.78876772, 2332.41766543,
2276.89433811, 2154.53909153, 1950.07947819, 1512.37442943,
1260.94034541
]
) * (u.km / u.s)
# fmt: on
assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
assert_quantity_allclose(n200c.r_max, (0.5338248204429641 * u.Mpc))
assert_quantity_allclose(n200c.v_max, (2356.7204380904027 * (u.km / u.s)))
# 200m Overdensity tests
massfactor = ("mean", 200)
mass = 1.0e14 * u.M_sun
concentration = 12.3
redshift = 1.5
n200m = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
# fmt: off
circ_v_200m = np.array(
[
670.18236647, 1088.9843324, 1046.82334367, 1016.88890732,
987.97273478, 936.00207134, 891.80115232, 806.63307977,
744.91002191, 659.33401039, 557.82823549, 395.9735786,
318.29863006
]
) * (u.km / u.s)
# fmt: on
assert_quantity_allclose(n200m.circular_velocity(r_r), circ_v_200m)
assert_quantity_allclose(n200m.r_max, (0.10196917920081808 * u.Mpc))
assert_quantity_allclose(n200m.v_max, (1089.0224395818727 * (u.km / u.s)))
# Virial Overdensity tests
massfactor = "virial"
mass = 1.2e45 * u.kg
concentration = 2.4
redshift = 0.34
# fmt: off
r_r = np.array(
[
3.08567758e+20, 3.08567758e+21, 6.17135516e+21, 7.71419395e+21,
9.25703274e+21, 1.23427103e+22, 1.54283879e+22, 2.31425819e+22,
3.08567758e+22, 4.62851637e+22, 7.71419395e+22, 2.00569043e+23,
3.54852922e+23
]
) * u.m
# fmt: on
nvir = NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
# fmt: off
circ_v_vir = np.array(
[
205.87461783, 604.65091823, 793.9190629, 857.52516521,
908.90280843, 986.53582718, 1041.69089845, 1124.19719446,
1164.58270747, 1191.33193561, 1174.02934755, 1023.69360527,
895.52206321
]
) * (u.km / u.s)
# fmt: on
assert_quantity_allclose(nvir.circular_velocity(r_r), circ_v_vir)
assert_quantity_allclose(nvir.r_max, (1.6484542328623448 * u.Mpc))
assert_quantity_allclose(nvir.v_max, (1192.3130989914962 * (u.km / u.s)))
def test_NFW_exceptions_and_warnings_and_misc():
"""Test NFW exceptions."""
# Arbitrary Test parameters
mass = 2.0000000000000e15 * u.M_sun
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
massfactor = ("critical", 200)
# fmt: off
r_r = np.array(
[
1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04
]
) * u.kpc
# fmt: on
# Massfactor exception tests
MESSAGE = r"Massfactor 'not' not one of 'critical', 'mean', or 'virial'"
with pytest.raises(ValueError, match=MESSAGE):
NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=("not", "virial"),
)
MESSAGE = r"Massfactor not virial string not of the form '#m', '#c', or 'virial'"
with pytest.raises(ValueError, match=MESSAGE):
NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor="not virial",
)
MESSAGE = r"Massfactor 200 not a tuple or string"
with pytest.raises(TypeError, match=MESSAGE):
NFW(
mass=mass,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=200,
)
# Verify unitless mass
# Density test
n200c = NFW(
mass=mass.value,
concentration=concentration,
redshift=redshift,
cosmo=cosmo,
massfactor=massfactor,
)
assert_quantity_allclose(
n200c(3000.0),
(
3.709693508e12 * (u.solMass / u.Mpc**3),
7.376391187e42 * (u.kg / u.Mpc**3),
),
)
# Circular velocity test with unitless mass
# fmt: off
circ_v_200c = np.array(
[
702.45487454, 1812.4138346, 2150.50929296, 2231.5802568,
2283.96950242, 2338.45989696, 2355.78876772, 2332.41766543,
2276.89433811, 2154.53909153, 1950.07947819, 1512.37442943,
1260.94034541
]
) * (u.km / u.s)
# fmt: on
assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
# test with unitless input velocity
assert_quantity_allclose(n200c.circular_velocity(r_r.value), circ_v_200c)
# Test Default Cosmology
ncos = NFW(mass=mass, concentration=concentration, redshift=redshift)
assert_quantity_allclose(ncos.A_NFW(concentration), 1.356554956501232)
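    # (Added cross-check: A_NFW(c) = ln(1 + c) - c / (1 + c), so for c = 8.5
    # this is ln(9.5) - 8.5 / 9.5 ~= 1.35655, matching the value above.)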
|
9be4fc998cd59f67d0fd6417234049ed04be3c6d2daba1a85c1b2e196f929378 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import unittest.mock as mk
from math import cos, sin
import numpy as np
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.modeling import models, rotations
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs import wcs
@pytest.mark.parametrize(
"inp",
[
(0, 0),
(4000, -20.56),
(-2001.5, 45.9),
(0, 90),
(0, -90),
(np.mgrid[:4, :6]),
([[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]),
(
[
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],
],
[
[[25, 26, 27, 28], [29, 30, 31, 32], [33, 34, 35, 36]],
[[37, 38, 39, 40], [41, 42, 43, 44], [45, 46, 47, 48]],
],
),
],
)
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0], crval[1], lonpole)
c2n = models.RotateCelestial2Native(crval[0], crval[1], lonpole)
m = tan | n2c
minv = c2n | tan.inverse
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(
"inp",
[
(1e-5, 1e-4),
(40, -20.56),
(21.5, 45.9),
([[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]),
(
[
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]],
],
[
[[25, 26, 27, 28], [29, 30, 31, 32], [33, 34, 35, 36]],
[[37, 38, 39, 40], [41, 42, 43, 44], [45, 46, 47, 48]],
],
),
],
)
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42, 43, 44
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13)
assert_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13)
def test_native_celestial_lat90():
n2c = models.RotateNative2Celestial(1, 90, 0)
alpha, delta = n2c(1, 1)
assert_allclose(delta, 1)
assert_allclose(alpha, 182)
def test_Rotation2D():
model = models.Rotation2D(angle=90)
x, y = model(1, 0)
assert_allclose([x, y], [0, 1], atol=1e-10)
def test_Rotation2D_quantity():
model = models.Rotation2D(angle=90 * u.deg)
x, y = model(1 * u.deg, 0 * u.arcsec)
assert_quantity_allclose([x, y], [0, 1] * u.deg, atol=1e-10 * u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494)
x, y = model.inverse(*model(1, 0))
assert_allclose([x, y], [1, 0], atol=1e-10)
def test_Rotation2D_errors():
model = models.Rotation2D(angle=90 * u.deg)
# Bad evaluation input shapes
x = np.array([1, 2])
y = np.array([1, 2, 3])
MESSAGE = r"Expected input arrays to have the same shape"
with pytest.raises(ValueError, match=MESSAGE):
model.evaluate(x, y, model.angle)
with pytest.raises(ValueError, match=MESSAGE):
model.evaluate(y, x, model.angle)
# Bad evaluation units
x = np.array([1, 2])
y = np.array([1, 2])
MESSAGE = r"x and y must have compatible units"
with pytest.raises(u.UnitsError, match=MESSAGE):
model.evaluate(x * u.m, y, model.angle)
def test_euler_angle_rotations():
x = (0, 0)
y = (90, 0)
z = (0, 90)
negx = (180, 0)
negy = (-90, 0)
# rotate y into minus z
model = models.EulerAngleRotation(0, 90, 0, "zxz")
assert_allclose(model(*z), y, atol=10**-12)
# rotate z into minus x
model = models.EulerAngleRotation(0, 90, 0, "zyz")
assert_allclose(model(*z), negx, atol=10**-12)
# rotate x into minus y
model = models.EulerAngleRotation(0, 90, 0, "yzy")
assert_allclose(model(*x), negy, atol=10**-12)
euler_axes_order = ["zxz", "zyz", "yzy", "yxy", "xyx", "xzx"]
@pytest.mark.parametrize("axes_order", euler_axes_order)
def test_euler_angles(axes_order):
"""
Tests against all Euler sequences.
The rotation matrices definitions come from Wikipedia.
"""
phi = np.deg2rad(23.4)
theta = np.deg2rad(12.2)
psi = np.deg2rad(34)
c1 = cos(phi)
c2 = cos(theta)
c3 = cos(psi)
s1 = sin(phi)
s2 = sin(theta)
s3 = sin(psi)
matrices = {
"zxz": np.array(
[
[(c1 * c3 - c2 * s1 * s3), (-c1 * s3 - c2 * c3 * s1), (s1 * s2)],
[(c3 * s1 + c1 * c2 * s3), (c1 * c2 * c3 - s1 * s3), (-c1 * s2)],
[(s2 * s3), (c3 * s2), (c2)],
]
),
"zyz": np.array(
[
[(c1 * c2 * c3 - s1 * s3), (-c3 * s1 - c1 * c2 * s3), (c1 * s2)],
[(c1 * s3 + c2 * c3 * s1), (c1 * c3 - c2 * s1 * s3), (s1 * s2)],
[(-c3 * s2), (s2 * s3), (c2)],
]
),
"yzy": np.array(
[
[(c1 * c2 * c3 - s1 * s3), (-c1 * s2), (c3 * s1 + c1 * c2 * s3)],
[(c3 * s2), (c2), (s2 * s3)],
[(-c1 * s3 - c2 * c3 * s1), (s1 * s2), (c1 * c3 - c2 * s1 * s3)],
]
),
"yxy": np.array(
[
[(c1 * c3 - c2 * s1 * s3), (s1 * s2), (c1 * s3 + c2 * c3 * s1)],
[(s2 * s3), (c2), (-c3 * s2)],
[(-c3 * s1 - c1 * c2 * s3), (c1 * s2), (c1 * c2 * c3 - s1 * s3)],
]
),
"xyx": np.array(
[
[(c2), (s2 * s3), (c3 * s2)],
[(s1 * s2), (c1 * c3 - c2 * s1 * s3), (-c1 * s3 - c2 * c3 * s1)],
[(-c1 * s2), (c3 * s1 + c1 * c2 * s3), (c1 * c2 * c3 - s1 * s3)],
]
),
"xzx": np.array(
[
[(c2), (-c3 * s2), (s2 * s3)],
[(c1 * s2), (c1 * c2 * c3 - s1 * s3), (-c3 * s1 - c1 * c2 * s3)],
[(s1 * s2), (c1 * s3 + c2 * c3 * s1), (c1 * c3 - c2 * s1 * s3)],
]
),
}
mat = rotations._create_matrix([phi, theta, psi], axes_order)
    assert_allclose(mat.T, matrices[axes_order])
def test_rotation_3d():
"""
A sanity test - when V2_REF = 0 and V3_REF = 0,
for V2, V3 close to the origin
ROLL_REF should be approximately PA_V3 .
(Test taken from JWST SIAF report.)
"""
def _roll_angle_from_matrix(matrix, v2, v3):
X = -(matrix[2, 0] * np.cos(v2) + matrix[2, 1] * np.sin(v2)) * np.sin(
v3
) + matrix[2, 2] * np.cos(v3)
Y = (matrix[0, 0] * matrix[1, 2] - matrix[1, 0] * matrix[0, 2]) * np.cos(v2) + (
matrix[0, 1] * matrix[1, 2] - matrix[1, 1] * matrix[0, 2]
) * np.sin(v2)
new_roll = np.rad2deg(np.arctan2(Y, X))
if new_roll < 0:
new_roll += 360
return new_roll
# reference points on sky and in a coordinate frame associated
# with the telescope
ra_ref = 165 # in deg
dec_ref = 54 # in deg
v2_ref = 0
v3_ref = 0
pa_v3 = 37 # in deg
    v2 = np.deg2rad(2.7e-6)  # 2.7e-6 deg, i.e. ~0.01 arcsec
    v3 = np.deg2rad(2.7e-6)  # 2.7e-6 deg, i.e. ~0.01 arcsec
angles = [v2_ref, -v3_ref, pa_v3, dec_ref, -ra_ref]
axes = "zyxyz"
M = rotations._create_matrix(np.deg2rad(angles) * u.deg, axes)
roll_angle = _roll_angle_from_matrix(M, v2, v3)
assert_allclose(roll_angle, pa_v3, atol=1e-3)
def test_spherical_rotation():
"""
Test taken from JWST INS report - converts
JWST telescope (V2, V3) coordinates to RA, DEC.
"""
ra_ref = 165 # in deg
dec_ref = 54 # in deg
v2_ref = -503.654472 / 3600 # in deg
v3_ref = -318.742464 / 3600 # in deg
r0 = 37 # in deg
v2 = 210 # in deg
v3 = -75 # in deg
expected_ra_dec = (107.12810484789563, -35.97940247128502) # in deg
angles = np.array([v2_ref, -v3_ref, r0, dec_ref, -ra_ref])
axes = "zyxyz"
v2s = rotations.RotationSequence3D(angles, axes_order=axes)
x, y, z = rotations.spherical2cartesian(v2, v3)
x1, y1, z1 = v2s(x, y, z)
radec = rotations.cartesian2spherical(x1, y1, z1)
assert_allclose(radec, expected_ra_dec, atol=1e-10)
v2s = rotations.SphericalRotationSequence(angles, axes_order=axes)
radec = v2s(v2, v3)
assert_allclose(radec, expected_ra_dec, atol=1e-10)
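# For reference, a sketch of the convention the test above relies on:
# spherical2cartesian maps (lon, lat) in degrees to a unit vector with
#     x = cos(lat) * cos(lon), y = cos(lat) * sin(lon), z = sin(lat)
# and cartesian2spherical inverts it, so the rotated vector comes back
# as (RA, DEC) in degrees.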
def test_RotationSequence3D_errors():
# Bad axes_order labels
with pytest.raises(
ValueError, match=r"Unrecognized axis label .* should be one of .*"
):
rotations.RotationSequence3D(mk.MagicMock(), axes_order="abc")
# Bad number of angles
MESSAGE = r"The number of angles 4 should match the number of axes 3"
with pytest.raises(ValueError, match=MESSAGE):
rotations.RotationSequence3D([1, 2, 3, 4], axes_order="zyx")
# Bad evaluation input shapes
model = rotations.RotationSequence3D([1, 2, 3], axes_order="zyx")
MESSAGE = r"Expected input arrays to have the same shape"
with pytest.raises(ValueError, match=MESSAGE):
model.evaluate(
np.array([1, 2, 3]), np.array([1, 2]), np.array([1, 2]), [1, 2, 3]
)
with pytest.raises(ValueError, match=MESSAGE):
model.evaluate(
np.array([1, 2]), np.array([1, 2, 3]), np.array([1, 2]), [1, 2, 3]
)
with pytest.raises(ValueError, match=MESSAGE):
model.evaluate(
np.array([1, 2]), np.array([1, 2]), np.array([1, 2, 3]), [1, 2, 3]
)
def test_RotationSequence3D_inverse():
model = rotations.RotationSequence3D([1, 2, 3], axes_order="zyx")
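    # The inverse applies the negated angles in reverse order, so the
    # axes_order string is reversed as well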
assert_allclose(model.inverse.angles.value, [-3, -2, -1])
assert model.inverse.axes_order == "xyz"
def test_EulerAngleRotation_errors():
# Bad length of axes_order
MESSAGE = r"Expected axes_order to be a character sequence of length 3, got xyzx"
with pytest.raises(TypeError, match=MESSAGE):
rotations.EulerAngleRotation(
mk.MagicMock(), mk.MagicMock(), mk.MagicMock(), axes_order="xyzx"
)
# Bad axes_order labels
with pytest.raises(
ValueError, match=r"Unrecognized axis label .* should be one of .*"
):
rotations.EulerAngleRotation(
mk.MagicMock(), mk.MagicMock(), mk.MagicMock(), axes_order="abc"
)
# Bad units
MESSAGE = r"All parameters should be of the same type - float or Quantity"
with pytest.raises(TypeError, match=MESSAGE):
rotations.EulerAngleRotation(1 * u.m, 2, 3, axes_order="xyz")
with pytest.raises(TypeError, match=MESSAGE):
rotations.EulerAngleRotation(1, 2 * u.m, 3, axes_order="xyz")
with pytest.raises(TypeError, match=MESSAGE):
rotations.EulerAngleRotation(1, 2, 3 * u.m, axes_order="xyz")
def test_EulerAngleRotation_inverse():
model = rotations.EulerAngleRotation(1, 2, 3, "xyz")
assert_allclose(model.inverse.phi, -3)
assert_allclose(model.inverse.theta, -2)
assert_allclose(model.inverse.psi, -1)
assert model.inverse.axes_order == "zyx"
def test__SkyRotation_errors():
# Bad units
MESSAGE = r"All parameters should be of the same type - float or Quantity"
with pytest.raises(TypeError, match=MESSAGE):
rotations._SkyRotation(1 * u.m, 2, 3)
with pytest.raises(TypeError, match=MESSAGE):
rotations._SkyRotation(1, 2 * u.m, 3)
with pytest.raises(TypeError, match=MESSAGE):
rotations._SkyRotation(1, 2, 3 * u.m)
def test__SkyRotation__evaluate():
model = rotations._SkyRotation(1, 2, 3)
phi = mk.MagicMock()
theta = mk.MagicMock()
lon = mk.MagicMock()
lat = mk.MagicMock()
lon_pole = mk.MagicMock()
alpha = 5
delta = mk.MagicMock()
with mk.patch.object(
rotations._EulerRotation, "evaluate", autospec=True, return_value=(alpha, delta)
) as mkEval:
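        # _evaluate wraps the longitude returned by the mocked Euler rotation
        # by adding 360 deg here, hence the expected 365 for alpha = 5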
assert (365, delta) == model._evaluate(phi, theta, lon, lat, lon_pole)
assert mkEval.call_args_list == [
mk.call(model, phi, theta, lon, lat, lon_pole, "zxz")
]
|
19c60d735a03da723d83d84ce9c361e7c38c72f2a38b606ba92aff6edf2cc111 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.modeling import math_functions
x = np.linspace(-20, 360, 100)
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
def test_math():
for name in math_functions.__all__:
model_class = getattr(math_functions, name)
assert model_class.__module__ == "astropy.modeling.math_functions"
model = model_class()
func = getattr(np, model.func.__name__)
if model.n_inputs == 1:
assert_allclose(model(x), func(x))
elif model.n_inputs == 2:
assert_allclose(model(x, x), func(x, x))
assert math_functions.ModUfunc is math_functions.RemainderUfunc
assert math_functions.DivideUfunc is math_functions.True_divideUfunc
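# Illustrative single-class version of the loop above (assuming the generated
# class naming, e.g. SinUfunc wrapping np.sin):
#     model = math_functions.SinUfunc()
#     assert_allclose(model(x), np.sin(x))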
|
11af79d133afaa2c49c0ff7b29493a196ec5ec962044e1f0607555c2f58fd8f8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import platform
import types
import warnings
import numpy as np
import pytest
from numpy.random import default_rng
from numpy.testing import assert_allclose
from astropy.modeling import fitting, models
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
fitters = [
    fitting.LevMarLSQFitter,
    fitting.TRFLSQFitter,
    fitting.DogBoxLSQFitter,
]
class TestNonLinearConstraints:
def setup_class(self):
self.g1 = models.Gaussian1D(10, 14.9, stddev=0.3)
self.g2 = models.Gaussian1D(10, 13, stddev=0.4)
self.x = np.arange(10, 20, 0.1)
self.y1 = self.g1(self.x)
self.y2 = self.g2(self.x)
rsn = default_rng(1234567890)
self.n = rsn.standard_normal(100)
self.ny1 = self.y1 + 2 * self.n
self.ny2 = self.y2 + 2 * self.n
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fixed_par(self, fitter):
fitter = fitter()
g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3, fixed={"amplitude": True})
model = fitter(g1, self.x, self.ny1)
assert model.amplitude.value == 10
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_tied_par(self, fitter):
fitter = fitter()
def tied(model):
mean = 50 * model.stddev
return mean
g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3, tied={"mean": tied})
model = fitter(g1, self.x, self.ny1)
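        # The tied callback is re-applied during fitting, so the fitted mean
        # should track 50 * stddev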
assert_allclose(model.mean.value, 50 * model.stddev, rtol=10 ** (-5))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
def test_joint_fitter(self):
from scipy import optimize
g1 = models.Gaussian1D(10, 14.9, stddev=0.3)
g2 = models.Gaussian1D(10, 13, stddev=0.4)
jf = fitting.JointFitter(
[g1, g2], {g1: ["amplitude"], g2: ["amplitude"]}, [9.8]
)
x = np.arange(10, 20, 0.1)
y1 = g1(x)
y2 = g2(x)
n = np.random.randn(100)
ny1 = y1 + 2 * n
ny2 = y2 + 2 * n
jf(x, ny1, x, ny2)
p1 = [14.9, 0.3]
p2 = [13, 0.4]
A = 9.8
p = np.r_[A, p1, p2]
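        # Packed parameter vector for the reference scipy fit:
        # [shared_amplitude, mean1, stddev1, mean2, stddev2]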
def compmodel(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errf(p, x1, y1, x2, y2):
return np.ravel(
np.r_[compmodel(p[0], p[1:3], x1) - y1, compmodel(p[0], p[3:], x2) - y2]
)
fitparams, _ = optimize.leastsq(errf, p, args=(x, ny1, x, ny2))
assert_allclose(jf.fitparams, fitparams, rtol=10 ** (-5))
assert_allclose(g1.amplitude.value, g2.amplitude.value)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_no_constraints(self, fitter):
from scipy import optimize
fitter = fitter()
g1 = models.Gaussian1D(9.9, 14.5, stddev=0.3)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errf(p, x, y):
return func(p, x) - y
p0 = [9.9, 14.5, 0.3]
y = g1(self.x)
n = np.random.randn(100)
ny = y + n
fitpar, s = optimize.leastsq(errf, p0, args=(self.x, ny))
model = fitter(g1, self.x, ny)
assert_allclose(model.parameters, fitpar, rtol=5 * 10 ** (-3))
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
class TestBounds:
def setup_class(self):
A = -2.0
B = 0.5
self.x = np.linspace(-1.0, 1.0, 100)
self.y = A * self.x + B + np.random.normal(scale=0.1, size=100)
# fmt: off
data = np.array(
[
505.0, 556.0, 630.0, 595.0, 561.0, 553.0, 543.0, 496.0, 460.0, 469.0,
426.0, 518.0, 684.0, 798.0, 830.0, 794.0, 649.0, 706.0, 671.0, 545.0,
479.0, 454.0, 505.0, 700.0, 1058.0, 1231.0, 1325.0, 997.0, 1036.0, 884.0,
610.0, 487.0, 453.0, 527.0, 780.0, 1094.0, 1983.0, 1993.0, 1809.0, 1525.0,
1056.0, 895.0, 604.0, 466.0, 510.0, 678.0, 1130.0, 1986.0, 2670.0, 2535.0,
1878.0, 1450.0, 1200.0, 663.0, 511.0, 474.0, 569.0, 848.0, 1670.0, 2611.0,
3129.0, 2507.0, 1782.0, 1211.0, 723.0, 541.0, 511.0, 518.0, 597.0, 1137.0,
1993.0, 2925.0, 2438.0, 1910.0, 1230.0, 738.0, 506.0, 461.0, 486.0, 597.0,
733.0, 1262.0, 1896.0, 2342.0, 1792.0, 1180.0, 667.0, 482.0, 454.0, 482.0,
504.0, 566.0, 789.0, 1194.0, 1545.0, 1361.0, 933.0, 562.0, 418.0, 463.0,
435.0, 466.0, 528.0, 487.0, 664.0, 799.0, 746.0, 550.0, 478.0, 535.0,
443.0, 416.0, 439.0, 472.0, 472.0, 492.0, 523.0, 569.0, 487.0, 441.0,
428.0
]
)
# fmt: on
self.data = data.reshape(11, 11)
@pytest.mark.parametrize("fitter", fitters)
def test_bounds_lsq(self, fitter):
fitter = fitter()
guess_slope = 1.1
guess_intercept = 0.0
bounds = {"slope": (-1.5, 5.0), "intercept": (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept, bounds=bounds)
with pytest.warns(AstropyUserWarning, match=r"Model is linear in parameters"):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10**-5 >= bounds["slope"][0]
assert slope - 10**-5 <= bounds["slope"][1]
assert intercept + 10**-5 >= bounds["intercept"][0]
assert intercept - 10**-5 <= bounds["intercept"][1]
def test_bounds_slsqp(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {"slope": (-1.5, 5.0), "intercept": (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept, bounds=bounds)
fitter = fitting.SLSQPLSQFitter()
with pytest.warns(
AstropyUserWarning, match="consider using linear fitting methods"
):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10**-5 >= bounds["slope"][0]
assert slope - 10**-5 <= bounds["slope"][1]
assert intercept + 10**-5 >= bounds["intercept"][0]
assert intercept - 10**-5 <= bounds["intercept"][1]
@pytest.mark.parametrize("fitter", fitters)
def test_bounds_gauss2d_lsq(self, fitter):
fitter = fitter()
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {
"x_mean": [0.0, 11.0],
"y_mean": [0.0, 11.0],
"x_stddev": [1.0, 4],
"y_stddev": [1.0, 4],
}
gauss = models.Gaussian2D(
amplitude=10.0,
x_mean=5.0,
y_mean=5.0,
x_stddev=4.0,
y_stddev=4.0,
theta=0.5,
bounds=bounds,
)
        if isinstance(fitter, (fitting.LevMarLSQFitter, fitting.DogBoxLSQFitter)):
with pytest.warns(AstropyUserWarning, match="The fit may be unsuccessful"):
model = fitter(gauss, X, Y, self.data)
else:
model = fitter(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10**-5 >= bounds["x_mean"][0]
assert x_mean - 10**-5 <= bounds["x_mean"][1]
assert y_mean + 10**-5 >= bounds["y_mean"][0]
assert y_mean - 10**-5 <= bounds["y_mean"][1]
assert x_stddev + 10**-5 >= bounds["x_stddev"][0]
assert x_stddev - 10**-5 <= bounds["x_stddev"][1]
assert y_stddev + 10**-5 >= bounds["y_stddev"][0]
assert y_stddev - 10**-5 <= bounds["y_stddev"][1]
def test_bounds_gauss2d_slsqp(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {
"x_mean": [0.0, 11.0],
"y_mean": [0.0, 11.0],
"x_stddev": [1.0, 4],
"y_stddev": [1.0, 4],
}
gauss = models.Gaussian2D(
amplitude=10.0,
x_mean=5.0,
y_mean=5.0,
x_stddev=4.0,
y_stddev=4.0,
theta=0.5,
bounds=bounds,
)
gauss_fit = fitting.SLSQPLSQFitter()
# Warning does not appear in all the CI jobs.
# TODO: Rewrite the test for more consistent warning behavior.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=r".*The fit may be unsuccessful.*",
category=AstropyUserWarning,
)
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10**-5 >= bounds["x_mean"][0]
assert x_mean - 10**-5 <= bounds["x_mean"][1]
assert y_mean + 10**-5 >= bounds["y_mean"][0]
assert y_mean - 10**-5 <= bounds["y_mean"][1]
assert x_stddev + 10**-5 >= bounds["x_stddev"][0]
assert x_stddev - 10**-5 <= bounds["x_stddev"][1]
assert y_stddev + 10**-5 >= bounds["y_stddev"][0]
assert y_stddev - 10**-5 <= bounds["y_stddev"][1]
class TestLinearConstraints:
def setup_class(self):
self.p1 = models.Polynomial1D(4)
self.p1.c0 = 0
self.p1.c1 = 0
self.p1.window = [0.0, 9.0]
self.x = np.arange(10)
self.y = self.p1(self.x)
rsn = default_rng(1234567890)
self.n = rsn.standard_normal(10)
self.ny = self.y + self.n
def test(self):
self.p1.c0.fixed = True
self.p1.c1.fixed = True
pfit = fitting.LinearLSQFitter()
model = pfit(self.p1, self.x, self.y)
assert_allclose(self.y, model(self.x))
# Test constraints as parameter properties
def test_set_fixed_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.mean.fixed = True
assert gauss.fixed == {"amplitude": False, "mean": True, "stddev": False}
def test_set_fixed_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, fixed={"mean": True})
assert gauss.mean.fixed is True
def test_set_tied_1():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.amplitude.tied = tie_amplitude
assert gauss.amplitude.tied is not False
assert isinstance(gauss.tied["amplitude"], types.FunctionType)
def test_set_tied_2():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(
amplitude=20, mean=2, stddev=1, tied={"amplitude": tie_amplitude}
)
assert gauss.amplitude.tied
def test_unset_fixed():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, fixed={"mean": True})
gauss.mean.fixed = False
assert gauss.fixed == {"amplitude": False, "mean": False, "stddev": False}
def test_unset_tied():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(
amplitude=20, mean=2, stddev=1, tied={"amplitude": tie_amplitude}
)
gauss.amplitude.tied = False
assert gauss.tied == {"amplitude": False, "mean": False, "stddev": False}
def test_set_bounds_1():
gauss = models.Gaussian1D(
amplitude=20, mean=2, stddev=1, bounds={"stddev": (0, None)}
)
assert gauss.bounds == {
"amplitude": (None, None),
"mean": (None, None),
"stddev": (0.0, None),
}
def test_set_bounds_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.stddev.min = 0.0
assert gauss.bounds == {
"amplitude": (None, None),
"mean": (None, None),
"stddev": (0.0, None),
}
def test_unset_bounds():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1, bounds={"stddev": (0, 2)})
gauss.stddev.min = None
gauss.stddev.max = None
assert gauss.bounds == {
"amplitude": (None, None),
"mean": (None, None),
"stddev": (None, None),
}
def test_default_constraints():
"""Regression test for https://github.com/astropy/astropy/issues/2396
Ensure that default constraints defined on parameters are carried through
to instances of the models those parameters are defined for.
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=0, min=0, fixed=True)
@staticmethod
def evaluate(x, a, b):
return x * a + b
assert MyModel.a.default == 1
assert MyModel.b.default == 0
assert MyModel.b.min == 0
assert MyModel.b.bounds == (0, None)
assert MyModel.b.fixed is True
m = MyModel()
assert m.a.value == 1
assert m.b.value == 0
assert m.b.min == 0
assert m.b.bounds == (0, None)
assert m.b.fixed is True
assert m.bounds == {"a": (None, None), "b": (0, None)}
assert m.fixed == {"a": False, "b": True}
# Make a model instance that overrides the default constraints and values
m = MyModel(
3, 4, bounds={"a": (1, None), "b": (2, None)}, fixed={"a": True, "b": False}
)
assert m.a.value == 3
assert m.b.value == 4
assert m.a.min == 1
assert m.b.min == 2
assert m.a.bounds == (1, None)
assert m.b.bounds == (2, None)
assert m.a.fixed is True
assert m.b.fixed is False
assert m.bounds == {"a": (1, None), "b": (2, None)}
assert m.fixed == {"a": True, "b": False}
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:divide by zero encountered.*")
@pytest.mark.parametrize("fitter", fitters)
def test_fit_with_fixed_and_bound_constraints(fitter):
"""
Regression test for https://github.com/astropy/astropy/issues/2235
Currently doesn't test that the fit is any *good*--just that parameters
stay within their given constraints.
"""
    # DogBoxLSQFitter causes failures on s390x, armel, and possibly other
    # platforms (not x86_64 or arm64)
if fitter == fitting.DogBoxLSQFitter and (
platform.machine() not in ("x86_64", "arm64")
):
        pytest.xfail(
            "DogBoxLSQFitter can be unstable on non-standard platforms, leading to "
            "random test failures"
        )
fitter = fitter()
m = models.Gaussian1D(
amplitude=3,
mean=4,
stddev=1,
bounds={"mean": (4, 5)},
fixed={"amplitude": True},
)
x = np.linspace(0, 10, 10)
y = np.exp(-(x**2) / 2)
fitted_1 = fitter(m, x, y)
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
assert fitted_1.amplitude == 3.0
m.amplitude.fixed = False
    fitted_2 = fitter(m, x, y)
    # It doesn't matter what the amplitude ends up as now, so long as the
    # bounds constraint is still obeyed
    assert fitted_2.mean >= 4
    assert fitted_2.mean <= 5
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fit_with_bound_constraints_estimate_jacobian(fitter):
"""
Regression test for https://github.com/astropy/astropy/issues/2400
Checks that bounds constraints are obeyed on a custom model that does not
define fit_deriv (and thus its Jacobian must be estimated for non-linear
fitting).
"""
fitter = fitter()
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=2)
@staticmethod
def evaluate(x, a, b):
return a * x + b
m_real = MyModel(a=1.5, b=-3)
x = np.arange(100)
y = m_real(x)
m = MyModel()
fitted_1 = fitter(m, x, y)
# This fit should be trivial so even without constraints on the bounds it
# should be right
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
m2 = MyModel()
m2.a.bounds = (-2, 2)
    fitted_2 = fitter(m2, x, y)
    assert np.allclose(fitted_2.a, 1.5)
    assert np.allclose(fitted_2.b, -3)
    # Check that the estimated Jacobian was computed (it doesn't matter what
    # the values are, so long as they're not all zero)
    if isinstance(fitter, fitting.LevMarLSQFitter):
assert np.any(fitter.fit_info["fjac"] != 0)
# https://github.com/astropy/astropy/issues/6014
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_gaussian2d_positive_stddev(fitter):
# This is 2D Gaussian with noise to be fitted, as provided by @ysBach
fitter = fitter()
# fmt: off
test = [
[-54.33, 13.81, -34.55, 8.95, -143.71, -0.81, 59.25, -14.78, -204.9,
-30.87, -124.39, 123.53, 70.81, -109.48, -106.77, 35.64, 18.29],
[-126.19, -89.13, 63.13, 50.74, 61.83, 19.06, 65.7, 77.94, 117.14,
139.37, 52.57, 236.04, 100.56, 242.28, -180.62, 154.02, -8.03],
[91.43, 96.45, -118.59, -174.58, -116.49, 80.11, -86.81, 14.62, 79.26,
7.56, 54.99, 260.13, -136.42, -20.77, -77.55, 174.52, 134.41],
[33.88, 7.63, 43.54, 70.99, 69.87, 33.97, 273.75, 176.66, 201.94,
336.34, 340.54, 163.77, -156.22, 21.49, -148.41, 94.88, 42.55],
[82.28, 177.67, 26.81, 17.66, 47.81, -31.18, 353.23, 589.11, 553.27,
242.35, 444.12, 186.02, 140.73, 75.2, -87.98, -18.23, 166.74],
[113.09, -37.01, 134.23, 71.89, 107.88, 198.69, 273.88, 626.63, 551.8,
547.61, 580.35, 337.8, 139.8, 157.64, -1.67, -26.99, 37.35],
[106.47, 31.97, 84.99, -125.79, 195.0, 493.65, 861.89, 908.31, 803.9,
781.01, 532.59, 404.67, 115.18, 111.11, 28.08, 122.05, -58.36],
[183.62, 45.22, 40.89, 111.58, 425.81, 321.53, 545.09, 866.02, 784.78,
731.35, 609.01, 405.41, -19.65, 71.2, -140.5, 144.07, 25.24],
[137.13, -86.95, 15.39, 180.14, 353.23, 699.01, 1033.8, 1014.49,
814.11, 647.68, 461.03, 249.76, 94.8, 41.17, -1.16, 183.76, 188.19],
[35.39, 26.92, 198.53, -37.78, 638.93, 624.41, 816.04, 867.28, 697.0,
491.56, 378.21, -18.46, -65.76, 98.1, 12.41, -102.18, 119.05],
[190.73, 125.82, 311.45, 369.34, 554.39, 454.37, 755.7, 736.61, 542.43,
188.24, 214.86, 217.91, 7.91, 27.46, -172.14, -82.36, -80.31],
[-55.39, 80.18, 267.19, 274.2, 169.53, 327.04, 488.15, 437.53, 225.38,
220.94, 4.01, -92.07, 39.68, 57.22, 144.66, 100.06, 34.96],
[130.47, -4.23, 46.3, 101.49, 115.01, 217.38, 249.83, 115.9, 87.36,
105.81, -47.86, -9.94, -82.28, 144.45, 83.44, 23.49, 183.9],
[-110.38, -115.98, 245.46, 103.51, 255.43, 163.47, 56.52, 33.82,
-33.26, -111.29, 88.08, 193.2, -100.68, 15.44, 86.32, -26.44, -194.1],
[109.36, 96.01, -124.89, -16.4, 84.37, 114.87, -65.65, -58.52, -23.22,
42.61, 144.91, -209.84, 110.29, 66.37, -117.85, -147.73, -122.51],
[10.94, 45.98, 118.12, -46.53, -72.14, -74.22, 21.22, 0.39, 86.03,
23.97, -45.42, 12.05, -168.61, 27.79, 61.81, 84.07, 28.79],
[46.61, -104.11, 56.71, -90.85, -16.51, -66.45, -141.34, 0.96, 58.08,
285.29, -61.41, -9.01, -323.38, 58.35, 80.14, -101.22, 145.65]
]
# fmt: on
g_init = models.Gaussian2D(x_mean=8, y_mean=8)
    if isinstance(fitter, (fitting.TRFLSQFitter, fitting.DogBoxLSQFitter)):
        pytest.xfail("TRFLSQFitter and DogBoxLSQFitter seem to be broken for this test.")
y, x = np.mgrid[:17, :17]
g_fit = fitter(g_init, x, y, test)
# Compare with @ysBach original result:
# - x_stddev was negative, so its abs value is used for comparison here.
# - theta is beyond (-90, 90) deg, which doesn't make sense, so ignored.
assert_allclose(
[g_fit.amplitude.value, g_fit.y_stddev.value],
[984.7694929790363, 3.1840618351417307],
rtol=1.5e-6,
)
assert_allclose(g_fit.x_mean.value, 7.198391516587464)
assert_allclose(g_fit.y_mean.value, 7.49720660088511, rtol=5e-7)
assert_allclose(g_fit.x_stddev.value, 1.9840185107597297, rtol=2e-6)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.parametrize("fitter", fitters)
def test_2d_model(fitter):
"""Issue #6403"""
from astropy.utils import NumpyRNGContext
fitter = fitter()
# 2D model with LevMarLSQFitter
gauss2d = models.Gaussian2D(10.2, 4.3, 5, 2, 1.2, 1.4)
X = np.linspace(-1, 7, 200)
Y = np.linspace(-1, 7, 200)
x, y = np.meshgrid(X, Y)
z = gauss2d(x, y)
w = np.ones(x.size)
w.shape = x.shape
with NumpyRNGContext(1234567890):
n = np.random.randn(x.size)
n.shape = x.shape
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# 2D model with LevMarLSQFitter, fixed constraint
gauss2d.x_stddev.fixed = True
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False
p2 = models.Polynomial2D(1, c0_0=1, c1_0=1.2, c0_1=3.2)
z = p2(x, y)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False, fixed constraint
p2.c1_0.fixed = True
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
def test_set_prior_posterior():
model = models.Polynomial1D(1)
model.c0.prior = models.Gaussian1D(2.3, 2, 0.1)
assert model.c0.prior(2) == 2.3
model.c0.posterior = models.Linear1D(1, 0.2)
assert model.c0.posterior(1) == 1.2
def test_set_constraints():
g = models.Gaussian1D()
p = models.Polynomial1D(1)
# Set bounds before model combination
g.stddev.bounds = (0, 3)
m = g + p
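    # In the compound model, parameter names gain a _<submodel index> suffix:
    # Gaussian1D is submodel 0 and Polynomial1D is submodel 1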
assert m.bounds == {
"amplitude_0": (None, None),
"mean_0": (None, None),
"stddev_0": (0.0, 3.0),
"c0_1": (None, None),
"c1_1": (None, None),
}
# Set bounds on the compound model
m.stddev_0.bounds = (1, 3)
assert m.bounds == {
"amplitude_0": (None, None),
"mean_0": (None, None),
"stddev_0": (1.0, 3.0),
"c0_1": (None, None),
"c1_1": (None, None),
}
# Set the bounds of a Parameter directly in the bounds dict
m.bounds["stddev_0"] = (4, 5)
assert m.bounds == {
"amplitude_0": (None, None),
"mean_0": (None, None),
"stddev_0": (4, 5),
"c0_1": (None, None),
"c1_1": (None, None),
}
# Set the bounds of a Parameter on the child model bounds dict
g.bounds["stddev"] = (1, 5)
m = g + p
assert m.bounds == {
"amplitude_0": (None, None),
"mean_0": (None, None),
"stddev_0": (1, 5),
"c0_1": (None, None),
"c1_1": (None, None),
}
|
ef73504395877ef898958aa8633cecbca2c08607ee33893719e0ef9c469bb5a0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.modeling.fitting import (
DogBoxLSQFitter,
LevMarLSQFitter,
LMLSQFitter,
TRFLSQFitter,
)
from astropy.modeling.models import (
Gaussian1D,
Identity,
Mapping,
Rotation2D,
Shift,
UnitsMapping,
)
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
def test_swap_axes():
x = np.zeros((2, 3))
y = np.ones((2, 3))
mapping = Mapping((1, 0))
assert mapping(1, 2) == (2.0, 1.0)
assert mapping.inverse(2, 1) == (1, 2)
assert_array_equal(mapping(x, y), (y, x))
assert_array_equal(mapping.inverse(y, x), (x, y))
def test_duplicate_axes():
mapping = Mapping((0, 1, 0, 1))
    assert mapping(1, 2) == (1.0, 2.0, 1.0, 2.0)
assert mapping.inverse(1, 2, 1, 2) == (1, 2)
assert mapping.inverse.n_inputs == 4
assert mapping.inverse.n_outputs == 2
def test_drop_axes_1():
mapping = Mapping((0,), n_inputs=2)
    assert mapping(1, 2) == 1.0
def test_drop_axes_2():
mapping = Mapping((1,))
    assert mapping(1, 2) == 2.0
MESSAGE = (
r"Mappings such as .* that drop one or more of their inputs are not invertible"
r" at this time"
)
with pytest.raises(NotImplementedError, match=MESSAGE):
mapping.inverse
def test_drop_axes_3():
mapping = Mapping((1,), n_inputs=2)
assert mapping.n_inputs == 2
rotation = Rotation2D(60)
model = rotation | mapping
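    # Rotation2D(60) sends (1, 2) to (cos60 - 2*sin60, sin60 + 2*cos60);
    # Mapping((1,)) keeps only the y output: sin60 + 2*cos60 ~= 1.86602540378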
assert_allclose(model(1, 2), 1.86602540378)
@pytest.mark.parametrize("name", [None, "test_name"])
def test_bad_inputs(name):
mapping = Mapping((1, 0), name=name)
if name is None:
name = "Mapping"
x = [np.ones((2, 3)) * idx for idx in range(5)]
for idx in range(1, 6):
if idx == 2:
continue
MESSAGE = f"{name} expects 2 inputs; got {idx}"
with pytest.raises(TypeError, match=MESSAGE):
mapping.evaluate(*x[:idx])
def test_identity():
x = np.zeros((2, 3))
y = np.ones((2, 3))
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=60)
model = ident1 & shift | rotation
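    # (1, 2) -> Identity & Shift -> (1, 3); rotating by 60 deg then gives
    # x' = cos60 - 3*sin60 ~= -2.0981 and y' = sin60 + 3*cos60 ~= 2.3660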
assert_allclose(model(1, 2), (-2.098076211353316, 2.3660254037844393))
res_x, res_y = model(x, y)
assert_allclose(
(res_x, res_y),
(
np.array(
[
[-1.73205081, -1.73205081, -1.73205081],
[-1.73205081, -1.73205081, -1.73205081],
]
),
np.array(
[
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
),
),
)
assert_allclose(model.inverse(res_x, res_y), (x, y), atol=1.0e-10)
# https://github.com/astropy/astropy/pull/6018
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.parametrize("fitter", fitters)
def test_fittable_compound(fitter):
fitter = fitter()
m = Identity(1) | Mapping((0,)) | Gaussian1D(1, 5, 4)
x = np.arange(10)
y_real = m(x)
dy = 0.005
with NumpyRNGContext(1234567):
n = np.random.normal(0.0, dy, x.shape)
y_noisy = y_real + n
new_model = fitter(m, x, y_noisy)
y_fit = new_model(x)
assert_allclose(y_fit, y_real, atol=dy)
def test_identity_repr():
m = Identity(1, name="foo")
assert repr(m) == "<Identity(1, name='foo')>"
m = Identity(1)
assert repr(m) == "<Identity(1)>"
def test_mapping_repr():
m = Mapping([0, 1], name="foo")
assert repr(m) == "<Mapping([0, 1], name='foo')>"
m = Mapping([0, 1])
assert repr(m) == "<Mapping([0, 1])>"
class TestUnitsMapping:
def test___init__(self):
# Set values
model = UnitsMapping(
((u.m, None),),
            input_units_equivalencies="test_equiv",
input_units_allow_dimensionless=True,
name="test",
)
assert model._mapping == ((u.m, None),)
assert model._input_units_strict == {"x": True}
        assert model.input_units_equivalencies == "test_equiv"
assert model.input_units_allow_dimensionless == {"x": True}
assert model.name == "test"
assert model._input_units == {"x": u.m}
# Default values
model = UnitsMapping(((u.K, None),))
assert model._mapping == ((u.K, None),)
assert model._input_units_strict == {"x": True}
assert model.input_units_equivalencies is None
assert model.input_units_allow_dimensionless == {"x": False}
assert model.name is None
assert model._input_units == {"x": u.K}
# Error
MESSAGE = r"If one return unit is None, then all must be None"
with pytest.raises(ValueError, match=MESSAGE):
UnitsMapping(((u.m, None), (u.m, u.K)))
def test_evaluate(self):
model = UnitsMapping(((u.m, None),))
assert model(10 * u.m) == 10
model = UnitsMapping(((u.m, u.K),))
assert model(10 * u.m) == 10 * u.K
model = UnitsMapping(
((u.m, None), (u.K, None)),
)
assert model(10 * u.m, 20 * u.K) == (10, 20)
model = UnitsMapping(
((u.m, u.K), (u.K, u.m)),
)
assert model(10 * u.m, 20 * u.K) == (10 * u.K, 20 * u.m)
def test_repr(self):
model = UnitsMapping(((u.m, None),), name="foo")
assert repr(model) == f"<UnitsMapping((({repr(u.m)}, None),), name='foo')>"
model = UnitsMapping(((u.m, None),))
assert repr(model) == f"<UnitsMapping((({repr(u.m)}, None),))>"
|
9a410d7808bbac388a5cd2bb90349bb476dc06dd98ba2a4b63bbdf14bc24edc0 | # Various tests of models not related to evaluation, fitting, or parameters
# pylint: disable=invalid-name, no-member
import pytest
from astropy import units as u
from astropy.modeling import models
from astropy.modeling.core import _ModelMeta
from astropy.modeling.models import Gaussian1D, Mapping, Pix2Sky_TAN
from astropy.tests.helper import assert_quantity_allclose
def test_gaussian1d_bounding_box():
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
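    # Gaussian1D's default bounding box is mean +/- 5.5 * stddev:
    # 3 m +/- 5.5 * 3 cm = [2.835, 3.165] m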
bbox = g.bounding_box.bounding_box()
assert_quantity_allclose(bbox[0], 2.835 * u.m)
assert_quantity_allclose(bbox[1], 3.165 * u.m)
def test_gaussian1d_n_models():
g = Gaussian1D(
amplitude=[1 * u.J, 2.0 * u.J],
mean=[1 * u.m, 5000 * u.AA],
stddev=[0.1 * u.m, 100 * u.AA],
n_models=2,
)
assert_quantity_allclose(g(1.01 * u.m), [0.99501248, 0.0] * u.J)
assert_quantity_allclose(
g(u.Quantity([1.01 * u.m, 5010 * u.AA])), [0.99501248, 1.990025] * u.J
)
# FIXME: The following doesn't work as np.asanyarray doesn't work with a
# list of quantity objects.
# assert_quantity_allclose(g([1.01 * u.m, 5010 * u.AA]),
# [ 0.99501248, 1.990025] * u.J)
"""
Test the "rules" of model units.
"""
def test_quantity_call():
"""
    Test that models constructed with Quantities must be called with quantities.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g(10 * u.m)
MESSAGE = (
r".* Units of input 'x', .* could not be converted to required input units of"
r" m .*"
)
with pytest.raises(u.UnitsError, match=MESSAGE):
g(10)
def test_no_quantity_call():
"""
    Test that models not constructed with Quantities can be called without quantities.
"""
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert isinstance(g, Gaussian1D)
g(10)
def test_default_parameters():
# Test that calling with a quantity works when one of the parameters
# defaults to dimensionless
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm)
assert isinstance(g, Gaussian1D)
g(10 * u.m)
def test_uses_quantity():
"""
    Test the uses_quantity property on a simple model.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
assert g.uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert not g.uses_quantity
g.mean = 3 * u.m
assert g.uses_quantity
def test_uses_quantity_compound():
"""
    Test the uses_quantity property on compound models.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g2 = Gaussian1D(mean=5 * u.m, stddev=5 * u.cm, amplitude=5 * u.Jy)
assert (g | g2).uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
g2 = Gaussian1D(mean=5, stddev=5, amplitude=5)
comp = g | g2
assert not (comp).uses_quantity
def test_uses_quantity_no_param():
comp = Mapping((0, 1)) | Pix2Sky_TAN()
assert comp.uses_quantity
def _allmodels():
    allmodels = []
    for name in dir(models):
        model = getattr(models, name)
        if type(model) is _ModelMeta:
            try:
                m = model()
            except Exception:
                # Skip models that cannot be instantiated with defaults
                continue
            allmodels.append(m)
    return allmodels
@pytest.mark.parametrize("m", _allmodels())
def test_read_only(m):
"""
    Check that the following attributes are read-only:
    input_units, return_units,
    input_units_allow_dimensionless, input_units_strict.
"""
with pytest.raises(AttributeError):
m.input_units = {}
with pytest.raises(AttributeError):
m.return_units = {}
with pytest.raises(AttributeError):
m.input_units_allow_dimensionless = {}
with pytest.raises(AttributeError):
m.input_units_strict = {}
|
18765dd12a86814a53d1384bccec1d84548558840c15ffb4d1d6379d4883605f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import fix_inputs
from astropy.modeling.fitting import (
DogBoxLSQFitter,
LevMarLSQFitter,
LMLSQFitter,
TRFLSQFitter,
)
from astropy.modeling.functional_models import (
AiryDisk2D,
ArcCosine1D,
ArcSine1D,
ArcTangent1D,
Box1D,
Box2D,
Const1D,
Const2D,
Cosine1D,
Disk2D,
Ellipse2D,
Exponential1D,
Gaussian1D,
Gaussian2D,
KingProjectedAnalytic1D,
Linear1D,
Logarithmic1D,
Lorentz1D,
Moffat1D,
Moffat2D,
Multiply,
Planar2D,
RickerWavelet1D,
RickerWavelet2D,
Ring2D,
Scale,
Sersic1D,
Sersic2D,
Sine1D,
Tangent1D,
Trapezoid1D,
TrapezoidDisk2D,
Voigt1D,
)
from astropy.modeling.parameters import InputParameterError
from astropy.modeling.physical_models import Drude1D, Plummer1D
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.powerlaws import (
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
Schechter1D,
SmoothlyBrokenPowerLaw1D,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
FUNC_MODELS_1D = [
{
"class": Gaussian1D,
"parameters": {"amplitude": 3 * u.Jy, "mean": 2 * u.m, "stddev": 30 * u.cm},
"evaluation": [(2600 * u.mm, 3 * u.Jy * np.exp(-2))],
"bounding_box": [0.35, 3.65] * u.m,
},
{
"class": Sersic1D,
"parameters": {"amplitude": 3 * u.MJy / u.sr, "r_eff": 2 * u.arcsec, "n": 4},
"evaluation": [(3 * u.arcsec, 1.3237148119468918 * u.MJy / u.sr)],
"bounding_box": False,
},
{
"class": Sine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.5,
},
"evaluation": [(1 * u.s, -3 * u.km / u.s)],
"bounding_box": False,
},
{
"class": Cosine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.25,
},
"evaluation": [(1 * u.s, -3 * u.km / u.s)],
"bounding_box": False,
},
{
"class": Tangent1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.125 * u.Hz,
"phase": 0.25,
},
"evaluation": [(1 * u.s, -3 * u.km / u.s)],
"bounding_box": [-4, 0] / u.Hz,
},
{
"class": ArcSine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.5,
},
"evaluation": [(0 * u.km / u.s, -2 * u.s)],
"bounding_box": [-3, 3] * u.km / u.s,
},
{
"class": ArcCosine1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.25 * u.Hz,
"phase": 0.5,
},
"evaluation": [(0 * u.km / u.s, -1 * u.s)],
"bounding_box": [-3, 3] * u.km / u.s,
},
{
"class": ArcTangent1D,
"parameters": {
"amplitude": 3 * u.km / u.s,
"frequency": 0.125 * u.Hz,
"phase": 0.25,
},
"evaluation": [(0 * u.km / u.s, -2 * u.s)],
"bounding_box": False,
},
{
"class": Linear1D,
"parameters": {"slope": 3 * u.km / u.s, "intercept": 5000 * u.m},
"evaluation": [(6000 * u.ms, 23 * u.km)],
"bounding_box": False,
},
{
"class": Lorentz1D,
"parameters": {"amplitude": 2 * u.Jy, "x_0": 505 * u.nm, "fwhm": 100 * u.AA},
"evaluation": [(0.51 * u.micron, 1 * u.Jy)],
"bounding_box": [255, 755] * u.nm,
},
{
"class": Voigt1D,
"parameters": {
"amplitude_L": 2 * u.Jy,
"x_0": 505 * u.nm,
"fwhm_L": 100 * u.AA,
"fwhm_G": 50 * u.AA,
},
"evaluation": [(0.51 * u.micron, 1.0621795524 * u.Jy)],
"bounding_box": False,
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.Jy},
"evaluation": [(0.6 * u.micron, 3 * u.Jy)],
"bounding_box": False,
},
{
"class": Box1D,
"parameters": {"amplitude": 3 * u.Jy, "x_0": 4.4 * u.um, "width": 1 * u.um},
"evaluation": [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
"bounding_box": [3.9, 4.9] * u.um,
},
{
"class": Trapezoid1D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"width": 1 * u.um,
"slope": 5 * u.Jy / u.um,
},
"evaluation": [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
"bounding_box": [3.3, 5.5] * u.um,
},
{
"class": RickerWavelet1D,
"parameters": {"amplitude": 3 * u.Jy, "x_0": 4.4 * u.um, "sigma": 1e-3 * u.mm},
"evaluation": [(1000 * u.nm, -0.09785050 * u.Jy)],
"bounding_box": [-5.6, 14.4] * u.um,
},
{
"class": Moffat1D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"gamma": 1e-3 * u.mm,
"alpha": 1,
},
"evaluation": [(1000 * u.nm, 0.238853503 * u.Jy)],
"bounding_box": False,
},
{
"class": KingProjectedAnalytic1D,
"parameters": {
"amplitude": 1.0 * u.Msun / u.pc**2,
"r_core": 1.0 * u.pc,
"r_tide": 2.0 * u.pc,
},
"evaluation": [(0.5 * u.pc, 0.2 * u.Msun / u.pc**2)],
"bounding_box": [0.0 * u.pc, 2.0 * u.pc],
},
{
"class": Logarithmic1D,
"parameters": {"amplitude": 5 * u.m, "tau": 2 * u.m},
"evaluation": [(4 * u.m, 3.4657359027997265 * u.m)],
"bounding_box": False,
},
{
"class": Exponential1D,
"parameters": {"amplitude": 5 * u.m, "tau": 2 * u.m},
"evaluation": [(4 * u.m, 36.945280494653254 * u.m)],
"bounding_box": False,
},
]
SCALE_MODELS = [
{
"class": Scale,
"parameters": {"factor": 2 * u.m},
"evaluation": [(1 * u.m, 2 * u.m)],
"bounding_box": False,
},
{
"class": Multiply,
"parameters": {"factor": 2 * u.m},
"evaluation": [(1 * u.m / u.m, 2 * u.m)],
"bounding_box": False,
},
]
PHYS_MODELS_1D = [
{
"class": Plummer1D,
"parameters": {"mass": 3 * u.kg, "r_plum": 0.5 * u.m},
"evaluation": [(1 * u.m, 0.10249381 * u.kg / (u.m**3))],
"bounding_box": False,
},
{
"class": Drude1D,
"parameters": {
"amplitude": 1.0 * u.m,
"x_0": 2175.0 * u.AA,
"fwhm": 400.0 * u.AA,
},
"evaluation": [(2000 * u.AA, 0.5452317018423869 * u.m)],
"bounding_box": [-17825, 22175] * u.AA,
},
]
FUNC_MODELS_2D = [
{
"class": Gaussian2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_mean": 2 * u.m,
"y_mean": 1 * u.m,
"x_stddev": 3 * u.m,
"y_stddev": 2 * u.m,
"theta": 45 * u.deg,
},
"evaluation": [
(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))
],
"bounding_box": [[-13.02230366, 15.02230366], [-12.02230366, 16.02230366]]
* u.m,
},
{
"class": Const2D,
"parameters": {"amplitude": 3 * u.Jy},
"evaluation": [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)],
"bounding_box": False,
},
{
"class": Disk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"R_0": 300 * u.cm,
},
"evaluation": [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)],
"bounding_box": [[-1, 5], [0, 6]] * u.m,
},
{
"class": TrapezoidDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 1 * u.m,
"y_0": 2 * u.m,
"R_0": 100 * u.cm,
"slope": 1 * u.Jy / u.m,
},
"evaluation": [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)],
"bounding_box": [[-2, 6], [-3, 5]] * u.m,
},
{
"class": Ellipse2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"a": 300 * u.cm,
"b": 200 * u.cm,
"theta": 45 * u.deg,
},
"evaluation": [(4 * u.m, 300 * u.cm, 3 * u.Jy)],
"bounding_box": [
[-0.5495097567963922, 4.549509756796392],
[0.4504902432036073, 5.549509756796393],
]
* u.m,
},
{
"class": Ring2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"r_in": 2 * u.cm,
"r_out": 2.1 * u.cm,
},
"evaluation": [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)],
"bounding_box": [[1.979, 2.021], [2.979, 3.021]] * u.m,
},
{
"class": Box2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.s,
"x_width": 4 * u.cm,
"y_width": 3 * u.s,
},
"evaluation": [(301 * u.cm, 3 * u.s, 3 * u.Jy)],
"bounding_box": [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]],
},
{
"class": RickerWavelet2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"sigma": 1 * u.m,
},
"evaluation": [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)],
"bounding_box": False,
},
{
"class": AiryDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"radius": 1 * u.m,
},
"evaluation": [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
"bounding_box": False,
},
{
"class": Moffat2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"y_0": 3.5 * u.um,
"gamma": 1e-3 * u.mm,
"alpha": 1,
},
"evaluation": [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)],
"bounding_box": False,
},
{
"class": Sersic2D,
"parameters": {
"amplitude": 3 * u.MJy / u.sr,
"x_0": 1 * u.arcsec,
"y_0": 2 * u.arcsec,
"r_eff": 2 * u.arcsec,
"n": 4,
"ellip": 0,
"theta": 0,
},
"evaluation": [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy / u.sr)],
"bounding_box": False,
},
{
"class": Planar2D,
"parameters": {"slope_x": 2 * u.m, "slope_y": 3 * u.m, "intercept": 4 * u.m},
"evaluation": [(5 * u.m / u.m, 6 * u.m / u.m, 32 * u.m)],
"bounding_box": False,
},
]
POWERLAW_MODELS = [
{
"class": PowerLaw1D,
"parameters": {"amplitude": 5 * u.kg, "x_0": 10 * u.cm, "alpha": 1},
"evaluation": [(1 * u.m, 500 * u.g)],
"bounding_box": False,
},
{
"class": BrokenPowerLaw1D,
"parameters": {
"amplitude": 5 * u.kg,
"x_break": 10 * u.cm,
"alpha_1": 1,
"alpha_2": -1,
},
"evaluation": [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)],
"bounding_box": False,
},
{
"class": SmoothlyBrokenPowerLaw1D,
"parameters": {
"amplitude": 5 * u.kg,
"x_break": 10 * u.cm,
"alpha_1": 1,
"alpha_2": -1,
"delta": 1,
},
"evaluation": [(1 * u.cm, 15.125 * u.kg), (1 * u.m, 15.125 * u.kg)],
"bounding_box": False,
},
{
"class": ExponentialCutoffPowerLaw1D,
"parameters": {
"amplitude": 5 * u.kg,
"x_0": 10 * u.cm,
"alpha": 1,
"x_cutoff": 1 * u.m,
},
"evaluation": [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)],
"bounding_box": False,
},
{
"class": LogParabola1D,
"parameters": {"amplitude": 5 * u.kg, "x_0": 10 * u.cm, "alpha": 1, "beta": 2},
"evaluation": [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)],
"bounding_box": False,
},
{
"class": Schechter1D,
"parameters": {
"phi_star": 1.0e-4 * (u.Mpc**-3),
"m_star": -20.0 * u.ABmag,
"alpha": -1.9,
},
"evaluation": [(-23 * u.ABmag, 1.002702276867279e-12 * (u.Mpc**-3))],
"bounding_box": False,
},
]
POLY_MODELS = [
{
"class": Polynomial1D,
"parameters": {"degree": 2, "c0": 3 * u.one, "c1": 2 / u.m, "c2": 3 / u.m**2},
"evaluation": [(3 * u.m, 36 * u.one)],
"bounding_box": False,
},
{
"class": Polynomial1D,
"parameters": {
"degree": 2,
"c0": 3 * u.kg,
"c1": 2 * u.kg / u.m,
"c2": 3 * u.kg / u.m**2,
},
"evaluation": [(3 * u.m, 36 * u.kg)],
"bounding_box": False,
},
{
"class": Polynomial1D,
"parameters": {"degree": 2, "c0": 3 * u.kg, "c1": 2 * u.kg, "c2": 3 * u.kg},
"evaluation": [(3 * u.one, 36 * u.kg)],
"bounding_box": False,
},
{
"class": Polynomial2D,
"parameters": {
"degree": 2,
"c0_0": 3 * u.one,
"c1_0": 2 / u.m,
"c2_0": 3 / u.m**2,
"c0_1": 3 / u.s,
"c0_2": -2 / u.s**2,
"c1_1": 5 / u.m / u.s,
},
"evaluation": [(3 * u.m, 2 * u.s, 64 * u.one)],
"bounding_box": False,
},
{
"class": Polynomial2D,
"parameters": {
"degree": 2,
"c0_0": 3 * u.kg,
"c1_0": 2 * u.kg / u.m,
"c2_0": 3 * u.kg / u.m**2,
"c0_1": 3 * u.kg / u.s,
"c0_2": -2 * u.kg / u.s**2,
"c1_1": 5 * u.kg / u.m / u.s,
},
"evaluation": [(3 * u.m, 2 * u.s, 64 * u.kg)],
"bounding_box": False,
},
{
"class": Polynomial2D,
"parameters": {
"degree": 2,
"c0_0": 3 * u.kg,
"c1_0": 2 * u.kg,
"c2_0": 3 * u.kg,
"c0_1": 3 * u.kg,
"c0_2": -2 * u.kg,
"c1_1": 5 * u.kg,
},
"evaluation": [(3 * u.one, 2 * u.one, 64 * u.kg)],
"bounding_box": False,
},
]
MODELS = (
FUNC_MODELS_1D
+ SCALE_MODELS
+ FUNC_MODELS_2D
+ POWERLAW_MODELS
+ PHYS_MODELS_1D
+ POLY_MODELS
)
SCIPY_MODELS = {Sersic1D, Sersic2D, AiryDisk2D}
# These models will fail the fitting test because the built-in fitting data
# produce non-finite values
NON_FINITE_LevMar_MODELS = [
Sersic1D,
ArcSine1D,
ArcCosine1D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
LogParabola1D,
Schechter1D,
]
# These models will fail the TRFLSQFitter fitting test due to non-finite values
NON_FINITE_TRF_MODELS = [
ArcSine1D,
ArcCosine1D,
Sersic1D,
Sersic2D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
]
# These models will fail the LMLSQFitter fitting test due to non-finite values
NON_FINITE_LM_MODELS = [
Sersic1D,
ArcSine1D,
ArcCosine1D,
PowerLaw1D,
LogParabola1D,
Schechter1D,
ExponentialCutoffPowerLaw1D,
BrokenPowerLaw1D,
]
# These models will fail the DogBoxLSQFitter fitting test due to non-finite values
NON_FINITE_DogBox_MODELS = [
Sersic1D,
Sersic2D,
ArcSine1D,
ArcCosine1D,
SmoothlyBrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
]
@pytest.mark.parametrize("model", MODELS)
def test_models_evaluate_without_units(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
if len(args) == 2:
kwargs = dict(zip(("x", "y"), args))
else:
kwargs = dict(zip(("x", "y", "z"), args))
if kwargs["x"].unit.is_equivalent(kwargs["y"].unit):
kwargs["x"] = kwargs["x"].to(kwargs["y"].unit)
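        # without_units_for_data returns an equivalent model whose parameters
        # have been converted to the data units and then stripped of units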
mnu = m.without_units_for_data(**kwargs)
args = [x.value for x in kwargs.values()]
assert_quantity_allclose(mnu(*args[:-1]), args[-1])
@pytest.mark.parametrize("model", MODELS)
def test_models_evaluate_with_units(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
assert_quantity_allclose(m(*args[:-1]), args[-1])
@pytest.mark.parametrize("model", MODELS)
def test_models_evaluate_with_units_x_array(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x], subok=True)
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y], subok=True))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize("model", MODELS)
def test_models_evaluate_with_units_param_array(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
params = {}
for key, value in model["parameters"].items():
if value is None or key == "degree":
params[key] = value
else:
params[key] = np.repeat(value, 2)
params["n_models"] = 2
m = model["class"](**params)
for args in model["evaluation"]:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x], subok=True)
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y], subok=True))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
if model["class"] == Drude1D:
params["x_0"][-1] = 0 * u.AA
MESSAGE = r"0 is not an allowed value for x_0"
with pytest.raises(InputParameterError, match=MESSAGE):
model["class"](**params)
@pytest.mark.parametrize("model", MODELS)
def test_models_bounding_box(model):
# In some cases, having units in parameters caused bounding_box to break,
# so this is to ensure that it works correctly.
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
    # In the following we need to explicitly test that the value is False
    # since Quantities no longer evaluate as True
if model["bounding_box"] is False:
# Check that NotImplementedError is raised, so that if bounding_box is
# implemented we remember to set bounding_box=True in the list of models
# above
MESSAGE = r"No bounding box is defined for this model"
with pytest.raises(NotImplementedError, match=MESSAGE):
m.bounding_box
else:
# A bounding box may have inhomogeneous units so we need to check the
# values one by one.
for i in range(len(model["bounding_box"])):
bbox = m.bounding_box
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
assert_quantity_allclose(bbox[i], model["bounding_box"][i])
@pytest.mark.parametrize("model", MODELS)
def test_compound_model_input_units_equivalencies_defaults(model):
m = model["class"](**model["parameters"])
assert m.input_units_equivalencies is None
compound_model = m + m
assert compound_model.inputs_map()["x"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x": 1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m - m
assert compound_model.inputs_map()["x"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x": 1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m & m
assert compound_model.inputs_map()["x1"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x0": 1})
assert fixed_input_model.inputs_map()["x1"][0].input_units_equivalencies is None
assert fixed_input_model.input_units_equivalencies is None
if m.n_outputs == m.n_inputs:
compound_model = m | m
assert compound_model.inputs_map()["x"][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {"x": 1})
assert fixed_input_model.input_units_equivalencies is None
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy")
@pytest.mark.filterwarnings(r"ignore:.*:RuntimeWarning")
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.filterwarnings(r"ignore:The fit may be unsuccessful.*")
@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("fitter", fitters)
def test_models_fitting(model, fitter):
fitter = fitter()
if (
(
isinstance(fitter, LevMarLSQFitter)
and model["class"] in NON_FINITE_LevMar_MODELS
)
or (
isinstance(fitter, TRFLSQFitter) and model["class"] in NON_FINITE_TRF_MODELS
)
or (isinstance(fitter, LMLSQFitter) and model["class"] in NON_FINITE_LM_MODELS)
or (
isinstance(fitter, DogBoxLSQFitter)
and model["class"] in NON_FINITE_DogBox_MODELS
)
):
return
m = model["class"](**model["parameters"])
if len(model["evaluation"][0]) == 2:
x = np.linspace(1, 3, 100) * model["evaluation"][0][0].unit
y = np.exp(-x.value**2) * model["evaluation"][0][1].unit
args = [x, y]
else:
x = np.linspace(1, 3, 100) * model["evaluation"][0][0].unit
y = np.linspace(1, 3, 100) * model["evaluation"][0][1].unit
z = np.exp(-x.value**2 - y.value**2) * model["evaluation"][0][2].unit
args = [x, y, z]
# Test that the model fits even if it has units on parameters
m_new = fitter(m, *args)
# Check that units have been put back correctly
for param_name in m.param_names:
par_bef = getattr(m, param_name)
par_aft = getattr(m_new, param_name)
if par_bef.unit is None:
            # If the parameter had no unit before the fit, allow it to come
            # back either unitless or with a radian unit (angles may pick up
            # radians during fitting)
assert par_aft.unit is None or par_aft.unit is u.rad
else:
assert par_aft.unit.is_equivalent(par_bef.unit)
unit_mismatch_models = [
{
"class": Gaussian2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_mean": 2 * u.m,
"y_mean": 1 * u.m,
"x_stddev": 3 * u.m,
"y_stddev": 2 * u.m,
"theta": 45 * u.deg,
},
"evaluation": [
(412.1320343 * u.cm, 3.121320343 * u.K, 3 * u.Jy * np.exp(-0.5)),
(412.1320343 * u.K, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5)),
],
"bounding_box": [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]]
* u.m,
},
{
"class": Ellipse2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"a": 300 * u.cm,
"b": 200 * u.cm,
"theta": 45 * u.deg,
},
"evaluation": [(4 * u.m, 300 * u.K, 3 * u.Jy), (4 * u.K, 300 * u.cm, 3 * u.Jy)],
"bounding_box": [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m,
},
{
"class": Disk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"R_0": 300 * u.cm,
},
"evaluation": [
(5.8 * u.m, 201 * u.K, 3 * u.Jy),
(5.8 * u.K, 201 * u.cm, 3 * u.Jy),
],
"bounding_box": [[-1, 5], [0, 6]] * u.m,
},
{
"class": Ring2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"r_in": 2 * u.cm,
"r_out": 2.1 * u.cm,
},
"evaluation": [
(302.05 * u.cm, 2 * u.K + 10 * u.K, 3 * u.Jy),
(302.05 * u.K, 2 * u.m + 10 * u.um, 3 * u.Jy),
],
"bounding_box": [[1.979, 2.021], [2.979, 3.021]] * u.m,
},
{
"class": TrapezoidDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 1 * u.m,
"y_0": 2 * u.m,
"R_0": 100 * u.cm,
"slope": 1 * u.Jy / u.m,
},
"evaluation": [
(3.5 * u.m, 2 * u.K, 1.5 * u.Jy),
(3.5 * u.K, 2 * u.m, 1.5 * u.Jy),
],
"bounding_box": [[-2, 6], [-3, 5]] * u.m,
},
{
"class": RickerWavelet2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"sigma": 1 * u.m,
},
"evaluation": [
(4 * u.m, 2.5 * u.K, 0.602169107 * u.Jy),
(4 * u.K, 2.5 * u.m, 0.602169107 * u.Jy),
],
"bounding_box": False,
},
{
"class": AiryDisk2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"radius": 1 * u.m,
},
"evaluation": [
(4 * u.m, 2.1 * u.K, 4.76998480e-05 * u.Jy),
(4 * u.K, 2.1 * u.m, 4.76998480e-05 * u.Jy),
],
"bounding_box": False,
},
{
"class": Moffat2D,
"parameters": {
"amplitude": 3 * u.Jy,
"x_0": 4.4 * u.um,
"y_0": 3.5 * u.um,
"gamma": 1e-3 * u.mm,
"alpha": 1,
},
"evaluation": [
(1000 * u.nm, 2 * u.K, 0.202565833 * u.Jy),
(1000 * u.K, 2 * u.um, 0.202565833 * u.Jy),
],
"bounding_box": False,
},
{
"class": Sersic2D,
"parameters": {
"amplitude": 3 * u.MJy / u.sr,
"x_0": 1 * u.arcsec,
"y_0": 2 * u.arcsec,
"r_eff": 2 * u.arcsec,
"n": 4,
"ellip": 0,
"theta": 0,
},
"evaluation": [
(3 * u.arcsec, 2.5 * u.m, 2.829990489 * u.MJy / u.sr),
(3 * u.m, 2.5 * u.arcsec, 2.829990489 * u.MJy / u.sr),
],
"bounding_box": False,
},
]
@pytest.mark.parametrize("model", unit_mismatch_models)
def test_input_unit_mismatch_error(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
MESSAGE = "Units of 'x' and 'y' inputs should match"
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
if len(args) == 2:
kwargs = dict(zip(("x", "y"), args))
else:
kwargs = dict(zip(("x", "y", "z"), args))
if kwargs["x"].unit.is_equivalent(kwargs["y"].unit):
kwargs["x"] = kwargs["x"].to(kwargs["y"].unit)
with pytest.raises(u.UnitsError, match=MESSAGE):
m.without_units_for_data(**kwargs)
mag_models = [
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.ABmag},
"evaluation": [(0.6 * u.ABmag, 3 * u.ABmag)],
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.ABmag},
"evaluation": [(0.6 * u.mag, 3 * u.ABmag)],
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.mag},
"evaluation": [(0.6 * u.ABmag, 3 * u.mag)],
},
{
"class": Const1D,
"parameters": {"amplitude": 3 * u.mag},
"evaluation": [(0.6 * u.mag, 3 * u.mag)],
},
{
"class": Const2D,
"parameters": {"amplitude": 3 * u.ABmag},
"evaluation": [(0.6 * u.micron, 0.2 * u.m, 3 * u.ABmag)],
},
{
"class": Ellipse2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"a": 300 * u.cm,
"b": 200 * u.cm,
"theta": 45 * u.deg,
},
"evaluation": [(4 * u.m, 300 * u.cm, 3 * u.ABmag)],
},
{
"class": Disk2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"R_0": 300 * u.cm,
},
"evaluation": [(5.8 * u.m, 201 * u.cm, 3 * u.ABmag)],
},
{
"class": Ring2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.m,
"r_in": 2 * u.cm,
"r_out": 2.1 * u.cm,
},
"evaluation": [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.ABmag)],
},
{
"class": Box2D,
"parameters": {
"amplitude": 3 * u.ABmag,
"x_0": 3 * u.m,
"y_0": 2 * u.s,
"x_width": 4 * u.cm,
"y_width": 3 * u.s,
},
"evaluation": [(301 * u.cm, 3 * u.s, 3 * u.ABmag)],
},
{
"class": SmoothlyBrokenPowerLaw1D,
"parameters": {
"amplitude": 5 * u.ABmag,
"x_break": 10 * u.cm,
"alpha_1": 1,
"alpha_2": -1,
"delta": 1,
},
"evaluation": [(1 * u.cm, 15.125 * u.ABmag), (1 * u.m, 15.125 * u.ABmag)],
},
{
"class": Box1D,
"parameters": {"amplitude": 3 * u.ABmag, "x_0": 4.4 * u.um, "width": 1 * u.um},
"evaluation": [(4200 * u.nm, 3 * u.ABmag), (1 * u.m, 0 * u.ABmag)],
"bounding_box": [3.9, 4.9] * u.um,
},
{
"class": Schechter1D,
"parameters": {
"phi_star": 1.0e-4 * (u.Mpc**-3),
"m_star": -20.0 * u.ABmag,
"alpha": -1.9,
},
"evaluation": [(-23 * u.ABmag, 1.002702276867279e-12 * (u.Mpc**-3))],
},
{
"class": Schechter1D,
"parameters": {
"phi_star": 1.0e-4 * (u.Mpc**-3),
"m_star": -20.0 * u.mag,
"alpha": -1.9,
},
"evaluation": [(-23 * u.mag, 1.002702276867279e-12 * (u.Mpc**-3))],
},
]
@pytest.mark.parametrize("model", mag_models)
def test_models_evaluate_magunits(model):
if not HAS_SCIPY and model["class"] in SCIPY_MODELS:
pytest.skip()
m = model["class"](**model["parameters"])
for args in model["evaluation"]:
assert_quantity_allclose(m(*args[:-1]), args[-1])
def test_Schechter1D_errors():
# Non magnitude units are bad
model = Schechter1D(
phi_star=1.0e-4 * (u.Mpc**-3), m_star=-20.0 * u.km, alpha=-1.9
)
MESSAGE = r"The units of magnitude and m_star must be a magnitude"
with pytest.raises(u.UnitsError, match=MESSAGE):
model(-23 * u.km)
# Differing magnitude systems are bad
model = Schechter1D(
phi_star=1.0e-4 * (u.Mpc**-3), m_star=-20.0 * u.ABmag, alpha=-1.9
)
MESSAGE = (
r".*: Units of input 'x', .*, could not be converted to required input units"
r" of .*"
)
with pytest.raises(u.UnitsError, match=MESSAGE):
model(-23 * u.STmag)
# Differing magnitude systems are bad
model = Schechter1D(
phi_star=1.0e-4 * (u.Mpc**-3), m_star=-20.0 * u.ABmag, alpha=-1.9
)
with pytest.raises(u.UnitsError, match=MESSAGE):
model(-23 * u.mag)
|
b629f533b10520db2ce861403c1e4764508f0900c0217815de9ab1572c696361 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# By default, tests should not use the internet.
from astropy.samp import conf
from astropy.samp.errors import SAMPClientError, SAMPHubError, SAMPProxyError
def setup_module(module):
conf.use_internet = False
def test_SAMPHubError():
"""Test that SAMPHubError can be instantiated"""
SAMPHubError("test")
def test_SAMPClientError():
"""Test that SAMPClientError can be instantiated"""
SAMPClientError("test")
def test_SAMPProxyError():
"""Test that SAMPProxyError can be instantiated"""
SAMPProxyError("test", "any")
|
7a92d5cb4c824a58f0d09b5a8155b98eae98981c9f079de29a4f2945ae1b2c37 | import pytest
# By default, tests should not use the internet.
from astropy.samp import conf
from astropy.samp.errors import SAMPProxyError
from astropy.samp.hub import SAMPHubServer
from astropy.samp.integrated_client import SAMPIntegratedClient
from .test_helpers import TEST_REPLY, Receiver, assert_output, random_params
def setup_module(module):
conf.use_internet = False
class TestStandardProfile:
@property
def hub_init_kwargs(self):
return {}
@property
def client_init_kwargs(self):
return {}
@property
def client_connect_kwargs(self):
return {}
@pytest.fixture(autouse=True)
def setup_method(self, tmp_path):
self.tmpdir = str(tmp_path)
self.hub = SAMPHubServer(
web_profile=False, mode="multiple", pool_size=1, **self.hub_init_kwargs
)
self.hub.start()
self.client1 = SAMPIntegratedClient(**self.client_init_kwargs)
self.client1.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
self.client2 = SAMPIntegratedClient(**self.client_init_kwargs)
self.client2.connect(hub=self.hub, pool_size=1, **self.client_connect_kwargs)
def teardown_method(self):
if self.client1.is_connected:
self.client1.disconnect()
if self.client2.is_connected:
self.client2.disconnect()
self.hub.stop()
def test_main(self):
self.client1_id = self.client1.get_public_id()
self.client2_id = self.client2.get_public_id()
self.metadata1 = {
"samp.name": "Client 1",
"samp.description.text": "Client 1 Description",
"client.version": "1.1",
}
self.metadata2 = {
"samp.name": "Client 2",
"samp.description.text": "Client 2 Description",
"client.version": "1.2",
}
# Check that the clients are connected
assert self.client1.is_connected
assert self.client2.is_connected
# Check that ping works
self.client1.ping()
self.client2.ping()
# Check that get_registered_clients works as expected.
assert self.client1_id not in self.client1.get_registered_clients()
assert self.client2_id in self.client1.get_registered_clients()
assert self.client1_id in self.client2.get_registered_clients()
assert self.client2_id not in self.client2.get_registered_clients()
# Check that get_metadata works as expected
assert self.client1.get_metadata(self.client1_id) == {}
assert self.client1.get_metadata(self.client2_id) == {}
assert self.client2.get_metadata(self.client1_id) == {}
assert self.client2.get_metadata(self.client2_id) == {}
self.client1.declare_metadata(self.metadata1)
assert self.client1.get_metadata(self.client1_id) == self.metadata1
assert self.client2.get_metadata(self.client1_id) == self.metadata1
assert self.client1.get_metadata(self.client2_id) == {}
assert self.client2.get_metadata(self.client2_id) == {}
self.client2.declare_metadata(self.metadata2)
assert self.client1.get_metadata(self.client1_id) == self.metadata1
assert self.client2.get_metadata(self.client1_id) == self.metadata1
assert self.client1.get_metadata(self.client2_id) == self.metadata2
assert self.client2.get_metadata(self.client2_id) == self.metadata2
# Check that, without subscriptions, sending a notification from one
# client to another raises an error.
message = {}
message["samp.mtype"] = "table.load.votable"
message["samp.params"] = {}
with pytest.raises(SAMPProxyError):
self.client1.notify(self.client2_id, message)
# Check that there are no currently active subscriptions
assert self.client1.get_subscribed_clients("table.load.votable") == {}
assert self.client2.get_subscribed_clients("table.load.votable") == {}
# We now test notifications and calls
rec1 = Receiver(self.client1)
rec2 = Receiver(self.client2)
self.client2.bind_receive_notification(
"table.load.votable", rec2.receive_notification
)
self.client2.bind_receive_call("table.load.votable", rec2.receive_call)
self.client1.bind_receive_response("test-tag", rec1.receive_response)
# Check resulting subscriptions
assert self.client1.get_subscribed_clients("table.load.votable") == {
self.client2_id: {}
}
assert self.client2.get_subscribed_clients("table.load.votable") == {}
assert "table.load.votable" in self.client1.get_subscriptions(self.client2_id)
assert "table.load.votable" in self.client2.get_subscriptions(self.client2_id)
# Once we have finished with the calls and notifications, we will
# check the data got across correctly.
# Test notify
params = random_params(self.tmpdir)
self.client1.notify(
self.client2.get_public_id(),
{"samp.mtype": "table.load.votable", "samp.params": params},
)
assert_output(
"table.load.votable",
self.client2.get_private_key(),
self.client1_id,
params,
timeout=60,
)
params = random_params(self.tmpdir)
self.client1.enotify(
self.client2.get_public_id(), "table.load.votable", **params
)
assert_output(
"table.load.votable",
self.client2.get_private_key(),
self.client1_id,
params,
timeout=60,
)
# Test notify_all
params = random_params(self.tmpdir)
self.client1.notify_all(
{"samp.mtype": "table.load.votable", "samp.params": params}
)
assert_output(
"table.load.votable",
self.client2.get_private_key(),
self.client1_id,
params,
timeout=60,
)
params = random_params(self.tmpdir)
self.client1.enotify_all("table.load.votable", **params)
assert_output(
"table.load.votable",
self.client2.get_private_key(),
self.client1_id,
params,
timeout=60,
)
# Test call
params = random_params(self.tmpdir)
self.client1.call(
self.client2.get_public_id(),
"test-tag",
{"samp.mtype": "table.load.votable", "samp.params": params},
)
assert_output(
"table.load.votable",
self.client2.get_private_key(),
self.client1_id,
params,
timeout=60,
)
params = random_params(self.tmpdir)
self.client1.ecall(
self.client2.get_public_id(), "test-tag", "table.load.votable", **params
)
assert_output(
"table.load.votable",
self.client2.get_private_key(),
self.client1_id,
params,
timeout=60,
)
# Test call_all
params = random_params(self.tmpdir)
self.client1.call_all(
"tag1", {"samp.mtype": "table.load.votable", "samp.params": params}
)
assert_output(
"table.load.votable",
self.client2.get_private_key(),
self.client1_id,
params,
timeout=60,
)
params = random_params(self.tmpdir)
self.client1.ecall_all("tag2", "table.load.votable", **params)
assert_output(
"table.load.votable",
self.client2.get_private_key(),
self.client1_id,
params,
timeout=60,
)
# Test call_and_wait
params = random_params(self.tmpdir)
result = self.client1.call_and_wait(
self.client2.get_public_id(),
{"samp.mtype": "table.load.votable", "samp.params": params},
timeout=5,
)
assert result == TEST_REPLY
assert_output(
"table.load.votable",
self.client2.get_private_key(),
self.client1_id,
params,
timeout=60,
)
params = random_params(self.tmpdir)
result = self.client1.ecall_and_wait(
self.client2.get_public_id(), "table.load.votable", timeout=5, **params
)
assert result == TEST_REPLY
assert_output(
"table.load.votable",
self.client2.get_private_key(),
self.client1_id,
params,
timeout=60,
)
# TODO: check that receive_response received the right data
|
408115332bc3da55aaacb1abb031cb869f6af31784603766097e61412b2e055c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
# By default, tests should not use the internet.
from astropy.samp import conf
from astropy.samp.client import SAMPClient
from astropy.samp.hub import SAMPHubServer
from astropy.samp.hub_proxy import SAMPHubProxy
from astropy.samp.integrated_client import SAMPIntegratedClient
def setup_module(module):
conf.use_internet = False
def test_SAMPHubProxy():
"""Test that SAMPHubProxy can be instantiated"""
SAMPHubProxy()
def test_SAMPClient():
"""Test that SAMPClient can be instantiated"""
proxy = SAMPHubProxy()
SAMPClient(proxy)
def test_SAMPIntegratedClient():
"""Test that SAMPIntegratedClient can be instantiated"""
SAMPIntegratedClient()
@pytest.fixture
def samp_hub(request):
"""A fixture that can be used by client tests that require a HUB."""
my_hub = SAMPHubServer()
my_hub.start()
request.addfinalizer(my_hub.stop)
def test_reconnect(samp_hub):
"""Test that SAMPIntegratedClient can reconnect.
This is a regression test for bug [#2673]
https://github.com/astropy/astropy/issues/2673
"""
my_client = SAMPIntegratedClient()
my_client.connect()
my_client.disconnect()
my_client.connect()
|
0821ae8c09ab28d422dae3867c816cee006c853ddd1995aa0bccbdffeb423f76 | import sys
import pytest
from astropy.samp import conf
from astropy.samp.hub_script import hub_script
def setup_module(module):
conf.use_internet = False
def setup_function(function):
function.sys_argv_orig = sys.argv
sys.argv = ["samp_hub"]
def teardown_function(function):
sys.argv = function.sys_argv_orig
@pytest.mark.slow
def test_hub_script():
sys.argv.append("-m") # run in multiple mode
sys.argv.append("-w") # disable web profile
hub_script(timeout=3)
|
ec5359db21b618745e0e6c8c4d2a5a4fb2bb91043d01da2750775a2809bb2f9a | from astropy.samp import conf
from astropy.samp.hub import SAMPHubServer
from astropy.samp.hub_proxy import SAMPHubProxy
def setup_module(module):
conf.use_internet = False
class TestHubProxy:
def setup_method(self, method):
self.hub = SAMPHubServer(web_profile=False, mode="multiple", pool_size=1)
self.hub.start()
self.proxy = SAMPHubProxy()
self.proxy.connect(hub=self.hub, pool_size=1)
def teardown_method(self, method):
if self.proxy.is_connected:
self.proxy.disconnect()
self.hub.stop()
def test_is_connected(self):
assert self.proxy.is_connected
def test_disconnect(self):
self.proxy.disconnect()
def test_ping(self):
self.proxy.ping()
def test_registration(self):
result = self.proxy.register(self.proxy.lockfile["samp.secret"])
self.proxy.unregister(result["samp.private-key"])
def test_custom_lockfile(tmp_path):
lockfile = str(tmp_path / ".samptest")
hub = SAMPHubServer(web_profile=False, lockfile=lockfile, pool_size=1)
hub.start()
proxy = SAMPHubProxy()
proxy.connect(hub=hub, pool_size=1)
hub.stop()
|
194c84d28f91a6fb43b04dd652b3d0039b9f8c2e383022f26f1d3f232f7d6324 | """
Test the web profile using Python classes that have been adapted to act like a
web client. We can only put a single test here because only one hub can run
with the web profile active, and the user might want to run the tests in
parallel.
"""
import threading
from urllib.request import Request, urlopen
import pytest
from astropy.samp import SAMPHubServer, SAMPIntegratedClient, conf
from astropy.samp.web_profile import CLIENT_ACCESS_POLICY, CROSS_DOMAIN
from astropy.utils.data import get_readable_fileobj
from .test_standard_profile import TestStandardProfile as BaseTestStandardProfile
from .web_profile_test_helpers import (
AlwaysApproveWebProfileDialog,
SAMPIntegratedWebClient,
)
def setup_module(module):
conf.use_internet = False
class TestWebProfile(BaseTestStandardProfile):
@pytest.fixture(autouse=True)
def setup_method(self, tmp_path):
self.dialog = AlwaysApproveWebProfileDialog()
t = threading.Thread(target=self.dialog.poll)
t.start()
self.tmpdir = str(tmp_path)
lockfile = str(tmp_path / ".samp")
self.hub = SAMPHubServer(
web_profile_dialog=self.dialog, lockfile=lockfile, web_port=0, pool_size=1
)
self.hub.start()
self.client1 = SAMPIntegratedClient()
self.client1.connect(hub=self.hub, pool_size=1)
self.client1_id = self.client1.get_public_id()
self.client1_key = self.client1.get_private_key()
self.client2 = SAMPIntegratedWebClient()
self.client2.connect(web_port=self.hub._web_port, pool_size=2)
self.client2_id = self.client2.get_public_id()
self.client2_key = self.client2.get_private_key()
def teardown_method(self):
if self.client1.is_connected:
self.client1.disconnect()
if self.client2.is_connected:
self.client2.disconnect()
self.hub.stop()
self.dialog.stop()
# The full communication tests are run since TestWebProfile inherits
# test_main from TestStandardProfile
def test_web_profile(self):
# Check some additional queries to the server
with get_readable_fileobj(
f"http://localhost:{self.hub._web_port}/crossdomain.xml"
) as f:
assert f.read() == CROSS_DOMAIN
with get_readable_fileobj(
f"http://localhost:{self.hub._web_port}/clientaccesspolicy.xml"
) as f:
assert f.read() == CLIENT_ACCESS_POLICY
# Check headers
req = Request(f"http://localhost:{self.hub._web_port}/crossdomain.xml")
req.add_header("Origin", "test_web_profile")
resp = urlopen(req)
assert resp.getheader("Access-Control-Allow-Origin") == "test_web_profile"
assert resp.getheader("Access-Control-Allow-Headers") == "Content-Type"
assert resp.getheader("Access-Control-Allow-Credentials") == "true"
|
ec4d79932300c9142f6e190aeacdf61f0a181b9c9939fd5410b0c9701e78b16e | import os
import pickle
import random
import string
import time
from astropy.samp import SAMP_STATUS_OK
TEST_REPLY = {"samp.status": SAMP_STATUS_OK, "samp.result": {"txt": "test"}}
def write_output(mtype, private_key, sender_id, params):
filename = params["verification_file"]
    with open(filename, "wb") as f:
        pickle.dump(mtype, f)
        pickle.dump(private_key, f)
        pickle.dump(sender_id, f)
        pickle.dump(params, f)
def assert_output(mtype, private_key, sender_id, params, timeout=None):
filename = params["verification_file"]
start = time.time()
while True:
try:
with open(filename, "rb") as f:
rec_mtype = pickle.load(f)
rec_private_key = pickle.load(f)
rec_sender_id = pickle.load(f)
rec_params = pickle.load(f)
break
except (OSError, EOFError):
if timeout is not None and time.time() - start > timeout:
raise Exception(f"Timeout while waiting for file: {filename}")
assert rec_mtype == mtype
assert rec_private_key == private_key
assert rec_sender_id == sender_id
assert rec_params == params
class Receiver:
def __init__(self, client):
self.client = client
def receive_notification(self, private_key, sender_id, mtype, params, extra):
write_output(mtype, private_key, sender_id, params)
def receive_call(self, private_key, sender_id, msg_id, mtype, params, extra):
# Here we need to make sure that we first reply, *then* write out the
# file, otherwise the tests see the file and move to the next call
# before waiting for the reply to be received.
self.client.reply(msg_id, TEST_REPLY)
self.receive_notification(private_key, sender_id, mtype, params, extra)
def receive_response(self, private_key, sender_id, msg_id, response):
pass
def random_id(length=16):
return "".join(random.sample(string.ascii_letters + string.digits, length))
def random_params(directory):
return {
"verification_file": os.path.join(directory, random_id()),
"parameter1": "abcde",
"parameter2": 1331,
}
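# Hedged usage sketch (illustrative only; mirrors how the SAMP tests combine
# these helpers -- the mtype, private key, and sender id below are made-up):
#
#     params = random_params("/tmp")  # includes a fresh 'verification_file'
#     write_output("table.load.votable", "private-key", "cli#1", params)
#     assert_output("table.load.votable", "private-key", "cli#1", params,
#                   timeout=5)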
|
e8bf92499740072ff646a2696e252f276c940046bf96e649a6c2ae1cd3a6fb57 | import threading
import time
import xmlrpc.client as xmlrpc
from astropy.samp.client import SAMPClient
from astropy.samp.errors import SAMPClientError, SAMPHubError
from astropy.samp.hub import WebProfileDialog
from astropy.samp.hub_proxy import SAMPHubProxy
from astropy.samp.integrated_client import SAMPIntegratedClient
from astropy.samp.utils import ServerProxyPool
class AlwaysApproveWebProfileDialog(WebProfileDialog):
def __init__(self):
self.polling = True
WebProfileDialog.__init__(self)
def show_dialog(self, *args):
self.consent()
def poll(self):
while self.polling:
self.handle_queue()
time.sleep(0.1)
def stop(self):
self.polling = False
class SAMPWebHubProxy(SAMPHubProxy):
"""
Proxy class to simplify the client interaction with a SAMP hub (via the web
profile).
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
"""
def connect(self, pool_size=20, web_port=21012):
"""
Connect to the current SAMP Hub on localhost:web_port
Parameters
----------
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self._connected = False
try:
self.proxy = ServerProxyPool(
pool_size,
xmlrpc.ServerProxy,
f"http://127.0.0.1:{web_port}",
allow_none=1,
)
self.ping()
self._connected = True
except xmlrpc.ProtocolError as p:
raise SAMPHubError(f"Protocol Error {p.errcode}: {p.errmsg}")
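    # Hedged usage sketch (assumes a hub with the web profile enabled is
    # listening on the default port 21012; values are illustrative):
    #
    #     proxy = SAMPWebHubProxy()
    #     proxy.connect(pool_size=1, web_port=21012)
    #     proxy.ping()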
@property
def _samp_hub(self):
"""
Property to abstract away the path to the hub, which allows this class
to be used for both the standard and the web profile.
"""
return self.proxy.samp.webhub
def set_xmlrpc_callback(self, private_key, xmlrpc_addr):
raise NotImplementedError(
"set_xmlrpc_callback is not defined for the web profile"
)
def register(self, identity_info):
"""
Proxy to ``register`` SAMP Hub method.
"""
return self._samp_hub.register(identity_info)
def allow_reverse_callbacks(self, private_key, allow):
"""
Proxy to ``allowReverseCallbacks`` SAMP Hub method.
"""
return self._samp_hub.allowReverseCallbacks(private_key, allow)
def pull_callbacks(self, private_key, timeout):
"""
Proxy to ``pullCallbacks`` SAMP Hub method.
"""
return self._samp_hub.pullCallbacks(private_key, timeout)
class SAMPWebClient(SAMPClient):
"""
Utility class which provides facilities to create and manage a SAMP
compliant XML-RPC server that acts as SAMP callable web client application.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
Parameters
----------
hub : :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy`
An instance of :class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` to
be used for messaging with the SAMP Hub.
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, hub, name=None, description=None, metadata=None, callable=True):
# GENERAL
self._is_running = False
self._is_registered = False
if metadata is None:
metadata = {}
if name is not None:
metadata["samp.name"] = name
if description is not None:
metadata["samp.description.text"] = description
self._metadata = metadata
self._callable = callable
# HUB INTERACTION
self.client = None
self._public_id = None
self._private_key = None
self._hub_id = None
self._notification_bindings = {}
self._call_bindings = {
"samp.app.ping": [self._ping, {}],
"client.env.get": [self._client_env_get, {}],
}
self._response_bindings = {}
self.hub = hub
self._registration_lock = threading.Lock()
self._registered_event = threading.Event()
if self._callable:
self._thread = threading.Thread(target=self._serve_forever)
self._thread.daemon = True
def _serve_forever(self):
while self.is_running:
# Wait until we are actually registered before trying to do
# anything, to avoid busy looping
# Watch for callbacks here
self._registered_event.wait()
with self._registration_lock:
if not self._is_registered:
return
results = self.hub.pull_callbacks(self.get_private_key(), 0)
for result in results:
if result["samp.methodName"] == "receiveNotification":
self.receive_notification(
self._private_key, *result["samp.params"]
)
elif result["samp.methodName"] == "receiveCall":
self.receive_call(self._private_key, *result["samp.params"])
elif result["samp.methodName"] == "receiveResponse":
self.receive_response(self._private_key, *result["samp.params"])
self.hub.disconnect()
def register(self):
"""
Register the client to the SAMP Hub.
"""
if self.hub.is_connected:
if self._private_key is not None:
raise SAMPClientError("Client already registered")
result = self.hub.register("Astropy SAMP Web Client")
if result["samp.self-id"] == "":
raise SAMPClientError(
"Registation failed - samp.self-id was not set by the hub."
)
if result["samp.private-key"] == "":
raise SAMPClientError(
"Registation failed - samp.private-key was not set by the hub."
)
self._public_id = result["samp.self-id"]
self._private_key = result["samp.private-key"]
self._hub_id = result["samp.hub-id"]
if self._callable:
self._declare_subscriptions()
self.hub.allow_reverse_callbacks(self._private_key, True)
if self._metadata != {}:
self.declare_metadata()
self._is_registered = True
# Let the client thread proceed
self._registered_event.set()
else:
raise SAMPClientError(
"Unable to register to the SAMP Hub. Hub proxy not connected."
)
def unregister(self):
# We have to hold the registration lock if the client is callable
# to avoid a race condition where the client queries the hub for
# pushCallbacks after it has already been unregistered from the hub
with self._registration_lock:
super().unregister()
class SAMPIntegratedWebClient(SAMPIntegratedClient):
"""
A Simple SAMP web client.
In practice web clients should run from the browser, so this is provided as
a means of testing a hub's support for the web profile from Python.
This class is meant to simplify the client usage providing a proxy class
that merges the :class:`~astropy.samp.client.SAMPWebClient` and
:class:`~astropy.samp.hub_proxy.SAMPWebHubProxy` functionalities in a
simplified API.
Parameters
----------
name : str, optional
Client name (corresponding to ``samp.name`` metadata keyword).
description : str, optional
Client description (corresponding to ``samp.description.text`` metadata
keyword).
metadata : dict, optional
Client application metadata in the standard SAMP format.
callable : bool, optional
Whether the client can receive calls and notifications. If set to
`False`, then the client can send notifications and calls, but can not
receive any.
"""
def __init__(self, name=None, description=None, metadata=None, callable=True):
self.hub = SAMPWebHubProxy()
self.client = SAMPWebClient(self.hub, name, description, metadata, callable)
def connect(self, pool_size=20, web_port=21012):
"""
Connect with the current or specified SAMP Hub, start and register the
client.
Parameters
----------
pool_size : int, optional
The number of socket connections opened to communicate with the
Hub.
"""
self.hub.connect(pool_size, web_port=web_port)
self.client.start()
self.client.register()
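# Hedged usage sketch (mirrors TestWebProfile's setup; ``hub`` is assumed to
# be a running SAMPHubServer with the web profile enabled):
#
#     client = SAMPIntegratedWebClient()
#     client.connect(web_port=hub._web_port, pool_size=2)
#     ...
#     client.disconnect()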
|
cca3ae431e73dd2a1d3aca6673bffc579a3b9a056ab107efce918857c7c8c8dd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import time
import pytest
from astropy.samp import conf
from astropy.samp.hub import SAMPHubServer
def setup_module(module):
conf.use_internet = False
def test_SAMPHubServer():
"""Test that SAMPHub can be instantiated"""
SAMPHubServer(web_profile=False, mode="multiple", pool_size=1)
def test_SAMPHubServer_run():
"""Test that SAMPHub can be run"""
hub = SAMPHubServer(web_profile=False, mode="multiple", pool_size=1)
hub.start()
time.sleep(1)
hub.stop()
@pytest.mark.slow
def test_SAMPHubServer_run_repeated():
"""
Test that SAMPHub can be restarted after it has been stopped, including
when web profile support is enabled.
"""
hub = SAMPHubServer(web_profile=True, mode="multiple", pool_size=1)
hub.start()
time.sleep(1)
hub.stop()
time.sleep(1)
hub.start()
time.sleep(1)
hub.stop()
|
c4efa832a3d068620e10540f14fe6e71fbcba71c191c195ba7c1c65bf80ebb10 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.utils.iers package provides access to the tables provided by
the International Earth Rotation and Reference Systems Service, in
particular allowing interpolation of published UT1-UTC values for given
times. These are used in `astropy.time` to provide UT1 values. The polar
motions are also used for determining earth orientation for
celestial-to-terrestrial coordinate transformations
(in `astropy.coordinates`).
"""
import re
from datetime import datetime
from urllib.parse import urlparse
from warnings import warn
import erfa
import numpy as np
from astropy import config as _config
from astropy import units as u
from astropy import utils
from astropy.table import MaskedColumn, QTable
from astropy.time import Time, TimeDelta
from astropy.utils.data import (
clear_download_cache,
get_pkg_data_filename,
get_readable_fileobj,
is_url_in_cache,
)
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.state import ScienceState
__all__ = [
"Conf",
"conf",
"earth_orientation_table",
"IERS",
"IERS_B",
"IERS_A",
"IERS_Auto",
"FROM_IERS_B",
"FROM_IERS_A",
"FROM_IERS_A_PREDICTION",
"TIME_BEFORE_IERS_RANGE",
"TIME_BEYOND_IERS_RANGE",
"IERS_A_FILE",
"IERS_A_URL",
"IERS_A_URL_MIRROR",
"IERS_A_README",
"IERS_B_FILE",
"IERS_B_URL",
"IERS_B_README",
"IERSRangeError",
"IERSStaleWarning",
"IERSWarning",
"IERSDegradedAccuracyWarning",
"LeapSeconds",
"IERS_LEAP_SECOND_FILE",
"IERS_LEAP_SECOND_URL",
"IETF_LEAP_SECOND_URL",
]
# IERS-A default file name, URL, and ReadMe with content description
IERS_A_FILE = "finals2000A.all"
IERS_A_URL = "https://datacenter.iers.org/data/9/finals2000A.all"
IERS_A_URL_MIRROR = "https://maia.usno.navy.mil/ser7/finals2000A.all"
IERS_A_README = get_pkg_data_filename("data/ReadMe.finals2000A")
# IERS-B default file name, URL, and ReadMe with content description
IERS_B_FILE = get_pkg_data_filename("data/eopc04_IAU2000.62-now")
IERS_B_URL = "http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now"
IERS_B_README = get_pkg_data_filename("data/ReadMe.eopc04_IAU2000")
# LEAP SECONDS default file name, URL, and alternative format/URL
IERS_LEAP_SECOND_FILE = get_pkg_data_filename("data/Leap_Second.dat")
IERS_LEAP_SECOND_URL = "https://hpiers.obspm.fr/iers/bul/bulc/Leap_Second.dat"
IETF_LEAP_SECOND_URL = "https://www.ietf.org/timezones/data/leap-seconds.list"
# Status/source values returned by IERS.ut1_utc
FROM_IERS_B = 0
FROM_IERS_A = 1
FROM_IERS_A_PREDICTION = 2
TIME_BEFORE_IERS_RANGE = -1
TIME_BEYOND_IERS_RANGE = -2
MJD_ZERO = 2400000.5
INTERPOLATE_ERROR = """\
interpolating from IERS_Auto using predictive values that are more
than {0} days old.
Normally you should not see this error because this class
automatically downloads the latest IERS-A table. Perhaps you are
offline? If you understand what you are doing then this error can be
suppressed by setting the auto_max_age configuration variable to
``None``:
from astropy.utils.iers import conf
conf.auto_max_age = None
"""
MONTH_ABBR = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
class IERSWarning(AstropyWarning):
"""
Generic warning class for IERS.
"""
class IERSDegradedAccuracyWarning(AstropyWarning):
"""
IERS time conversion has degraded accuracy normally due to setting
``conf.auto_download = False`` and ``conf.iers_degraded_accuracy = 'warn'``.
"""
class IERSStaleWarning(IERSWarning):
"""
Downloaded IERS table may be stale.
"""
def download_file(*args, **kwargs):
"""
Overload astropy.utils.data.download_file within iers module to use a
custom (longer) wait time. This just passes through ``*args`` and
``**kwargs`` after temporarily setting the download_file remote timeout to
the local ``iers.conf.remote_timeout`` value.
"""
kwargs.setdefault(
"http_headers",
{
"User-Agent": "astropy/iers",
"Accept": "*/*",
},
)
with utils.data.conf.set_temp("remote_timeout", conf.remote_timeout):
return utils.data.download_file(*args, **kwargs)
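# Equivalent inline form of the timeout handling above (illustrative):
#
#     with utils.data.conf.set_temp("remote_timeout", conf.remote_timeout):
#         path = utils.data.download_file(IERS_A_URL, cache=True)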
def _none_to_float(value):
"""
    Convert None to a valid floating point value, in particular to handle
    ``auto_max_age = None`` (meaning no maximum age).
"""
return value if value is not None else np.finfo(float).max
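# e.g. _none_to_float(None) -> np.finfo(float).max, so with no maximum age
# configured, comparisons such as ``age > auto_max_age`` can never trigger a
# table refresh.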
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.iers`.
"""
auto_download = _config.ConfigItem(
True,
"Enable auto-downloading of the latest IERS data. If set to False "
"then the local IERS-B file will be used by default (even if the "
"full IERS file with predictions was already downloaded and cached). "
"This parameter also controls whether internet resources will be "
"queried to update the leap second table if the installed version is "
"out of date. Default is True.",
)
auto_max_age = _config.ConfigItem(
30.0,
"Maximum age (days) of predictive data before auto-downloading. "
'See "Auto refresh behavior" in astropy.utils.iers documentation for details. '
"Default is 30.",
)
iers_auto_url = _config.ConfigItem(
IERS_A_URL, "URL for auto-downloading IERS file data."
)
iers_auto_url_mirror = _config.ConfigItem(
IERS_A_URL_MIRROR, "Mirror URL for auto-downloading IERS file data."
)
remote_timeout = _config.ConfigItem(
10.0, "Remote timeout downloading IERS file data (seconds)."
)
iers_degraded_accuracy = _config.ConfigItem(
["error", "warn", "ignore"],
"IERS behavior if the range of available IERS data does not "
"cover the times when converting time scales, potentially leading "
"to degraded accuracy.",
)
system_leap_second_file = _config.ConfigItem("", "System file with leap seconds.")
iers_leap_second_auto_url = _config.ConfigItem(
IERS_LEAP_SECOND_URL, "URL for auto-downloading leap seconds."
)
ietf_leap_second_auto_url = _config.ConfigItem(
IETF_LEAP_SECOND_URL, "Alternate URL for auto-downloading leap seconds."
)
conf = Conf()
class IERSRangeError(IndexError):
"""
    Raised when dates are outside the valid range covered by the IERS table.
"""
class IERS(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
"""Cached table, returned if ``open`` is called without arguments."""
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ascii file holding IERS data,
for passing on to the ``read`` class methods (further optional
arguments that are available for some IERS subclasses can be added).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
IERS
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
# TODO: the below is really ugly and probably a bad idea. Instead,
# there should probably be an IERSBase class, which provides
# useful methods but cannot really be used on its own, and then
# *perhaps* an IERS class which provides best defaults. But for
# backwards compatibility, we use the IERS_B reader for IERS here.
if cls is IERS:
cls.iers_table = IERS_B.read(**kwargs)
else:
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
def mjd_utc(self, jd1, jd2=0.0):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or `~astropy.time.Time`
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO + mjd) + jd2
return mjd, utc
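    # Worked example for mjd_utc (illustrative numbers):
    #   jd1=2459000.5, jd2=0.25  ->  mjd=59000.0, utc=0.25,
    #   since floor(2459000.5 - 2400000.5 + 0.25) = 59000.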
def ut1_utc(self, jd1, jd2=0.0, return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1, jd2, ["UT1_UTC"], self.ut1_utc_source if return_status else None
)
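    # Hedged usage sketch (assumes the requested time lies within the table):
    #
    #     >>> from astropy.utils import iers
    #     >>> tab = iers.IERS_B.open()
    #     >>> dt, status = tab.ut1_utc(2450000.5, return_status=True)
    #     >>> bool(status == iers.FROM_IERS_B)
    #     True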
def dcip_xy(self, jd1, jd2=0.0, return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD (default 0., ignored if jd1 is Time)
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
D_x : `~astropy.units.Quantity` ['angle']
x component of CIP correction for the requested times.
D_y : `~astropy.units.Quantity` ['angle']
y component of CIP correction for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1,
jd2,
["dX_2000A", "dY_2000A"],
self.dcip_source if return_status else None,
)
def pm_xy(self, jd1, jd2=0.0, return_status=False):
"""Interpolate polar motions from IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
PM_x : `~astropy.units.Quantity` ['angle']
x component of polar motion for the requested times.
PM_y : `~astropy.units.Quantity` ['angle']
y component of polar motion for the requested times.
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(
jd1, jd2, ["PM_x", "PM_y"], self.pm_source if return_status else None
)
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""
Check that the indices from interpolation match those after clipping
to the valid table range. This method gets overridden in the IERS_Auto
class because it has different requirements.
"""
if np.any(indices_orig != indices_clipped):
if conf.iers_degraded_accuracy == "error":
msg = (
"(some) times are outside of range covered by IERS table. Cannot"
" convert with full accuracy. To allow conversion with degraded"
" accuracy set astropy.utils.iers.conf.iers_degraded_accuracy to"
' "warn" or "silent". For more information about setting this'
" configuration parameter or controlling its value globally, see"
" the Astropy configuration system documentation"
" https://docs.astropy.org/en/stable/config/index.html."
)
raise IERSRangeError(msg)
elif conf.iers_degraded_accuracy == "warn":
# No IERS data covering the time(s) and user requested a warning.
msg = (
"(some) times are outside of range covered by IERS table, "
"accuracy is degraded."
)
warn(msg, IERSDegradedAccuracyWarning)
# No IERS data covering the time(s) and user is OK with no warning.
def _interpolate(self, jd1, jd2, columns, source=None):
mjd, utc = self.mjd_utc(jd1, jd2)
# enforce array
is_scalar = not hasattr(mjd, "__array__") or mjd.ndim == 0
if is_scalar:
mjd = np.array([mjd])
utc = np.array([utc])
elif mjd.size == 0:
# Short-cut empty input.
return np.array([])
self._refresh_table_as_needed(mjd)
        # For the typical format a match will always be found (since MJD are
        # integers), so it matters which side we take; side='right' ensures
        # self['MJD'][i-1] <= mjd < self['MJD'][i].
i = np.searchsorted(self["MJD"].value, mjd, side="right")
# Get index to MJD at or just below given mjd, clipping to ensure we
# stay in range of table (status will be set below for those outside)
i1 = np.clip(i, 1, len(self) - 1)
i0 = i1 - 1
mjd_0, mjd_1 = self["MJD"][i0].value, self["MJD"][i1].value
results = []
for column in columns:
val_0, val_1 = self[column][i0], self[column][i1]
d_val = val_1 - val_0
if column == "UT1_UTC":
# Check & correct for possible leap second (correcting diff.,
# not 1st point, since jump can only happen right at 2nd point)
d_val -= d_val.round()
# Linearly interpolate (which is what TEMPO does for UT1-UTC, but
# may want to follow IERS gazette #13 for more precise
# interpolation and correction for tidal effects;
# https://maia.usno.navy.mil/iers-gaz13)
val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val
# Do not extrapolate outside range, instead just propagate last values.
val[i == 0] = self[column][0]
val[i == len(self)] = self[column][-1]
if is_scalar:
val = val[0]
results.append(val)
if source:
# Set status to source, using the routine passed in.
status = source(i1)
# Check for out of range
status[i == 0] = TIME_BEFORE_IERS_RANGE
status[i == len(self)] = TIME_BEYOND_IERS_RANGE
if is_scalar:
status = status[0]
results.append(status)
return results
else:
self._check_interpolate_indices(i1, i, np.max(mjd))
return results[0] if len(results) == 1 else results
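    # Worked interpolation example (illustrative numbers): with mjd_0=59000,
    # mjd_1=59001, val_0=0.1 s, val_1=0.2 s, a request at mjd=59000, utc=0.5
    # gives val = 0.1 + ((59000 - 59000 + 0.5) / 1.0) * 0.1 = 0.15 s.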
def _refresh_table_as_needed(self, mjd):
"""
Potentially update the IERS table in place depending on the requested
        time values in ``mjd`` and the time span of the table. The base behavior
is not to update the table. ``IERS_Auto`` overrides this method.
"""
pass
def ut1_utc_source(self, i):
"""Source for UT1-UTC. To be overridden by subclass."""
return np.zeros_like(i)
def dcip_source(self, i):
"""Source for CIP correction. To be overridden by subclass."""
return np.zeros_like(i)
def pm_source(self, i):
"""Source for polar motion. To be overridden by subclass."""
return np.zeros_like(i)
@property
def time_now(self):
"""
Property to provide the current time, but also allow for explicitly setting
the _time_now attribute for testing purposes.
"""
try:
return self._time_now
except Exception:
return Time.now()
def _convert_col_for_table(self, col):
# Fill masked columns with units to avoid dropped-mask warnings
# when converting to Quantity.
# TODO: Once we support masked quantities, we can drop this and
# in the code below replace b_bad with table['UT1_UTC_B'].mask, etc.
if getattr(col, "unit", None) is not None and isinstance(col, MaskedColumn):
col = col.filled(np.nan)
return super()._convert_col_for_table(col)
class IERS_A(IERS):
"""IERS Table class targeted to IERS A, provided by USNO.
These include rapid turnaround and predicted times.
See https://datacenter.iers.org/eop.php
Notes
-----
The IERS A file is not part of astropy. It can be downloaded from
``iers.IERS_A_URL`` or ``iers.IERS_A_URL_MIRROR``. See ``iers.__doc__``
for instructions on use in ``Time``, etc.
"""
iers_table = None
@classmethod
def _combine_a_b_columns(cls, iers_a):
"""
Return a new table with appropriate combination of IERS_A and B columns.
"""
# IERS A has some rows at the end that hold nothing but dates & MJD
# presumably to be filled later. Exclude those a priori -- there
# should at least be a predicted UT1-UTC and PM!
table = iers_a[np.isfinite(iers_a["UT1_UTC_A"]) & (iers_a["PolPMFlag_A"] != "")]
# This does nothing for IERS_A, but allows IERS_Auto to ensure the
# IERS B values in the table are consistent with the true ones.
table = cls._substitute_iers_b(table)
# Combine A and B columns, using B where possible.
b_bad = np.isnan(table["UT1_UTC_B"])
table["UT1_UTC"] = np.where(b_bad, table["UT1_UTC_A"], table["UT1_UTC_B"])
table["UT1Flag"] = np.where(b_bad, table["UT1Flag_A"], "B")
# Repeat for polar motions.
b_bad = np.isnan(table["PM_X_B"]) | np.isnan(table["PM_Y_B"])
table["PM_x"] = np.where(b_bad, table["PM_x_A"], table["PM_X_B"])
table["PM_y"] = np.where(b_bad, table["PM_y_A"], table["PM_Y_B"])
table["PolPMFlag"] = np.where(b_bad, table["PolPMFlag_A"], "B")
b_bad = np.isnan(table["dX_2000A_B"]) | np.isnan(table["dY_2000A_B"])
table["dX_2000A"] = np.where(b_bad, table["dX_2000A_A"], table["dX_2000A_B"])
table["dY_2000A"] = np.where(b_bad, table["dY_2000A_A"], table["dY_2000A_B"])
table["NutFlag"] = np.where(b_bad, table["NutFlag_A"], "B")
# Get the table index for the first row that has predictive values
# PolPMFlag_A IERS (I) or Prediction (P) flag for
# Bull. A polar motion values
# UT1Flag_A IERS (I) or Prediction (P) flag for
# Bull. A UT1-UTC values
        # Since only 'P' and 'I' are possible and 'P' is guaranteed to come
        # after 'I', we can use searchsorted for a ~100x speed-up over
        # finding the first index where the flag equals 'P'.
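        # (Illustrative: for UT1Flag_A == ['I', 'I', 'P', 'P'],
        # np.searchsorted(flags, 'P') returns 2, the first predictive row.)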
p_index = min(
np.searchsorted(table["UT1Flag_A"], "P"),
np.searchsorted(table["PolPMFlag_A"], "P"),
)
table.meta["predictive_index"] = p_index
table.meta["predictive_mjd"] = table["MJD"][p_index].value
return table
@classmethod
def _substitute_iers_b(cls, table):
# See documentation in IERS_Auto.
return table
@classmethod
def read(cls, file=None, readme=None):
"""Read IERS-A table from a finals2000a.* file provided by USNO.
Parameters
----------
file : str
full path to ascii file holding IERS-A data.
Defaults to ``iers.IERS_A_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_A_README``.
Returns
-------
``IERS_A`` class instance
"""
if file is None:
file = IERS_A_FILE
if readme is None:
readme = IERS_A_README
iers_a = super().read(file, format="cds", readme=readme)
# Combine the A and B data for UT1-UTC and PM columns
table = cls._combine_a_b_columns(iers_a)
table.meta["data_path"] = file
table.meta["readme_path"] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
ut1flag = self["UT1Flag"][i]
source = np.ones_like(i) * FROM_IERS_B
source[ut1flag == "I"] = FROM_IERS_A
source[ut1flag == "P"] = FROM_IERS_A_PREDICTION
return source
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
nutflag = self["NutFlag"][i]
source = np.ones_like(i) * FROM_IERS_B
source[nutflag == "I"] = FROM_IERS_A
source[nutflag == "P"] = FROM_IERS_A_PREDICTION
return source
def pm_source(self, i):
"""Set polar motion source flag for entries in IERS table"""
pmflag = self["PolPMFlag"][i]
source = np.ones_like(i) * FROM_IERS_B
source[pmflag == "I"] = FROM_IERS_A
source[pmflag == "P"] = FROM_IERS_A_PREDICTION
return source
class IERS_B(IERS):
"""IERS Table class targeted to IERS B, provided by IERS itself.
These are final values; see https://www.iers.org/IERS/EN/Home/home_node.html
Notes
-----
    If the package IERS B file (``iers.IERS_B_FILE``) is out of date, a new
version can be downloaded from ``iers.IERS_B_URL``.
"""
iers_table = None
@classmethod
def read(cls, file=None, readme=None, data_start=14):
"""Read IERS-B table from a eopc04_iau2000.* file provided by IERS.
Parameters
----------
file : str
full path to ascii file holding IERS-B data.
Defaults to package version, ``iers.IERS_B_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_B_README``.
data_start : int
starting row. Default is 14, appropriate for standard IERS files.
Returns
-------
``IERS_B`` class instance
"""
if file is None:
file = IERS_B_FILE
if readme is None:
readme = IERS_B_README
table = super().read(file, format="cds", readme=readme, data_start=data_start)
table.meta["data_path"] = file
table.meta["readme_path"] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def pm_source(self, i):
"""Set PM source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
class IERS_Auto(IERS_A):
"""
Provide most-recent IERS data and automatically handle downloading
of updated values as necessary.
"""
iers_table = None
@classmethod
def open(cls):
"""If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to True (default), then open a recent version of the IERS-A
table with predictions for UT1-UTC and polar motion out to
approximately one year from now. If the available version of this file
is older than ``astropy.utils.iers.conf.auto_max_age`` days old
(or non-existent) then it will be downloaded over the network and cached.
If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to False then ``astropy.utils.iers.IERS()`` is returned. This
is normally the IERS-B table that is supplied with astropy.
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table.
Returns
-------
`~astropy.table.QTable` instance
With IERS (Earth rotation) data columns
"""
if not conf.auto_download:
cls.iers_table = IERS_B.open()
return cls.iers_table
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if cls.iers_table is not None:
# If the URL has changed, we need to redownload the file, so we
# should ignore the internally cached version.
if cls.iers_table.meta.get("data_url") in all_urls:
return cls.iers_table
for url in all_urls:
try:
filename = download_file(url, cache=True)
except Exception as err:
warn(f"failed to download {url}: {err}", IERSWarning)
continue
try:
cls.iers_table = cls.read(file=filename)
except Exception as err:
warn(f"malformed IERS table from {url}: {err}", IERSWarning)
continue
cls.iers_table.meta["data_url"] = url
break
else:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream if actually trying to interpolate
# predictive values.
warn("unable to download valid IERS file, using local IERS-B", IERSWarning)
cls.iers_table = IERS_B.open()
return cls.iers_table
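    # Hedged configuration sketch (illustrative): force the bundled IERS-B
    # table instead of auto-downloading IERS-A:
    #
    #     >>> from astropy.utils import iers
    #     >>> iers.conf.auto_download = False
    #     >>> tab = iers.IERS_Auto.open()   # returns the IERS_B table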
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""Check that the indices from interpolation match those after clipping to the
valid table range. The IERS_Auto class is exempted as long as it has
sufficiently recent available data so the clipped interpolation is
always within the confidence bounds of current Earth rotation
knowledge.
"""
predictive_mjd = self.meta["predictive_mjd"]
# See explanation in _refresh_table_as_needed for these conditions
auto_max_age = _none_to_float(conf.auto_max_age)
if (
max_input_mjd > predictive_mjd
and self.time_now.mjd - predictive_mjd > auto_max_age
):
raise ValueError(INTERPOLATE_ERROR.format(auto_max_age))
def _refresh_table_as_needed(self, mjd):
"""Potentially update the IERS table in place depending on the requested
time values in ``mjd`` and the time span of the table.
For IERS_Auto the behavior is that the table is refreshed from the IERS
server if both the following apply:
- Any of the requested IERS values are predictive. The IERS-A table
contains predictive data out for a year after the available
definitive values.
        - The first predictive values are at least ``conf.auto_max_age`` days old.
In other words the IERS-A table was created by IERS long enough
ago that it can be considered stale for predictions.
"""
max_input_mjd = np.max(mjd)
now_mjd = self.time_now.mjd
# IERS-A table contains predictive data out for a year after
# the available definitive values.
fpi = self.meta["predictive_index"]
predictive_mjd = self.meta["predictive_mjd"]
# Update table in place if necessary
auto_max_age = _none_to_float(conf.auto_max_age)
        # If auto_max_age is smaller than the IERS update interval, repeated
        # downloads may occur without getting updated values (giving an
        # IERSStaleWarning).
if auto_max_age < 10:
raise ValueError(
"IERS auto_max_age configuration value must be larger than 10 days"
)
if max_input_mjd > predictive_mjd and (now_mjd - predictive_mjd) > auto_max_age:
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
# Get the latest version
try:
filename = download_file(all_urls[0], sources=all_urls, cache="update")
except Exception as err:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(
AstropyWarning(
f'failed to download {" and ".join(all_urls)}: {err}.\nA'
" coordinate or time-related calculation might be compromised"
" or fail because the dates are not covered by the available"
' IERS file. See the "IERS data access" section of the'
" astropy documentation for additional information on working"
" offline."
)
)
return
new_table = self.__class__.read(file=filename)
new_table.meta["data_url"] = str(all_urls[0])
# New table has new values?
if new_table["MJD"][-1] > self["MJD"][-1]:
                # Replace current values from the first predictive index through
# the end of the current table. This replacement is much faster than just
# deleting all rows and then using add_row for the whole duration.
new_fpi = np.searchsorted(
new_table["MJD"].value, predictive_mjd, side="right"
)
n_replace = len(self) - fpi
self[fpi:] = new_table[new_fpi : new_fpi + n_replace]
# Sanity check for continuity
if new_table["MJD"][new_fpi + n_replace] - self["MJD"][-1] != 1.0 * u.d:
raise ValueError("unexpected gap in MJD when refreshing IERS table")
# Now add new rows in place
for row in new_table[new_fpi + n_replace :]:
self.add_row(row)
self.meta.update(new_table.meta)
else:
warn(
IERSStaleWarning(
"IERS_Auto predictive values are older than"
f" {conf.auto_max_age} days but downloading the latest table"
" did not find newer values"
)
)
@classmethod
def _substitute_iers_b(cls, table):
"""Substitute IERS B values with those from a real IERS B table.
IERS-A has IERS-B values included, but for reasons unknown these
do not match the latest IERS-B values (see comments in #4436).
Here, we use the bundled astropy IERS-B table to overwrite the values
in the downloaded IERS-A table.
"""
iers_b = IERS_B.open()
# Substitute IERS-B values for existing B values in IERS-A table
mjd_b = table["MJD"][np.isfinite(table["UT1_UTC_B"])]
i0 = np.searchsorted(iers_b["MJD"], mjd_b[0], side="left")
i1 = np.searchsorted(iers_b["MJD"], mjd_b[-1], side="right")
iers_b = iers_b[i0:i1]
n_iers_b = len(iers_b)
# If there is overlap then replace IERS-A values from available IERS-B
if n_iers_b > 0:
# Sanity check that we are overwriting the correct values
if not u.allclose(table["MJD"][:n_iers_b], iers_b["MJD"]):
raise ValueError(
"unexpected mismatch when copying IERS-B values into IERS-A table."
)
# Finally do the overwrite
table["UT1_UTC_B"][:n_iers_b] = iers_b["UT1_UTC"]
table["PM_X_B"][:n_iers_b] = iers_b["PM_x"]
table["PM_Y_B"][:n_iers_b] = iers_b["PM_y"]
table["dX_2000A_B"][:n_iers_b] = iers_b["dX_2000A"]
table["dY_2000A_B"][:n_iers_b] = iers_b["dY_2000A"]
return table
class earth_orientation_table(ScienceState):
"""Default IERS table for Earth rotation and reference systems service.
These tables are used to calculate the offsets between ``UT1`` and ``UTC``
and for conversion to Earth-based coordinate systems.
The state itself is an IERS table, as an instance of one of the
`~astropy.utils.iers.IERS` classes. The default, the auto-updating
`~astropy.utils.iers.IERS_Auto` class, should suffice for most
purposes.
Examples
--------
To temporarily use the IERS-B file packaged with astropy::
>>> from astropy.utils import iers
>>> from astropy.time import Time
>>> iers_b = iers.IERS_B.open(iers.IERS_B_FILE)
>>> with iers.earth_orientation_table.set(iers_b):
... print(Time('2000-01-01').ut1.isot)
2000-01-01T00:00:00.355
To use the most recent IERS-A file for the whole session::
>>> iers_a = iers.IERS_A.open(iers.IERS_A_URL) # doctest: +SKIP
>>> iers.earth_orientation_table.set(iers_a) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_A length=17463>...>
To go back to the default (of `~astropy.utils.iers.IERS_Auto`)::
>>> iers.earth_orientation_table.set(None) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_Auto length=17428>...>
"""
_value = None
@classmethod
def validate(cls, value):
if value is None:
value = IERS_Auto.open()
if not isinstance(value, IERS):
raise ValueError("earth_orientation_table requires an IERS Table.")
return value
class LeapSeconds(QTable):
"""Leap seconds class, holding TAI-UTC differences.
The table should hold columns 'year', 'month', 'tai_utc'.
Methods are provided to initialize the table from IERS ``Leap_Second.dat``,
IETF/ntp ``leap-seconds.list``, or built-in ERFA/SOFA, and to update the
list used by ERFA.
Notes
-----
Astropy has a built-in ``iers.IERS_LEAP_SECONDS_FILE``. Up to date versions
can be downloaded from ``iers.IERS_LEAP_SECONDS_URL`` or
``iers.LEAP_SECONDS_LIST_URL``. Many systems also store a version
of ``leap-seconds.list`` for use with ``ntp`` (e.g., on Debian/Ubuntu
systems, ``/usr/share/zoneinfo/leap-seconds.list``).
To prevent querying internet resources if the available local leap second
file(s) are out of date, set ``iers.conf.auto_download = False``. This
must be done prior to performing any ``Time`` scale transformations related
to UTC (e.g. converting from UTC to TAI).
"""
# Note: Time instances in this class should use scale='tai' to avoid
# needing leap seconds in their creation or interpretation.
_re_expires = re.compile(r"^#.*File expires on[:\s]+(\d+\s\w+\s\d+)\s*$")
_expires = None
_auto_open_files = [
"erfa",
IERS_LEAP_SECOND_FILE,
"system_leap_second_file",
"iers_leap_second_auto_url",
"ietf_leap_second_auto_url",
]
"""Files or conf attributes to try in auto_open."""
@classmethod
def open(cls, file=None, cache=False):
"""Open a leap-second list.
Parameters
----------
file : path-like or None
Full local or network path to the file holding leap-second data,
for passing on to the various ``from_`` class methods.
If 'erfa', return the data used by the ERFA library.
If `None`, use default locations from file and configuration to
find a table that is not expired.
cache : bool
Whether to use cache. Defaults to False, since leap-second files
are regularly updated.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Table with 'year', 'month', and 'tai_utc' columns, plus possibly
others.
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
introduced, i.e., mid-January or mid-July. Expiration days are thus
generally at least 150 days after the present. For the auto-loading,
        a list comprising the table shipped with astropy and the files and
        URLs in `~astropy.utils.iers.Conf` is tried, returning the first
        that is sufficiently new, or the newest among them all.
"""
if file is None:
return cls.auto_open()
if file.lower() == "erfa":
return cls.from_erfa()
if urlparse(file).netloc:
file = download_file(file, cache=cache)
# Just try both reading methods.
try:
return cls.from_iers_leap_seconds(file)
except Exception:
return cls.from_leap_seconds_list(file)
@staticmethod
def _today():
# Get current day in scale='tai' without going through a scale change
# (so we do not need leap seconds).
s = "{0.year:04d}-{0.month:02d}-{0.day:02d}".format(datetime.utcnow())
return Time(s, scale="tai", format="iso", out_subfmt="date")
@classmethod
def auto_open(cls, files=None):
"""Attempt to get an up-to-date leap-second list.
The routine will try the files in sequence until it finds one
whose expiration date is "good enough" (see below). If none
are good enough, it returns the one with the most recent expiration
date, warning if that file is expired.
For remote files that are cached already, the cached file is tried
first before attempting to retrieve it again.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses
``cls._auto_open_files``.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Up to date leap-second table
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
introduced, i.e., mid-January or mid-July. Expiration days are thus
generally at least 150 days after the present. We look for a file
        that expires more than 180 - `~astropy.utils.iers.Conf.auto_max_age`
        days after the present.
"""
offset = 180 - (30 if conf.auto_max_age is None else conf.auto_max_age)
good_enough = cls._today() + TimeDelta(offset, format="jd")
if files is None:
# Basic files to go over (entries in _auto_open_files can be
# configuration items, which we want to be sure are up to date).
files = [getattr(conf, f, f) for f in cls._auto_open_files]
# Remove empty entries.
files = [f for f in files if f]
# Our trials start with normal files and remote ones that are
# already in cache. The bools here indicate that the cache
# should be used.
trials = [
(f, True) for f in files if not urlparse(f).netloc or is_url_in_cache(f)
]
# If we are allowed to download, we try downloading new versions
# if none of the above worked.
if conf.auto_download:
trials += [(f, False) for f in files if urlparse(f).netloc]
self = None
err_list = []
# Go through all entries, and return the first one that
# is not expired, or the most up to date one.
for f, allow_cache in trials:
if not allow_cache:
clear_download_cache(f)
try:
trial = cls.open(f, cache=True)
except Exception as exc:
err_list.append(exc)
continue
if self is None or trial.expires > self.expires:
self = trial
self.meta["data_url"] = str(f)
if self.expires > good_enough:
break
if self is None:
raise ValueError(
"none of the files could be read. The "
f"following errors were raised:\n {err_list}"
)
if self.expires < self._today() and conf.auto_max_age is not None:
warn("leap-second file is expired.", IERSStaleWarning)
return self
@property
def expires(self):
"""The limit of validity of the table."""
return self._expires
@classmethod
def _read_leap_seconds(cls, file, **kwargs):
"""Read a file, identifying expiration by matching 'File expires'"""
expires = None
# Find expiration date.
with get_readable_fileobj(file) as fh:
lines = fh.readlines()
for line in lines:
match = cls._re_expires.match(line)
if match:
day, month, year = match.groups()[0].split()
month_nb = MONTH_ABBR.index(month[:3]) + 1
expires = Time(
f"{year}-{month_nb:02d}-{day}", scale="tai", out_subfmt="date"
)
break
else:
raise ValueError(f"did not find expiration date in {file}")
self = cls.read(lines, format="ascii.no_header", **kwargs)
self._expires = expires
return self
@classmethod
def from_iers_leap_seconds(cls, file=IERS_LEAP_SECOND_FILE):
"""Create a table from a file like the IERS ``Leap_Second.dat``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IERS. By default, uses
``iers.IERS_LEAP_SECOND_FILE``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on 28 June 2020'
"""
return cls._read_leap_seconds(
file, names=["mjd", "day", "month", "year", "tai_utc"]
)
@classmethod
def from_leap_seconds_list(cls, file):
"""Create a table from a file like the IETF ``leap-seconds.list``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IETF. Up to date versions
can be retrieved from ``iers.IETF_LEAP_SECOND_URL``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on: 28 June 2020'
"""
from astropy.io.ascii import convert_numpy # Here to avoid circular import
names = ["ntp_seconds", "tai_utc", "comment", "day", "month", "year"]
# Note: ntp_seconds does not fit in 32 bit, so causes problems on
# 32-bit systems without the np.int64 converter.
self = cls._read_leap_seconds(
file,
names=names,
include_names=names[:2],
converters={"ntp_seconds": [convert_numpy(np.int64)]},
)
self["mjd"] = (self["ntp_seconds"] / 86400 + 15020).round()
# Note: cannot use Time.ymdhms, since that might require leap seconds.
isot = Time(self["mjd"], format="mjd", scale="tai").isot
ymd = np.array(
[[int(part) for part in t.partition("T")[0].split("-")] for t in isot]
)
self["year"], self["month"], self["day"] = ymd.T
return self
@classmethod
def from_erfa(cls, built_in=False):
"""Create table from the leap-second list in ERFA.
Parameters
----------
built_in : bool
If `False` (default), retrieve the list currently used by ERFA,
which may have been updated. If `True`, retrieve the list shipped
with erfa.
"""
current = cls(erfa.leap_seconds.get())
current._expires = Time(
"{0.year:04d}-{0.month:02d}-{0.day:02d}".format(erfa.leap_seconds.expires),
scale="tai",
)
if not built_in:
return current
try:
erfa.leap_seconds.set(None) # reset to defaults
return cls.from_erfa(built_in=False)
finally:
erfa.leap_seconds.set(current)
def update_erfa_leap_seconds(self, initialize_erfa=False):
"""Add any leap seconds not already present to the ERFA table.
This method matches leap seconds with those present in the ERFA table,
and extends the latter as necessary.
Parameters
----------
initialize_erfa : bool, or 'only', or 'empty'
Initialize the ERFA leap second table to its built-in value before
trying to expand it. This is generally not needed but can help
in case it somehow got corrupted. If equal to 'only', the ERFA
            table is reinitialized and no attempt is made to update it.
If 'empty', the leap second table is emptied before updating, i.e.,
it is overwritten altogether (note that this may break things in
surprising ways, as most leap second tables do not include pre-1970
pseudo leap-seconds; you were warned).
Returns
-------
n_update : int
Number of items updated.
Raises
------
ValueError
If the leap seconds in the table are not on 1st of January or July,
or if the matches are inconsistent. This would normally suggest
a corrupted leap second table, but might also indicate that the
ERFA table was corrupted. If needed, the ERFA table can be reset
by calling this method with an appropriate value for
``initialize_erfa``.
"""
if initialize_erfa == "empty":
# Initialize to empty and update is the same as overwrite.
erfa.leap_seconds.set(self)
return len(self)
if initialize_erfa:
erfa.leap_seconds.set()
if initialize_erfa == "only":
return 0
return erfa.leap_seconds.update(self)
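
# Illustrative usage sketch (not part of the astropy source): exercises the
# auto-opening and ERFA-update machinery defined above. Output depends on the
# local cache, network access, and ``iers.conf`` settings.
def _example_leap_seconds_auto_open():
    # Pick the first sufficiently fresh table among the bundled file, any
    # system file, and (if conf.auto_download is True) the IERS/IETF URLs.
    ls = LeapSeconds.auto_open()
    print(ls.expires)  # expiry Time of the chosen table
    print(ls.meta["data_url"])  # which source was actually used
    # Push any leap seconds missing from the ERFA table.
    n_update = ls.update_erfa_leap_seconds()
    print(f"{n_update} entries added to the ERFA table")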
|
dda6338ae27c841d06145a53e5a0f08896dcbd683b4521bce44204f2ccc34f61 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A collection of functions for checking various XML-related strings for
standards compliance.
"""
import re
import urllib.parse
def check_id(ID):
"""
Returns `True` if *ID* is a valid XML ID.
"""
return re.match(r"^[A-Za-z_][A-Za-z0-9_\.\-]*$", ID) is not None
def fix_id(ID):
"""
Given an arbitrary string, create one that can be used as an xml
id. This is rather simplistic at the moment, since it just
replaces non-valid characters with underscores.
"""
if re.match(r"^[A-Za-z_][A-Za-z0-9_\.\-]*$", ID):
return ID
if len(ID):
corrected = ID
if not len(corrected) or re.match("^[^A-Za-z_]$", corrected[0]):
corrected = "_" + corrected
corrected = re.sub(r"[^A-Za-z_]", "_", corrected[0]) + re.sub(
r"[^A-Za-z0-9_\.\-]", "_", corrected[1:]
)
return corrected
return ""
_token_regex = r"(?![\r\n\t ])[^\r\n\t]*(?![\r\n\t ])"
def check_token(token):
"""
Returns `True` if *token* is a valid XML token, as defined by XML
Schema Part 2.
"""
return (
token == ""
or re.match(r"[^\r\n\t ]?([^\r\n\t ]| [^\r\n\t ])*[^\r\n\t ]?$", token)
is not None
)
def check_mime_content_type(content_type):
"""
Returns `True` if *content_type* is a valid MIME content type
(syntactically at least), as defined by RFC 2045.
"""
ctrls = "".join(chr(x) for x in range(0, 0x20))
token_regex = f'[^()<>@,;:\\"/[\\]?= {ctrls}\x7f]+'
return (
re.match(rf"(?P<type>{token_regex})/(?P<subtype>{token_regex})$", content_type)
is not None
)
def check_anyuri(uri):
"""
Returns `True` if *uri* is a valid URI as defined in RFC 2396.
"""
if (
re.match(
(
r"(([a-zA-Z][0-9a-zA-Z+\-\.]*:)?/{0,2}[0-9a-zA-Z;"
+ r"/?:@&=+$\.\-_!~*'()%]+)?(#[0-9a-zA-Z;/?:@&=+$\.\-_!~*'()%]+)?"
),
uri,
)
is None
):
return False
try:
urllib.parse.urlparse(uri)
except Exception:
return False
return True
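
# Illustrative sketch (not part of the original module): expected behavior of
# the checkers and the fixer above.
def _example_checks():
    assert check_id("_Fof32") and not check_id("32Fof")
    assert fix_id("32Fof") == "_32Fof"  # invalid leading character is prefixed
    assert check_token("one two") and not check_token("one\ttwo")
    assert check_mime_content_type("text/plain")
    assert check_anyuri("https://www.astropy.org")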
|
c76919241255ac89ccbc8a9095d4d0e30eb9483cdbd8a7633e26068601beaf0c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""URL unescaper functions."""
# STDLIB
from xml.sax import saxutils
__all__ = ["unescape_all"]
# This is DIY
_bytes_entities = {
    b"&amp;": b"&",
    b"&lt;": b"<",
    b"&gt;": b">",
    b"&amp;&amp;": b"&",
    b"&amp;amp;": b"&",
    b"%2F": b"/",
}
_bytes_keys = [b"&amp;&amp;", b"&amp;amp;", b"&amp;", b"&lt;", b"&gt;", b"%2F"]
# This is used by saxutils
_str_entities = {"&amp;&amp;": "&", "&amp;amp;": "&", "%2F": "/"}
_str_keys = ["&amp;&amp;", "&amp;amp;", "&amp;", "&lt;", "&gt;", "%2F"]
def unescape_all(url):
"""Recursively unescape a given URL.
    .. note:: '&amp;&amp;' becomes a single '&'.
Parameters
----------
url : str or bytes
URL to unescape.
Returns
-------
clean_url : str or bytes
Unescaped URL.
"""
if isinstance(url, bytes):
func2use = _unescape_bytes
keys2use = _bytes_keys
else:
func2use = _unescape_str
keys2use = _str_keys
clean_url = func2use(url)
not_done = [clean_url.count(key) > 0 for key in keys2use]
if True in not_done:
return unescape_all(clean_url)
else:
return clean_url
def _unescape_str(url):
return saxutils.unescape(url, _str_entities)
def _unescape_bytes(url):
clean_url = url
for key in _bytes_keys:
clean_url = clean_url.replace(key, _bytes_entities[key])
return clean_url
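
# Illustrative sketch (not part of the original module): the recursion above
# keeps unescaping until none of the listed keys remain, so doubly-escaped
# URLs collapse in a single call.
def _example_unescape_all():
    assert unescape_all("a%2Fb&amp;amp;c") == "a/b&c"
    assert unescape_all(b"a%2Fb&amp;amp;c") == b"a/b&c"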
|
e6324c2ba4ae18676e4867091c67e99bbce2e0895387b7b7c1262c6754c3de6c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class that makes it simple to stream out well-formed and
nicely-indented XML.
"""
# STDLIB
import contextlib
import textwrap
try:
from . import _iterparser
except ImportError:
def xml_escape_cdata(s):
"""
Escapes &, < and > in an XML CDATA string.
"""
        s = s.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
return s
def xml_escape(s):
"""
Escapes &, ', ", < and > in an XML attribute value.
"""
        s = s.replace("&", "&amp;")
        s = s.replace("'", "&apos;")
        s = s.replace('"', "&quot;")
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
return s
else:
xml_escape_cdata = _iterparser.escape_xml_cdata
xml_escape = _iterparser.escape_xml
class XMLWriter:
"""
A class to write well-formed and nicely indented XML.
Use like this::
w = XMLWriter(fh)
with w.tag('html'):
with w.tag('body'):
w.data('This is the content')
Which produces::
<html>
<body>
This is the content
</body>
</html>
"""
def __init__(self, file):
"""
Parameters
----------
file : writable file-like
"""
self.write = file.write
if hasattr(file, "flush"):
self.flush = file.flush
self._open = 0 # true if start tag is open
self._tags = []
self._data = []
self._indentation = " " * 64
self.xml_escape_cdata = xml_escape_cdata
self.xml_escape = xml_escape
def _flush(self, indent=True, wrap=False):
"""
Flush internal buffers.
"""
if self._open:
if indent:
self.write(">\n")
else:
self.write(">")
self._open = 0
if self._data:
data = "".join(self._data)
if wrap:
indent = self.get_indentation_spaces(1)
data = textwrap.fill(
data, initial_indent=indent, subsequent_indent=indent
)
self.write("\n")
self.write(self.xml_escape_cdata(data))
self.write("\n")
self.write(self.get_indentation_spaces())
else:
self.write(self.xml_escape_cdata(data))
self._data = []
def start(self, tag, attrib={}, **extra):
"""
Opens a new element. Attributes can be given as keyword
arguments, or as a string/string dictionary. The method
returns an opaque identifier that can be passed to the
:meth:`close` method, to close all open elements up to and
including this one.
Parameters
----------
tag : str
The element name
attrib : dict of str -> str
Attribute dictionary. Alternatively, attributes can
be given as keyword arguments.
Returns
-------
id : int
Returns an element identifier.
"""
self._flush()
# This is just busy work -- we know our tag names are clean
# tag = xml_escape_cdata(tag)
self._data = []
self._tags.append(tag)
self.write(self.get_indentation_spaces(-1))
self.write(f"<{tag}")
if attrib or extra:
attrib = attrib.copy()
attrib.update(extra)
attrib = list(attrib.items())
attrib.sort()
for k, v in attrib:
if v is not None:
# This is just busy work -- we know our keys are clean
# k = xml_escape_cdata(k)
v = self.xml_escape(v)
self.write(f' {k}="{v}"')
self._open = 1
return len(self._tags)
@contextlib.contextmanager
def xml_cleaning_method(self, method="escape_xml", **clean_kwargs):
"""Context manager to control how XML data tags are cleaned (escaped) to
remove potentially unsafe characters or constructs.
        The default (``method='escape_xml'``) applies brute-force escaping of
        certain key XML characters like ``<``, ``>``, and ``&`` to ensure that
        the data cannot be interpreted as XML markup.
In order to explicitly allow certain XML tags (e.g. link reference or
emphasis tags), use ``method='bleach_clean'``. This sanitizes the data
string using the ``clean`` function of the
`bleach <https://bleach.readthedocs.io/en/latest/clean.html>`_ package.
Any additional keyword arguments will be passed directly to the
``clean`` function.
Finally, use ``method='none'`` to disable any sanitization. This should
be used sparingly.
Example::
w = writer.XMLWriter(ListWriter(lines))
with w.xml_cleaning_method('bleach_clean'):
w.start('td')
w.data('<a href="https://google.com">google.com</a>')
w.end()
Parameters
----------
method : str
Cleaning method. Allowed values are "escape_xml",
"bleach_clean", and "none".
**clean_kwargs : keyword args
Additional keyword args that are passed to the
bleach.clean() function.
"""
current_xml_escape_cdata = self.xml_escape_cdata
if method == "bleach_clean":
            # NOTE: bleach is imported locally to avoid importing it when
            # it is not necessary
try:
import bleach
except ImportError:
raise ValueError(
"bleach package is required when HTML escaping is disabled.\n"
'Use "pip install bleach".'
)
if clean_kwargs is None:
clean_kwargs = {}
self.xml_escape_cdata = lambda x: bleach.clean(x, **clean_kwargs)
elif method == "none":
self.xml_escape_cdata = lambda x: x
elif method != "escape_xml":
raise ValueError(
'allowed values of method are "escape_xml", "bleach_clean", and "none"'
)
yield
self.xml_escape_cdata = current_xml_escape_cdata
@contextlib.contextmanager
def tag(self, tag, attrib={}, **extra):
"""
A convenience method for creating wrapper elements using the
``with`` statement.
Examples
--------
>>> with writer.tag('foo'): # doctest: +SKIP
... writer.element('bar')
... # </foo> is implicitly closed here
...
Parameters are the same as to `start`.
"""
self.start(tag, attrib, **extra)
yield
self.end(tag)
def comment(self, comment):
"""
Adds a comment to the output stream.
Parameters
----------
comment : str
Comment text, as a Unicode string.
"""
self._flush()
self.write(self.get_indentation_spaces())
self.write(f"<!-- {self.xml_escape_cdata(comment)} -->\n")
def data(self, text):
"""
Adds character data to the output stream.
Parameters
----------
text : str
Character data, as a Unicode string.
"""
self._data.append(text)
def end(self, tag=None, indent=True, wrap=False):
"""
Closes the current element (opened by the most recent call to
`start`).
Parameters
----------
tag : str
Element name. If given, the tag must match the start tag.
If omitted, the current element is closed.
"""
if tag:
if not self._tags:
raise ValueError(f"unbalanced end({tag})")
if tag != self._tags[-1]:
raise ValueError(f"expected end({self._tags[-1]}), got {tag}")
else:
if not self._tags:
raise ValueError("unbalanced end()")
tag = self._tags.pop()
if self._data:
self._flush(indent, wrap)
elif self._open:
self._open = 0
self.write("/>\n")
return
if indent:
self.write(self.get_indentation_spaces())
self.write(f"</{tag}>\n")
def close(self, id):
"""
Closes open elements, up to (and including) the element identified
by the given identifier.
Parameters
----------
id : int
Element identifier, as returned by the `start` method.
"""
while len(self._tags) > id:
self.end()
def element(self, tag, text=None, wrap=False, attrib={}, **extra):
"""
Adds an entire element. This is the same as calling `start`,
`data`, and `end` in sequence. The ``text`` argument
can be omitted.
"""
self.start(tag, attrib, **extra)
if text:
self.data(text)
self.end(indent=False, wrap=wrap)
def flush(self):
pass # replaced by the constructor
def get_indentation(self):
"""
Returns the number of indentation levels the file is currently
in.
"""
return len(self._tags)
def get_indentation_spaces(self, offset=0):
"""
Returns a string of spaces that matches the current
indentation level.
"""
return self._indentation[: len(self._tags) + offset]
@staticmethod
def object_attrs(obj, attrs):
"""
Converts an object with a bunch of attributes on an object
into a dictionary for use by the `XMLWriter`.
Parameters
----------
obj : object
Any Python object
attrs : sequence of str
Attribute names to pull from the object
Returns
-------
attrs : dict
Maps attribute names to the values retrieved from
``obj.attr``. If any of the attributes is `None`, it will
not appear in the output dictionary.
"""
d = {}
for attr in attrs:
if getattr(obj, attr) is not None:
d[attr.replace("_", "-")] = str(getattr(obj, attr))
return d
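
# Illustrative sketch (not part of the original module): character data and
# attribute values pass through the escaping hooks configured above.
def _example_xmlwriter():
    import io

    fh = io.StringIO()
    w = XMLWriter(fh)
    with w.tag("TABLE", attrib={"name": 'a"b'}):  # attribute value is escaped
        w.element("DESCRIPTION", "flux > 0 & mag < 20")  # CDATA is escaped
    print(fh.getvalue())
    # <TABLE name="a&quot;b">
    #  <DESCRIPTION>flux &gt; 0 &amp; mag &lt; 20</DESCRIPTION>
    # </TABLE>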
|
5c07b23e9376c212c3314028378e4f2ce55350a557007880cf15ce1a7740ba98 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import sys
from collections import defaultdict
from os.path import join
from setuptools import Extension
from extension_helpers import pkg_config
def get_extensions(build_type="release"):
XML_DIR = "astropy/utils/xml/src"
cfg = defaultdict(list)
cfg["sources"] = [join(XML_DIR, "iterparse.c")]
if int(os.environ.get("ASTROPY_USE_SYSTEM_EXPAT", 0)) or int(
os.environ.get("ASTROPY_USE_SYSTEM_ALL", 0)
):
for k, v in pkg_config(["expat"], ["expat"]).items():
cfg[k].extend(v)
else:
EXPAT_DIR = "cextern/expat/lib"
cfg["sources"].extend(
[
join(EXPAT_DIR, fn)
for fn in ["xmlparse.c", "xmlrole.c", "xmltok.c", "xmltok_impl.c"]
]
)
cfg["include_dirs"].extend([XML_DIR, EXPAT_DIR])
if sys.platform.startswith("linux"):
# This is to ensure we only export the Python entry point
# symbols and the linker won't try to use the system expat in
# place of ours.
cfg["extra_link_args"].extend(
[f"-Wl,--version-script={join(XML_DIR, 'iterparse.map')}"]
)
cfg["define_macros"].append(("HAVE_EXPAT_CONFIG_H", 1))
if sys.byteorder == "big":
cfg["define_macros"].append(("BYTEORDER", "4321"))
else:
cfg["define_macros"].append(("BYTEORDER", "1234"))
if sys.platform != "win32":
cfg["define_macros"].append(("HAVE_UNISTD_H", None))
return [Extension("astropy.utils.xml._iterparser", **cfg)]
|
9170e3658f0ab883f0ebbfe122a75f4cc104ebe2d3957bc460ac6240b0844b38 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes a fast iterator-based XML parser.
"""
# STDLIB
import contextlib
import io
import sys
# ASTROPY
from astropy.utils import data
__all__ = ["get_xml_iterator", "get_xml_encoding", "xml_readlines"]
@contextlib.contextmanager
def _convert_to_fd_or_read_function(fd):
"""
Returns a function suitable for streaming input, or a file object.
This function is only useful if passing off to C code where:
- If it's a real file object, we want to use it as a real
C file object to avoid the Python overhead.
- If it's not a real file object, it's much handier to just
have a Python function to call.
This is somewhat quirky behavior, of course, which is why it is
private. For a more useful version of similar behavior, see
`astropy.utils.misc.get_readable_fileobj`.
Parameters
----------
fd : object
May be:
- a file object. If the file is uncompressed, this raw
file object is returned verbatim. Otherwise, the read
method is returned.
- a function that reads from a stream, in which case it is
returned verbatim.
- a file path, in which case it is opened. Again, like a
file object, if it's uncompressed, a raw file object is
returned, otherwise its read method.
- an object with a :meth:`read` method, in which case that
method is returned.
Returns
-------
fd : context-dependent
See above.
"""
if callable(fd):
yield fd
return
with data.get_readable_fileobj(fd, encoding="binary") as new_fd:
if sys.platform.startswith("win"):
yield new_fd.read
else:
if isinstance(new_fd, io.FileIO):
yield new_fd
else:
yield new_fd.read
def _fast_iterparse(fd, buffersize=2**10):
from xml.parsers import expat
if not callable(fd):
read = fd.read
else:
read = fd
queue = []
text = []
def start(name, attr):
queue.append(
(True, name, attr, (parser.CurrentLineNumber, parser.CurrentColumnNumber))
)
del text[:]
def end(name):
queue.append(
(
False,
name,
"".join(text).strip(),
(parser.CurrentLineNumber, parser.CurrentColumnNumber),
)
)
parser = expat.ParserCreate()
parser.specified_attributes = True
parser.StartElementHandler = start
parser.EndElementHandler = end
parser.CharacterDataHandler = text.append
Parse = parser.Parse
data = read(buffersize)
while data:
Parse(data, False)
yield from queue
del queue[:]
data = read(buffersize)
Parse("", True)
yield from queue
# Try to import the C version of the iterparser, otherwise fall back
# to the Python implementation above.
_slow_iterparse = _fast_iterparse
try:
from . import _iterparser
_fast_iterparse = _iterparser.IterParser
except ImportError:
pass
@contextlib.contextmanager
def get_xml_iterator(source, _debug_python_based_parser=False):
"""
Returns an iterator over the elements of an XML file.
The iterator doesn't ever build a tree, so it is much more memory
and time efficient than the alternative in ``cElementTree``.
Parameters
----------
source : path-like, readable file-like, or callable
Handle that contains the data or function that reads it.
If a function or callable object, it must directly read from a stream.
Non-callable objects must define a ``read`` method.
Returns
-------
parts : iterator
The iterator returns 4-tuples (*start*, *tag*, *data*, *pos*):
- *start*: when `True` is a start element event, otherwise
an end element event.
- *tag*: The name of the element
        - *data*: Depends on the value of *start*:
- if *start* == `True`, data is a dictionary of
attributes
- if *start* == `False`, data is a string containing
the text content of the element
- *pos*: Tuple (*line*, *col*) indicating the source of the
event.
"""
with _convert_to_fd_or_read_function(source) as fd:
if _debug_python_based_parser:
context = _slow_iterparse(fd)
else:
context = _fast_iterparse(fd)
yield iter(context)
def get_xml_encoding(source):
"""
Determine the encoding of an XML file by reading its header.
Parameters
----------
source : path-like, readable file-like, or callable
Handle that contains the data or function that reads it.
If a function or callable object, it must directly read from a stream.
Non-callable objects must define a ``read`` method.
Returns
-------
encoding : str
"""
with get_xml_iterator(source) as iterator:
start, tag, data, pos = next(iterator)
if not start or tag != "xml":
raise OSError("Invalid XML file")
# The XML spec says that no encoding === utf-8
return data.get("encoding") or "utf-8"
def xml_readlines(source):
"""
Get the lines from a given XML file. Correctly determines the
encoding and always returns unicode.
Parameters
----------
source : path-like, readable file-like, or callable
Handle that contains the data or function that reads it.
If a function or callable object, it must directly read from a stream.
Non-callable objects must define a ``read`` method.
Returns
-------
lines : list of unicode
"""
encoding = get_xml_encoding(source)
with data.get_readable_fileobj(source, encoding=encoding) as input:
input.seek(0)
xml_lines = input.readlines()
return xml_lines
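
# Illustrative sketch (not part of the original module): stream events from an
# in-memory document through the read-function interface described above.
def _example_get_xml_iterator():
    import io

    buf = io.BytesIO(b"<root a='1'><child>text</child></root>")
    with get_xml_iterator(buf.read) as parts:
        for start, tag, data, pos in parts:
            print(start, tag, data, pos)
    # With the pure-Python parser the first event printed is:
    #   True root {'a': '1'} (1, 0)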
|
5c80d94158a8a54f0e31e0813e10fd1adbc5a4b2f65e23cec4ce02f0aca51eee | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Functions to do XML schema and DTD validation. At the moment, this
makes a subprocess call to xmllint. This could use a Python-based
library at some point in the future, if something appropriate could be
found.
"""
import os
import subprocess
def validate_schema(filename, schema_file):
"""
Validates an XML file against a schema or DTD.
Parameters
----------
filename : str
The path to the XML file to validate
schema_file : str
The path to the XML schema or DTD
Returns
-------
returncode, stdout, stderr : int, str, str
Returns the returncode from xmllint and the stdout and stderr
as strings
"""
base, ext = os.path.splitext(schema_file)
if ext == ".xsd":
schema_part = "--schema"
elif ext == ".dtd":
schema_part = "--dtdvalid"
else:
raise TypeError("schema_file must be a path to an XML Schema or DTD")
p = subprocess.Popen(
["xmllint", "--noout", "--nonet", schema_part, schema_file, filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = p.communicate()
if p.returncode == 127:
raise OSError("xmllint not found, so can not validate schema")
elif p.returncode < 0:
from astropy.utils.misc import signal_number_to_name
raise OSError(
"xmllint was terminated by signal '{}'".format(
signal_number_to_name(-p.returncode)
)
)
return p.returncode, stdout, stderr
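
# Illustrative sketch (not part of the original module); the file names here
# are hypothetical and ``xmllint`` must be installed and on the PATH.
def _example_validate_schema():
    returncode, stdout, stderr = validate_schema("votable.xml", "VOTable-1.4.xsd")
    if returncode != 0:
        print(stderr.decode())  # xmllint's validation messages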
|
8bca71b0e0352a2f5a9254e902de695da13e19441655f4343aca99c8b26ef348 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import locale
import os
import urllib.error
from datetime import datetime
import numpy as np
import pytest
from astropy.io import fits
from astropy.utils import data, misc
def test_isiterable():
assert misc.isiterable(2) is False
assert misc.isiterable([2]) is True
assert misc.isiterable([1, 2, 3]) is True
assert misc.isiterable(np.array(2)) is False
assert misc.isiterable(np.array([1, 2, 3])) is True
def test_signal_number_to_name_no_failure():
# Regression test for #5340: ensure signal_number_to_name throws no
# AttributeError (it used ".iteritems()" which was removed in Python3).
misc.signal_number_to_name(0)
@pytest.mark.remote_data
def test_api_lookup():
try:
strurl = misc.find_api_page("astropy.utils.misc", "dev", False, timeout=5)
objurl = misc.find_api_page(misc, "dev", False, timeout=5)
except urllib.error.URLError:
if os.environ.get("CI", False):
pytest.xfail("Timed out in CI")
else:
raise
assert strurl == objurl
assert (
strurl
== "http://devdocs.astropy.org/utils/index.html#module-astropy.utils.misc"
)
# Try a non-dev version
objurl = misc.find_api_page(misc, "v3.2.1", False, timeout=3)
assert (
objurl
== "https://docs.astropy.org/en/v3.2.1/utils/index.html#module-astropy.utils.misc"
)
def test_skip_hidden():
path = data.get_pkg_data_path("data")
for root, dirs, files in os.walk(path):
assert ".hidden_file.txt" in files
assert "local.dat" in files
# break after the first level since the data dir contains some other
# subdirectories that don't have these files
break
for root, dirs, files in misc.walk_skip_hidden(path):
assert ".hidden_file.txt" not in files
assert "local.dat" in files
break
def test_JsonCustomEncoder():
from astropy import units as u
assert json.dumps(np.arange(3), cls=misc.JsonCustomEncoder) == "[0, 1, 2]"
assert json.dumps(1 + 2j, cls=misc.JsonCustomEncoder) == "[1.0, 2.0]"
assert json.dumps({1, 2, 1}, cls=misc.JsonCustomEncoder) == "[1, 2]"
assert (
json.dumps(b"hello world \xc3\x85", cls=misc.JsonCustomEncoder)
== '"hello world \\u00c5"'
)
assert json.dumps({1: 2}, cls=misc.JsonCustomEncoder) == '{"1": 2}' # default
assert json.dumps({1: u.m}, cls=misc.JsonCustomEncoder) == '{"1": "m"}'
# Quantities
tmp = json.dumps({"a": 5 * u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp)
tmpd = {"a": {"unit": "cm", "value": 5.0}}
assert newd == tmpd
tmp2 = json.dumps({"a": np.arange(2) * u.cm}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp2)
tmpd = {"a": {"unit": "cm", "value": [0.0, 1.0]}}
assert newd == tmpd
tmp3 = json.dumps({"a": np.arange(2) * u.erg / u.s}, cls=misc.JsonCustomEncoder)
newd = json.loads(tmp3)
tmpd = {"a": {"unit": "erg / s", "value": [0.0, 1.0]}}
assert newd == tmpd
def test_JsonCustomEncoder_FITS_rec_from_files():
with fits.open(
fits.util.get_testdata_filepath("variable_length_table.fits")
) as hdul:
assert (
json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder)
== "[[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]"
)
with fits.open(fits.util.get_testdata_filepath("btable.fits")) as hdul:
assert (
json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder)
== '[[1, "Sirius", -1.4500000476837158, "A1V"], '
'[2, "Canopus", -0.7300000190734863, "F0Ib"], '
'[3, "Rigil Kent", -0.10000000149011612, "G2V"]]'
)
with fits.open(fits.util.get_testdata_filepath("table.fits")) as hdul:
assert (
json.dumps(hdul[1].data, cls=misc.JsonCustomEncoder)
== '[["NGC1001", 11.100000381469727], '
'["NGC1002", 12.300000190734863], '
'["NGC1003", 15.199999809265137]]'
)
def test_set_locale():
# First, test if the required locales are available
current = locale.setlocale(locale.LC_ALL)
try:
locale.setlocale(locale.LC_ALL, "en_US.utf8")
locale.setlocale(locale.LC_ALL, "fr_FR.utf8")
except locale.Error as e:
pytest.skip(f"Locale error: {e}")
finally:
locale.setlocale(locale.LC_ALL, current)
date = datetime(2000, 10, 1, 0, 0, 0)
day_mon = date.strftime("%a, %b")
with misc._set_locale("en_US.utf8"):
assert date.strftime("%a, %b") == "Sun, Oct"
with misc._set_locale("fr_FR.utf8"):
assert date.strftime("%a, %b") == "dim., oct."
# Back to original
assert date.strftime("%a, %b") == day_mon
with misc._set_locale(current):
assert date.strftime("%a, %b") == day_mon
def test_dtype_bytes_or_chars():
assert misc.dtype_bytes_or_chars(np.dtype(np.float64)) == 8
assert misc.dtype_bytes_or_chars(np.dtype(object)) is None
assert misc.dtype_bytes_or_chars(np.dtype(np.int32)) == 4
assert misc.dtype_bytes_or_chars(np.array(b"12345").dtype) == 5
assert misc.dtype_bytes_or_chars(np.array("12345").dtype) == 5
|
cd6ead2e0885352120473bbd4f36386664fd8036a214a190e7a1f7843cfb1fda | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from astropy.utils.compat.optional_deps import HAS_BLEACH
from astropy.utils.xml import check, unescaper, writer
def test_writer():
fh = io.StringIO()
w = writer.XMLWriter(fh)
with w.tag("html"):
with w.tag("body"):
w.data("This is the content")
w.comment("comment")
value = "".join(fh.getvalue().split())
assert value == "<html><body>Thisisthecontent<!--comment--></body></html>"
def test_check_id():
assert check.check_id("Fof32")
assert check.check_id("_Fof32")
assert not check.check_id("32Fof")
def test_fix_id():
assert check.fix_id("Fof32") == "Fof32"
assert check.fix_id("@#f") == "___f"
def test_check_token():
assert check.check_token("token")
assert not check.check_token("token\rtoken")
def test_check_mime_content_type():
assert check.check_mime_content_type("image/jpeg")
assert not check.check_mime_content_type("image")
def test_check_anyuri():
assert check.check_anyuri("https://github.com/astropy/astropy")
def test_unescape_all():
# str
url_in = (
"http://casu.ast.cam.ac.uk/ag/iphas-dsa%2FSubmitCone?"
"DSACAT=IDR&amp;DSATAB=Emitters&amp;"
)
url_out = (
"http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?DSACAT=IDR&DSATAB=Emitters&"
)
assert unescaper.unescape_all(url_in) == url_out
# bytes
url_in = (
b"http://casu.ast.cam.ac.uk/ag/iphas-dsa%2FSubmitCone?"
b"DSACAT=IDR&amp;DSATAB=Emitters&amp;"
)
url_out = (
b"http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?DSACAT=IDR&DSATAB=Emitters&"
)
assert unescaper.unescape_all(url_in) == url_out
def test_escape_xml():
s = writer.xml_escape("This & That")
assert type(s) == str
assert s == "This & That"
s = writer.xml_escape(1)
assert type(s) == str
assert s == "1"
s = writer.xml_escape(b"This & That")
assert type(s) == bytes
assert s == b"This & That"
@pytest.mark.skipif(HAS_BLEACH, reason="bleach is installed")
def test_escape_xml_without_bleach():
fh = io.StringIO()
w = writer.XMLWriter(fh)
with pytest.raises(
ValueError, match=r"bleach package is required when HTML escaping is disabled"
):
with w.xml_cleaning_method("bleach_clean"):
pass
@pytest.mark.skipif(not HAS_BLEACH, reason="requires bleach")
def test_escape_xml_with_bleach():
fh = io.StringIO()
w = writer.XMLWriter(fh)
# Turn off XML escaping, but still sanitize unsafe tags like <script>
with w.xml_cleaning_method("bleach_clean"):
w.start("td")
w.data("<script>x</script> <em>OK</em>")
w.end(indent=False)
    assert fh.getvalue() == "<td>&lt;script&gt;x&lt;/script&gt; <em>OK</em></td>\n"
fh = io.StringIO()
w = writer.XMLWriter(fh)
# Default is True (all XML tags escaped)
with w.xml_cleaning_method():
w.start("td")
w.data("<script>x</script> <em>OK</em>")
w.end(indent=False)
    assert (
        fh.getvalue()
        == "<td>&lt;script&gt;x&lt;/script&gt; &lt;em&gt;OK&lt;/em&gt;</td>\n"
    )
|
06e8d3f490ac0bd0043f7dc4404e494bdf767a33d7d6c7dd8e3067e8317428ac | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.utils.shapes import check_broadcast, unbroadcast
def test_check_broadcast():
assert check_broadcast((10, 1), (3,)) == (10, 3)
assert check_broadcast((10, 1), (3,), (4, 1, 1, 3)) == (4, 1, 10, 3)
with pytest.raises(ValueError):
check_broadcast((10, 2), (3,))
with pytest.raises(ValueError):
check_broadcast((10, 1), (3,), (4, 1, 2, 3))
def test_unbroadcast():
x = np.array([1, 2, 3])
y = np.broadcast_to(x, (2, 4, 3))
z = unbroadcast(y)
assert z.shape == (3,)
np.testing.assert_equal(z, x)
x = np.ones((3, 5))
y = np.broadcast_to(x, (5, 3, 5))
z = unbroadcast(y)
assert z.shape == (3, 5)
|
5ce1efac2ee665c2407e3e0c06ad3eb0fedf247c69d40084aef4782fb310ad1d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Some might be indirectly tested already in ``astropy.io.fits.tests``.
"""
import io
import numpy as np
import pytest
from astropy.table import Table
from astropy.utils.diff import diff_values, report_diff_values, where_not_allclose
@pytest.mark.parametrize("a", [np.nan, np.inf, 1.11, 1, "a"])
def test_diff_values_false(a):
assert not diff_values(a, a)
@pytest.mark.parametrize(
("a", "b"), [(np.inf, np.nan), (1.11, 1.1), (1, 2), (1, "a"), ("a", "b")]
)
def test_diff_values_true(a, b):
assert diff_values(a, b)
def test_float_comparison():
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/21
"""
f = io.StringIO()
a = np.float32(0.029751372)
b = np.float32(0.029751368)
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
# This test doesn't care about what the exact output is, just that it
# did show a difference in their text representations
assert "a>" in out
assert "b>" in out
def test_diff_types():
"""
Regression test for https://github.com/astropy/astropy/issues/4122
"""
f = io.StringIO()
a = 1.0
b = "1.0"
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
# fmt: off
assert out == (
" (float) a> 1.0\n"
" (str) b> '1.0'\n"
" ? + +\n"
)
# fmt: on
def test_diff_numeric_scalar_types():
"""Test comparison of different numeric scalar types."""
f = io.StringIO()
assert not report_diff_values(1.0, 1, fileobj=f)
out = f.getvalue()
assert out == " (float) a> 1.0\n (int) b> 1\n"
def test_array_comparison():
"""
Test diff-ing two arrays.
"""
f = io.StringIO()
a = np.arange(9).reshape(3, 3)
b = a + 1
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert (
out == " at [0, 0]:\n"
" a> 0\n"
" b> 1\n"
" at [0, 1]:\n"
" a> 1\n"
" b> 2\n"
" at [0, 2]:\n"
" a> 2\n"
" b> 3\n"
" ...and at 6 more indices.\n"
)
def test_diff_shaped_array_comparison():
"""
Test diff-ing two differently shaped arrays.
"""
f = io.StringIO()
a = np.empty((1, 2, 3))
identical = report_diff_values(a, a[0], fileobj=f)
assert not identical
out = f.getvalue()
assert (
out
== " Different array shapes:\n a> (1, 2, 3)\n ? ---\n b> (2, 3)\n"
)
def test_tablediff():
"""
Test diff-ing two simple Table objects.
"""
a = Table.read(
"""name obs_date mag_b mag_v
M31 2012-01-02 17.0 16.0
M82 2012-10-29 16.2 15.2
M101 2012-10-31 15.1 15.5""",
format="ascii",
)
b = Table.read(
"""name obs_date mag_b mag_v
M31 2012-01-02 17.0 16.5
M82 2012-10-29 16.2 15.2
M101 2012-10-30 15.1 15.5
NEW 2018-05-08 nan 9.0""",
format="ascii",
)
f = io.StringIO()
identical = report_diff_values(a, b, fileobj=f)
assert not identical
out = f.getvalue()
assert (
out == " name obs_date mag_b mag_v\n"
" ---- ---------- ----- -----\n"
" a> M31 2012-01-02 17.0 16.0\n"
" ? ^\n"
" b> M31 2012-01-02 17.0 16.5\n"
" ? ^\n"
" M82 2012-10-29 16.2 15.2\n"
" a> M101 2012-10-31 15.1 15.5\n"
" ? ^\n"
" b> M101 2012-10-30 15.1 15.5\n"
" ? ^\n"
" b> NEW 2018-05-08 nan 9.0\n"
)
# Identical
assert report_diff_values(a, a, fileobj=f)
@pytest.mark.parametrize("kwargs", [{}, {"atol": 0, "rtol": 0}])
def test_where_not_allclose(kwargs):
a = np.array([1, np.nan, np.inf, 4.5])
b = np.array([1, np.inf, np.nan, 4.6])
assert where_not_allclose(a, b, **kwargs) == ([3],)
assert len(where_not_allclose(a, a, **kwargs)[0]) == 0
|
fd43db8bf82cdc9c8119ab18a14be629c68f0e6ec27b50932e45858e2c3274bb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import secrets
from textwrap import dedent
import pytest
from astropy.utils.parsing import _TAB_HEADER
def _docstring_canary():
"""Docstring that's here just to check for -OO."""
@pytest.mark.skipif(not _docstring_canary.__doc__, reason="Test cannot be run with -OO")
def test_generate_parser(tmp_path, monkeypatch):
# Write Python code into the temporary directory, so that the
# generated tables will also go into the temporary directory.
# We use a unique suffix so that the test can be run multiple times
# without weirdness due to module caching.
suffix = secrets.token_hex(16)
lexer_file = tmp_path / f"test_parsing_lexer_{suffix}.py"
lexer_file.write_text(
dedent(
rf"""
from astropy.utils.parsing import lex
def make_lexer():
tokens = ('NUMBER', 'PLUS')
t_PLUS = r'\+'
def t_NUMBER(t):
r'\d+'
t.value = int(t.value)
return t
return lex('test_parsing_lextab_{suffix}', 'test_parsing_lexer_{suffix}')
"""
)
)
parser_file = tmp_path / f"test_parsing_parser_{suffix}.py"
parser_file.write_text(
dedent(
rf"""
from astropy.utils.parsing import yacc
def make_parser():
tokens = ('NUMBER', 'PLUS')
def p_expression_number(p):
'expression : NUMBER'
p[0] = p[1]
def p_expression_plus(p):
'expression : expression PLUS NUMBER'
p[0] = p[1] + p[3]
return yacc('test_parsing_parsetab_{suffix}', 'test_parsing_parser_{suffix}')
"""
)
)
monkeypatch.syspath_prepend(tmp_path)
lexer_mod = importlib.import_module(f"test_parsing_lexer_{suffix}")
lexer = lexer_mod.make_lexer()
parser_mod = importlib.import_module(f"test_parsing_parser_{suffix}")
parser = parser_mod.make_parser()
result = parser.parse("1+2+3", lexer=lexer)
assert result == 6
lextab = (tmp_path / f"test_parsing_lextab_{suffix}.py").read_text()
assert lextab.startswith(_TAB_HEADER.format(package=f"test_parsing_lexer_{suffix}"))
parsetab = (tmp_path / f"test_parsing_parsetab_{suffix}.py").read_text()
assert parsetab.startswith(
_TAB_HEADER.format(package=f"test_parsing_parser_{suffix}")
)
|
d4ece921cdd89a4871f02a0836dde4c41fe7e7ac95441374859a4e533bcd0897 | import abc
from collections import OrderedDict
import numpy as np
import pytest
from astropy.io import fits
from astropy.utils import metadata
from astropy.utils.metadata import (
MergeConflictError,
MetaData,
common_dtype,
enable_merge_strategies,
merge,
)
class OrderedDictSubclass(OrderedDict):
pass
class MetaBaseTest:
__metaclass__ = abc.ABCMeta
def test_none(self):
d = self.test_class(*self.args)
assert isinstance(d.meta, OrderedDict)
assert len(d.meta) == 0
@pytest.mark.parametrize(
"meta",
([dict([("a", 1)]), OrderedDict([("a", 1)]), OrderedDictSubclass([("a", 1)])]),
)
def test_mapping_init(self, meta):
d = self.test_class(*self.args, meta=meta)
assert type(d.meta) == type(meta)
assert d.meta["a"] == 1
@pytest.mark.parametrize("meta", (["ceci n'est pas un meta", 1.2, [1, 2, 3]]))
def test_non_mapping_init(self, meta):
with pytest.raises(TypeError):
self.test_class(*self.args, meta=meta)
@pytest.mark.parametrize(
"meta",
([dict([("a", 1)]), OrderedDict([("a", 1)]), OrderedDictSubclass([("a", 1)])]),
)
def test_mapping_set(self, meta):
d = self.test_class(*self.args, meta=meta)
assert type(d.meta) == type(meta)
assert d.meta["a"] == 1
@pytest.mark.parametrize("meta", (["ceci n'est pas un meta", 1.2, [1, 2, 3]]))
def test_non_mapping_set(self, meta):
with pytest.raises(TypeError):
d = self.test_class(*self.args, meta=meta)
def test_meta_fits_header(self):
header = fits.header.Header()
header.set("observer", "Edwin Hubble")
header.set("exptime", "3600")
d = self.test_class(*self.args, meta=header)
assert d.meta["OBSERVER"] == "Edwin Hubble"
class ExampleData:
meta = MetaData()
def __init__(self, meta=None):
self.meta = meta
class TestMetaExampleData(MetaBaseTest):
test_class = ExampleData
args = ()
def test_metadata_merging_conflict_exception():
"""Regression test for issue #3294.
Ensure that an exception is raised when a metadata conflict exists
and ``metadata_conflicts='error'`` has been set.
"""
data1 = ExampleData()
data2 = ExampleData()
data1.meta["somekey"] = {"x": 1, "y": 1}
data2.meta["somekey"] = {"x": 1, "y": 999}
with pytest.raises(MergeConflictError):
merge(data1.meta, data2.meta, metadata_conflicts="error")
def test_metadata_merging():
# Recursive merge
meta1 = {
"k1": {
"k1": [1, 2],
"k2": 2,
},
"k2": 2,
"k4": (1, 2),
}
meta2 = {
"k1": {"k1": [3]},
"k3": 3,
"k4": (3,),
}
out = merge(meta1, meta2, metadata_conflicts="error")
assert out == {
"k1": {
"k2": 2,
"k1": [1, 2, 3],
},
"k2": 2,
"k3": 3,
"k4": (1, 2, 3),
}
# Merge two ndarrays
meta1 = {"k1": np.array([1, 2])}
meta2 = {"k1": np.array([3])}
out = merge(meta1, meta2, metadata_conflicts="error")
assert np.all(out["k1"] == np.array([1, 2, 3]))
# Merge list and np.ndarray
meta1 = {"k1": [1, 2]}
meta2 = {"k1": np.array([3])}
assert np.all(out["k1"] == np.array([1, 2, 3]))
# Can't merge two scalar types
meta1 = {"k1": 1}
meta2 = {"k1": 2}
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts="error")
# Conflicting shape
meta1 = {"k1": np.array([1, 2])}
meta2 = {"k1": np.array([[3]])}
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts="error")
# Conflicting array type
meta1 = {"k1": np.array([1, 2])}
meta2 = {"k1": np.array(["3"])}
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts="error")
# Conflicting array type with 'silent' merging
meta1 = {"k1": np.array([1, 2])}
meta2 = {"k1": np.array(["3"])}
out = merge(meta1, meta2, metadata_conflicts="silent")
assert np.all(out["k1"] == np.array(["3"]))
def test_metadata_merging_new_strategy():
original_merge_strategies = list(metadata.MERGE_STRATEGIES)
class MergeNumbersAsList(metadata.MergeStrategy):
"""
Scalar float or int values are joined in a list.
"""
types = ((int, float), (int, float))
@classmethod
def merge(cls, left, right):
return [left, right]
class MergeConcatStrings(metadata.MergePlus):
"""
Scalar string values are concatenated
"""
types = (str, str)
enabled = False
# Normally can't merge two scalar types
meta1 = {"k1": 1, "k2": "a"}
meta2 = {"k1": 2, "k2": "b"}
# Enable new merge strategy
with enable_merge_strategies(MergeNumbersAsList, MergeConcatStrings):
assert MergeNumbersAsList.enabled
assert MergeConcatStrings.enabled
out = merge(meta1, meta2, metadata_conflicts="error")
assert out["k1"] == [1, 2]
assert out["k2"] == "ab"
assert not MergeNumbersAsList.enabled
assert not MergeConcatStrings.enabled
# Confirm the default enabled=False behavior
with pytest.raises(MergeConflictError):
merge(meta1, meta2, metadata_conflicts="error")
# Enable all MergeStrategy subclasses
with enable_merge_strategies(metadata.MergeStrategy):
assert MergeNumbersAsList.enabled
assert MergeConcatStrings.enabled
out = merge(meta1, meta2, metadata_conflicts="error")
assert out["k1"] == [1, 2]
assert out["k2"] == "ab"
assert not MergeNumbersAsList.enabled
assert not MergeConcatStrings.enabled
metadata.MERGE_STRATEGIES = original_merge_strategies
def test_common_dtype_string():
u3 = np.array(["123"])
u4 = np.array(["1234"])
b3 = np.array([b"123"])
b5 = np.array([b"12345"])
assert common_dtype([u3, u4]).endswith("U4")
assert common_dtype([b5, u4]).endswith("U5")
assert common_dtype([b3, b5]).endswith("S5")
def test_common_dtype_basic():
i8 = np.array(1, dtype=np.int64)
f8 = np.array(1, dtype=np.float64)
u3 = np.array("123")
with pytest.raises(MergeConflictError):
common_dtype([i8, u3])
assert common_dtype([i8, i8]).endswith("i8")
assert common_dtype([i8, f8]).endswith("f8")
|
70c05cd94e59bb455997e96eb69e5e44a39f983d8a6a80bc098d5fbe023a6d9d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.table import QTable
from astropy.table.index import SlicedIndex
from astropy.time import Time
from astropy.utils.data_info import dtype_info_name
STRING_TYPE_NAMES = {(True, "S"): "bytes", (True, "U"): "str"}
DTYPE_TESTS = (
(np.array(b"abcd").dtype, STRING_TYPE_NAMES[(True, "S")] + "4"),
(np.array("abcd").dtype, STRING_TYPE_NAMES[(True, "U")] + "4"),
("S4", STRING_TYPE_NAMES[(True, "S")] + "4"),
("U4", STRING_TYPE_NAMES[(True, "U")] + "4"),
(np.void, "void"),
(np.int32, "int32"),
(bool, "bool"),
(float, "float64"),
("<f4", "float32"),
("u8", "uint64"),
("c16", "complex128"),
("object", "object"),
)
@pytest.mark.parametrize("input,output", DTYPE_TESTS)
def test_dtype_info_name(input, output):
"""
Test that dtype_info_name is giving the expected output
Here the available types::
'b' boolean
'i' (signed) integer
'u' unsigned integer
'f' floating-point
'c' complex-floating point
'O' (Python) objects
'S', 'a' (byte-)string
'U' Unicode
'V' raw data (void)
"""
assert dtype_info_name(input) == output
def test_info_no_copy_numpy():
"""Test that getting a single item from Table column object does not copy info.
See #10889.
"""
col = [1, 2]
t = QTable([col], names=["col"])
t.add_index("col")
val = t["col"][0]
# Returns a numpy scalar (e.g. np.float64) with no .info
assert isinstance(val, np.number)
with pytest.raises(AttributeError):
val.info
val = t["col"][:]
assert val.info.indices == []
cols = [[1, 2] * u.m, Time([1, 2], format="cxcsec")]
@pytest.mark.parametrize("col", cols)
def test_info_no_copy_mixin_with_index(col):
"""Test that getting a single item from Table column object does not copy info.
See #10889.
"""
t = QTable([col], names=["col"])
t.add_index("col")
val = t["col"][0]
assert "info" not in val.__dict__
assert val.info.indices == []
val = t["col"][:]
assert "info" in val.__dict__
assert val.info.indices == []
val = t[:]["col"]
assert "info" in val.__dict__
assert isinstance(val.info.indices[0], SlicedIndex)
def test_info_no_copy_skycoord():
"""Test that getting a single item from Table SkyCoord column object does
not copy info. Cannot create an index on a SkyCoord currently.
"""
col = (SkyCoord([1, 2], [1, 2], unit="deg"),)
t = QTable([col], names=["col"])
val = t["col"][0]
assert "info" not in val.__dict__
assert val.info.indices == []
val = t["col"][:]
assert val.info.indices == []
val = t[:]["col"]
assert val.info.indices == []
|
af5b38b7a93049962c823b5b71f80ec81ccfe7374f835a6342ed71e4a73aff34 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import base64
import contextlib
import errno
import hashlib
import io
import itertools
import os
import pathlib
import platform
import random
import shutil
import stat
import sys
import tempfile
import urllib.error
import urllib.parse
import urllib.request
import warnings
from concurrent.futures import ThreadPoolExecutor
from itertools import islice
from tempfile import NamedTemporaryFile, TemporaryDirectory
import py.path
import pytest
import astropy.utils.data
from astropy import units as _u # u is taken
from astropy.config import paths
from astropy.utils.data import (
CacheDamaged,
CacheMissingWarning,
_deltemps,
_get_download_cache_loc,
_tempfilestodel,
cache_contents,
cache_total_size,
check_download_cache,
check_free_space_in_dir,
clear_download_cache,
compute_hash,
conf,
download_file,
download_files_in_parallel,
export_download_cache,
get_cached_urls,
get_file_contents,
get_free_space_in_dir,
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_fileobj,
get_pkg_data_path,
get_readable_fileobj,
import_download_cache,
import_file_to_cache,
is_url,
is_url_in_cache,
)
from astropy.utils.exceptions import AstropyWarning
CI = os.environ.get("CI", "false") == "true"
TESTURL = "http://www.astropy.org"
TESTURL2 = "http://www.astropy.org/about.html"
TESTURL_SSL = "https://www.astropy.org"
TESTLOCAL = get_pkg_data_filename(os.path.join("data", "local.dat"))
# NOTE: Python can be built without bz2 or lzma.
from astropy.utils.compat.optional_deps import HAS_BZ2, HAS_LZMA
# For when we need "some" test URLs
FEW = 5
# For stress testing the locking system using multiprocessing
N_PARALLEL_HAMMER = 5 # as high as 500 to replicate a bug
# For stress testing the locking system using threads
# (cheaper, works with coverage)
N_THREAD_HAMMER = 10 # as high as 1000 to replicate a bug
def can_rename_directory_in_use():
with TemporaryDirectory() as d:
d1 = os.path.join(d, "a")
d2 = os.path.join(d, "b")
f1 = os.path.join(d1, "file")
os.mkdir(d1)
with open(f1, "w") as f:
f.write("some contents\n")
try:
with open(f1):
os.rename(d1, d2)
except PermissionError:
return False
else:
return True
CAN_RENAME_DIRECTORY_IN_USE = can_rename_directory_in_use()
def url_to(path):
return pathlib.Path(path).resolve().as_uri()
@pytest.fixture
def valid_urls(tmp_path):
def _valid_urls(tmp_path):
for i in itertools.count():
c = os.urandom(16).hex()
fn = tmp_path / f"valid_{str(i)}"
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
yield u, c
return _valid_urls(tmp_path)
@pytest.fixture
def invalid_urls(tmp_path):
def _invalid_urls(tmp_path):
for i in itertools.count():
fn = tmp_path / f"invalid_{str(i)}"
if not os.path.exists(fn):
yield url_to(fn)
return _invalid_urls(tmp_path)
@pytest.fixture
def temp_cache(tmp_path):
with paths.set_temp_cache(tmp_path):
yield None
check_download_cache()
def change_tree_permission(d, writable=False):
if writable:
dirperm = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR | stat.S_IWUSR
else:
dirperm = stat.S_IRUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR
for dirpath, dirnames, filenames in os.walk(d):
os.chmod(dirpath, dirperm)
for f in filenames:
os.chmod(os.path.join(dirpath, f), fileperm)
def is_dir_readonly(d):
try:
with NamedTemporaryFile(dir=d):
return False
except PermissionError:
return True
@contextlib.contextmanager
def readonly_dir(d):
try:
change_tree_permission(d, writable=False)
yield
finally:
change_tree_permission(d, writable=True)
@pytest.fixture
def readonly_cache(tmp_path, valid_urls):
with TemporaryDirectory(dir=tmp_path) as d:
# other fixtures use the same tmp_path so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
with readonly_dir(d):
if not is_dir_readonly(d):
pytest.skip("Unable to make directory readonly")
yield urls
assert set(d.iterdir()) == files
check_download_cache()
@pytest.fixture
def fake_readonly_cache(tmp_path, valid_urls, monkeypatch):
def no_mkdir(path, mode=None):
raise OSError(errno.EPERM, "os.mkdir monkeypatched out")
def no_mkdtemp(*args, **kwargs):
"""On Windows, mkdtemp uses mkdir in a loop and therefore hangs
with it monkeypatched out.
"""
raise OSError(errno.EPERM, "os.mkdtemp monkeypatched out")
def no_TemporaryDirectory(*args, **kwargs):
raise OSError(errno.EPERM, "_SafeTemporaryDirectory monkeypatched out")
with TemporaryDirectory(dir=tmp_path) as d:
# other fixtures use the same tmp_path so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = {u for u, c in islice(valid_urls, FEW)}
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
monkeypatch.setattr(os, "mkdir", no_mkdir)
monkeypatch.setattr(tempfile, "mkdtemp", no_mkdtemp)
monkeypatch.setattr(
astropy.utils.data, "_SafeTemporaryDirectory", no_TemporaryDirectory
)
yield urls
assert set(d.iterdir()) == files
check_download_cache()
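# fake_readonly_cache simulates an unwritable cache without touching file
# permissions: creating directories or temporary workspaces inside the cache
# fails with EPERM, which is how a genuinely read-only cache manifests to the
# download machinery. See the "fake readonly" tests further down.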
def test_download_file_basic(valid_urls, temp_cache):
u, c = next(valid_urls)
assert get_file_contents(download_file(u, cache=False)) == c
assert not is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache miss
assert is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache hit
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_download_file_absolute_path(valid_urls, temp_cache):
def is_abs(p):
return p == os.path.abspath(p)
u, c = next(valid_urls)
assert is_abs(download_file(u, cache=False)) # no cache
assert is_abs(download_file(u, cache=True)) # not in cache
assert is_abs(download_file(u, cache=True)) # in cache
for k, v in cache_contents().items():
assert is_abs(v)
def test_unicode_url(valid_urls, temp_cache):
u, c = next(valid_urls)
unicode_url = "http://é—☃—è.com"
download_file(unicode_url, cache=False, sources=[u])
download_file(unicode_url, cache=True, sources=[u])
download_file(unicode_url, cache=True, sources=[])
assert is_url_in_cache(unicode_url)
assert unicode_url in cache_contents()
def test_too_long_url(valid_urls, temp_cache):
u, c = next(valid_urls)
long_url = "http://" + "a" * 256 + ".com"
download_file(long_url, cache=False, sources=[u])
download_file(long_url, cache=True, sources=[u])
download_file(long_url, cache=True, sources=[])
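# The two tests below pin down case handling: URL paths are case-sensitive, so
# /thing and /THING must map to distinct cache entries even on
# case-insensitive filesystems, while hostnames are case-insensitive and must
# alias each other.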
def test_case_collision(valid_urls, temp_cache):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f1 = download_file("http://example.com/thing", cache=True, sources=[u])
f2 = download_file("http://example.com/THING", cache=True, sources=[u2])
assert f1 != f2
assert get_file_contents(f1) != get_file_contents(f2)
def test_domain_name_case(valid_urls, temp_cache):
u, c = next(valid_urls)
download_file("http://Example.com/thing", cache=True, sources=[u])
assert is_url_in_cache("http://EXAMPLE.com/thing")
download_file("http://EXAMPLE.com/thing", cache=True, sources=[])
assert is_url_in_cache("Http://example.com/thing")
download_file("Http://example.com/thing", cache=True, sources=[])
@pytest.mark.remote_data(source="astropy")
def test_download_nocache_from_internet():
fnout = download_file(TESTURL, cache=False)
assert os.path.isfile(fnout)
@pytest.fixture
def a_binary_file(tmp_path):
fn = tmp_path / "file"
b_contents = b"\xde\xad\xbe\xef"
with open(fn, "wb") as f:
f.write(b_contents)
yield fn, b_contents
@pytest.fixture
def a_file(tmp_path):
fn = tmp_path / "file.txt"
contents = "contents\n"
with open(fn, "w") as f:
f.write(contents)
yield fn, contents
def test_temp_cache(tmp_path):
dldir0 = _get_download_cache_loc()
check_download_cache()
with paths.set_temp_cache(tmp_path):
dldir1 = _get_download_cache_loc()
check_download_cache()
assert dldir1 != dldir0
dldir2 = _get_download_cache_loc()
check_download_cache()
assert dldir2 != dldir1
assert dldir2 == dldir0
# Check that things are okay even if we exit via an exception
class Special(Exception):
pass
try:
with paths.set_temp_cache(tmp_path):
dldir3 = _get_download_cache_loc()
check_download_cache()
assert dldir3 == dldir1
raise Special
except Special:
pass
dldir4 = _get_download_cache_loc()
check_download_cache()
assert dldir4 != dldir3
assert dldir4 == dldir0
@pytest.mark.parametrize("parallel", [False, True])
def test_download_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache, parallel
):
# This is a combined test because the parallel version triggered a nasty
# bug and I was trying to track it down by comparing with the non-parallel
# version. I think the bug was that the parallel downloader didn't respect
# temporary cache settings.
# Make a big list of test URLs
u, c = next(valid_urls)
# as tuples (URL, right_content, wrong_content)
urls = [(u, c, None)]
# where to download the contents
sources = {}
# Set up some URLs to download where the "true" URL is not in the sources
# list; make the true URL valid with different contents so we can tell if
# it was loaded by mistake.
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
# For many of them the sources list starts with invalid URLs
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
# Now fetch them all
if parallel:
rs = download_files_in_parallel(
[u for (u, c, c_bad) in urls], cache=True, sources=sources
)
else:
rs = [
download_file(u, cache=True, sources=sources.get(u, None))
for (u, c, c_bad) in urls
]
assert len(rs) == len(urls)
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
assert is_url_in_cache(u)
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_download_file_threaded_many(temp_cache, valid_urls):
"""Hammer download_file with multiple threaded requests.
The goal is to stress-test the locking system. Normal parallel downloading
also does this but coverage tools lose track of which paths are explored.
"""
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(lambda u: download_file(u, cache=True), [u for (u, c) in urls]))
check_download_cache()
assert len(r) == len(urls)
    for fn, (u, c) in zip(r, urls):
        assert get_file_contents(fn) == c
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_threaded_segfault(valid_urls):
"""Demonstrate urllib's segfault."""
def slurp_url(u):
with urllib.request.urlopen(u) as remote:
block = True
while block:
block = remote.read(1024)
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
list(P.map(lambda u: slurp_url(u), [u for (u, c) in urls]))
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_download_file_threaded_many_partial_success(
temp_cache, valid_urls, invalid_urls
):
"""Hammer download_file with multiple threaded requests.
Because some of these requests fail, the locking context manager is
exercised with exceptions as well as success returns. I do not expect many
surprises from the threaded version, but the process version gave trouble
here.
"""
urls = []
contents = {}
for (u, c), i in islice(zip(valid_urls, invalid_urls), N_THREAD_HAMMER):
urls.append(u)
contents[u] = c
urls.append(i)
def get(u):
try:
return download_file(u, cache=True)
except OSError:
return None
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(get, urls))
check_download_cache()
assert len(r) == len(urls)
    for fn, u in zip(r, urls):
        if u in contents:
            assert get_file_contents(fn) == contents[u]
        else:
            assert fn is None
def test_clear_download_cache(valid_urls):
u1, c1 = next(valid_urls)
download_file(u1, cache=True)
u2, c2 = next(valid_urls)
download_file(u2, cache=True)
assert is_url_in_cache(u2)
clear_download_cache(u2)
assert not is_url_in_cache(u2)
assert is_url_in_cache(u1)
u3, c3 = next(valid_urls)
f3 = download_file(u3, cache=True)
assert is_url_in_cache(u3)
clear_download_cache(f3)
assert not is_url_in_cache(u3)
assert is_url_in_cache(u1)
u4, c4 = next(valid_urls)
f4 = download_file(u4, cache=True)
assert is_url_in_cache(u4)
clear_download_cache(compute_hash(f4))
assert not is_url_in_cache(u4)
assert is_url_in_cache(u1)
def test_clear_download_multiple_references_doesnt_corrupt_storage(
temp_cache, tmp_path
):
"""Check that files with the same hash don't confuse the storage."""
content = "Test data; doesn't matter much.\n"
def make_url():
with NamedTemporaryFile("w", dir=tmp_path, delete=False) as f:
f.write(content)
url = url_to(f.name)
clear_download_cache(url)
filename = download_file(url, cache=True)
return url, filename
a_url, a_filename = make_url()
clear_download_cache(a_filename)
assert not is_url_in_cache(a_url)
f_url, f_filename = make_url()
g_url, g_filename = make_url()
assert f_url != g_url
assert is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
clear_download_cache(f_url)
assert not is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
assert os.path.exists(
g_filename
), "Contents should not be deleted while a reference exists"
clear_download_cache(g_url)
assert not os.path.exists(
g_filename
), "No reference exists any more, file should be deleted"
@pytest.mark.parametrize("use_cache", [False, True])
def test_download_file_local_cache_survives(tmp_path, temp_cache, use_cache):
"""Confirm that downloading a local file does not delete it.
When implemented with urlretrieve (rather than urlopen) local files are
not copied to create temporaries, so importing them to the cache deleted
the original from wherever it was in the filesystem. I lost some built-in
astropy data.
"""
fn = tmp_path / "file"
contents = "some text"
with open(fn, "w") as f:
f.write(contents)
u = url_to(fn)
f = download_file(u, cache=use_cache)
assert fn not in _tempfilestodel, "File should not be deleted!"
assert os.path.isfile(fn), "File should not be deleted!"
assert get_file_contents(f) == contents
def test_sources_normal(temp_cache, valid_urls, invalid_urls):
primary, contents = next(valid_urls)
fallback1 = next(invalid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_fallback(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_ignore_primary(temp_cache, valid_urls, invalid_urls):
primary, bogus = next(valid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_multiple(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_sources_multiple_missing(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2 = next(invalid_urls)
with pytest.raises(urllib.error.URLError):
download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert not is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_update_url(tmp_path, temp_cache):
with TemporaryDirectory(dir=tmp_path) as d:
f_name = os.path.join(d, "f")
with open(f_name, "w") as f:
f.write("old")
f_url = url_to(f.name)
assert get_file_contents(download_file(f_url, cache=True)) == "old"
with open(f_name, "w") as f:
f.write("new")
assert get_file_contents(download_file(f_url, cache=True)) == "old"
assert get_file_contents(download_file(f_url, cache="update")) == "new"
# Now the URL doesn't exist any more.
assert not os.path.exists(f_name)
with pytest.raises(urllib.error.URLError):
# Direct download should fail
download_file(f_url, cache=False)
assert (
get_file_contents(download_file(f_url, cache=True)) == "new"
), "Cached version should still exist"
with pytest.raises(urllib.error.URLError):
# cannot download new version to check for updates
download_file(f_url, cache="update")
assert (
get_file_contents(download_file(f_url, cache=True)) == "new"
), "Failed update should not remove the current version"
@pytest.mark.remote_data(source="astropy")
def test_download_noprogress():
fnout = download_file(TESTURL, cache=False, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_cache():
download_dir = _get_download_cache_loc()
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache("http://this_was_never_downloaded_before.com")
# Make sure lockdir was released
lockdir = os.path.join(download_dir, "lock")
assert not os.path.isdir(lockdir), "Cache dir lock was not released!"
@pytest.mark.remote_data(source="astropy")
def test_download_certificate_verification_failed():
"""Tests for https://github.com/astropy/astropy/pull/10434"""
# First test the expected exception when download fails due to a
# certificate verification error; we simulate this by passing a bogus
# CA directory to the ssl_context argument
ssl_context = {"cafile": None, "capath": "/does/not/exist"}
msg = f"Verification of TLS/SSL certificate at {TESTURL_SSL} failed"
with pytest.raises(urllib.error.URLError, match=msg):
download_file(TESTURL_SSL, cache=False, ssl_context=ssl_context)
with pytest.warns(AstropyWarning, match=msg) as warning_lines:
fnout = download_file(
TESTURL_SSL, cache=False, ssl_context=ssl_context, allow_insecure=True
)
assert len(warning_lines) == 1
assert os.path.isfile(fnout)
def test_download_cache_after_clear(tmp_path, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
download_dir = _get_download_cache_loc()
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_parallel_from_internet_works(temp_cache):
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = "intersphinx/README"
urls = []
sources = {}
for s in ["", fileloc]:
urls.append(main_url + s)
sources[urls[-1]] = [urls[-1], mirror_url + s]
fnout = download_files_in_parallel(urls, sources=sources)
assert all([os.path.isfile(f) for f in fnout]), fnout
@pytest.mark.parametrize("method", [None, "spawn"])
def test_download_parallel_fills_cache(tmp_path, valid_urls, method):
urls = []
    # tmp_path is shared between many tests, and pointing the temporary cache
    # directly at it can cause weird interactions between them
with paths.set_temp_cache(tmp_path):
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel(
[u for (u, c) in urls], multiprocessing_start_method=method
)
assert len(rs) == len(urls)
url_set = {u for (u, c) in urls}
assert url_set <= set(get_cached_urls())
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
check_download_cache()
assert not url_set.intersection(get_cached_urls())
check_download_cache()
def test_download_parallel_with_empty_sources(valid_urls, temp_cache):
urls = []
sources = {}
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel([u for (u, c) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c) in urls)
# assert u <= set(get_cached_urls())
check_download_cache()
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
def test_download_parallel_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache
):
u, c = next(valid_urls)
urls = [(u, c, None)]
sources = {}
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
rs = download_files_in_parallel([u for (u, c, c_bad) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c, c_bad) in urls)
# assert u <= set(get_cached_urls())
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
def test_download_parallel_many(temp_cache, valid_urls):
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
r = download_files_in_parallel([u for (u, c) in td])
assert len(r) == len(td)
    for fn, (u, c) in zip(r, td):
        assert get_file_contents(fn) == c
def test_download_parallel_partial_success(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful download works.
Even in the presence of many requested URLs, presumably hitting all the
parallelism this system can manage, a download failure leads to a tidy
shutdown.
"""
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
u_bad = next(invalid_urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel([u_bad] + [u for (u, c) in td])
# Actually some files may get downloaded, others not.
# Is this good? Should we stubbornly keep trying?
# assert not any([is_url_in_cache(u) for (u, c) in td])
@pytest.mark.slow
def test_download_parallel_partial_success_lock_safe(
temp_cache, valid_urls, invalid_urls
):
"""Check that a partially successful parallel download leaves the cache unlocked.
This needs to be repeated many times because race conditions are what cause
this sort of thing, especially situations where a process might be forcibly
shut down while it holds the lock.
"""
s = random.getstate()
try:
random.seed(0)
for _ in range(N_PARALLEL_HAMMER):
td = list(islice(valid_urls, FEW))
u_bad = next(invalid_urls)
urls = [u_bad] + [u for (u, c) in td]
random.shuffle(urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel(urls)
finally:
random.setstate(s)
def test_download_parallel_update(temp_cache, tmp_path):
td = []
for i in range(N_PARALLEL_HAMMER):
c = f"{i:04d}"
fn = tmp_path / c
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
clear_download_cache(u)
td.append((fn, u, c))
r1 = download_files_in_parallel([u for (fn, u, c) in td])
assert len(r1) == len(td)
for r_1, (fn, u, c) in zip(r1, td):
assert get_file_contents(r_1) == c
td2 = []
for fn, u, c in td:
c_plus = f"{c} updated"
fn = tmp_path / c
with open(fn, "w") as f:
f.write(c_plus)
td2.append((fn, u, c, c_plus))
r2 = download_files_in_parallel([u for (fn, u, c) in td], cache=True)
assert len(r2) == len(td)
for r_2, (fn, u, c, c_plus) in zip(r2, td2):
assert get_file_contents(r_2) == c
assert c != c_plus
r3 = download_files_in_parallel([u for (fn, u, c) in td], cache="update")
assert len(r3) == len(td)
for r_3, (fn, u, c, c_plus) in zip(r3, td2):
assert get_file_contents(r_3) != c
assert get_file_contents(r_3) == c_plus
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_update_parallel(temp_cache, valid_urls):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(i):
return download_file(u, cache="update", sources=[u2])
with ThreadPoolExecutor(max_workers=N_THREAD_HAMMER) as P:
r = set(P.map(update, range(N_THREAD_HAMMER)))
check_download_cache()
for f in r:
assert get_file_contents(f) == c2
@pytest.mark.skipif(
(sys.platform.startswith("win") and CI), reason="flaky cache error on Windows CI"
)
def test_update_parallel_multi(temp_cache, valid_urls):
u, c = next(valid_urls)
iucs = list(islice(valid_urls, N_THREAD_HAMMER))
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(uc):
u2, c2 = uc
return download_file(u, cache="update", sources=[u2]), c2
with ThreadPoolExecutor(max_workers=len(iucs)) as P:
r = list(P.map(update, iucs))
check_download_cache()
assert any(get_file_contents(f) == c for (f, c) in r)
@pytest.mark.remote_data(source="astropy")
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding="utf-8") as page:
assert page.read().find("Astropy") > -1
def test_find_by_hash(valid_urls, temp_cache):
testurl, contents = next(valid_urls)
p = download_file(testurl, cache=True)
hash = compute_hash(p)
hashstr = "hash/" + hash
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(fnout)
assert not os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename(
"kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli"
)
@pytest.mark.parametrize("package", [None, "astropy", "numpy"])
def test_get_invalid(package):
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package=package)
assert not os.path.isfile(path)
assert not os.path.isdir(path)
# Package data functions
@pytest.mark.parametrize(
"filename", ["local.dat", "local.dat.gz", "local.dat.bz2", "local.dat.xz"]
)
def test_local_data_obj(filename):
if (not HAS_BZ2 and "bz2" in filename) or (not HAS_LZMA and "xz" in filename):
with pytest.raises(ValueError, match=r" format files are not supported"):
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
else:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.fixture(params=["invalid.dat.bz2", "invalid.dat.gz"])
def bad_compressed(request, tmp_path):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b"BZhinvalid"
gz_content = b"\x1f\x8b\x08invalid"
datafile = tmp_path / request.param
filename = str(datafile)
if filename.endswith(".bz2"):
contents = bz_content
elif filename.endswith(".gz"):
contents = gz_content
else:
contents = "invalid"
datafile.write_bytes(contents)
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith(".bz2")
is_xz = bad_compressed.endswith(".xz")
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_LZMA and is_xz):
with pytest.raises(
ModuleNotFoundError, match=r"does not provide the [lb]z[2m]a? module\."
):
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
f.read()
else:
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
assert f.read().rstrip().endswith(b"invalid")
def test_local_data_name():
assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith("local.dat")
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), "data")
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert os.path.normcase(filename) == (
os.path.normcase(os.path.join(data_dir, "test_package", "data", "foo.txt"))
)
finally:
sys.path.pop(0)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
with pytest.raises(RuntimeError):
get_pkg_data_filename("../../../data/README.rst")
def test_compute_hash(tmp_path):
rands = b"1234567890abcdefghijklmnopqrstuvwxyz"
filename = tmp_path / "tmp.dat"
with open(filename, "wb") as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
def test_get_pkg_data_contents():
with get_pkg_data_fileobj("data/local.dat") as f:
contents1 = f.read()
contents2 = get_pkg_data_contents("data/local.dat")
assert contents1 == contents2
@pytest.mark.remote_data(source="astropy")
def test_data_noastropy_fallback(monkeypatch):
"""
Tests to make sure the default behavior when the cache directory can't
be located is correct
"""
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv("XDG_CONFIG_HOME", "foo")
monkeypatch.delenv("XDG_CONFIG_HOME")
monkeypatch.setenv("XDG_CACHE_HOME", "bar")
monkeypatch.delenv("XDG_CACHE_HOME")
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None)
    # make sure the _find_or_create_root_dir function fails as though the
    # astropy dir could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError()
monkeypatch.setattr(paths, "_find_or_create_root_dir", osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir(rootname="astropy")
with pytest.warns(CacheMissingWarning) as warning_lines:
fnout = download_file(TESTURL, cache=True)
n_warns = len(warning_lines)
partial_warn_msgs = ["remote data cache could not be accessed", "temporary file"]
if n_warns == 4:
partial_warn_msgs.extend(["socket", "socket"])
for wl in warning_lines:
cur_w = str(wl).lower()
for i, partial_msg in enumerate(partial_warn_msgs):
if partial_msg in cur_w:
del partial_warn_msgs[i]
break
assert (
len(partial_warn_msgs) == 0
), f"Got some unexpected warnings: {partial_warn_msgs}"
assert n_warns in (2, 4), f"Expected 2 or 4 warnings, got {n_warns}"
assert os.path.isfile(fnout)
    # clearing the cache should be a no-op that doesn't affect fnout
with pytest.warns(
CacheMissingWarning, match=r".*Not clearing data cache - cache inaccessible.*"
):
clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
    # now remove it so tests don't clutter up the temp dir; this should get
    # called at exit anyway, but we do it here just to make sure it's
    # working correctly
_deltemps()
assert not os.path.isfile(fnout)
# now try with no cache
fnnocache = download_file(TESTURL, cache=False)
with open(fnnocache, "rb") as page:
assert page.read().decode("utf-8").find("Astropy") > -1
    # no warnings should be raised in fileobj because the cache is unnecessary
@pytest.mark.parametrize(
"filename",
[
"unicode.txt",
"unicode.txt.gz",
pytest.param(
"unicode.txt.bz2",
marks=pytest.mark.xfail(not HAS_BZ2, reason="no bz2 support"),
),
pytest.param(
"unicode.txt.xz",
marks=pytest.mark.xfail(not HAS_LZMA, reason="no lzma support"),
),
],
)
def test_read_unicode(filename):
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="utf-8")
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="binary")
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
# fmt: off
assert x == (
b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0\xd7\x95"
b"\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:]
)
# fmt: on
def test_compressed_stream():
gzipped_data = (
b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA=="
)
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b""
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding="binary") as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.mark.remote_data(source="astropy")
def test_invalid_location_download_raises_urlerror():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
with pytest.raises(urllib.error.URLError):
download_file("http://www.astropy.org/nonexistentfile")
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file("http://astropy.org/nonexistentfile")
@pytest.mark.remote_data(source="astropy")
def test_is_url_in_cache_remote():
assert not is_url_in_cache("http://astropy.org/nonexistentfile")
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_is_url_in_cache_local(temp_cache, valid_urls, invalid_urls):
testurl, contents = next(valid_urls)
nonexistent = next(invalid_urls)
assert not is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
download_file(testurl, cache=True, show_progress=False)
assert is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
# If non-deterministic failures happen here, see
# https://github.com/astropy/astropy/issues/9765
def test_check_download_cache(tmp_path, temp_cache, valid_urls, invalid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = tmp_path / "the.zip"
clear_download_cache()
assert not check_download_cache()
download_file(testurl, cache=True)
check_download_cache()
download_file(testurl2, cache=True)
check_download_cache()
export_download_cache(zip_file_name, [testurl, testurl2])
check_download_cache()
clear_download_cache(testurl2)
check_download_cache()
import_download_cache(zip_file_name, [testurl])
check_download_cache()
def test_export_import_roundtrip_one(tmp_path, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
f = download_file(testurl, cache=True, show_progress=False)
assert get_file_contents(f) == contents
initial_urls_in_cache = set(get_cached_urls())
zip_file_name = tmp_path / "the.zip"
export_download_cache(zip_file_name, [testurl])
clear_download_cache(testurl)
import_download_cache(zip_file_name)
assert is_url_in_cache(testurl)
assert set(get_cached_urls()) == initial_urls_in_cache
assert (
get_file_contents(download_file(testurl, cache=True, show_progress=False))
== contents
)
def test_export_url_not_present(temp_cache, valid_urls):
testurl, contents = next(valid_urls)
with NamedTemporaryFile("wb") as zip_file:
assert not is_url_in_cache(testurl)
with pytest.raises(KeyError):
export_download_cache(zip_file, [testurl])
def test_import_one(tmp_path, temp_cache, valid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = tmp_path / "the.zip"
download_file(testurl, cache=True)
download_file(testurl2, cache=True)
assert is_url_in_cache(testurl2)
export_download_cache(zip_file_name, [testurl, testurl2])
clear_download_cache(testurl)
clear_download_cache(testurl2)
import_download_cache(zip_file_name, [testurl])
assert is_url_in_cache(testurl)
assert not is_url_in_cache(testurl2)
def test_export_import_roundtrip(tmp_path, temp_cache, valid_urls):
zip_file_name = tmp_path / "the.zip"
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
export_download_cache(zip_file_name)
clear_download_cache()
import_download_cache(zip_file_name)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_import_roundtrip_stream(temp_cache, valid_urls):
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
clear_download_cache()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_overwrite_flag_works(temp_cache, valid_urls, tmp_path):
fn = tmp_path / "f.zip"
c = b"Some contents\nto check later"
with open(fn, "wb") as f:
f.write(c)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with pytest.raises(FileExistsError):
export_download_cache(fn)
assert get_file_contents(fn, encoding="binary") == c
export_download_cache(fn, overwrite=True)
assert get_file_contents(fn, encoding="binary") != c
def test_export_import_roundtrip_different_location(tmp_path, valid_urls):
original_cache = tmp_path / "original"
original_cache.mkdir()
zip_file_name = tmp_path / "the.zip"
urls = list(islice(valid_urls, FEW))
initial_urls_in_cache = {u for (u, c) in urls}
with paths.set_temp_cache(original_cache):
for u, c in urls:
download_file(u, cache=True)
assert set(get_cached_urls()) == initial_urls_in_cache
export_download_cache(zip_file_name)
new_cache = tmp_path / "new"
new_cache.mkdir()
with paths.set_temp_cache(new_cache):
import_download_cache(zip_file_name)
check_download_cache()
assert set(get_cached_urls()) == initial_urls_in_cache
for u, c in urls:
assert get_file_contents(download_file(u, cache=True)) == c
def test_cache_size_is_zero_when_empty(temp_cache):
assert not get_cached_urls()
assert cache_total_size() == 0
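# Size accounting, as asserted below: each cache entry stores the downloaded
# contents plus a small file recording the URL, so adding an entry grows the
# total by len(contents) + len(url.encode("utf-8")).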
def test_cache_size_changes_correctly_when_files_are_added_and_removed(
temp_cache, valid_urls
):
u, c = next(valid_urls)
clear_download_cache(u)
s_i = cache_total_size()
download_file(u, cache=True)
assert cache_total_size() == s_i + len(c) + len(u.encode("utf-8"))
clear_download_cache(u)
assert cache_total_size() == s_i
def test_cache_contents_agrees_with_get_urls(temp_cache, valid_urls):
r = []
for a, a_c in islice(valid_urls, FEW):
a_f = download_file(a, cache=True)
r.append((a, a_c, a_f))
assert set(cache_contents().keys()) == set(get_cached_urls())
for u, c, h in r:
assert cache_contents()[u] == h
@pytest.mark.parametrize("desired_size", [1_000_000_000_000_000_000, 1 * _u.Ebyte])
def test_free_space_checker_huge(tmp_path, desired_size):
with pytest.raises(OSError):
check_free_space_in_dir(tmp_path, desired_size)
def test_get_free_space_file_directory(tmp_path):
fn = tmp_path / "file"
with open(fn, "w"):
pass
with pytest.raises(OSError):
get_free_space_in_dir(fn)
free_space = get_free_space_in_dir(tmp_path)
assert free_space > 0 and not hasattr(free_space, "unit")
# TODO: If unit=True starts to auto-guess prefix, this needs updating.
free_space = get_free_space_in_dir(tmp_path, unit=True)
assert free_space > 0 and free_space.unit == _u.byte
free_space = get_free_space_in_dir(tmp_path, unit=_u.Mbit)
assert free_space > 0 and free_space.unit == _u.Mbit
def test_download_file_bogus_settings(invalid_urls, temp_cache):
u = next(invalid_urls)
with pytest.raises(KeyError):
download_file(u, sources=[])
def test_download_file_local_directory(tmp_path):
"""Make sure we get a URLError rather than OSError even if it's a
local directory."""
with pytest.raises(urllib.request.URLError):
download_file(url_to(tmp_path))
def test_download_file_schedules_deletion(valid_urls):
u, c = next(valid_urls)
f = download_file(u)
assert f in _tempfilestodel
# how to test deletion actually occurs?
def test_clear_download_cache_refuses_to_delete_outside_the_cache(tmp_path):
fn = str(tmp_path / "file")
with open(fn, "w") as f:
f.write("content")
assert os.path.exists(fn)
with pytest.raises(RuntimeError):
clear_download_cache(fn)
assert os.path.exists(fn)
def test_check_download_cache_finds_bogus_entries(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf = os.path.abspath(os.path.join(dldir, "bogus"))
with open(bf, "w") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_finds_bogus_subentries(temp_cache, valid_urls):
u, c = next(valid_urls)
f = download_file(u, cache=True)
bf = os.path.abspath(os.path.join(os.path.dirname(f), "bogus"))
with open(bf, "w") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_cleanup(temp_cache, valid_urls):
u, c = next(valid_urls)
fn = download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf1 = os.path.abspath(os.path.join(dldir, "bogus1"))
with open(bf1, "w") as f:
f.write("bogus file that exists")
bf2 = os.path.abspath(os.path.join(os.path.dirname(fn), "bogus2"))
with open(bf2, "w") as f:
f.write("other bogus file that exists")
bf3 = os.path.abspath(os.path.join(dldir, "contents"))
with open(bf3, "w") as f:
f.write("awkwardly-named bogus file that exists")
    u2, c2 = next(valid_urls)
    f2 = download_file(u2, cache=True)
os.unlink(f2)
bf4 = os.path.dirname(f2)
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert set(e.value.bad_files) == {bf1, bf2, bf3, bf4}
for bf in e.value.bad_files:
clear_download_cache(bf)
# download cache will be checked on exit
def test_download_cache_update_doesnt_damage_cache(temp_cache, valid_urls):
u, _ = next(valid_urls)
download_file(u, cache=True)
download_file(u, cache="update")
def test_cache_dir_is_actually_a_file(tmp_path, valid_urls):
"""Ensure that bogus cache settings are handled sensibly.
Because the user can specify the cache location in a config file, and
because they might try to deduce the location by looking around at what's
    in their directory tree, and because the cache directory is actually several
tree levels down from the directory set in the config file, it's important
to check what happens if each of the steps in the path is wrong somehow.
"""
def check_quietly_ignores_bogus_cache():
"""We want a broken cache to produce a warning but then astropy should
act like there isn't a cache.
"""
with pytest.warns(CacheMissingWarning):
assert not get_cached_urls()
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache("http://www.example.com/")
with pytest.warns(CacheMissingWarning):
assert not cache_contents()
with pytest.warns(CacheMissingWarning):
u, c = next(valid_urls)
r = download_file(u, cache=True)
assert get_file_contents(r) == c
# check the filename r appears in a warning message?
# check r is added to the delete_at_exit list?
# in fact should there be testing of the delete_at_exit mechanism,
# as far as that is possible?
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache(u)
with pytest.warns(CacheMissingWarning):
with pytest.raises(OSError):
check_download_cache()
dldir = _get_download_cache_loc()
# set_temp_cache acts weird if it is pointed at a file (see below)
# but we want to see what happens when the cache is pointed
# at a file instead of a directory, so make a directory we can
# replace later.
fn = tmp_path / "file"
ct = "contents\n"
os.mkdir(fn)
with paths.set_temp_cache(fn):
shutil.rmtree(fn)
with open(fn, "w") as f:
f.write(ct)
with pytest.raises(OSError):
paths.get_cache_dir()
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(fn) == ct, "File should not be harmed."
# See what happens when set_temp_cache is pointed at a file
with pytest.raises(OSError):
with paths.set_temp_cache(fn):
pass
assert dldir == _get_download_cache_loc()
assert get_file_contents(str(fn)) == ct
# Now the cache directory is normal but the subdirectory it wants
# to make is a file
cd = tmp_path / "astropy"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto one level deeper
os.makedirs(cd)
cd = tmp_path / "astropy" / "download"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto another level deeper
os.makedirs(cd)
cd = tmp_path / "astropy" / "download" / "url"
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmp_path):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
def test_get_fileobj_str(a_file):
fn, c = a_file
with get_readable_fileobj(str(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_localpath(a_file):
fn, c = a_file
with get_readable_fileobj(py.path.local(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_pathlib(a_file):
fn, c = a_file
with get_readable_fileobj(pathlib.Path(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_binary(a_binary_file):
fn, c = a_binary_file
with get_readable_fileobj(fn, encoding="binary") as rf:
assert rf.read() == c
def test_get_fileobj_already_open_text(a_file):
fn, c = a_file
with open(fn) as f:
with get_readable_fileobj(f) as rf:
with pytest.raises(TypeError):
rf.read()
def test_get_fileobj_already_open_binary(a_file):
fn, c = a_file
with open(fn, "rb") as f:
with get_readable_fileobj(f) as rf:
assert rf.read() == c
def test_get_fileobj_binary_already_open_binary(a_binary_file):
fn, c = a_binary_file
with open(fn, "rb") as f:
with get_readable_fileobj(f, encoding="binary") as rf:
assert rf.read() == c
def test_cache_contents_not_writable(temp_cache, valid_urls):
c = cache_contents()
with pytest.raises(TypeError):
c["foo"] = 7
u, _ = next(valid_urls)
download_file(u, cache=True)
c = cache_contents()
assert u in c
with pytest.raises(TypeError):
c["foo"] = 7
def test_cache_relocatable(tmp_path, valid_urls):
u, c = next(valid_urls)
d1 = tmp_path / "1"
d2 = tmp_path / "2"
os.mkdir(d1)
with paths.set_temp_cache(d1):
p1 = download_file(u, cache=True)
assert is_url_in_cache(u)
assert get_file_contents(p1) == c
shutil.copytree(d1, d2)
clear_download_cache()
with paths.set_temp_cache(d2):
assert is_url_in_cache(u)
p2 = download_file(u, cache=True)
assert p1 != p2
assert os.path.exists(p2)
clear_download_cache(p2)
check_download_cache()
def test_get_readable_fileobj_cleans_up_temporary_files(tmp_path, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
url = url_to(TESTLOCAL)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, "tempdir", str(tmp_path))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url) as f:
f.read()
# Get listing of files in temporary directory
tempdir_listing = list(tmp_path.iterdir())
    # Assert that the temporary directory is empty after the
    # get_readable_fileobj() context manager has finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(TESTLOCAL)
with get_readable_fileobj(fpath) as f:
assert (
f.read().rstrip()
== "This file is used in the test_local_data_* testing functions\nCONTENT"
)
def test_nested_get_readable_fileobj():
"""Ensure fileobj state is as expected when get_readable_fileobj()
is called inside another get_readable_fileobj().
"""
with get_readable_fileobj(TESTLOCAL, encoding="binary") as fileobj:
with get_readable_fileobj(fileobj, encoding="UTF-8") as fileobj2:
fileobj2.seek(1)
fileobj.seek(1)
# Theoretically, fileobj2 should be closed already here but it is not.
# See https://github.com/astropy/astropy/pull/8675.
# UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.
# assert fileobj2.closed
assert fileobj.closed and fileobj2.closed
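# The next test fakes the URL opener so the reported Content-Length header and
# the actual body length can be set independently, exercising download_file's
# size verification in both directions: reporting more than is delivered
# raises ContentTooShortError, reporting less raises URLError, and a missing
# header is accepted as-is.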
def test_download_file_wrong_size(monkeypatch):
@contextlib.contextmanager
def mockurl(remote_url, timeout=None):
yield MockURL()
def mockurl_builder(*args, tlscontext=None, **kwargs):
mock_opener = type("MockOpener", (object,), {})()
mock_opener.open = mockurl
return mock_opener
class MockURL:
def __init__(self):
self.reader = io.BytesIO(b"a" * real_length)
def info(self):
return {"Content-Length": str(report_length)}
def read(self, length=None):
return self.reader.read(length)
monkeypatch.setattr(astropy.utils.data, "_build_urlopener", mockurl_builder)
with pytest.raises(urllib.error.ContentTooShortError):
report_length = 1024
real_length = 1023
download_file(TESTURL, cache=False)
with pytest.raises(urllib.error.URLError):
report_length = 1023
real_length = 1024
download_file(TESTURL, cache=False)
report_length = 1023
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
report_length = None
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
def test_can_make_directories_readonly(tmp_path):
try:
with readonly_dir(tmp_path):
assert is_dir_readonly(tmp_path)
except AssertionError:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip(
"We are root, we can't make a directory un-writable with chmod."
)
elif platform.system() == "Windows":
pytest.skip(
"It seems we can't make a driectory un-writable under Windows "
"with chmod, in spite of the documentation."
)
else:
raise
def test_can_make_files_readonly(tmp_path):
fn = tmp_path / "test"
c = "contents\n"
with open(fn, "w") as f:
f.write(c)
with readonly_dir(tmp_path):
try:
with open(fn, "w+") as f:
f.write("more contents\n")
except PermissionError:
pass
else:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip("We are root, we can't make a file un-writable with chmod.")
assert get_file_contents(fn) == c
def test_read_cache_readonly(readonly_cache):
assert cache_contents() == readonly_cache
def test_download_file_cache_readonly(readonly_cache):
for u in readonly_cache:
f = download_file(u, cache=True)
assert f == readonly_cache[u]
def test_import_file_cache_readonly(readonly_cache, tmp_path):
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_readonly_cache_miss(readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert get_file_contents(f) == c
assert not is_url_in_cache(u)
def test_download_file_cache_readonly_update(readonly_cache):
for u in readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != readonly_cache[u]
assert compute_hash(f) == compute_hash(readonly_cache[u])
def test_check_download_cache_works_if_readonly(readonly_cache):
check_download_cache()
# On Windows I can't make directories readonly. On CircleCI I can't make
# anything readonly because the test suite runs as root. So on those platforms
# none of the "real" tests above can be run. I can use monkeypatch to trigger
# the readonly code paths, see the "fake" versions of the tests below, but I
# don't totally trust those to completely explore what happens either, so we
# have both. I couldn't see an easy way to parameterize over fixtures and share
# tests.
def test_read_cache_fake_readonly(fake_readonly_cache):
assert cache_contents() == fake_readonly_cache
def test_download_file_cache_fake_readonly(fake_readonly_cache):
for u in fake_readonly_cache:
f = download_file(u, cache=True)
assert f == fake_readonly_cache[u]
def test_mkdtemp_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
tempfile.mkdtemp()
def test_TD_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
with TemporaryDirectory():
pass
def test_import_file_cache_fake_readonly(fake_readonly_cache, tmp_path):
filename = tmp_path / "test-file"
content = "Some text or other"
url = "http://example.com/"
with open(filename, "w") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_fake_readonly_cache_miss(fake_readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert not is_url_in_cache(u)
assert get_file_contents(f) == c
def test_download_file_cache_fake_readonly_update(fake_readonly_cache):
for u in fake_readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != fake_readonly_cache[u]
assert compute_hash(f) == compute_hash(fake_readonly_cache[u])
def test_check_download_cache_works_if_fake_readonly(fake_readonly_cache):
check_download_cache()
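# The cache is partitioned by ``pkgname`` (default "astropy"): entries stored
# under one package name must be invisible to every other, for lookup, size
# accounting, clearing, and export/import alike.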
def test_pkgname_isolation(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True, pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() < cache_total_size(pkgname=a)
for u, _ in islice(valid_urls, FEW + 1):
download_file(u, cache=True)
assert len(get_cached_urls()) == FEW + 1
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() > cache_total_size(pkgname=a)
assert set(get_cached_urls()) == set(cache_contents().keys())
assert set(get_cached_urls(pkgname=a)) == set(cache_contents(pkgname=a).keys())
for i in get_cached_urls():
assert is_url_in_cache(i)
assert not is_url_in_cache(i, pkgname=a)
for i in get_cached_urls(pkgname=a):
assert not is_url_in_cache(i)
assert is_url_in_cache(i, pkgname=a)
# FIXME: need to break a cache to test whether we check the right one
check_download_cache()
check_download_cache(pkgname=a)
# FIXME: check that cache='update' works
u = get_cached_urls()[0]
with pytest.raises(KeyError):
download_file(u, cache=True, sources=[], pkgname=a)
clear_download_cache(u, pkgname=a)
assert len(get_cached_urls()) == FEW + 1, "wrong pkgname should do nothing"
assert len(get_cached_urls(pkgname=a)) == FEW, "wrong pkgname should do nothing"
f = download_file(u, sources=[], cache=True)
with pytest.raises(RuntimeError):
clear_download_cache(f, pkgname=a)
ua = get_cached_urls(pkgname=a)[0]
with pytest.raises(KeyError):
download_file(ua, cache=True, sources=[])
fa = download_file(ua, sources=[], cache=True, pkgname=a)
with pytest.raises(RuntimeError):
clear_download_cache(fa)
clear_download_cache(ua, pkgname=a)
assert len(get_cached_urls()) == FEW + 1
assert len(get_cached_urls(pkgname=a)) == FEW - 1
clear_download_cache(u)
assert len(get_cached_urls()) == FEW
assert len(get_cached_urls(pkgname=a)) == FEW - 1
clear_download_cache(pkgname=a)
assert len(get_cached_urls()) == FEW
assert not get_cached_urls(pkgname=a)
clear_download_cache()
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
def test_transport_cache_via_zip(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f, pkgname=a)
check_download_cache()
check_download_cache(pkgname=a)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
cca = cache_contents(pkgname=a)
for k, v in cache_contents().items():
assert v != cca[k]
assert get_file_contents(v) == get_file_contents(cca[k])
clear_download_cache()
with io.BytesIO() as f:
export_download_cache(f, pkgname=a)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
def test_download_parallel_respects_pkgname(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
download_files_in_parallel([u for (u, c) in islice(valid_urls, FEW)], pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
@pytest.mark.skipif(
not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.",
)
def test_removal_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
clear_download_cache(u)
assert not is_url_in_cache(u)
check_download_cache()
@pytest.mark.skipif(
not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.",
)
def test_update_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
f = download_file(u, cache="update", sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert is_url_in_cache(u)
def test_removal_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
        # This platform can remove in-use files, so simulate the Windows failure.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
clear_download_cache(u)
def test_update_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
        # This platform can remove in-use files, so simulate the Windows failure.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
f = download_file(u, cache="update", sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_no_allow_internet(temp_cache, valid_urls):
u, c = next(valid_urls)
with conf.set_temp("allow_internet", False):
with pytest.raises(urllib.error.URLError):
download_file(u)
assert not is_url_in_cache(u)
with pytest.raises(urllib.error.URLError):
# This will trigger the remote data error if it's allowed to touch the internet
download_file(TESTURL)
def test_clear_download_cache_not_too_aggressive(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bad_filename = os.path.join(dldir, "contents")
assert is_url_in_cache(u)
clear_download_cache(bad_filename)
assert is_url_in_cache(u)
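# Layout assumed by the deletion variants below: each entry lives in its own
# directory (named from a hash of the URL) holding a "contents" file and a
# "url" file; clear_download_cache accepts any of those paths, the directory
# itself (with or without a trailing slash), or the hash of the contents.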
def test_clear_download_cache_variants(temp_cache, valid_urls):
# deletion by contents filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(f)
assert not is_url_in_cache(u)
# deletion by url filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.join(os.path.dirname(f), "url"))
assert not is_url_in_cache(u)
# deletion by hash directory name
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f))
assert not is_url_in_cache(u)
# deletion by directory name with trailing slash
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f) + "/")
assert not is_url_in_cache(u)
# deletion by hash of file contents
u, c = next(valid_urls)
f = download_file(u, cache=True)
h = compute_hash(f)
clear_download_cache(h)
assert not is_url_in_cache(u)
@pytest.mark.skipif(
CI and os.environ.get("IS_CRON", "false") == "false",
reason="Flaky/too much external traffic for regular CI",
)
@pytest.mark.remote_data
def test_ftp_tls_auto(temp_cache):
"""Test that download automatically enables TLS/SSL when required"""
url = "ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.daily"
download_file(url)
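# A lone trailing slash should not create a distinct cache entry: the test
# below checks that the slashed and slashless forms of a URL alias each other
# for download, lookup, and clearing.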
@pytest.mark.parametrize("base", ["http://example.com", "https://example.com"])
def test_url_trailing_slash(temp_cache, valid_urls, base):
slash = base + "/"
no_slash = base
u, c = next(valid_urls)
download_file(slash, cache=True, sources=[u])
assert is_url_in_cache(no_slash)
download_file(no_slash, cache=True, sources=[])
clear_download_cache(no_slash)
assert not is_url_in_cache(no_slash)
assert not is_url_in_cache(slash)
download_file(no_slash, cache=True, sources=[u])
# see if implicit check_download_cache squawks
def test_empty_url(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file("file://", cache=True, sources=[u])
assert not is_url_in_cache("file:///")
@pytest.mark.remote_data
def test_download_ftp_file_properly_handles_socket_error():
faulty_url = "ftp://anonymous:mail%40astropy.org@nonexisting/pub/products/iers/finals2000A.all"
with pytest.raises(urllib.error.URLError) as excinfo:
download_file(faulty_url)
errmsg = excinfo.exconly()
found_msg = False
possible_msgs = [
"Name or service not known",
"nodename nor servname provided, or not known",
"getaddrinfo failed",
"Temporary failure in name resolution",
"No address associated with hostname",
]
for cur_msg in possible_msgs:
if cur_msg in errmsg:
found_msg = True
break
assert found_msg, f'Got {errmsg}, expected one of these: {",".join(possible_msgs)}'
@pytest.mark.parametrize(
("s", "ans"),
[
("http://googlecom", True),
("https://google.com", True),
("ftp://google.com", True),
("sftp://google.com", True),
("ssh://google.com", True),
("file:///c:/path/to/the%20file.txt", True),
("google.com", False),
("C:\\\\path\\\\file.docx", False),
("data://file", False),
],
)
def test_string_is_url_check(s, ans):
assert is_url(s) is ans
|
bfec6bea2fcf59b6ada83c2b18db1dfc3a7c2ea13f205485625ac4f971ea65da | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import sys
import pytest
from astropy import units as u
from astropy.utils import console
from . import test_progress_bar_func
class FakeTTY(io.StringIO):
"""IOStream that fakes a TTY; provide an encoding to emulate an output
stream with a specific encoding.
"""
def __new__(cls, encoding=None):
# Return a new subclass of FakeTTY with the requested encoding
if encoding is None:
return super().__new__(cls)
cls = type(encoding.title() + cls.__name__, (cls,), {"encoding": encoding})
return cls.__new__(cls)
def __init__(self, encoding=None):
super().__init__()
def write(self, s):
if isinstance(s, bytes):
# Just allow this case to work
s = s.decode("latin-1")
elif self.encoding is not None:
s.encode(self.encoding)
return super().write(s)
def isatty(self):
return True
def test_fake_tty():
# First test without a specified encoding; we should be able to write
# arbitrary unicode strings
f1 = FakeTTY()
assert f1.isatty()
f1.write("☃")
assert f1.getvalue() == "☃"
# Now test an ASCII-only TTY--it should raise a UnicodeEncodeError when
# trying to write a string containing non-ASCII characters
f2 = FakeTTY("ascii")
assert f2.isatty()
assert f2.__class__.__name__ == "AsciiFakeTTY"
assert pytest.raises(UnicodeEncodeError, f2.write, "☃")
assert f2.getvalue() == ""
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Cannot test on Windows")
def test_color_text():
assert console._color_text("foo", "green") == "\033[0;32mfoo\033[0m"
def test_color_print():
# This stuff is hard to test, at least smoke test it
console.color_print("foo", "green")
console.color_print("foo", "green", "bar", "red")
def test_color_print2():
# Test that this automatically detects that io.StringIO is
# not a tty
stream = io.StringIO()
console.color_print("foo", "green", file=stream)
assert stream.getvalue() == "foo\n"
stream = io.StringIO()
console.color_print("foo", "green", "bar", "red", "baz", file=stream)
assert stream.getvalue() == "foobarbaz\n"
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Cannot test on Windows")
def test_color_print3():
# Test that this thinks the FakeTTY is a tty and applies colors.
stream = FakeTTY()
console.color_print("foo", "green", file=stream)
assert stream.getvalue() == "\x1b[0;32mfoo\x1b[0m\n"
stream = FakeTTY()
console.color_print("foo", "green", "bar", "red", "baz", file=stream)
assert stream.getvalue() == "\x1b[0;32mfoo\x1b[0m\x1b[0;31mbar\x1b[0mbaz\n"
def test_color_print_unicode():
console.color_print("überbær", "red")
def test_color_print_invalid_color():
console.color_print("foo", "unknown")
def test_spinner_non_unicode_console():
"""Regression test for #1760
    Ensures that the spinner can go into fallback mode when using the
unicode spinner on a terminal whose default encoding cannot encode the
unicode characters.
"""
stream = FakeTTY("ascii")
chars = console.Spinner._default_unicode_chars
with console.Spinner("Reticulating splines", file=stream, chars=chars) as s:
next(s)
def test_progress_bar():
# This stuff is hard to test, at least smoke test it
with console.ProgressBar(50) as bar:
for i in range(50):
bar.update()
def test_progress_bar2():
for x in console.ProgressBar(range(50)):
pass
def test_progress_bar3():
def do_nothing(*args, **kwargs):
pass
console.ProgressBar.map(do_nothing, range(50))
def test_zero_progress_bar():
with console.ProgressBar(0) as bar:
pass
def test_progress_bar_as_generator():
sum = 0
for x in console.ProgressBar(range(50)):
sum += x
assert sum == 1225
sum = 0
for x in console.ProgressBar(50):
sum += x
assert sum == 1225
def test_progress_bar_map():
items = list(range(100))
result = console.ProgressBar.map(
test_progress_bar_func.func, items, step=10, multiprocess=True
)
assert items == result
result1 = console.ProgressBar.map(
test_progress_bar_func.func, items, step=10, multiprocess=2
)
assert items == result1
@pytest.mark.parametrize(
("seconds", "string"),
[
(864088, " 1w 3d"),
(187213, " 2d 4h"),
(3905, " 1h 5m"),
(64, " 1m 4s"),
(15, " 15s"),
(2, " 2s"),
],
)
def test_human_time(seconds, string):
human_time = console.human_time(seconds)
assert human_time == string
@pytest.mark.parametrize(
("size", "string"),
[
(8640882, "8.6M"),
(187213, "187k"),
(3905, "3.9k"),
(64, " 64 "),
(2, " 2 "),
(10 * u.GB, " 10G"),
],
)
def test_human_file_size(size, string):
human_time = console.human_file_size(size)
assert human_time == string
@pytest.mark.parametrize("size", (50 * u.km, 100 * u.g))
def test_bad_human_file_size(size):
assert pytest.raises(u.UnitConversionError, console.human_file_size, size)
|
40ae4c69ea72c5a4d49463f1b9f72498ed1d440518a48b7a66e16619787a2890 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# namedtuple is needed for find_mod_objs so it can have a non-local module
from collections import namedtuple
from unittest import mock
import pytest
import yaml
from astropy.utils import introspection
from astropy.utils.introspection import find_current_module, find_mod_objs, minversion
def test_pkg_finder():
"""
Tests that the `find_current_module` function works. Note that
this also implicitly tests compat.misc._patched_getmodule
"""
mod1 = "astropy.utils.introspection"
mod2 = "astropy.utils.tests.test_introspection"
mod3 = "astropy.utils.tests.test_introspection"
assert find_current_module(0).__name__ == mod1
assert find_current_module(1).__name__ == mod2
assert find_current_module(0, True).__name__ == mod3
def test_find_current_mod():
from sys import getrecursionlimit
thismodnm = __name__
assert find_current_module(0) is introspection
assert find_current_module(1).__name__ == thismodnm
assert find_current_module(getrecursionlimit() + 1) is None
assert find_current_module(0, True).__name__ == thismodnm
assert find_current_module(0, [introspection]).__name__ == thismodnm
assert find_current_module(0, ["astropy.utils.introspection"]).__name__ == thismodnm
with pytest.raises(ImportError):
find_current_module(0, ["faddfdsasewrweriopunjlfiurrhujnkflgwhu"])
def test_find_mod_objs():
lnms, fqns, objs = find_mod_objs("astropy")
# this import is after the above call intentionally to make sure
# find_mod_objs properly imports astropy on its own
import astropy
# just check for astropy.test ... other things might be added, so we
# shouldn't check that it's the only thing
assert "test" in lnms
assert astropy.test in objs
lnms, fqns, objs = find_mod_objs(__name__, onlylocals=False)
assert "namedtuple" in lnms
assert "collections.namedtuple" in fqns
assert namedtuple in objs
lnms, fqns, objs = find_mod_objs(__name__, onlylocals=True)
assert "namedtuple" not in lnms
assert "collections.namedtuple" not in fqns
assert namedtuple not in objs
def test_minversion():
import numpy
good_versions = ["1.16", "1.16.1", "1.16.0.dev", "1.16dev"]
bad_versions = ["100000", "100000.2rc1"]
for version in good_versions:
assert minversion(numpy, version)
assert minversion("numpy", version)
for version in bad_versions:
assert not minversion(numpy, version)
assert not minversion("numpy", version)
assert minversion(yaml, "3.1")
assert minversion("yaml", "3.1")
def test_find_current_module_bundle():
"""
Tests that the `find_current_module` function would work if used inside
an application bundle. Since we can't test this directly, we test what
would happen if inspect.getmodule returned `None`, which is what happens
inside PyInstaller and py2app bundles.
"""
with mock.patch("inspect.getmodule", return_value=None):
mod1 = "astropy.utils.introspection"
mod2 = "astropy.utils.tests.test_introspection"
mod3 = "astropy.utils.tests.test_introspection"
assert find_current_module(0).__name__ == mod1
assert find_current_module(1).__name__ == mod2
assert find_current_module(0, True).__name__ == mod3
|
b67700da1702e2b118757b7f391b3789047aaae29a0870f20ecfb4da1868a8f9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.utils import collections
def test_homogeneous_list():
l = collections.HomogeneousList(int)
with pytest.raises(TypeError):
l.append(5.0)
def test_homogeneous_list2():
l = collections.HomogeneousList(int)
with pytest.raises(TypeError):
l.extend([5.0])
def test_homogeneous_list3():
l = collections.HomogeneousList(int)
l.append(5)
assert l == [5]
def test_homogeneous_list4():
l = collections.HomogeneousList(int)
l.extend([5])
assert l == [5]
def test_homogeneous_list5():
l = collections.HomogeneousList(int, [1, 2, 3])
with pytest.raises(TypeError):
l[1] = 5.0
def test_homogeneous_list_setitem_works():
l = collections.HomogeneousList(int, [1, 2, 3])
l[1] = 5
assert l == [1, 5, 3]
def test_homogeneous_list_setitem_works_with_slice():
l = collections.HomogeneousList(int, [1, 2, 3])
l[0:1] = [10, 20, 30]
assert l == [10, 20, 30, 2, 3]
l[:] = [5, 4, 3]
assert l == [5, 4, 3]
l[::2] = [2, 1]
assert l == [2, 4, 1]
def test_homogeneous_list_init_got_invalid_type():
with pytest.raises(TypeError):
collections.HomogeneousList(int, [1, 2.0, 3])
def test_homogeneous_list_works_with_generators():
hl = collections.HomogeneousList(int, (i for i in range(3)))
assert hl == [0, 1, 2]
hl = collections.HomogeneousList(int)
hl.extend(i for i in range(3))
assert hl == [0, 1, 2]
hl = collections.HomogeneousList(int)
hl[0:1] = (i for i in range(3))
assert hl == [0, 1, 2]
hl = collections.HomogeneousList(int)
hl += (i for i in range(3))
assert hl == [0, 1, 2]
|
9850f41a66e002f9da35c2eedbd1155e3ca3cdf4ee6872db1f6da41f023f7d64 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import concurrent.futures
import inspect
import pickle
import pytest
from astropy.utils.decorators import (
classproperty,
deprecated,
deprecated_attribute,
deprecated_renamed_argument,
format_doc,
lazyproperty,
sharedmethod,
)
from astropy.utils.exceptions import (
AstropyDeprecationWarning,
AstropyPendingDeprecationWarning,
AstropyUserWarning,
)
class NewDeprecationWarning(AstropyDeprecationWarning):
"""
New Warning subclass to be used to test the deprecated decorator's
``warning_type`` parameter.
"""
def test_deprecated_attribute():
class DummyClass:
def __init__(self):
self.other = [42]
self._foo = 42
self._bar = 4242
self._message = "42"
self._pending = {42}
foo = deprecated_attribute("foo", "0.2")
bar = deprecated_attribute("bar", "0.2", warning_type=NewDeprecationWarning)
alternative = deprecated_attribute("alternative", "0.2", alternative="other")
message = deprecated_attribute("message", "0.2", message="MSG")
pending = deprecated_attribute("pending", "0.2", pending=True)
dummy = DummyClass()
default_msg = (
r"^The {} attribute is deprecated and may be removed in a future version\.$"
)
# Test getters and setters.
msg = default_msg.format("foo")
with pytest.warns(AstropyDeprecationWarning, match=msg) as w:
assert dummy.foo == 42
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning, match=msg):
dummy.foo = 24
# Handling ``_foo`` should not cause deprecation warnings.
assert dummy._foo == 24
dummy._foo = 13
assert dummy._foo == 13
msg = default_msg.format("bar")
with pytest.warns(NewDeprecationWarning, match=msg) as w:
assert dummy.bar == 4242
assert len(w) == 1
with pytest.warns(NewDeprecationWarning, match=msg):
dummy.bar = 2424
with pytest.warns(AstropyDeprecationWarning, match="^MSG$"):
assert dummy.message == "42"
with pytest.warns(AstropyDeprecationWarning, match="^MSG$"):
dummy.message = "24"
msg = default_msg.format("alternative")[:-1] + r"\n Use other instead\.$"
with pytest.warns(AstropyDeprecationWarning, match=msg):
assert dummy.alternative == [42]
with pytest.warns(AstropyDeprecationWarning, match=msg):
dummy.alternative = [24]
# ``other`` is not deprecated.
assert dummy.other == [24]
dummy.other = [31]
msg = r"^The pending attribute will be deprecated in a future version\.$"
with pytest.warns(AstropyPendingDeprecationWarning, match=msg):
assert dummy.pending == {42}
with pytest.warns(AstropyPendingDeprecationWarning, match=msg):
dummy.pending = {24}
# This needs to be defined outside of the test function, because we
# want to try to pickle it.
@deprecated("100.0")
class TA:
"""
This is the class docstring.
"""
def __init__(self):
"""
This is the __init__ docstring
"""
pass
class TMeta(type):
metaclass_attr = 1
@deprecated("100.0")
class TB(metaclass=TMeta):
pass
@deprecated("100.0", warning_type=NewDeprecationWarning)
class TC:
"""
This class has the custom warning.
"""
pass
def test_deprecated_class():
orig_A = TA.__bases__[0]
    # The only things that should be different about the new class
    # are __doc__, __init__, __bases__, __dict__, __subclasshook__,
    # and __init_subclass__.
for x in dir(orig_A):
if x not in (
"__doc__",
"__init__",
"__bases__",
"__dict__",
"__subclasshook__",
"__init_subclass__",
):
assert getattr(TA, x) == getattr(orig_A, x)
with pytest.warns(AstropyDeprecationWarning) as w:
TA()
assert len(w) == 1
if TA.__doc__ is not None:
assert "function" not in TA.__doc__
assert "deprecated" in TA.__doc__
assert "function" not in TA.__init__.__doc__
assert "deprecated" in TA.__init__.__doc__
# Make sure the object is picklable
pickle.dumps(TA)
with pytest.warns(NewDeprecationWarning) as w:
TC()
assert len(w) == 1
def test_deprecated_class_with_new_method():
"""
Test that a class with __new__ method still works even if it accepts
additional arguments.
This previously failed because the deprecated decorator would wrap objects
__init__ which takes no arguments.
"""
@deprecated("1.0")
class A:
def __new__(cls, a):
return super().__new__(cls)
# Creating an instance should work but raise a DeprecationWarning
with pytest.warns(AstropyDeprecationWarning) as w:
A(1)
assert len(w) == 1
@deprecated("1.0")
class B:
def __new__(cls, a):
return super().__new__(cls)
def __init__(self, a):
pass
# Creating an instance should work but raise a DeprecationWarning
with pytest.warns(AstropyDeprecationWarning) as w:
B(1)
assert len(w) == 1
def test_deprecated_class_with_super():
"""
Regression test for an issue where classes that used ``super()`` in their
``__init__`` did not actually call the correct class's ``__init__`` in the
MRO.
"""
@deprecated("100.0")
class TB:
def __init__(self, a, b):
super().__init__()
with pytest.warns(AstropyDeprecationWarning) as w:
TB(1, 2)
assert len(w) == 1
if TB.__doc__ is not None:
assert "function" not in TB.__doc__
assert "deprecated" in TB.__doc__
assert "function" not in TB.__init__.__doc__
assert "deprecated" in TB.__init__.__doc__
def test_deprecated_class_with_custom_metaclass():
"""
Regression test for an issue where deprecating a class with a metaclass
other than type did not restore the metaclass properly.
"""
with pytest.warns(AstropyDeprecationWarning) as w:
TB()
assert len(w) == 1
assert type(TB) is TMeta
assert TB.metaclass_attr == 1
def test_deprecated_static_and_classmethod():
"""
Regression test for issue introduced by
https://github.com/astropy/astropy/pull/2811 and mentioned also here:
https://github.com/astropy/astropy/pull/2580#issuecomment-51049969
where it appears that deprecated staticmethods didn't work on Python 2.6.
"""
class A:
"""Docstring"""
@deprecated("1.0")
@staticmethod
def B():
pass
@deprecated("1.0")
@classmethod
def C(cls):
pass
with pytest.warns(AstropyDeprecationWarning) as w:
A.B()
assert len(w) == 1
if A.__doc__ is not None:
assert "deprecated" in A.B.__doc__
with pytest.warns(AstropyDeprecationWarning) as w:
A.C()
assert len(w) == 1
if A.__doc__ is not None:
assert "deprecated" in A.C.__doc__
def test_deprecated_argument():
# Tests the decorator with function, method, staticmethod and classmethod.
class Test:
@classmethod
@deprecated_renamed_argument("clobber", "overwrite", "1.3")
def test1(cls, overwrite):
return overwrite
@staticmethod
@deprecated_renamed_argument("clobber", "overwrite", "1.3")
def test2(overwrite):
return overwrite
@deprecated_renamed_argument("clobber", "overwrite", "1.3")
def test3(self, overwrite):
return overwrite
@deprecated_renamed_argument(
"clobber", "overwrite", "1.3", warning_type=NewDeprecationWarning
)
def test4(self, overwrite):
return overwrite
@deprecated_renamed_argument("clobber", "overwrite", "1.3", relax=False)
def test1(overwrite):
return overwrite
for method in [Test().test1, Test().test2, Test().test3, Test().test4, test1]:
# As positional argument only
assert method(1) == 1
# As new keyword argument
assert method(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert method(clobber=1) == 1
assert len(w) == 1
assert "test_decorators.py" in str(w[0].filename)
if method.__name__ == "test4":
assert issubclass(w[0].category, NewDeprecationWarning)
# Using both. Both keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
method(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
method(1, clobber=2)
def test_deprecated_argument_custom_message():
@deprecated_renamed_argument("foo", "bar", "4.0", message="Custom msg")
def test(bar=0):
pass
with pytest.warns(AstropyDeprecationWarning, match="Custom msg"):
test(foo=0)
def test_deprecated_argument_in_kwargs():
# To rename an argument that is consumed by "kwargs" the "arg_in_kwargs"
# parameter is used.
@deprecated_renamed_argument("clobber", "overwrite", "1.3", arg_in_kwargs=True)
def test(**kwargs):
return kwargs["overwrite"]
# As positional argument only
with pytest.raises(TypeError):
test(1)
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert test(clobber=1) == 1
assert len(w) == 1
assert "test_decorators.py" in str(w[0].filename)
# Using both. Both keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
test(clobber=2, overwrite=1)
# One positional, one keyword
with pytest.raises(TypeError), pytest.warns(AstropyDeprecationWarning):
test(1, clobber=2)
def test_deprecated_argument_relaxed():
# Relax turns the TypeError if both old and new keyword are used into
# a warning.
@deprecated_renamed_argument("clobber", "overwrite", "1.3", relax=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
with pytest.warns(AstropyDeprecationWarning, match=r"1\.3") as w:
assert test(clobber=1) == 1
assert len(w) == 1
# Using both. Both keyword
with pytest.warns(AstropyUserWarning) as w:
assert test(clobber=2, overwrite=1) == 1
assert len(w) == 2
assert '"clobber" was deprecated' in str(w[0].message)
assert '"clobber" and "overwrite" keywords were set' in str(w[1].message)
# One positional, one keyword
with pytest.warns(AstropyUserWarning) as w:
assert test(1, clobber=2) == 1
assert len(w) == 2
assert '"clobber" was deprecated' in str(w[0].message)
assert '"clobber" and "overwrite" keywords were set' in str(w[1].message)
def test_deprecated_argument_pending():
# Relax turns the TypeError if both old and new keyword are used into
# a warning.
@deprecated_renamed_argument("clobber", "overwrite", "1.3", pending=True)
def test(overwrite):
return overwrite
# As positional argument only
assert test(1) == 1
# As new keyword argument
assert test(overwrite=1) == 1
# Using the deprecated name
assert test(clobber=1) == 1
# Using both. Both keyword
assert test(clobber=2, overwrite=1) == 1
# One positional, one keyword
assert test(1, clobber=2) == 1
def test_deprecated_argument_multi_deprecation():
@deprecated_renamed_argument(
["x", "y", "z"], ["a", "b", "c"], [1.3, 1.2, 1.3], relax=True
)
def test(a, b, c):
return a, b, c
with pytest.warns(AstropyDeprecationWarning) as w:
assert test(x=1, y=2, z=3) == (1, 2, 3)
assert len(w) == 3
# Make sure relax is valid for all arguments
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
assert len(w) == 4
def test_deprecated_argument_multi_deprecation_2():
@deprecated_renamed_argument(
["x", "y", "z"], ["a", "b", "c"], [1.3, 1.2, 1.3], relax=[True, True, False]
)
def test(a, b, c):
return a, b, c
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, b=3) == (1, 3, 3)
assert len(w) == 4
with pytest.warns(AstropyUserWarning) as w:
assert test(x=1, y=2, z=3, a=3) == (3, 2, 3)
assert len(w) == 4
with pytest.raises(TypeError), pytest.warns(AstropyUserWarning):
assert test(x=1, y=2, z=3, c=5) == (1, 2, 5)
def test_deprecated_argument_not_allowed_use():
# If the argument is supposed to be inside the kwargs one needs to set the
# arg_in_kwargs parameter. Without it it raises a TypeError.
with pytest.raises(TypeError):
@deprecated_renamed_argument("clobber", "overwrite", "1.3")
def test1(**kwargs):
return kwargs["overwrite"]
# Cannot replace "*args".
with pytest.raises(TypeError):
@deprecated_renamed_argument("overwrite", "args", "1.3")
def test2(*args):
return args
# Cannot replace "**kwargs".
with pytest.raises(TypeError):
@deprecated_renamed_argument("overwrite", "kwargs", "1.3")
def test3(**kwargs):
return kwargs
def test_deprecated_argument_remove():
@deprecated_renamed_argument("x", None, "2.0", alternative="astropy.y")
def test(dummy=11, x=3):
return dummy, x
with pytest.warns(AstropyDeprecationWarning, match=r"Use astropy\.y instead") as w:
assert test(x=1) == (11, 1)
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning) as w:
assert test(x=1, dummy=10) == (10, 1)
assert len(w) == 1
with pytest.warns(AstropyDeprecationWarning, match=r"Use astropy\.y instead"):
        assert test(121, 1) == (121, 1)
assert test() == (11, 3)
assert test(121) == (121, 3)
assert test(dummy=121) == (121, 3)
def test_sharedmethod_reuse_on_subclasses():
"""
Regression test for an issue where sharedmethod would bind to one class
for all time, causing the same method not to work properly on other
subclasses of that class.
It has the same problem when the same sharedmethod is called on different
instances of some class as well.
"""
class AMeta(type):
def foo(cls):
return cls.x
class A:
x = 3
def __init__(self, x):
self.x = x
@sharedmethod
def foo(self):
return self.x
a1 = A(1)
a2 = A(2)
assert a1.foo() == 1
assert a2.foo() == 2
# Similar test now, but for multiple subclasses using the same sharedmethod
# as a classmethod
assert A.foo() == 3
class B(A):
x = 5
assert B.foo() == 5
def test_classproperty_docstring():
"""
Tests that the docstring is set correctly on classproperties.
This failed previously due to a bug in Python that didn't always
set __doc__ properly on instances of property subclasses.
"""
class A:
# Inherits docstring from getter
@classproperty
def foo(cls):
"""The foo."""
return 1
assert A.__dict__["foo"].__doc__ == "The foo."
class B:
# Use doc passed to classproperty constructor
def _get_foo(cls):
return 1
foo = classproperty(_get_foo, doc="The foo.")
assert B.__dict__["foo"].__doc__ == "The foo."
@pytest.mark.slow
def test_classproperty_lazy_threadsafe(fast_thread_switching):
"""
Test that a class property with lazy=True is thread-safe.
"""
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
# This is testing for race conditions, so try many times in the
# hope that we'll get the timing right.
for p in range(10000):
class A:
@classproperty(lazy=True)
def foo(cls):
nonlocal calls
calls += 1
return object()
# Have all worker threads query in parallel
calls = 0
futures = [executor.submit(lambda: A.foo) for i in range(workers)]
# Check that only one call happened and they all received it
values = [future.result() for future in futures]
assert calls == 1
assert values[0] is not None
assert values == [values[0]] * workers
@pytest.mark.slow
def test_lazyproperty_threadsafe(fast_thread_switching):
"""
Test thread safety of lazyproperty.
"""
# This test is generally similar to test_classproperty_lazy_threadsafe
# above. See there for comments.
class A:
def __init__(self):
self.calls = 0
@lazyproperty
def foo(self):
self.calls += 1
return object()
workers = 8
with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
for p in range(10000):
a = A()
futures = [executor.submit(lambda: a.foo) for i in range(workers)]
values = [future.result() for future in futures]
assert a.calls == 1
assert a.foo is not None
assert values == [a.foo] * workers
def test_format_doc_stringInput_simple():
# Simple tests with string input
docstring_fail = ""
    # Raises a ValueError if the input is empty
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
docstring = "test"
# A first test that replaces an empty docstring
@format_doc(docstring)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == docstring
# Test that it replaces an existing docstring
@format_doc(docstring)
def testfunc_2():
"""not test"""
pass
assert inspect.getdoc(testfunc_2) == docstring
def test_format_doc_stringInput_format():
# Tests with string input and formatting
docstring = "yes {0} no {opt}"
    # Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc1():
pass
# Test that the formatting is done right
@format_doc(docstring, "/", opt="= life")
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == "yes / no = life"
# Test that we can include the original docstring
docstring2 = "yes {0} no {__doc__}"
@format_doc(docstring2, "/")
def testfunc3():
"""= 2 / 2 * life"""
pass
assert inspect.getdoc(testfunc3) == "yes / no = 2 / 2 * life"
def test_format_doc_objectInput_simple():
# Simple tests with object input
def docstring_fail():
pass
    # An input object that itself has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(docstring_fail)
def testfunc_fail():
pass
def docstring0():
"""test"""
pass
# A first test that replaces an empty docstring
@format_doc(docstring0)
def testfunc_1():
pass
assert inspect.getdoc(testfunc_1) == inspect.getdoc(docstring0)
# Test that it replaces an existing docstring
@format_doc(docstring0)
def testfunc_2():
"""not test"""
pass
assert inspect.getdoc(testfunc_2) == inspect.getdoc(docstring0)
def test_format_doc_objectInput_format():
# Tests with object input and formatting
def docstring():
"""test {0} test {opt}"""
pass
    # Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(docstring)
def testfunc_fail():
pass
# Test that the formatting is done right
@format_doc(docstring, "+", opt="= 2 * test")
def testfunc2():
pass
assert inspect.getdoc(testfunc2) == "test + test = 2 * test"
# Test that we can include the original docstring
def docstring2():
"""test {0} test {__doc__}"""
pass
@format_doc(docstring2, "+")
def testfunc3():
"""= 4 / 2 * test"""
pass
assert inspect.getdoc(testfunc3) == "test + test = 4 / 2 * test"
def test_format_doc_selfInput_simple():
# Simple tests with self input
    # Self input when the decorated function has no docstring raises an error
with pytest.raises(ValueError):
@format_doc(None)
def testfunc_fail():
pass
# Test that it keeps an existing docstring
@format_doc(None)
def testfunc_1():
"""not test"""
pass
assert inspect.getdoc(testfunc_1) == "not test"
def test_format_doc_selfInput_format():
# Tests with string input which is '__doc__' (special case) and formatting
    # Raises an IndexError if not given the format args and kwargs
with pytest.raises(IndexError):
@format_doc(None)
def testfunc_fail():
"""dum {0} dum {opt}"""
pass
# Test that the formatting is done right
@format_doc(None, "di", opt="da dum")
def testfunc1():
"""dum {0} dum {opt}"""
pass
assert inspect.getdoc(testfunc1) == "dum di dum da dum"
# Test that we cannot recursively insert the original documentation
@format_doc(None, "di")
def testfunc2():
"""dum {0} dum {__doc__}"""
pass
assert inspect.getdoc(testfunc2) == "dum di dum "
def test_format_doc_onMethod():
    # Check that the decorator also works on methods; to spice it up we
    # try a double decorator
docstring = "what we do {__doc__}"
class TestClass:
@format_doc(docstring)
@format_doc(None, "strange.")
def test_method(self):
"""is {0}"""
pass
assert inspect.getdoc(TestClass.test_method) == "what we do is strange."
def test_format_doc_onClass():
# Check if the decorator works on classes too
docstring = "what we do {__doc__} {0}{opt}"
@format_doc(docstring, "strange", opt=".")
class TestClass:
"""is"""
pass
assert inspect.getdoc(TestClass) == "what we do is strange."
|
c19b5aad4256158e94d6f256e37eab620814d7818b01801bb3ee730a4571b8b5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import traceback
import pytest
from astropy.utils.codegen import make_function_with_signature
def test_make_function_with_signature_lineno():
"""
    Tests that a function made with ``make_function_with_signature`` is given
the correct line number into the module it was created from (i.e. the line
``make_function_with_signature`` was called from).
"""
def crashy_function(*args, **kwargs):
1 / 0
# Make a wrapper around this function with the signature:
# crashy_function(a, b)
# Note: the signature is not really relevant to this test
wrapped = make_function_with_signature(crashy_function, ("a", "b"))
line = """
wrapped = make_function_with_signature(crashy_function, ('a', 'b'))
""".strip()
try:
wrapped(1, 2)
except Exception:
exc_cls, exc, tb = sys.exc_info()
assert exc_cls is ZeroDivisionError
# The *last* line in the traceback should be the 1 / 0 line in
# crashy_function; the next line up should be the line that the
        # make_function_with_signature call was on
tb_lines = traceback.format_tb(tb)
assert "1 / 0" in tb_lines[-1]
else:
pytest.fail("This should have caused an exception")
|
0f0c2d380cbd7c07431920c8c63f3157a33d0f42962aed76c14b57427f04b760 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in mask mixin class.
The design uses `Masked` as a factory class which automatically
generates new subclasses for any data class that is itself a
subclass of a predefined masked class, with `MaskedNDArray`
providing such a predefined class for `~numpy.ndarray`.
Generally, any new predefined class should override the
``from_unmasked(data, mask, copy=False)`` class method that
creates an instance from unmasked data and a mask, as well as
the ``unmasked`` property that returns just the data.
The `Masked` class itself provides a base ``mask`` property,
which can also be overridden if needed.
"""
import builtins
import numpy as np
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.data_info import ParentDtypeInfo
from astropy.utils.shapes import NDArrayShapeMethods
from .function_helpers import (
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
__all__ = ["Masked", "MaskedNDArray"]
get__doc__ = """Masked version of {0.__name__}.
Except for the ability to pass in a ``mask``, parameters are
as for `{0.__module__}.{0.__name__}`.
""".format
class Masked(NDArrayShapeMethods):
"""A scalar value or array of values with associated mask.
The resulting instance will take its exact type from whatever the
contents are, with the type generated on the fly as needed.
Parameters
----------
data : array-like
The data for which a mask is to be added. The result will be a
        subclass of the type of ``data``.
mask : array-like of bool, optional
The initial mask to assign. If not given, taken from the data.
copy : bool
Whether the data and mask should be copied. Default: `False`.
"""
_base_classes = {}
"""Explicitly defined masked classes keyed by their unmasked counterparts.
For subclasses of these unmasked classes, masked counterparts can be generated.
"""
_masked_classes = {}
"""Masked classes keyed by their unmasked data counterparts."""
def __new__(cls, *args, **kwargs):
if cls is Masked:
# Initializing with Masked itself means we're in "factory mode".
if not kwargs and len(args) == 1 and isinstance(args[0], type):
# Create a new masked class.
return cls._get_masked_cls(args[0])
else:
return cls._get_masked_instance(*args, **kwargs)
else:
# Otherwise we're a subclass and should just pass information on.
return super().__new__(cls, *args, **kwargs)
def __init_subclass__(cls, base_cls=None, data_cls=None, **kwargs):
"""Register a Masked subclass.
Parameters
----------
base_cls : type, optional
If given, it is taken to mean that ``cls`` can be used as
a base for masked versions of all subclasses of ``base_cls``,
so it is registered as such in ``_base_classes``.
data_cls : type, optional
            If given, ``cls`` will be registered as the masked version of
``data_cls``. Will set the private ``cls._data_cls`` attribute,
and auto-generate a docstring if not present already.
**kwargs
Passed on for possible further initialization by superclasses.
"""
if base_cls is not None:
Masked._base_classes[base_cls] = cls
if data_cls is not None:
cls._data_cls = data_cls
cls._masked_classes[data_cls] = cls
if cls.__doc__ is None:
cls.__doc__ = get__doc__(data_cls)
super().__init_subclass__(**kwargs)
# This base implementation just uses the class initializer.
# Subclasses can override this in case the class does not work
# with this signature, or to provide a faster implementation.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
"""Create an instance from unmasked data and a mask."""
return cls(data, mask=mask, copy=copy)
@classmethod
def _get_masked_instance(cls, data, mask=None, copy=False):
data, data_mask = cls._get_data_and_mask(data)
if mask is None:
mask = False if data_mask is None else data_mask
masked_cls = cls._get_masked_cls(data.__class__)
return masked_cls.from_unmasked(data, mask, copy)
@classmethod
def _get_masked_cls(cls, data_cls):
"""Get the masked wrapper for a given data class.
If the data class does not exist yet but is a subclass of any of the
registered base data classes, it is automatically generated
(except we skip `~numpy.ma.MaskedArray` subclasses, since then the
masking mechanisms would interfere).
"""
if issubclass(data_cls, (Masked, np.ma.MaskedArray)):
return data_cls
masked_cls = cls._masked_classes.get(data_cls)
if masked_cls is None:
# Walk through MRO and find closest base data class.
# Note: right now, will basically always be ndarray, but
# one could imagine needing some special care for one subclass,
# which would then get its own entry. E.g., if MaskedAngle
# defined something special, then MaskedLongitude should depend
# on it.
for mro_item in data_cls.__mro__:
base_cls = cls._base_classes.get(mro_item)
if base_cls is not None:
break
else:
# Just hope that MaskedNDArray can handle it.
# TODO: this covers the case where a user puts in a list or so,
# but for those one could just explicitly do something like
# _masked_classes[list] = MaskedNDArray.
return MaskedNDArray
# Create (and therefore register) new Masked subclass for the
# given data_cls.
masked_cls = type(
"Masked" + data_cls.__name__,
(data_cls, base_cls),
{},
data_cls=data_cls,
)
return masked_cls
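    # Illustrative sketch (not part of the original module; ``MyArray`` is a
    # hypothetical name): for an unregistered ndarray subclass, a masked
    # counterpart is generated on the fly and registered, so repeated
    # lookups return the same class:
    #
    #     >>> class MyArray(np.ndarray): pass
    #     >>> Masked(MyArray).__name__
    #     'MaskedMyArray'
    #     >>> Masked(MyArray) is Masked(MyArray)
    #     True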
@classmethod
def _get_data_and_mask(cls, data, allow_ma_masked=False):
"""Split data into unmasked and mask, if present.
Parameters
----------
data : array-like
Possibly masked item, judged by whether it has a ``mask`` attribute.
If so, checks for being an instance of `~astropy.utils.masked.Masked`
or `~numpy.ma.MaskedArray`, and gets unmasked data appropriately.
allow_ma_masked : bool, optional
Whether or not to process `~numpy.ma.masked`, i.e., an item that
implies no data but the presence of a mask.
Returns
-------
unmasked, mask : array-like
Unmasked will be `None` for `~numpy.ma.masked`.
Raises
------
ValueError
If `~numpy.ma.masked` is passed in and ``allow_ma_masked`` is not set.
"""
mask = getattr(data, "mask", None)
if mask is not None:
try:
data = data.unmasked
except AttributeError:
if not isinstance(data, np.ma.MaskedArray):
raise
if data is np.ma.masked:
if allow_ma_masked:
data = None
else:
raise ValueError("cannot handle np.ma.masked here.") from None
else:
data = data.data
return data, mask
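    # Illustrative sketch (not part of the original module): a plain numpy
    # MaskedArray is split into its data and mask, while an ordinary array
    # yields a mask of None:
    #
    #     >>> arr = np.ma.MaskedArray([1., 2.], mask=[True, False])
    #     >>> data, mask = Masked._get_data_and_mask(arr)
    #     >>> data
    #     array([1., 2.])
    #     >>> mask
    #     array([ True, False])
    #     >>> Masked._get_data_and_mask(np.arange(2))[1] is None
    #     True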
@classmethod
def _get_data_and_masks(cls, *args):
data_masks = [cls._get_data_and_mask(arg) for arg in args]
return (
tuple(data for data, _ in data_masks),
tuple(mask for _, mask in data_masks),
)
def _get_mask(self):
"""The mask.
        If set, replaces the original mask with whatever it is set with,
        using a view if no broadcasting or type conversion is required.
"""
return self._mask
def _set_mask(self, mask, copy=False):
self_dtype = getattr(self, "dtype", None)
mask_dtype = (
np.ma.make_mask_descr(self_dtype)
if self_dtype and self_dtype.names
else np.dtype("?")
)
ma = np.asanyarray(mask, dtype=mask_dtype)
if ma.shape != self.shape:
# This will fail (correctly) if not broadcastable.
self._mask = np.empty(self.shape, dtype=mask_dtype)
self._mask[...] = ma
elif ma is mask:
# Even if not copying use a view so that shape setting
# does not propagate.
self._mask = mask.copy() if copy else mask.view()
else:
self._mask = ma
mask = property(_get_mask, _set_mask)
# Note: subclass should generally override the unmasked property.
# This one assumes the unmasked data is stored in a private attribute.
@property
def unmasked(self):
"""The unmasked values.
See Also
--------
astropy.utils.masked.Masked.filled
"""
return self._unmasked
def filled(self, fill_value):
"""Get a copy of the underlying data, with masked values filled in.
Parameters
----------
fill_value : object
Value to replace masked values with.
See Also
--------
astropy.utils.masked.Masked.unmasked
"""
unmasked = self.unmasked.copy()
if self.mask.dtype.names:
np.ma.core._recursive_filled(unmasked, self.mask, fill_value)
else:
unmasked[self.mask] = fill_value
return unmasked
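    # Illustrative sketch (not part of the original module): masked entries
    # are replaced by the fill value and a plain array is returned:
    #
    #     >>> ma = Masked(np.array([1., 2.]), mask=[True, False])
    #     >>> ma.filled(0.)
    #     array([0., 2.])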
def _apply(self, method, *args, **kwargs):
# Required method for NDArrayShapeMethods, to help provide __getitem__
# and shape-changing methods.
if callable(method):
data = method(self.unmasked, *args, **kwargs)
mask = method(self.mask, *args, **kwargs)
else:
data = getattr(self.unmasked, method)(*args, **kwargs)
mask = getattr(self.mask, method)(*args, **kwargs)
result = self.from_unmasked(data, mask, copy=False)
if "info" in self.__dict__:
result.info = self.info
return result
def __setitem__(self, item, value):
value, mask = self._get_data_and_mask(value, allow_ma_masked=True)
if value is not None:
self.unmasked[item] = value
self.mask[item] = mask
class MaskedInfoBase:
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {
"fits": "null_value",
"ecsv": "null_value",
"hdf5": "data_mask",
"parquet": "data_mask",
None: "null_value",
}
class MaskedNDArrayInfo(MaskedInfoBase, ParentDtypeInfo):
"""
Container for meta information like name, description, format.
"""
# Add `serialize_method` attribute to the attrs that MaskedNDArrayInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. This is the same as for MaskedColumn.
attr_names = ParentDtypeInfo.attr_names | {"serialize_method"}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = "data"
def _represent_as_dict(self):
out = super()._represent_as_dict()
masked_array = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == "data_mask":
out["data"] = masked_array.unmasked
if np.any(masked_array.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out["mask"] = masked_array.mask
elif method == "null_value":
out["data"] = np.ma.MaskedArray(
masked_array.unmasked, mask=masked_array.mask
)
else:
raise ValueError(
'serialize method must be either "data_mask" or "null_value"'
)
return out
def _construct_from_dict(self, map):
# Override usual handling, since MaskedNDArray takes shape and buffer
# as input, which is less useful here.
# The map can contain either a MaskedColumn or a Column and a mask.
# Extract the mask for the former case.
map.setdefault("mask", getattr(map["data"], "mask", False))
return self._parent_cls.from_unmasked(**map)
class MaskedArraySubclassInfo(MaskedInfoBase):
"""Mixin class to create a subclasses such as MaskedQuantityInfo."""
# This is used below in __init_subclass__, which also inserts a
# 'serialize_method' attribute in attr_names.
def _represent_as_dict(self):
# Use the data_cls as the class name for serialization,
# so that we do not have to store all possible masked classes
# in astropy.table.serialize.__construct_mixin_classes.
out = super()._represent_as_dict()
data_cls = self._parent._data_cls
out.setdefault("__class__", data_cls.__module__ + "." + data_cls.__name__)
return out
def _comparison_method(op):
"""
Create a comparison operator for MaskedNDArray.
Needed since for string dtypes the base operators bypass __array_ufunc__
and hence return unmasked results.
"""
def _compare(self, other):
other_data, other_mask = self._get_data_and_mask(other)
result = getattr(self.unmasked, op)(other_data)
if result is NotImplemented:
return NotImplemented
mask = self.mask | (other_mask if other_mask is not None else False)
return self._masked_result(result, mask, None)
return _compare
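# Illustrative sketch (not part of the original module): comparisons on
# string dtypes go through the methods created above and thus keep the mask:
#
#     >>> import numpy as np
#     >>> from astropy.utils.masked import Masked
#     >>> s = Masked(np.array(["a", "b"]), mask=[True, False])
#     >>> (s == "a").mask.tolist()
#     [True, False]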
class MaskedIterator:
"""
Flat iterator object to iterate over Masked Arrays.
A `~astropy.utils.masked.MaskedIterator` iterator is returned by ``m.flat``
for any masked array ``m``. It allows iterating over the array as if it
were a 1-D array, either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
Notes
-----
The design of `~astropy.utils.masked.MaskedIterator` follows that of
`~numpy.ma.core.MaskedIterator`. It is not exported by the
`~astropy.utils.masked` module. Instead of instantiating directly,
use the ``flat`` method in the masked array instance.
"""
def __init__(self, m):
self._masked = m
self._dataiter = m.unmasked.flat
self._maskiter = m.mask.flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
mask = self._maskiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Masked array.
if not isinstance(out, np.ndarray):
out = out[...]
mask = mask[...]
return self._masked.from_unmasked(out, mask, copy=False)
def __setitem__(self, index, value):
data, mask = self._masked._get_data_and_mask(value, allow_ma_masked=True)
if data is not None:
self._dataiter[index] = data
self._maskiter[index] = mask
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)[...]
mask = next(self._maskiter)[...]
return self._masked.from_unmasked(out, mask, copy=False)
next = __next__
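# Illustrative sketch (not part of the original module): ``.flat`` yields
# masked scalars and supports assignment, mirroring `~numpy.ndarray.flat`:
#
#     >>> import numpy as np
#     >>> from astropy.utils.masked import Masked
#     >>> m = Masked(np.arange(4.).reshape(2, 2),
#     ...            mask=[[False, True], [False, False]])
#     >>> [bool(x.mask) for x in m.flat]
#     [False, True, False, False]
#     >>> m.flat[0] = np.ma.masked  # mark an element as masked
#     >>> bool(m.mask[0, 0])
#     True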
class MaskedNDArray(Masked, np.ndarray, base_cls=np.ndarray, data_cls=np.ndarray):
_mask = None
info = MaskedNDArrayInfo()
def __new__(cls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
self = super().__new__(cls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
# For all subclasses we should set a default __new__ that passes on
# arguments other than mask to the data class, and then sets the mask.
if "__new__" not in cls.__dict__:
def __new__(newcls, *args, mask=None, **kwargs):
"""Get data class instance from arguments and then set mask."""
# Need to explicitly mention classes outside of class definition.
self = super(cls, newcls).__new__(newcls, *args, **kwargs)
if mask is not None:
self.mask = mask
elif self._mask is None:
self.mask = False
return self
cls.__new__ = __new__
if "info" not in cls.__dict__ and hasattr(cls._data_cls, "info"):
data_info = cls._data_cls.info
attr_names = data_info.attr_names | {"serialize_method"}
new_info = type(
cls.__name__ + "Info",
(MaskedArraySubclassInfo, data_info.__class__),
dict(attr_names=attr_names),
)
cls.info = new_info()
# The two pieces typically overridden.
@classmethod
def from_unmasked(cls, data, mask=None, copy=False):
# Note: have to override since __new__ would use ndarray.__new__
# which expects the shape as its first argument, not an array.
data = np.array(data, subok=True, copy=copy)
self = data.view(cls)
self._set_mask(mask, copy=copy)
return self
@property
def unmasked(self):
return super().view(self._data_cls)
@classmethod
def _get_masked_cls(cls, data_cls):
# Short-cuts
if data_cls is np.ndarray:
return MaskedNDArray
elif data_cls is None: # for .view()
return cls
return super()._get_masked_cls(data_cls)
@property
def flat(self):
"""A 1-D iterator over the Masked array.
This returns a ``MaskedIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to Python's built-in iterator, except that it also
allows assignment.
"""
return MaskedIterator(self)
@property
def _baseclass(self):
"""Work-around for MaskedArray initialization.
Allows the base class to be inferred correctly when a masked instance
is used to initialize (or viewed as) a `~numpy.ma.MaskedArray`.
"""
return self._data_cls
def view(self, dtype=None, type=None):
"""New view of the masked array.
Like `numpy.ndarray.view`, but always returning a masked array subclass.
"""
if type is None and (
isinstance(dtype, builtins.type) and issubclass(dtype, np.ndarray)
):
return super().view(self._get_masked_cls(dtype))
if dtype is None:
return super().view(self._get_masked_cls(type))
dtype = np.dtype(dtype)
if not (
dtype.itemsize == self.dtype.itemsize
and (dtype.names is None or len(dtype.names) == len(self.dtype.names))
):
raise NotImplementedError(
f"{self.__class__} cannot be viewed with a dtype with a "
"with a different number of fields or size."
)
return super().view(dtype, self._get_masked_cls(type))
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# Logically, this should come from ndarray and hence be None, but
# just in case someone creates a new mixin, we check.
super_array_finalize = super().__array_finalize__
if super_array_finalize: # pragma: no cover
super_array_finalize(obj)
if self._mask is None:
# Got here after, e.g., a view of another masked class.
# Get its mask, or initialize ours.
self._set_mask(getattr(obj, "_mask", False))
if "info" in obj.__dict__:
self.info = obj.info
@property
def shape(self):
"""The shape of the data and the mask.
Usually used to get the current shape of an array, but may also be
used to reshape the array in-place by assigning a tuple of array
dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the
size of the array and the remaining dimensions.
Raises
------
AttributeError
If a copy is required, of either the data or the mask.
"""
# Redefinition to allow defining a setter and add a docstring.
return super().shape
@shape.setter
def shape(self, shape):
old_shape = self.shape
self._mask.shape = shape
# Reshape array proper in try/except just in case some broadcasting
# or so causes it to fail.
try:
super(MaskedNDArray, type(self)).shape.__set__(self, shape)
except Exception as exc:
self._mask.shape = old_shape
# Given that the mask reshaping succeeded, the only logical
# reason for an exception is something like a broadcast error in
            # __array_finalize__, or a different memory ordering between
# mask and data. For those, give a more useful error message;
# otherwise just raise the error.
if "could not broadcast" in exc.args[0]:
raise AttributeError(
"Incompatible shape for in-place modification. "
"Use `.reshape()` to make a copy with the desired "
"shape."
) from None
else: # pragma: no cover
raise
_eq_simple = _comparison_method("__eq__")
_ne_simple = _comparison_method("__ne__")
__lt__ = _comparison_method("__lt__")
__le__ = _comparison_method("__le__")
__gt__ = _comparison_method("__gt__")
__ge__ = _comparison_method("__ge__")
def __eq__(self, other):
if not self.dtype.names:
return self._eq_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack(
[self[field] == other[field] for field in self.dtype.names], axis=-1
)
return result.all(axis=-1)
def __ne__(self, other):
if not self.dtype.names:
return self._ne_simple(other)
# For structured arrays, we treat this as a reduction over the fields,
# where masked fields are skipped and thus do not influence the result.
other = np.asanyarray(other, dtype=self.dtype)
result = np.stack(
[self[field] != other[field] for field in self.dtype.names], axis=-1
)
return result.any(axis=-1)
def _combine_masks(self, masks, out=None):
masks = [m for m in masks if m is not None and m is not False]
if not masks:
return False
if len(masks) == 1:
if out is None:
return masks[0].copy()
else:
np.copyto(out, masks[0])
return out
out = np.logical_or(masks[0], masks[1], out=out)
for mask in masks[2:]:
np.logical_or(out, mask, out=out)
return out
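    # Illustrative sketch (not part of the original module): the result mask
    # is simply the logical OR of all contributing input masks, e.g. for a
    # two-input ufunc call:
    #
    #     >>> a = Masked(np.array([1., 2.]), mask=[True, False])
    #     >>> b = Masked(np.array([3., 4.]), mask=[False, False])
    #     >>> (a + b).mask.tolist()
    #     [True, False]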
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.pop("out", None)
out_unmasked = None
out_mask = None
if out is not None:
out_unmasked, out_masks = self._get_data_and_masks(*out)
for d, m in zip(out_unmasked, out_masks):
if m is None:
# TODO: allow writing to unmasked output if nothing is masked?
if d is not None:
raise TypeError("cannot write to unmasked output")
elif out_mask is None:
out_mask = m
unmasked, masks = self._get_data_and_masks(*inputs)
if ufunc.signature:
# We're dealing with a gufunc. For now, only deal with
# np.matmul and gufuncs for which the mask of any output always
# depends on all core dimension values of all inputs.
# Also ignore axes keyword for now...
# TODO: in principle, it should be possible to generate the mask
# purely based on the signature.
if "axes" in kwargs:
raise NotImplementedError(
"Masked does not yet support gufunc calls with 'axes'."
)
if ufunc is np.matmul:
# np.matmul is tricky and its signature cannot be parsed by
# _parse_gufunc_signature.
unmasked = np.atleast_1d(*unmasked)
mask0, mask1 = masks
masks = []
is_mat1 = unmasked[1].ndim >= 2
if mask0 is not None:
masks.append(np.logical_or.reduce(mask0, axis=-1, keepdims=is_mat1))
if mask1 is not None:
masks.append(
np.logical_or.reduce(mask1, axis=-2, keepdims=True)
if is_mat1
else np.logical_or.reduce(mask1)
)
mask = self._combine_masks(masks, out=out_mask)
else:
# Parse signature with private numpy function. Note it
# cannot handle spaces in tuples, so remove those.
in_sig, out_sig = np.lib.function_base._parse_gufunc_signature(
ufunc.signature.replace(" ", "")
)
axis = kwargs.get("axis", -1)
keepdims = kwargs.get("keepdims", False)
in_masks = []
for sig, mask in zip(in_sig, masks):
if mask is not None:
if sig:
# Input has core dimensions. Assume that if any
# value in those is masked, the output will be
# masked too (TODO: for multiple core dimensions
# this may be too strong).
mask = np.logical_or.reduce(
mask, axis=axis, keepdims=keepdims
)
in_masks.append(mask)
mask = self._combine_masks(in_masks)
result_masks = []
for os in out_sig:
if os:
# Output has core dimensions. Assume all those
# get the same mask.
result_mask = np.expand_dims(mask, axis)
else:
result_mask = mask
result_masks.append(result_mask)
mask = result_masks if len(result_masks) > 1 else result_masks[0]
elif method == "__call__":
# Regular ufunc call.
mask = self._combine_masks(masks, out=out_mask)
elif method == "outer":
# Must have two arguments; adjust masks as will be done for data.
assert len(masks) == 2
masks = [(m if m is not None else False) for m in masks]
mask = np.logical_or.outer(masks[0], masks[1], out=out_mask)
elif method in {"reduce", "accumulate"}:
# Reductions like np.add.reduce (sum).
if masks[0] is not None:
# By default, we simply propagate masks, since for
# things like np.sum, it makes no sense to do otherwise.
# Individual methods need to override as needed.
# TODO: take care of 'out' too?
if method == "reduce":
axis = kwargs.get("axis", None)
keepdims = kwargs.get("keepdims", False)
where = kwargs.get("where", True)
mask = np.logical_or.reduce(
masks[0],
where=where,
axis=axis,
keepdims=keepdims,
out=out_mask,
)
if where is not True:
# Mask also whole rows that were not selected by where,
# so would have been left as unmasked above.
mask |= np.logical_and.reduce(
masks[0], where=where, axis=axis, keepdims=keepdims
)
else:
# Accumulate
axis = kwargs.get("axis", 0)
mask = np.logical_or.accumulate(masks[0], axis=axis, out=out_mask)
elif out is not None:
mask = False
else: # pragma: no cover
# Can only get here if neither input nor output was masked, but
# perhaps axis or where was masked (in NUMPY_LT_1_21 this is
# possible). We don't support this.
return NotImplemented
elif method in {"reduceat", "at"}: # pragma: no cover
            # TODO: implement things like np.add.reduceat (used for mean).
raise NotImplementedError(
"masked instances cannot yet deal with 'reduceat' or 'at'."
)
if out_unmasked is not None:
kwargs["out"] = out_unmasked
result = getattr(ufunc, method)(*unmasked, **kwargs)
if result is None: # pragma: no cover
# This happens for the "at" method.
return result
if out is not None and len(out) == 1:
out = out[0]
return self._masked_result(result, mask, out)
def __array_function__(self, function, types, args, kwargs):
# TODO: go through functions systematically to see which ones
# work and/or can be supported.
if function in MASKED_SAFE_FUNCTIONS:
return super().__array_function__(function, types, args, kwargs)
elif function in APPLY_TO_BOTH_FUNCTIONS:
helper = APPLY_TO_BOTH_FUNCTIONS[function]
try:
helper_result = helper(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
data_args, mask_args, kwargs, out = helper_result
if out is not None:
if not isinstance(out, Masked):
return self._not_implemented_or_raise(function, types)
function(*mask_args, out=out.mask, **kwargs)
function(*data_args, out=out.unmasked, **kwargs)
return out
mask = function(*mask_args, **kwargs)
result = function(*data_args, **kwargs)
elif function in DISPATCHED_FUNCTIONS:
dispatched_function = DISPATCHED_FUNCTIONS[function]
try:
dispatched_result = dispatched_function(*args, **kwargs)
except NotImplementedError:
return self._not_implemented_or_raise(function, types)
if not isinstance(dispatched_result, tuple):
return dispatched_result
result, mask, out = dispatched_result
elif function in UNSUPPORTED_FUNCTIONS:
return NotImplemented
else: # pragma: no cover
# By default, just pass it through for now.
return super().__array_function__(function, types, args, kwargs)
if mask is None:
return result
else:
return self._masked_result(result, mask, out)
def _not_implemented_or_raise(self, function, types):
# Our function helper or dispatcher found that the function does not
# work with Masked. In principle, there may be another class that
# knows what to do with us, for which we should return NotImplemented.
# But if there is ndarray (or a non-Masked subclass of it) around,
# it quite likely coerces, so we should just break.
if any(issubclass(t, np.ndarray) and not issubclass(t, Masked) for t in types):
raise TypeError(
"the MaskedNDArray implementation cannot handle {} "
"with the given arguments.".format(function)
) from None
else:
return NotImplemented
def _masked_result(self, result, mask, out):
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
if not isinstance(mask, (list, tuple)):
mask = (mask,) * len(result)
return tuple(
self._masked_result(result_, mask_, out_)
for (result_, mask_, out_) in zip(result, mask, out)
)
if out is None:
# Note that we cannot count on result being the same class as
# 'self' (e.g., comparison of quantity results in an ndarray, most
# operations on Longitude and Latitude result in Angle or
# Quantity), so use Masked to determine the appropriate class.
return Masked(result, mask)
# TODO: remove this sanity check once test cases are more complete.
assert isinstance(out, Masked)
# If we have an output, the result was written in-place, so we should
# also write the mask in-place (if not done already in the code).
if out._mask is not mask:
out._mask[...] = mask
return out
# Below are ndarray methods that need to be overridden as masked elements
# need to be skipped and/or an initial value needs to be set.
def _reduce_defaults(self, kwargs, initial_func=None):
"""Get default where and initial for masked reductions.
Generally, the default should be to skip all masked elements. For
reductions such as np.minimum.reduce, we also need an initial value,
which can be determined using ``initial_func``.
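        A minimal sketch of the effect, via ``min`` (which uses these
        defaults, so the masked element is skipped)::
            >>> import numpy as np
            >>> from astropy.utils.masked import Masked
            >>> m = Masked(np.array([3., 1., 2.]), mask=[False, True, False])
            >>> float(m.min())
            2.0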
"""
if "where" not in kwargs:
kwargs["where"] = ~self.mask
if initial_func is not None and "initial" not in kwargs:
kwargs["initial"] = initial_func(self.unmasked)
return kwargs
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
# Unfortunately, cannot override the call to diagonal inside trace, so
# duplicate implementation in numpy/core/src/multiarray/calculation.c.
diagonal = self.diagonal(offset=offset, axis1=axis1, axis2=axis2)
return diagonal.sum(-1, dtype=dtype, out=out)
def min(self, axis=None, out=None, **kwargs):
return super().min(
axis=axis, out=out, **self._reduce_defaults(kwargs, np.nanmax)
)
def max(self, axis=None, out=None, **kwargs):
return super().max(
axis=axis, out=out, **self._reduce_defaults(kwargs, np.nanmin)
)
def nonzero(self):
unmasked_nonzero = self.unmasked.nonzero()
if self.ndim >= 1:
not_masked = ~self.mask[unmasked_nonzero]
return tuple(u[not_masked] for u in unmasked_nonzero)
else:
return unmasked_nonzero if not self.mask else np.nonzero(0)
def compress(self, condition, axis=None, out=None):
if out is not None:
raise NotImplementedError("cannot yet give output")
return self._apply("compress", condition, axis=axis)
def repeat(self, repeats, axis=None):
return self._apply("repeat", repeats, axis=axis)
def choose(self, choices, out=None, mode="raise"):
# Let __array_function__ take care since choices can be masked too.
return np.choose(self, choices, out=out, mode=mode)
if NUMPY_LT_1_22:
def argmin(self, axis=None, out=None):
# Todo: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out)
def argmax(self, axis=None, out=None):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out)
else:
def argmin(self, axis=None, out=None, *, keepdims=False):
# Todo: should this return a masked integer array, with masks
# if all elements were masked?
at_min = self == self.min(axis=axis, keepdims=True)
return at_min.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)
def argmax(self, axis=None, out=None, *, keepdims=False):
at_max = self == self.max(axis=axis, keepdims=True)
return at_max.filled(False).argmax(axis=axis, out=out, keepdims=keepdims)
def argsort(self, axis=-1, kind=None, order=None):
"""Returns the indices that would sort an array.
Perform an indirect sort along the given axis on both the array
and the mask, with masked items being sorted to the end.
Parameters
----------
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis).
If None, the flattened array is used.
kind : str or None, ignored.
The kind of sort. Present only to allow subclasses to work.
order : str or list of str.
For an array with fields defined, the fields to compare first,
second, etc. A single field can be specified as a string, and not
all fields need be specified, but unspecified fields will still be
used, in dtype order, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sorts along the specified ``axis``. Use
``np.take_along_axis(self, index_array, axis=axis)`` to obtain
the sorted array.
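        Examples
        --------
        A minimal sketch; the masked element sorts to the end:
        >>> import numpy as np
        >>> from astropy.utils.masked import Masked
        >>> ma = Masked(np.array([3, 1, 2]), mask=[False, True, False])
        >>> ma.argsort()
        array([2, 0, 1])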
"""
if axis is None:
data = self.ravel()
axis = -1
else:
data = self
if self.dtype.names:
# As done inside the argsort implementation in multiarray/methods.c.
if order is None:
order = self.dtype.names
else:
order = np.core._internal._newnames(self.dtype, order)
keys = tuple(data[name] for name in order[::-1])
elif order is not None:
raise ValueError("Cannot specify order when the array has no fields.")
else:
keys = (data,)
return np.lexsort(keys, axis=axis)
def sort(self, axis=-1, kind=None, order=None):
"""Sort an array in-place. Refer to `numpy.sort` for full documentation."""
# TODO: probably possible to do this faster than going through argsort!
indices = self.argsort(axis, kind=kind, order=order)
self[:] = np.take_along_axis(self, indices, axis=axis)
def argpartition(self, kth, axis=-1, kind="introselect", order=None):
# TODO: should be possible to do this faster than with a full argsort!
return self.argsort(axis=axis, order=order)
def partition(self, kth, axis=-1, kind="introselect", order=None):
        # TODO: should be possible to do this faster than with a full sort!
        return self.sort(axis=axis, order=order)
def cumsum(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.add.accumulate(self, axis=axis, dtype=dtype, out=out)
def cumprod(self, axis=None, dtype=None, out=None):
if axis is None:
self = self.ravel()
axis = 0
return np.multiply.accumulate(self, axis=axis, dtype=dtype, out=out)
def clip(self, min=None, max=None, out=None, **kwargs):
"""Return an array whose values are limited to ``[min, max]``.
Like `~numpy.clip`, but any masked values in ``min`` and ``max``
are ignored for clipping. The mask of the input array is propagated.
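        For instance, in this sketch the upper bound is applied only where
        it is unmasked::
            >>> import numpy as np
            >>> from astropy.utils.masked import Masked
            >>> a = Masked(np.arange(5.))
            >>> amax = Masked(np.full(5, 2.), mask=[True, True, True, False, False])
            >>> a.clip(None, amax).unmasked
            array([0., 1., 2., 2., 2.])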
"""
# TODO: implement this at the ufunc level.
dmin, mmin = self._get_data_and_mask(min)
dmax, mmax = self._get_data_and_mask(max)
if mmin is None and mmax is None:
# Fast path for unmasked max, min.
return super().clip(min, max, out=out, **kwargs)
masked_out = np.positive(self, out=out)
out = masked_out.unmasked
if dmin is not None:
np.maximum(out, dmin, out=out, where=True if mmin is None else ~mmin)
if dmax is not None:
np.minimum(out, dmax, out=out, where=True if mmax is None else ~mmax)
return masked_out
def mean(self, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
# Implementation based on that in numpy/core/_methods.py
# Cast bool, unsigned int, and int to float64 by default,
# and do float16 at higher precision.
is_float16_result = False
if dtype is None:
if issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype("f8")
elif issubclass(self.dtype.type, np.float16):
dtype = np.dtype("f4")
is_float16_result = out is None
where = ~self.mask & where
result = self.sum(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where
)
n = np.add.reduce(where, axis=axis, keepdims=keepdims)
result /= n
if is_float16_result:
result = result.astype(self.dtype)
return result
def var(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
where_final = ~self.mask & where
# Simplified implementation based on that in numpy/core/_methods.py
n = np.add.reduce(where_final, axis=axis, keepdims=keepdims)[...]
# Cast bool, unsigned int, and int to float64 by default.
if dtype is None and issubclass(self.dtype.type, (np.integer, np.bool_)):
dtype = np.dtype("f8")
mean = self.mean(axis=axis, dtype=dtype, keepdims=True, where=where)
x = self - mean
x *= x.conjugate() # Conjugate just returns x if not complex.
result = x.sum(
axis=axis, dtype=dtype, out=out, keepdims=keepdims, where=where_final
)
n -= ddof
n = np.maximum(n, 0, out=n)
result /= n
result._mask |= n == 0
return result
def std(
self, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True
):
result = self.var(
axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims, where=where
)
return np.sqrt(result, out=result)
def __bool__(self):
# First get result from array itself; this will error if not a scalar.
result = super().__bool__()
return result and not self.mask
def any(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_or.reduce(
self, axis=axis, out=out, keepdims=keepdims, where=~self.mask & where
)
def all(self, axis=None, out=None, keepdims=False, *, where=True):
return np.logical_and.reduce(
self, axis=axis, out=out, keepdims=keepdims, where=~self.mask & where
)
# Following overrides needed since somehow the ndarray implementation
# does not actually call these.
def __str__(self):
return np.array_str(self)
def __repr__(self):
return np.array_repr(self)
def __format__(self, format_spec):
string = super().__format__(format_spec)
if self.shape == () and self.mask:
n = min(3, max(1, len(string)))
return " " * (len(string) - n) + "\u2014" * n
else:
return string
class MaskedRecarray(np.recarray, MaskedNDArray, data_cls=np.recarray):
# Explicit definition since we need to override some methods.
def __array_finalize__(self, obj):
# recarray.__array_finalize__ does not do super, so we do it
# explicitly.
super().__array_finalize__(obj)
super(np.recarray, self).__array_finalize__(obj)
# __getattribute__, __setattr__, and field use these somewhat
    # obscure ndarray methods. TODO: override in MaskedNDArray?
def getfield(self, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
return self[field]
raise NotImplementedError("can only get existing field from structured dtype.")
def setfield(self, val, dtype, offset=0):
for field, info in self.dtype.fields.items():
if offset == info[1] and dtype == info[0]:
self[field] = val
return
raise NotImplementedError("can only set existing field from structured dtype.")
|
eb4fe420aaa96a8044e0230a138cc02708dca80b61fe8f77ba1ff11e100274db | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Built-in mask mixin class.
The design uses `Masked` as a factory class which automatically
generates new subclasses for any data class that is itself a
subclass of a predefined masked class, with `MaskedNDArray`
providing such a predefined class for `~numpy.ndarray`.
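For example, a minimal sketch of the factory in action::
    >>> import numpy as np
    >>> from astropy.utils.masked import Masked
    >>> ma = Masked(np.arange(3.), mask=[False, True, False])
    >>> type(ma).__name__
    'MaskedNDArray'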
"""
from .core import *
|
efea3b589551fc0ea02b9606e4cdfc45b0e476ba5e3ccd142159abcb2dc2ce93 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Helpers for letting numpy functions interact with Masked arrays.
The module supplies helper routines for numpy functions that propagate
masks appropriately, for use in the ``__array_function__``
implementation of `~astropy.utils.masked.MaskedNDArray`. They are not
very useful on their own, but the ones with docstrings are included in
the documentation so that there is a place to find out how the mask is
interpreted.
"""
import numpy as np
from astropy.units.quantity_helper.function_helpers import FunctionAssigner
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_25
# This module should not really be imported, but we define __all__
# such that sphinx can typeset the functions with docstrings.
# The latter are added to __all__ at the end.
__all__ = [
"MASKED_SAFE_FUNCTIONS",
"APPLY_TO_BOTH_FUNCTIONS",
"DISPATCHED_FUNCTIONS",
"UNSUPPORTED_FUNCTIONS",
]
MASKED_SAFE_FUNCTIONS = set()
"""Set of functions that work fine on Masked classes already.
Most of these internally use `numpy.ufunc` or other functions that
are already covered.
"""
APPLY_TO_BOTH_FUNCTIONS = {}
"""Dict of functions that should apply to both data and mask.
The `dict` is keyed by the numpy function and the values are functions
that take the input arguments of the numpy function and organize these
for passing the data and mask to the numpy function.
Returns
-------
data_args : tuple
Arguments to pass on to the numpy function for the unmasked data.
mask_args : tuple
Arguments to pass on to the numpy function for the masked data.
kwargs : dict
Keyword arguments to pass on for both unmasked data and mask.
out : `~astropy.utils.masked.Masked` instance or None
Optional instance in which to store the output.
Raises
------
NotImplementedError
    When an argument is masked when it should not be, or vice versa.
"""
DISPATCHED_FUNCTIONS = {}
"""Dict of functions that provide the numpy function's functionality.
These are for more complicated versions where the numpy function itself
cannot easily be used. Each should return either the result of the
function, or a tuple consisting of the unmasked result, the mask for the
result and a possible output instance.
Each should raise `NotImplementedError` if one of the arguments is masked
when it should not be or vice versa.
"""
UNSUPPORTED_FUNCTIONS = set()
"""Set of numpy functions that are not supported for masked arrays.
For most, masked input simply makes no sense, but for others support
may simply not have been implemented yet. Issues or PRs adding support
for functions are welcome.
"""
# Almost all from np.core.fromnumeric defer to methods so are OK.
MASKED_SAFE_FUNCTIONS |= {
getattr(np, name)
for name in np.core.fromnumeric.__all__
if name not in {"choose", "put", "resize", "searchsorted", "where", "alen"}
}
MASKED_SAFE_FUNCTIONS |= {
# built-in from multiarray
np.may_share_memory, np.can_cast, np.min_scalar_type, np.result_type,
np.shares_memory,
# np.core.arrayprint
np.array_repr,
# np.core.function_base
np.linspace, np.logspace, np.geomspace,
# np.core.numeric
np.isclose, np.allclose, np.flatnonzero, np.argwhere,
# np.core.shape_base
np.atleast_1d, np.atleast_2d, np.atleast_3d, np.stack, np.hstack, np.vstack,
# np.lib.function_base
np.average, np.diff, np.extract, np.meshgrid, np.trapz, np.gradient,
# np.lib.index_tricks
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.fill_diagonal,
# np.lib.shape_base
np.column_stack, np.row_stack, np.dstack,
np.array_split, np.split, np.hsplit, np.vsplit, np.dsplit,
np.expand_dims, np.apply_along_axis, np.kron, np.tile,
np.take_along_axis, np.put_along_axis,
# np.lib.type_check (all but asfarray, nan_to_num)
np.iscomplexobj, np.isrealobj, np.imag, np.isreal, np.real,
np.real_if_close, np.common_type,
# np.lib.ufunclike
np.fix, np.isneginf, np.isposinf,
# np.lib.function_base
np.angle, np.i0,
} # fmt: skip
IGNORED_FUNCTIONS = {
# I/O - useless for Masked, since no way to store the mask.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
IGNORED_FUNCTIONS |= {
np.pad, np.searchsorted, np.digitize,
np.is_busday, np.busday_count, np.busday_offset,
# numpy.lib.function_base
np.cov, np.corrcoef, np.trim_zeros,
# numpy.core.numeric
np.correlate, np.convolve,
# numpy.lib.histograms
np.histogram, np.histogram2d, np.histogramdd, np.histogram_bin_edges,
# TODO!!
np.dot, np.vdot, np.inner, np.tensordot, np.cross,
np.einsum, np.einsum_path,
} # fmt: skip
# Really should do these...
IGNORED_FUNCTIONS |= {
getattr(np, setopsname) for setopsname in np.lib.arraysetops.__all__
}
if NUMPY_LT_1_23:
IGNORED_FUNCTIONS |= {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
# Explicitly unsupported functions
UNSUPPORTED_FUNCTIONS |= {
np.unravel_index,
np.ravel_multi_index,
np.ix_,
}
# No support for the functions also not supported by Quantity
# (io, polynomial, etc.).
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
apply_to_both = FunctionAssigner(APPLY_TO_BOTH_FUNCTIONS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
def _get_data_and_masks(*args):
"""Separate out arguments into tuples of data and masks.
An all-False mask is created if an argument does not have a mask.
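    For instance, a minimal sketch (assuming the module is importable as
    ``astropy.utils.masked.function_helpers``)::
        >>> import numpy as np
        >>> from astropy.utils.masked import Masked
        >>> from astropy.utils.masked.function_helpers import _get_data_and_masks
        >>> data, masks = _get_data_and_masks(Masked(np.arange(2)), np.arange(2))
        >>> masks[1]
        array([False, False])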
"""
from .core import Masked
data, masks = Masked._get_data_and_masks(*args)
masks = tuple(
m if m is not None else np.zeros(np.shape(d), bool) for d, m in zip(data, masks)
)
return data, masks
# Following are simple ufunc-like functions which should just copy the mask.
@dispatched_function
def datetime_as_string(arr, *args, **kwargs):
return (np.datetime_as_string(arr.unmasked, *args, **kwargs), arr.mask.copy(), None)
@dispatched_function
def sinc(x):
return np.sinc(x.unmasked), x.mask.copy(), None
@dispatched_function
def iscomplex(x):
return np.iscomplex(x.unmasked), x.mask.copy(), None
@dispatched_function
def unwrap(p, *args, **kwargs):
return np.unwrap(p.unmasked, *args, **kwargs), p.mask.copy(), None
@dispatched_function
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
data = np.nan_to_num(x.unmasked, copy=copy, nan=nan, posinf=posinf, neginf=neginf)
return (data, x.mask.copy(), None) if copy else x
# Following are simple functions related to shapes, where the same function
# should be applied to the data and the mask. They cannot all share the
# same helper, because the first arguments have different names.
@apply_to_both(
helps={np.copy, np.asfarray, np.resize, np.moveaxis, np.rollaxis, np.roll}
)
def masked_a_helper(a, *args, **kwargs):
data, mask = _get_data_and_masks(a)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.flip, np.flipud, np.fliplr, np.rot90, np.triu, np.tril})
def masked_m_helper(m, *args, **kwargs):
data, mask = _get_data_and_masks(m)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.diag, np.diagflat})
def masked_v_helper(v, *args, **kwargs):
data, mask = _get_data_and_masks(v)
return data + args, mask + args, kwargs, None
@apply_to_both(helps={np.delete})
def masked_arr_helper(array, *args, **kwargs):
data, mask = _get_data_and_masks(array)
return data + args, mask + args, kwargs, None
@apply_to_both
def broadcast_to(array, shape, subok=False):
"""Broadcast array to the given shape.
Like `numpy.broadcast_to`, and applied to both unmasked data and mask.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and mask are allowed, i.e., for ``subok=False``,
a `~astropy.utils.masked.MaskedNDArray` will be returned.
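    A short sketch of the mask handling::
        >>> import numpy as np
        >>> from astropy.utils.masked import Masked
        >>> ma = Masked(np.arange(3.), mask=[False, True, False])
        >>> np.broadcast_to(ma, (2, 3)).mask
        array([[False,  True, False],
               [False,  True, False]])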
"""
data, mask = _get_data_and_masks(array)
return data, mask, dict(shape=shape, subok=subok), None
@dispatched_function
def outer(a, b, out=None):
return np.multiply.outer(np.ravel(a), np.ravel(b), out=out)
@dispatched_function
def empty_like(prototype, dtype=None, order="K", subok=True, shape=None):
"""Return a new array with the same shape and type as a given array.
Like `numpy.empty_like`, but will add an empty mask.
"""
unmasked = np.empty_like(
prototype.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
if dtype is not None:
dtype = (
np.ma.make_mask_descr(unmasked.dtype)
if unmasked.dtype.names
else np.dtype("?")
)
mask = np.empty_like(
prototype.mask, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, mask, None
@dispatched_function
def zeros_like(a, dtype=None, order="K", subok=True, shape=None):
"""Return an array of zeros with the same shape and type as a given array.
Like `numpy.zeros_like`, but will add an all-false mask.
"""
unmasked = np.zeros_like(
a.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, False, None
@dispatched_function
def ones_like(a, dtype=None, order="K", subok=True, shape=None):
"""Return an array of ones with the same shape and type as a given array.
Like `numpy.ones_like`, but will add an all-false mask.
"""
unmasked = np.ones_like(
a.unmasked, dtype=dtype, order=order, subok=subok, shape=shape
)
return unmasked, False, None
@dispatched_function
def full_like(a, fill_value, dtype=None, order="K", subok=True, shape=None):
"""Return a full array with the same shape and type as a given array.
Like `numpy.full_like`, but with a mask that is also set.
If ``fill_value`` is `numpy.ma.masked`, the data will be left unset
(i.e., as created by `numpy.empty_like`).
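    E.g., a minimal sketch, setting the mask via ``np.ma.masked``::
        >>> import numpy as np
        >>> from astropy.utils.masked import Masked
        >>> np.full_like(Masked(np.arange(3)), np.ma.masked).mask
        array([ True,  True,  True])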
"""
result = np.empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
result[...] = fill_value
return result
@dispatched_function
def put(a, ind, v, mode="raise"):
"""Replaces specified elements of an array with given values.
Like `numpy.put`, but for masked array ``a`` and possibly masked
value ``v``. Masked indices ``ind`` are not supported.
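    For example, a minimal sketch with a masked value::
        >>> import numpy as np
        >>> from astropy.utils.masked import Masked
        >>> a = Masked(np.arange(4.))
        >>> np.put(a, [1, 2], Masked(9., mask=True))
        >>> a.mask
        array([False,  True,  True, False])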
"""
from astropy.utils.masked import Masked
if isinstance(ind, Masked) or not isinstance(a, Masked):
raise NotImplementedError
v_data, v_mask = a._get_data_and_mask(v)
if v_data is not None:
np.put(a.unmasked, ind, v_data, mode=mode)
# v_mask of None will be correctly interpreted as False.
np.put(a.mask, ind, v_mask, mode=mode)
return None
@dispatched_function
def putmask(a, mask, values):
"""Changes elements of an array based on conditional and input values.
Like `numpy.putmask`, but for masked array ``a`` and possibly masked
``values``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(a, Masked):
raise NotImplementedError
values_data, values_mask = a._get_data_and_mask(values)
if values_data is not None:
np.putmask(a.unmasked, mask, values_data)
np.putmask(a.mask, mask, values_mask)
return None
@dispatched_function
def place(arr, mask, vals):
"""Change elements of an array based on conditional and input values.
    Like `numpy.place`, but for masked array ``arr`` and possibly masked
    ``vals``. Masked ``mask`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(mask, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
vals_data, vals_mask = arr._get_data_and_mask(vals)
if vals_data is not None:
np.place(arr.unmasked, mask, vals_data)
np.place(arr.mask, mask, vals_mask)
return None
@dispatched_function
def copyto(dst, src, casting="same_kind", where=True):
"""Copies values from one array to another, broadcasting as necessary.
Like `numpy.copyto`, but for masked destination ``dst`` and possibly
masked source ``src``.
"""
from astropy.utils.masked import Masked
if not isinstance(dst, Masked) or isinstance(where, Masked):
raise NotImplementedError
src_data, src_mask = dst._get_data_and_mask(src)
if src_data is not None:
np.copyto(dst.unmasked, src_data, casting=casting, where=where)
if src_mask is not None:
np.copyto(dst.mask, src_mask, where=where)
return None
@dispatched_function
def packbits(a, *args, **kwargs):
result = np.packbits(a.unmasked, *args, **kwargs)
mask = np.packbits(a.mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def unpackbits(a, *args, **kwargs):
result = np.unpackbits(a.unmasked, *args, **kwargs)
mask = np.zeros(a.shape, dtype="u1")
mask[a.mask] = 255
mask = np.unpackbits(mask, *args, **kwargs).astype(bool)
return result, mask, None
@dispatched_function
def bincount(x, weights=None, minlength=0):
"""Count number of occurrences of each value in array of non-negative ints.
Like `numpy.bincount`, but masked entries in ``x`` will be skipped.
Any masked entries in ``weights`` will lead the corresponding bin to
be masked.
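    A minimal sketch; the masked entry is skipped::
        >>> import numpy as np
        >>> from astropy.utils.masked import Masked
        >>> x = Masked(np.array([0, 1, 1, 2]), mask=[False, False, True, False])
        >>> np.bincount(x)
        array([1, 1, 1])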
"""
from astropy.utils.masked import Masked
if weights is not None:
weights = np.asanyarray(weights)
if isinstance(x, Masked) and x.ndim <= 1:
# let other dimensions lead to errors.
if weights is not None and weights.ndim == x.ndim:
weights = weights[~x.mask]
x = x.unmasked[~x.mask]
mask = None
if weights is not None:
weights, w_mask = Masked._get_data_and_mask(weights)
if w_mask is not None:
mask = np.bincount(x, w_mask.astype(int), minlength=minlength).astype(bool)
    result = np.bincount(x, weights, minlength=minlength)
return result, mask, None
@dispatched_function
def msort(a):
result = a.copy()
result.sort(axis=0)
return result
@dispatched_function
def sort_complex(a):
# Just a copy of function_base.sort_complex, to avoid the asarray.
b = a.copy()
b.sort()
if not issubclass(b.dtype.type, np.complexfloating): # pragma: no cover
if b.dtype.char in "bhBH":
return b.astype("F")
elif b.dtype.char == "g":
return b.astype("G")
else:
return b.astype("D")
else:
return b
@dispatched_function
def concatenate(arrays, axis=0, out=None, dtype=None, casting="same_kind"):
data, masks = _get_data_and_masks(*arrays)
if out is None:
return (
np.concatenate(data, axis=axis, dtype=dtype, casting=casting),
np.concatenate(masks, axis=axis),
None,
)
else:
from astropy.utils.masked import Masked
if not isinstance(out, Masked):
raise NotImplementedError
np.concatenate(masks, out=out.mask, axis=axis)
np.concatenate(data, out=out.unmasked, axis=axis, dtype=dtype, casting=casting)
return out
@apply_to_both
def append(arr, values, axis=None):
data, masks = _get_data_and_masks(arr, values)
return data, masks, dict(axis=axis), None
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
from astropy.utils.masked import Masked
arrays, list_ndim, result_ndim, final_size = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim
)
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags["F_CONTIGUOUS"] for arr in arrays)
C_order = all(arr.flags["C_CONTIGUOUS"] for arr in arrays)
order = "F" if F_order and not C_order else "C"
result = Masked(np.empty(shape=shape, dtype=dtype, order=order))
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
@dispatched_function
def broadcast_arrays(*args, subok=True):
"""Broadcast arrays to a common shape.
Like `numpy.broadcast_arrays`, applied to both unmasked data and masks.
Note that ``subok`` is taken to mean whether or not subclasses of
the unmasked data and masks are allowed, i.e., for ``subok=False``,
`~astropy.utils.masked.MaskedNDArray` instances will be returned.
"""
from .core import Masked
are_masked = [isinstance(arg, Masked) for arg in args]
data = [
(arg.unmasked if is_masked else arg) for arg, is_masked in zip(args, are_masked)
]
results = np.broadcast_arrays(*data, subok=subok)
shape = results[0].shape if isinstance(results, list) else results.shape
masks = [
(np.broadcast_to(arg.mask, shape, subok=subok) if is_masked else None)
for arg, is_masked in zip(args, are_masked)
]
results = [
(Masked(result, mask) if mask is not None else result)
for (result, mask) in zip(results, masks)
]
return results if len(results) > 1 else results[0]
@apply_to_both
def insert(arr, obj, values, axis=None):
"""Insert values along the given axis before the given indices.
Like `numpy.insert` but for possibly masked ``arr`` and ``values``.
Masked ``obj`` is not supported.
"""
from astropy.utils.masked import Masked
if isinstance(obj, Masked) or not isinstance(arr, Masked):
raise NotImplementedError
(arr_data, val_data), (arr_mask, val_mask) = _get_data_and_masks(arr, values)
return ((arr_data, obj, val_data, axis), (arr_mask, obj, val_mask, axis), {}, None)
@dispatched_function
def count_nonzero(a, axis=None, *, keepdims=False):
"""Counts the number of non-zero values in the array ``a``.
Like `numpy.count_nonzero`, with masked values counted as 0 or `False`.
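    For instance, a minimal sketch::
        >>> import numpy as np
        >>> from astropy.utils.masked import Masked
        >>> np.count_nonzero(Masked(np.array([1, 0, 2]), mask=[False, False, True]))
        1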
"""
filled = a.filled(np.zeros((), a.dtype))
return np.count_nonzero(filled, axis, keepdims=keepdims)
def _masked_median_1d(a, overwrite_input):
# TODO: need an in-place mask-sorting option.
unmasked = a.unmasked[~a.mask]
if unmasked.size:
return a.from_unmasked(np.median(unmasked, overwrite_input=overwrite_input))
else:
return a.from_unmasked(np.zeros_like(a.unmasked, shape=(1,))[0], mask=True)
def _masked_median(a, axis=None, out=None, overwrite_input=False):
# As for np.nanmedian, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_median_1d(part, overwrite_input)
else:
result = np.apply_along_axis(_masked_median_1d, axis, a, overwrite_input)
if out is not None:
out[...] = result
return result
@dispatched_function
def median(a, axis=None, out=None, **kwargs):
from astropy.utils.masked import Masked
if out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
if NUMPY_LT_1_25:
keepdims = kwargs.pop("keepdims", False)
r, k = np.lib.function_base._ureduce(
a, func=_masked_median, axis=axis, out=out, **kwargs
)
return (r.reshape(k) if keepdims else r) if out is None else out
else:
return np.lib.function_base._ureduce(
a, func=_masked_median, axis=axis, out=out, **kwargs
)
def _masked_quantile_1d(a, q, **kwargs):
"""
    Private function for rank-1 arrays. Compute quantile ignoring masked values.
    See `numpy.nanpercentile` for parameter usage.
"""
unmasked = a.unmasked[~a.mask]
if unmasked.size:
result = np.lib.function_base._quantile_unchecked(unmasked, q, **kwargs)
return a.from_unmasked(result)
else:
return a.from_unmasked(np.zeros_like(a.unmasked, shape=q.shape), True)
def _masked_quantile(a, q, axis=None, out=None, **kwargs):
    # As for np.nanquantile, but without a fast option as yet.
if axis is None or a.ndim == 1:
part = a.ravel()
result = _masked_quantile_1d(part, q, **kwargs)
else:
result = np.apply_along_axis(_masked_quantile_1d, axis, a, q, **kwargs)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.moveaxis(result, axis, 0)
if out is not None:
out[...] = result
return result
@dispatched_function
def quantile(a, q, axis=None, out=None, **kwargs):
from astropy.utils.masked import Masked
if isinstance(q, Masked) or out is not None and not isinstance(out, Masked):
raise NotImplementedError
a = Masked(a)
q = np.asanyarray(q)
if not np.lib.function_base._quantile_is_valid(q):
raise ValueError("Quantiles must be in the range [0, 1]")
if NUMPY_LT_1_25:
keepdims = kwargs.pop("keepdims", False)
r, k = np.lib.function_base._ureduce(
a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs
)
return (r.reshape(q.shape + k) if keepdims else r) if out is None else out
else:
return np.lib.function_base._ureduce(
a, func=_masked_quantile, q=q, axis=axis, out=out, **kwargs
)
@dispatched_function
def percentile(a, q, *args, **kwargs):
q = np.true_divide(q, 100)
return quantile(a, q, *args, **kwargs)
@dispatched_function
def array_equal(a1, a2, equal_nan=False):
(a1d, a2d), (a1m, a2m) = _get_data_and_masks(a1, a2)
if a1d.shape != a2d.shape:
return False
equal = a1d == a2d
if equal_nan:
equal |= np.isnan(a1d) & np.isnan(a2d)
return bool((equal | a1m | a2m).all())
@dispatched_function
def array_equiv(a1, a2):
return bool((a1 == a2).all())
@dispatched_function
def where(condition, *args):
from astropy.utils.masked import Masked
if not args:
return condition.nonzero(), None, None
condition, c_mask = Masked._get_data_and_mask(condition)
data, masks = _get_data_and_masks(*args)
unmasked = np.where(condition, *data)
mask = np.where(condition, *masks)
if c_mask is not None:
mask |= c_mask
return Masked(unmasked, mask=mask)
@dispatched_function
def choose(a, choices, out=None, mode="raise"):
"""Construct an array from an index array and a set of arrays to choose from.
Like `numpy.choose`. Masked indices in ``a`` will lead to masked output
values and underlying data values are ignored if out of bounds (for
``mode='raise'``). Any values masked in ``choices`` will be propagated
if chosen.
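    A brief sketch, with the masked index propagating into the output::
        >>> import numpy as np
        >>> from astropy.utils.masked import Masked
        >>> a = Masked(np.array([0, 1]), mask=[False, True])
        >>> np.choose(a, (np.array([10, 20]), np.array([30, 40]))).mask
        array([False,  True])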
"""
from astropy.utils.masked import Masked
a_data, a_mask = Masked._get_data_and_mask(a)
if a_mask is not None and mode == "raise":
# Avoid raising on masked indices.
a_data = a.filled(fill_value=0)
kwargs = {"mode": mode}
if out is not None:
if not isinstance(out, Masked):
raise NotImplementedError
kwargs["out"] = out.unmasked
data, masks = _get_data_and_masks(*choices)
data_chosen = np.choose(a_data, data, **kwargs)
if out is not None:
kwargs["out"] = out.mask
mask_chosen = np.choose(a_data, masks, **kwargs)
if a_mask is not None:
mask_chosen |= a_mask
return Masked(data_chosen, mask_chosen) if out is None else out
@apply_to_both
def select(condlist, choicelist, default=0):
"""Return an array drawn from elements in choicelist, depending on conditions.
    Like `numpy.select`, but masks in ``choicelist`` are propagated.
Any masks in ``condlist`` are ignored.
"""
from astropy.utils.masked import Masked
condlist = [c.unmasked if isinstance(c, Masked) else c for c in condlist]
data_list, mask_list = _get_data_and_masks(*choicelist)
default = Masked(default) if default is not np.ma.masked else Masked(0, mask=True)
return (
(condlist, data_list, default.unmasked),
(condlist, mask_list, default.mask),
{},
None,
)
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
"""Evaluate a piecewise-defined function.
Like `numpy.piecewise` but for masked input array ``x``.
Any masks in ``condlist`` are ignored.
"""
# Copied implementation from numpy.lib.function_base.piecewise,
# just to ensure output is Masked.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0
): # pragma: no cover
condlist = [condlist]
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
# The one real change...
y = np.zeros_like(x)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
for item, value in zip(where, what):
y[item] = value
return y
@dispatched_function
def interp(x, xp, fp, *args, **kwargs):
"""One-dimensional linear interpolation.
Like `numpy.interp`, but any masked points in ``xp`` and ``fp``
are ignored. Any masked values in ``x`` will still be evaluated,
but masked on output.
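    For example, in this sketch the masked point in ``xp`` is ignored::
        >>> import numpy as np
        >>> from astropy.utils.masked import Masked
        >>> xp = Masked(np.array([0., 1., 2.]), mask=[False, True, False])
        >>> fp = np.array([0., 100., 2.])
        >>> np.interp(np.array([0.5, 1.5]), xp, fp)
        array([0.5, 1.5])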
"""
from astropy.utils.masked import Masked
xd, xm = Masked._get_data_and_mask(x)
if isinstance(xp, Masked) or isinstance(fp, Masked):
(xp, fp), (xpm, fpm) = _get_data_and_masks(xp, fp)
if xp.ndim == fp.ndim == 1:
            # Only the 1-D case is dealt with here; anything else
            # will just raise in np.interp below.
m = xpm | fpm
xp = xp[~m]
fp = fp[~m]
result = np.interp(xd, xp, fp, *args, **kwargs)
return result if xm is None else Masked(result, xm.copy())
@dispatched_function
def lexsort(keys, axis=-1):
"""Perform an indirect stable sort using a sequence of keys.
Like `numpy.lexsort` but for possibly masked ``keys``. Masked
values are sorted towards the end for each key.
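    A minimal sketch; the masked entry sorts last::
        >>> import numpy as np
        >>> from astropy.utils.masked import Masked
        >>> key = Masked(np.array([2., 0., 1.]), mask=[False, True, False])
        >>> np.lexsort((key,))
        array([2, 0, 1])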
"""
# Sort masks to the end.
from .core import Masked
new_keys = []
for key in keys:
if isinstance(key, Masked):
# If there are other keys below, want to be sure that
# for masked values, those other keys set the order.
new_key = key.unmasked
if new_keys and key.mask.any():
new_key = new_key.copy()
new_key[key.mask] = new_key.flat[0]
new_keys.extend([new_key, key.mask])
else:
new_keys.append(key)
return np.lexsort(new_keys, axis=axis)
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
    # since a is assumed to be an array in the next line...
# Which is what we do here - we can only get here if it is Masked.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError(
"function is not returning an array of the correct shape"
)
return val
class MaskedFormat:
"""Formatter for masked array scalars.
For use in `numpy.array2string`, wrapping the regular formatters such
that if a value is masked, its formatted string is replaced.
Typically initialized using the ``from_data`` class method.
"""
def __init__(self, format_function):
self.format_function = format_function
# Special case for structured void and subarray: we need to make all the
# format functions for the items masked as well.
        # TODO: maybe a separate class is more logical?
ffs = getattr(format_function, "format_functions", None)
if ffs:
# StructuredVoidFormat: multiple format functions to be changed.
self.format_function.format_functions = [MaskedFormat(ff) for ff in ffs]
ff = getattr(format_function, "format_function", None)
if ff:
# SubarrayFormat: change format function for the elements.
self.format_function.format_function = MaskedFormat(ff)
def __call__(self, x):
if x.dtype.names:
# The replacement of x with a list is needed because the function
# inside StructuredVoidFormat iterates over x, which works for an
# np.void but not an array scalar.
return self.format_function([x[field] for field in x.dtype.names])
if x.shape:
# For a subarray pass on the data directly, since the
# items will be iterated on inside the function.
return self.format_function(x)
# Single element: first just typeset it normally, replace with masked
# string if needed.
string = self.format_function(x.unmasked[()])
if x.mask:
# Strikethrough would be neat, but terminal needs a different
# formatting than, say, jupyter notebook.
# return "\x1B[9m"+string+"\x1B[29m"
# return ''.join(s+'\u0336' for s in string)
n = min(3, max(1, len(string)))
return " " * (len(string) - n) + "\u2014" * n
else:
return string
@classmethod
def from_data(cls, data, **options):
from numpy.core.arrayprint import _get_format_function
return cls(_get_format_function(data, **options))
def _array2string(a, options, separator=" ", prefix=""):
# Mostly copied from numpy.core.arrayprint, except:
# - The format function is wrapped in a mask-aware class;
    # - Array scalars are not cast as arrays.
from numpy.core.arrayprint import _formatArray, _leading_trailing
data = np.asarray(a)
if a.size > options["threshold"]:
summary_insert = "..."
data = _leading_trailing(data, options["edgeitems"])
else:
summary_insert = ""
# find the right formatting function for the array
format_function = MaskedFormat.from_data(data, **options)
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " " * len(prefix)
lst = _formatArray(
a,
format_function,
options["linewidth"],
next_line_prefix,
separator,
options["edgeitems"],
summary_insert,
options["legacy"],
)
return lst
@dispatched_function
def array2string(
a,
max_line_width=None,
precision=None,
suppress_small=None,
separator=" ",
prefix="",
style=np._NoValue,
formatter=None,
threshold=None,
edgeitems=None,
sign=None,
floatmode=None,
suffix="",
):
# Copied from numpy.core.arrayprint, but using _array2string above.
from numpy.core.arrayprint import _format_options, _make_options_dict
overrides = _make_options_dict(
precision,
threshold,
edgeitems,
max_line_width,
suppress_small,
None,
None,
sign,
formatter,
floatmode,
)
options = _format_options.copy()
options.update(overrides)
options["linewidth"] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
@dispatched_function
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
# Override to avoid special treatment of array scalars.
return array2string(a, max_line_width, precision, suppress_small, " ", "")
# For the nanfunctions, we just treat any nan as an additional mask.
_nanfunc_fill_values = {"nansum": 0, "nancumsum": 0, "nanprod": 1, "nancumprod": 1}
def masked_nanfunc(nanfuncname):
np_func = getattr(np, nanfuncname[3:])
fill_value = _nanfunc_fill_values.get(nanfuncname, None)
def nanfunc(a, *args, **kwargs):
from astropy.utils.masked import Masked
a, mask = Masked._get_data_and_mask(a)
if issubclass(a.dtype.type, np.inexact):
nans = np.isnan(a)
mask = nans if mask is None else (nans | mask)
if mask is not None:
a = Masked(a, mask)
if fill_value is not None:
a = a.filled(fill_value)
return np_func(a, *args, **kwargs)
doc = f"Like `numpy.{nanfuncname}`, skipping masked values as well.\n\n"
if fill_value is not None:
# sum, cumsum, prod, cumprod
doc += (
f"Masked/NaN values are replaced with {fill_value}. "
"The output is not masked."
)
elif "arg" in nanfuncname:
doc += (
"No exceptions are raised for fully masked/NaN slices.\n"
"Instead, these give index 0."
)
else:
doc += (
"No warnings are given for fully masked/NaN slices.\n"
"Instead, they are masked in the output."
)
nanfunc.__doc__ = doc
nanfunc.__name__ = nanfuncname
return nanfunc
for nanfuncname in np.lib.nanfunctions.__all__:
globals()[nanfuncname] = dispatched_function(
masked_nanfunc(nanfuncname), helps=getattr(np, nanfuncname)
)
# Add any dispatched or helper function that has a docstring to
# __all__, so they will be typeset by sphinx. The logic is that for
# those presumably the use of the mask is not entirely obvious.
__all__ += sorted(
helper.__name__
for helper in (
set(APPLY_TO_BOTH_FUNCTIONS.values()) | set(DISPATCHED_FUNCTIONS.values())
)
if helper.__doc__
)
|
8ef802b41e5f0ec09cd50ae8d30d976c947fe346af91e83170ded1722731672c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
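A minimal usage sketch (the attribute is computed lazily on first access,
and is simply a boolean)::
    >>> from astropy.utils.compat.optional_deps import HAS_SCIPY
    >>> HAS_SCIPY in (True, False)
    True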
"""
import importlib
import warnings
# First, the top-level packages:
# TODO: This list is a duplicate of the dependencies in setup.cfg "all", but
# some of the package names are different from the pip-install name (e.g.,
# beautifulsoup4 -> bs4).
_optional_deps = [
"asdf",
"asdf_astropy",
"bleach",
"bottleneck",
"bs4",
"bz2",
"fsspec",
"h5py",
"html5lib",
"IPython",
"jplephem",
"lxml",
"matplotlib",
"mpmath",
"pandas",
"PIL",
"pytz",
"s3fs",
"scipy",
"skyfield",
"sortedcontainers",
"lzma",
"pyarrow",
"pytest_mpl",
]
_formerly_optional_deps = ["yaml"] # for backward compatibility
_deps = {k.upper(): k for k in _optional_deps + _formerly_optional_deps}
# Any subpackages that have different import behavior:
_deps["PLT"] = "matplotlib.pyplot"
__all__ = [f"HAS_{pkg}" for pkg in _deps]
def __getattr__(name):
if name in __all__:
module_name = name[4:]
if module_name == "YAML":
from astropy.utils.exceptions import AstropyDeprecationWarning
warnings.warn(
"PyYaml is now a strict dependency. HAS_YAML is deprecated as "
"of v5.0 and will be removed in a subsequent version.",
category=AstropyDeprecationWarning,
)
try:
importlib.import_module(_deps[module_name])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}.")
|
0bad59dd58b508227b56cdbad7b6210edb0729a5d4bc9738e6aa8f9144de1fff | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage contains utility modules for compatibility with older/newer
versions of python, as well as including some bugfixes for the stdlib that are
important for Astropy.
Note that all public functions in the `astropy.utils.compat.misc` module are
imported here for easier access.
The content of this module is solely for internal use of ``astropy``
and subject to changes without deprecations. Do not use it in external
packages or code.
"""
from .misc import *
# Importing this module will also install monkey-patches defined in it
from .numpycompat import *
|
366b8824693aa31870c2b13623aa9174adfcea494a845f706eadbf72a4cc8fff | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
import numpy as np
from astropy.utils import minversion
__all__ = [
"NUMPY_LT_1_21_1",
"NUMPY_LT_1_22",
"NUMPY_LT_1_22_1",
"NUMPY_LT_1_23",
"NUMPY_LT_1_24",
"NUMPY_LT_1_25",
]
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_21_1 = not minversion(np, "1.21.1")
NUMPY_LT_1_22 = not minversion(np, "1.22")
NUMPY_LT_1_22_1 = not minversion(np, "1.22.1")
NUMPY_LT_1_23 = not minversion(np, "1.23")
NUMPY_LT_1_24 = not minversion(np, "1.24dev0")
NUMPY_LT_1_25 = not minversion(np, "1.25.0.dev0+151")
|
b56c9d08ce8d98983b79e4ef42f0576d2353900b2dcadfa8e8abf7903d41a180 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Simple utility functions and bug fixes for compatibility with all supported
versions of Python. This module should generally not be used directly, as
everything in `__all__` will be imported into `astropy.utils.compat` and can
be accessed from there.
"""
import functools
import sys
from astropy.utils.decorators import deprecated
__all__ = ["override__dir__", "PYTHON_LT_3_11"]
PYTHON_LT_3_11 = sys.version_info < (3, 11)
@deprecated(
since="v5.2",
message=(
"http://bugs.python.org/issue12166 is resolved. See docstring for alternatives."
),
)
def override__dir__(f):
"""
When overriding a __dir__ method on an object, you often want to include the
"standard" members on the object as well. This decorator takes care of that
automatically, and all the wrapped function needs to do is return a list of
the "special" members that wouldn't be found by the normal Python means.
.. deprecated:: v5.2
Use ``sorted(super().__dir__() + ...)`` instead.
Example
-------
Your class could define __dir__ as follows::
@override__dir__
def __dir__(self):
return ['special_method1', 'special_method2']
Notes
-----
This function was introduced because of http://bugs.python.org/issue12166,
which has since been resolved by
http://hg.python.org/cpython/rev/8f403199f999. Now, the best way to
customize ``__dir__`` is to use ``super``.
::
def __dir__(self):
added = {'special_method1', 'special_method2'}
return sorted(set(super().__dir__()) | added)
"""
# http://bugs.python.org/issue12166
@functools.wraps(f)
def override__dir__wrapper(self):
members = set(object.__dir__(self))
members.update(f(self))
return sorted(members)
return override__dir__wrapper
|
b78f58ce16cbda6c92632c7a584fb65427597f931a04405c3f2a57af241c41d2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import locale
import os
import platform
import urllib.request
import erfa
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy.tests.tests.test_imports import test_imports
from astropy.time import Time, TimeDelta
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.iers import iers
# Import every top-level astropy module as a test that the ERFA leap second
# table is not updated for normal imports.
test_imports()
# Now test that the erfa leap_seconds table has not been updated. This must be
# done at the module level, which unfortunately will abort the entire test run
# if if fails. Running within a normal pytest test will not work because the
# other tests will end up updating this attribute by virtue of doing Time UTC
# transformations.
assert erfa.leap_seconds._expires is None
# Tests in this module assume that the erfa.leap_seconds attribute has been
# updated from the `erfa` package built-in table to the astropy built-in
# leap-second table. That has the effect of ensuring that the
# `erfa.leap_seconds.expires` property is sufficiently in the future.
iers_table = iers.LeapSeconds.auto_open()
erfa.leap_seconds.update(iers_table)
assert erfa.leap_seconds._expires is not None
SYSTEM_FILE = "/usr/share/zoneinfo/leap-seconds.list"
# Test leap_seconds.list in test/data.
LEAP_SECOND_LIST = get_pkg_data_filename("data/leap-seconds.list")
def test_configuration():
# This test just ensures things stay consistent.
# Adjust if changes are made.
assert iers.conf.iers_leap_second_auto_url == iers.IERS_LEAP_SECOND_URL
assert iers.conf.ietf_leap_second_auto_url == iers.IETF_LEAP_SECOND_URL
class TestReading:
"""Basic tests that leap seconds can be read."""
def verify_day_month_year(self, ls):
assert np.all(ls["day"] == 1)
assert np.all((ls["month"] == 1) | (ls["month"] == 7) | (ls["year"] < 1970))
assert np.all(ls["year"] >= 1960)
t = Time(
{"year": ls["year"], "month": ls["month"], "day": ls["day"]},
format="ymdhms",
)
assert np.all(t == Time(ls["mjd"], format="mjd"))
def test_read_leap_second_dat(self):
ls = iers.LeapSeconds.from_iers_leap_seconds(iers.IERS_LEAP_SECOND_FILE)
        # Below, >= to take into account that we might ship an updated file.
assert ls.expires >= Time("2020-06-28", scale="tai")
assert ls["mjd"][0] == 41317
assert ls["tai_utc"][0] == 10
assert ls["mjd"][-1] >= 57754
assert ls["tai_utc"][-1] >= 37
self.verify_day_month_year(ls)
def test_read_leap_second_dat_locale(self):
current = locale.setlocale(locale.LC_ALL)
try:
if platform.system() == "Darwin":
locale.setlocale(locale.LC_ALL, "fr_FR")
else:
locale.setlocale(locale.LC_ALL, "fr_FR.utf8")
ls = iers.LeapSeconds.from_iers_leap_seconds(iers.IERS_LEAP_SECOND_FILE)
except locale.Error as e:
pytest.skip(f"Locale error: {e}")
finally:
locale.setlocale(locale.LC_ALL, current)
        # Below, >= to take into account that we might ship an updated file.
assert ls.expires >= Time("2020-06-28", scale="tai")
def test_open_leap_second_dat(self):
ls = iers.LeapSeconds.from_iers_leap_seconds(iers.IERS_LEAP_SECOND_FILE)
ls2 = iers.LeapSeconds.open(iers.IERS_LEAP_SECOND_FILE)
assert np.all(ls == ls2)
@pytest.mark.parametrize(
"file",
(LEAP_SECOND_LIST, "file:" + urllib.request.pathname2url(LEAP_SECOND_LIST)),
)
def test_read_leap_seconds_list(self, file):
ls = iers.LeapSeconds.from_leap_seconds_list(file)
assert ls.expires == Time("2020-06-28", scale="tai")
assert ls["mjd"][0] == 41317
assert ls["tai_utc"][0] == 10
assert ls["mjd"][-1] == 57754
assert ls["tai_utc"][-1] == 37
self.verify_day_month_year(ls)
@pytest.mark.parametrize(
"file",
(LEAP_SECOND_LIST, "file:" + urllib.request.pathname2url(LEAP_SECOND_LIST)),
)
def test_open_leap_seconds_list(self, file):
ls = iers.LeapSeconds.from_leap_seconds_list(file)
ls2 = iers.LeapSeconds.open(file)
assert np.all(ls == ls2)
@pytest.mark.skipif(
not os.path.isfile(SYSTEM_FILE), reason=f"system does not have {SYSTEM_FILE}"
)
def test_open_system_file(self):
ls = iers.LeapSeconds.open(SYSTEM_FILE)
expired = ls.expires < Time.now()
if expired:
pytest.skip("System leap second file is expired.")
assert not expired
def make_fake_file(expiration, tmp_path):
"""copy the built-in IERS file but set a different expiration date."""
ls = iers.LeapSeconds.from_iers_leap_seconds()
fake_file = str(tmp_path / "fake_leap_seconds.dat")
with open(fake_file, "w") as fh:
fh.write(
"\n".join([f"# File expires on {expiration}"] + str(ls).split("\n")[2:-1])
)
return fake_file
def test_fake_file(tmp_path):
fake_file = make_fake_file("28 June 2345", tmp_path)
fake = iers.LeapSeconds.from_iers_leap_seconds(fake_file)
assert fake.expires == Time("2345-06-28", scale="tai")
class TestAutoOpenExplicitLists:
# For this set of tests, leap-seconds are allowed to be expired
# except as explicitly tested.
@pytest.mark.filterwarnings(iers.IERSStaleWarning)
def test_auto_open_simple(self):
ls = iers.LeapSeconds.auto_open([iers.IERS_LEAP_SECOND_FILE])
assert ls.meta["data_url"] == iers.IERS_LEAP_SECOND_FILE
@pytest.mark.filterwarnings(iers.IERSStaleWarning)
def test_auto_open_erfa(self):
ls = iers.LeapSeconds.auto_open(["erfa", iers.IERS_LEAP_SECOND_FILE])
assert ls.meta["data_url"] in ["erfa", iers.IERS_LEAP_SECOND_FILE]
@pytest.mark.filterwarnings(iers.IERSStaleWarning)
def test_fake_future_file(self, tmp_path):
fake_file = make_fake_file("28 June 2345", tmp_path)
# Try as system file for auto_open, setting auto_max_age such
# that any ERFA or system files are guaranteed to be expired,
# while the fake file is guaranteed to be OK.
with iers.conf.set_temp("auto_max_age", -100000):
ls = iers.LeapSeconds.auto_open(
["erfa", iers.IERS_LEAP_SECOND_FILE, fake_file]
)
assert ls.expires == Time("2345-06-28", scale="tai")
assert ls.meta["data_url"] == str(fake_file)
# And as URL
fake_url = "file:" + urllib.request.pathname2url(fake_file)
ls2 = iers.LeapSeconds.auto_open(
["erfa", iers.IERS_LEAP_SECOND_FILE, fake_url]
)
assert ls2.expires == Time("2345-06-28", scale="tai")
assert ls2.meta["data_url"] == str(fake_url)
def test_fake_expired_file(self, tmp_path):
fake_file1 = make_fake_file("28 June 2010", tmp_path)
fake_file2 = make_fake_file("27 June 2012", tmp_path)
# Between these and the built-in one, the built-in file is best.
ls = iers.LeapSeconds.auto_open(
[fake_file1, fake_file2, iers.IERS_LEAP_SECOND_FILE]
)
assert ls.meta["data_url"] == iers.IERS_LEAP_SECOND_FILE
# But if we remove the built-in one, the least expired one will be
# used and we get a warning that it is stale.
with pytest.warns(iers.IERSStaleWarning):
ls2 = iers.LeapSeconds.auto_open([fake_file1, fake_file2])
assert ls2.meta["data_url"] == fake_file2
assert ls2.expires == Time("2012-06-27", scale="tai")
# Use the fake files to make sure auto_max_age is safe.
# Should have no warning in either example.
with iers.conf.set_temp("auto_max_age", None):
ls3 = iers.LeapSeconds.auto_open([fake_file1, iers.IERS_LEAP_SECOND_FILE])
assert ls3.meta["data_url"] == iers.IERS_LEAP_SECOND_FILE
with iers.conf.set_temp("auto_max_age", None):
ls4 = iers.LeapSeconds.auto_open([fake_file1, fake_file2])
assert ls4.meta["data_url"] == fake_file2
@pytest.mark.remote_data
class TestRemoteURLs:
def setup_class(cls):
# Need auto_download so that IERS_B won't be loaded and cause tests to
# fail.
iers.conf.auto_download = True
def teardown_class(cls):
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
# In these tests, the results may be cached.
# This is fine - no need to download again.
def test_iers_url(self):
ls = iers.LeapSeconds.auto_open([iers.IERS_LEAP_SECOND_URL])
assert ls.expires > Time.now()
def test_ietf_url(self):
ls = iers.LeapSeconds.auto_open([iers.IETF_LEAP_SECOND_URL])
assert ls.expires > Time.now()
class TestDefaultAutoOpen:
"""Test auto_open with different _auto_open_files."""
def setup_method(self):
# Identical to what is used in LeapSeconds.auto_open().
self.good_enough = iers.LeapSeconds._today() + TimeDelta(
180 - iers._none_to_float(iers.conf.auto_max_age), format="jd"
)
self._auto_open_files = iers.LeapSeconds._auto_open_files.copy()
def teardown_method(self):
iers.LeapSeconds._auto_open_files = self._auto_open_files
def remove_auto_open_files(self, *files):
"""Remove some files from the auto-opener.
The default set is restored in teardown.
"""
for f in files:
iers.LeapSeconds._auto_open_files.remove(f)
def test_erfa_found(self):
# Set huge maximum age such that whatever ERFA has is OK.
# Since it is checked first, it should thus be found.
with iers.conf.set_temp("auto_max_age", 100000):
ls = iers.LeapSeconds.open()
assert ls.meta["data_url"] == "erfa"
def test_builtin_found(self):
# Set huge maximum age such that built-in file is always OK.
# If we remove 'erfa', it should thus be found.
self.remove_auto_open_files("erfa")
with iers.conf.set_temp("auto_max_age", 100000):
ls = iers.LeapSeconds.open()
assert ls.meta["data_url"] == iers.IERS_LEAP_SECOND_FILE
# The test below is marked remote_data only to ensure it runs
# as an allowed-fail job on CI: i.e., we will notice it (eventually)
# but will not be misled in thinking that a PR is bad.
@pytest.mark.remote_data
def test_builtin_not_expired(self):
# TODO: would be nice to have automatic PRs for this!
ls = iers.LeapSeconds.open(iers.IERS_LEAP_SECOND_FILE)
assert ls.expires > self.good_enough, (
"The leap second file built in to astropy is expired. Fix with:\n"
"cd astropy/utils/iers/data/; . update_builtin_iers.sh\n"
"and commit as a PR (for details, see release procedure)."
)
def test_fake_future_file(self, tmp_path):
fake_file = make_fake_file("28 June 2345", tmp_path)
# Try as system file for auto_open, setting auto_max_age such
# that any ERFA or system files are guaranteed to be expired.
with iers.conf.set_temp("auto_max_age", -100000), iers.conf.set_temp(
"system_leap_second_file", fake_file
):
ls = iers.LeapSeconds.open()
assert ls.expires == Time("2345-06-28", scale="tai")
assert ls.meta["data_url"] == str(fake_file)
# And as URL
fake_url = "file:" + urllib.request.pathname2url(fake_file)
with iers.conf.set_temp("auto_max_age", -100000), iers.conf.set_temp(
"iers_leap_second_auto_url", fake_url
):
ls2 = iers.LeapSeconds.open()
assert ls2.expires == Time("2345-06-28", scale="tai")
assert ls2.meta["data_url"] == str(fake_url)
def test_fake_expired_file(self, tmp_path):
self.remove_auto_open_files(
"erfa", "iers_leap_second_auto_url", "ietf_leap_second_auto_url"
)
fake_file = make_fake_file("28 June 2010", tmp_path)
with iers.conf.set_temp("system_leap_second_file", fake_file):
# If we try this directly, the built-in file will be found.
ls = iers.LeapSeconds.open()
assert ls.meta["data_url"] == iers.IERS_LEAP_SECOND_FILE
# But if we remove the built-in one, the expired one will be
# used and we get a warning that it is stale.
self.remove_auto_open_files(iers.IERS_LEAP_SECOND_FILE)
with pytest.warns(iers.IERSStaleWarning):
ls2 = iers.LeapSeconds.open()
assert ls2.meta["data_url"] == fake_file
assert ls2.expires == Time("2010-06-28", scale="tai")
@pytest.mark.skipif(
not os.path.isfile(SYSTEM_FILE), reason=f"system does not have {SYSTEM_FILE}"
)
def test_system_file_used_if_not_expired(self, tmp_path):
        # Skip the test if the system file (e.g., on CI) is expired -
        # we should not depend on it being kept up to date, but if it
        # is current, we should check that it gets used.
if iers.LeapSeconds.open(SYSTEM_FILE).expires <= self.good_enough:
pytest.skip("System leap second file is expired.")
self.remove_auto_open_files("erfa")
with iers.conf.set_temp("system_leap_second_file", SYSTEM_FILE):
ls = iers.LeapSeconds.open()
assert ls.expires > self.good_enough
assert ls.meta["data_url"] in (iers.IERS_LEAP_SECOND_FILE, SYSTEM_FILE)
# Also check with a "built-in" file that is expired
fake_file = make_fake_file("28 June 2017", tmp_path)
iers.LeapSeconds._auto_open_files[0] = fake_file
ls2 = iers.LeapSeconds.open()
assert ls2.expires > Time.now()
assert ls2.meta["data_url"] == SYSTEM_FILE
@pytest.mark.remote_data
def test_auto_open_urls_always_good_enough(self):
# Avoid using the erfa, built-in and system files, as they might
# be good enough already.
try:
# Need auto_download so that IERS_B won't be loaded and
# cause tests to fail.
iers.conf.auto_download = True
self.remove_auto_open_files(
"erfa", iers.IERS_LEAP_SECOND_FILE, "system_leap_second_file"
)
ls = iers.LeapSeconds.open()
assert ls.expires > self.good_enough
assert ls.meta["data_url"].startswith("http")
finally:
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
class ERFALeapSecondsSafe:
"""Base class for tests that change the ERFA leap-second tables.
It ensures the original state is restored.
"""
def setup_method(self):
# Keep current leap-second table and expiration.
self.erfa_ls = self._erfa_ls = erfa.leap_seconds.get()
self.erfa_expires = self._expires = erfa.leap_seconds._expires
def teardown_method(self):
# Restore leap-second table and expiration.
        erfa.leap_seconds.set(self._erfa_ls)
erfa.leap_seconds._expires = self._expires
class TestFromERFA(ERFALeapSecondsSafe):
def test_get_erfa_ls(self):
ls = iers.LeapSeconds.from_erfa()
assert ls.colnames == ["year", "month", "tai_utc"]
assert isinstance(ls.expires, Time)
assert ls.expires == self.erfa_expires
ls_array = np.array(ls["year", "month", "tai_utc"])
assert np.all(ls_array == self.erfa_ls)
def test_get_built_in_erfa_ls(self):
ls = iers.LeapSeconds.from_erfa(built_in=True)
assert ls.colnames == ["year", "month", "tai_utc"]
assert isinstance(ls.expires, Time)
ls_array = np.array(ls["year", "month", "tai_utc"])
assert np.all(ls_array == self.erfa_ls[: len(ls_array)])
def test_get_modified_erfa_ls(self):
erfa.leap_seconds.set(self.erfa_ls[:-10])
ls = iers.LeapSeconds.from_erfa()
assert len(ls) == len(self.erfa_ls) - 10
ls_array = np.array(ls["year", "month", "tai_utc"])
assert np.all(ls_array == self.erfa_ls[:-10])
ls2 = iers.LeapSeconds.from_erfa(built_in=True)
assert len(ls2) > len(ls)
erfa.leap_seconds.set(None)
erfa_built_in = erfa.leap_seconds.get()
assert len(ls2) == len(erfa_built_in)
ls2_array = np.array(ls2["year", "month", "tai_utc"])
assert np.all(ls2_array == erfa_built_in)
def test_open(self):
ls = iers.LeapSeconds.open("erfa")
assert isinstance(ls.expires, Time)
assert ls.expires == self.erfa_expires
ls_array = np.array(ls["year", "month", "tai_utc"])
assert np.all(ls_array == self.erfa_ls)
class TestUpdateLeapSeconds(ERFALeapSecondsSafe):
def setup_method(self):
super().setup_method()
# Read default leap second table.
self.ls = iers.LeapSeconds.from_iers_leap_seconds()
# For tests, reset ERFA table to built-in default.
erfa.leap_seconds.set()
self.erfa_ls = erfa.leap_seconds.get()
def test_built_in_up_to_date(self):
"""Leap second should match between built-in and ERFA."""
erfa_since_1970 = self.erfa_ls[self.erfa_ls["year"] > 1970]
assert len(self.ls) >= len(erfa_since_1970), "built-in leap seconds out of date"
assert len(self.ls) <= len(erfa_since_1970), "ERFA leap seconds out of date"
overlap = np.array(self.ls["year", "month", "tai_utc"])
assert np.all(overlap == erfa_since_1970.astype(overlap.dtype))
def test_update_with_built_in(self):
"""An update with built-in should not do anything."""
n_update = self.ls.update_erfa_leap_seconds()
assert n_update == 0
new_erfa_ls = erfa.leap_seconds.get()
assert np.all(new_erfa_ls == self.erfa_ls)
@pytest.mark.parametrize("n_short", (1, 3))
def test_update(self, n_short):
"""Check whether we can recover removed leap seconds."""
erfa.leap_seconds.set(self.erfa_ls[:-n_short])
n_update = self.ls.update_erfa_leap_seconds()
assert n_update == n_short
new_erfa_ls = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls, self.erfa_ls)
# Check that a second update does not do anything.
n_update2 = self.ls.update_erfa_leap_seconds()
assert n_update2 == 0
new_erfa_ls2 = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls2, self.erfa_ls)
def test_update_initialize_erfa(self):
# With pre-initialization, update does nothing.
erfa.leap_seconds.set(self.erfa_ls[:-2])
n_update = self.ls.update_erfa_leap_seconds(initialize_erfa=True)
assert n_update == 0
new_erfa_ls = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls, self.erfa_ls)
def test_update_overwrite(self):
n_update = self.ls.update_erfa_leap_seconds(initialize_erfa="empty")
assert n_update == len(self.ls)
new_erfa_ls = erfa.leap_seconds.get()
assert new_erfa_ls["year"].min() > 1970
n_update2 = self.ls.update_erfa_leap_seconds()
assert n_update2 == 0
new_erfa_ls2 = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls2, new_erfa_ls)
n_update3 = self.ls.update_erfa_leap_seconds(initialize_erfa=True)
assert n_update3 == 0
new_erfa_ls3 = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls3, self.erfa_ls)
def test_bad_jump(self):
erfa.leap_seconds.set(self.erfa_ls[:-2])
bad = self.ls.copy()
bad["tai_utc"][-1] = 5
with pytest.raises(ValueError, match="jump"):
bad.update_erfa_leap_seconds()
# With an error the ERFA table should not change.
assert_array_equal(erfa.leap_seconds.get(), self.erfa_ls[:-2])
# Unless we initialized it beforehand.
with pytest.raises(ValueError, match="jump"):
bad.update_erfa_leap_seconds(initialize_erfa=True)
assert_array_equal(erfa.leap_seconds.get(), self.erfa_ls)
# Of course, we get no errors if we initialize only.
erfa.leap_seconds.set(self.erfa_ls[:-2])
n_update = bad.update_erfa_leap_seconds(initialize_erfa="only")
assert n_update == 0
new_erfa_ls = erfa.leap_seconds.get()
assert_array_equal(new_erfa_ls, self.erfa_ls)
def test_bad_day(self):
erfa.leap_seconds.set(self.erfa_ls[:-2])
bad = self.ls.copy()
bad["day"][-1] = 5
with pytest.raises(ValueError, match="not on 1st"):
bad.update_erfa_leap_seconds()
def test_bad_month(self):
erfa.leap_seconds.set(self.erfa_ls[:-2])
bad = self.ls.copy()
bad["month"][-1] = 5
with pytest.raises(ValueError, match="January"):
bad.update_erfa_leap_seconds()
assert_array_equal(erfa.leap_seconds.get(), self.erfa_ls[:-2])
|
da16f65ab44e683f4b5f8c085d8473f7d7af1573e5b64bc976b70ee26c9d8774 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
import warnings
from pathlib import Path
import numpy as np
import pytest
from astropy import units as u
from astropy.config import set_temp_cache
from astropy.table import QTable
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time, TimeDelta
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.iers import iers
CI = os.environ.get("CI", False)
FILE_NOT_FOUND_ERROR = getattr(__builtins__, "FileNotFoundError", OSError)
try:
iers.IERS_A.open("finals2000A.all") # check if IERS_A is available
except OSError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = get_pkg_data_filename(os.path.join("data", "iers_a_excerpt"))
def setup_module():
# Need auto_download so that IERS_B won't be loaded and cause tests to
# fail. Files to be downloaded are handled appropriately in the tests.
iers.conf.auto_download = True
def teardown_module():
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
class TestBasic:
"""Basic tests that IERS_B returns correct values"""
@pytest.mark.parametrize("iers_cls", (iers.IERS_B, iers.IERS))
def test_simple(self, iers_cls):
"""Test the default behaviour for IERS_B and IERS."""
# Arguably, IERS itself should not be used at all, but it used to
# provide IERS_B by default so we check that it continues to do so.
# Eventually, IERS should probably be deprecated.
iers_cls.close()
assert iers_cls.iers_table is None
iers_tab = iers_cls.open()
assert iers_cls.iers_table is not None
assert iers_cls.iers_table is iers_tab
assert isinstance(iers_tab, QTable)
assert isinstance(iers_tab, iers.IERS_B)
assert (iers_tab["UT1_UTC"].unit / u.second).is_unity()
assert (iers_tab["PM_x"].unit / u.arcsecond).is_unity()
assert (iers_tab["PM_y"].unit / u.arcsecond).is_unity()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0.0, 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert (ut1_utc.unit / u.second).is_unity()
# IERS files change at the 0.1 ms level; see gh-6981
assert_quantity_allclose(
ut1_utc,
[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s,
atol=0.1 * u.ms,
)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.0)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0.0, return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format="jd", scale="utc")
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(
ut1_utc3,
[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s,
atol=0.1 * u.ms,
)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
def test_open_filename(self):
iers.IERS_B.close()
iers.IERS_B.open(iers.IERS_B_FILE)
assert iers.IERS_B.iers_table is not None
assert isinstance(iers.IERS_B.iers_table, QTable)
iers.IERS_B.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS_B.open("surely this does not exist")
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open(Path(IERS_A_EXCERPT).as_uri())
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
class TestIERS_AExcerpt:
def test_simple(self):
# Test the IERS A reader. It is also a regression tests that ensures
# values do not get overridden by IERS B; see #4933.
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert (iers_tab["UT1_UTC"].unit / u.second).is_unity()
assert "P" in iers_tab["UT1Flag"]
assert "I" in iers_tab["UT1Flag"]
assert "B" in iers_tab["UT1Flag"]
assert np.all(
(iers_tab["UT1Flag"] == "I")
| (iers_tab["UT1Flag"] == "P")
| (iers_tab["UT1Flag"] == "B")
)
assert (iers_tab["dX_2000A"].unit / u.marcsec).is_unity()
assert (iers_tab["dY_2000A"].unit / u.marcsec).is_unity()
assert "P" in iers_tab["NutFlag"]
assert "I" in iers_tab["NutFlag"]
assert "B" in iers_tab["NutFlag"]
assert np.all(
(iers_tab["NutFlag"] == "P")
| (iers_tab["NutFlag"] == "I")
| (iers_tab["NutFlag"] == "B")
)
assert (iers_tab["PM_x"].unit / u.arcsecond).is_unity()
assert (iers_tab["PM_y"].unit / u.arcsecond).is_unity()
assert "P" in iers_tab["PolPMFlag"]
assert "I" in iers_tab["PolPMFlag"]
assert "B" in iers_tab["PolPMFlag"]
assert np.all(
(iers_tab["PolPMFlag"] == "P")
| (iers_tab["PolPMFlag"] == "I")
| (iers_tab["PolPMFlag"] == "B")
)
t = Time([57053.0, 57054.0, 57055.0], format="mjd")
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(
ut1_utc, [-0.4916557, -0.4925323, -0.4934373] * u.s, atol=0.1 * u.ms
)
dcip_x, dcip_y, status = iers_tab.dcip_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(
dcip_x, [-0.086, -0.093, -0.087] * u.marcsec, atol=1.0 * u.narcsec
)
assert_quantity_allclose(
dcip_y, [0.094, 0.081, 0.072] * u.marcsec, atol=1 * u.narcsec
)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(
pm_x, [0.003734, 0.004581, 0.004623] * u.arcsec, atol=0.1 * u.marcsec
)
assert_quantity_allclose(
pm_y, [0.310824, 0.313150, 0.315517] * u.arcsec, atol=0.1 * u.marcsec
)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
@pytest.mark.skipif(not HAS_IERS_A, reason="requires IERS_A")
class TestIERS_A:
def test_simple(self):
"""Test that open() by default reads a 'finals2000A.all' file."""
# Ensure we remove any cached table (gh-5131).
iers.IERS_A.close()
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0.0, 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(
ut1_utc,
[-0.5868211, -0.5868184, -0.5868184, 0.4131816, 0.41328895] * u.s,
atol=0.1 * u.ms,
)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.0, return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.0
class TestIERS_Auto:
def setup_class(self):
"""Set up useful data for the tests."""
self.N = 40
self.ame = 30.0
self.iers_a_file_1 = get_pkg_data_filename(
os.path.join("data", "finals2000A-2016-02-30-test")
)
self.iers_a_file_2 = get_pkg_data_filename(
os.path.join("data", "finals2000A-2016-04-30-test")
)
self.iers_a_url_1 = Path(self.iers_a_file_1).as_uri()
self.iers_a_url_2 = Path(self.iers_a_file_2).as_uri()
self.t = Time.now() + TimeDelta(10, format="jd") * np.arange(self.N)
def teardown_method(self, method):
"""Run this after every test."""
iers.IERS_Auto.close()
def test_interpolate_error_formatting(self):
"""Regression test: make sure the error message in
IERS_Auto._check_interpolate_indices() is formatted correctly.
"""
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
with iers.conf.set_temp("iers_auto_url_mirror", self.iers_a_url_1):
with iers.conf.set_temp("auto_max_age", self.ame):
with pytest.raises(
ValueError,
match=re.escape(iers.INTERPOLATE_ERROR.format(self.ame)),
):
iers_table = iers.IERS_Auto.open()
with warnings.catch_warnings():
# Ignoring this if it comes up -- IERS_Auto predictive
# values are older than 30.0 days but downloading the
# latest table did not find newer values
warnings.simplefilter("ignore", iers.IERSStaleWarning)
iers_table.ut1_utc(self.t.jd1, self.t.jd2)
def test_auto_max_age_none(self):
"""Make sure that iers.INTERPOLATE_ERROR's advice about setting
auto_max_age = None actually works.
"""
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
with iers.conf.set_temp("auto_max_age", None):
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert isinstance(delta, np.ndarray)
assert delta.shape == (self.N,)
assert_quantity_allclose(delta, np.array([-0.2246227] * self.N) * u.s)
def test_auto_max_age_minimum(self):
"""Check that the minimum auto_max_age is enforced."""
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
with iers.conf.set_temp("auto_max_age", 5.0):
with pytest.raises(
ValueError,
match=(
r"IERS auto_max_age configuration value must be larger than 10"
r" days"
),
):
iers_table = iers.IERS_Auto.open()
_ = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
def test_no_auto_download(self):
with iers.conf.set_temp("auto_download", False):
t = iers.IERS_Auto.open()
assert type(t) is iers.IERS_B
@pytest.mark.remote_data
def test_simple(self):
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_1):
dat = iers.IERS_Auto.open()
assert dat["MJD"][0] == 57359.0 * u.d
assert dat["MJD"][-1] == 57539.0 * u.d
# Pretend we are accessing at a time 7 days after start of predictive data
predictive_mjd = dat.meta["predictive_mjd"]
dat._time_now = Time(predictive_mjd, format="mjd") + 7 * u.d
# Look at times before and after the test file begins. 0.1292905 is
# the IERS-B value from MJD=57359. The value in
# finals2000A-2016-02-30-test has been replaced at this point.
assert np.allclose(
dat.ut1_utc(Time(50000, format="mjd").jd).value, 0.1293286
)
assert np.allclose(
dat.ut1_utc(Time(60000, format="mjd").jd).value, -0.2246227
)
# Now pretend we are accessing at time 60 days after start of predictive data.
# There will be a warning when downloading the file doesn't give new data
# and an exception when extrapolating into the future with insufficient data.
dat._time_now = Time(predictive_mjd, format="mjd") + 60 * u.d
assert np.allclose(
dat.ut1_utc(Time(50000, format="mjd").jd).value, 0.1293286
)
with pytest.warns(
iers.IERSStaleWarning, match="IERS_Auto predictive values are older"
) as warns, pytest.raises(
ValueError,
match="interpolating from IERS_Auto using predictive values",
):
dat.ut1_utc(Time(60000, format="mjd").jd)
assert len(warns) == 1
# Warning only if we are getting return status
with pytest.warns(
iers.IERSStaleWarning, match="IERS_Auto predictive values are older"
) as warns:
dat.ut1_utc(Time(60000, format="mjd").jd, return_status=True)
assert len(warns) == 1
# Now set auto_max_age = None which says that we don't care how old the
# available IERS-A file is. There should be no warnings or exceptions.
with iers.conf.set_temp("auto_max_age", None):
dat.ut1_utc(Time(60000, format="mjd").jd)
# Now point to a later file with same values but MJD increased by
# 60 days and see that things work. dat._time_now is still the same value
# as before, i.e. right around the start of predictive values for the new file.
# (In other words this is like downloading the latest file online right now).
with iers.conf.set_temp("iers_auto_url", self.iers_a_url_2):
# Look at times before and after the test file begins. This forces a new download.
assert np.allclose(
dat.ut1_utc(Time(50000, format="mjd").jd).value, 0.1293286
)
assert np.allclose(dat.ut1_utc(Time(60000, format="mjd").jd).value, -0.3)
# Now the time range should be different.
assert dat["MJD"][0] == 57359.0 * u.d
assert dat["MJD"][-1] == (57539.0 + 60) * u.d
@pytest.mark.remote_data
def test_IERS_B_parameters_loading_into_IERS_Auto():
A = iers.IERS_Auto.open()
B = iers.IERS_B.open()
ok_A = A["MJD"] <= B["MJD"][-1]
assert not np.all(ok_A), "IERS B covers all of IERS A: should not happen"
# We only overwrite IERS_B values in the IERS_A table that were already
# there in the first place. Better take that into account.
ok_A &= np.isfinite(A["UT1_UTC_B"])
i_B = np.searchsorted(B["MJD"], A["MJD"][ok_A])
assert np.all(np.diff(i_B) == 1), "Valid region not contiguous"
assert np.all(A["MJD"][ok_A] == B["MJD"][i_B])
# Check that values are copied correctly. Since units are not
# necessarily the same, we use allclose with very strict tolerance.
for name in ("UT1_UTC", "PM_x", "PM_y", "dX_2000A", "dY_2000A"):
assert_quantity_allclose(
A[name][ok_A],
B[name][i_B],
rtol=1e-15,
err_msg=(
f"Bug #9206 IERS B parameter {name} not copied over "
"correctly to IERS Auto"
),
)
# Issue with FTP; rework this test into the previous one once it is fixed.
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_iers_a_dl():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert "UT1_UTC_A" in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_a_dl_mirror():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL_MIRROR, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert "UT1_UTC_A" in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_b_dl():
iersb_tab = iers.IERS_B.open(iers.IERS_B_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersb_tab) > 0
assert "UT1_UTC" in iersb_tab.colnames
finally:
iers.IERS_B.close()
@pytest.mark.remote_data
def test_iers_out_of_range_handling(tmp_path):
# Make sure we don't have IERS-A data available anywhere
with set_temp_cache(tmp_path):
iers.IERS_A.close()
iers.IERS_Auto.close()
iers.IERS.close()
now = Time.now()
with iers.conf.set_temp("auto_download", False):
# Should be fine with built-in IERS_B
(now - 300 * u.day).ut1
# Default is to raise an error
match = r"\(some\) times are outside of range covered by IERS table"
with pytest.raises(iers.IERSRangeError, match=match):
(now + 100 * u.day).ut1
with iers.conf.set_temp("iers_degraded_accuracy", "warn"):
with pytest.warns(iers.IERSDegradedAccuracyWarning, match=match):
(now + 100 * u.day).ut1
with iers.conf.set_temp("iers_degraded_accuracy", "ignore"):
(now + 100 * u.day).ut1
@pytest.mark.remote_data
def test_iers_download_error_handling(tmp_path):
# Make sure we don't have IERS-A data available anywhere
with set_temp_cache(tmp_path):
iers.IERS_A.close()
iers.IERS_Auto.close()
iers.IERS.close()
now = Time.now()
# bad site name
with iers.conf.set_temp("iers_auto_url", "FAIL FAIL"):
# site that exists but doesn't have IERS data
with iers.conf.set_temp("iers_auto_url_mirror", "https://google.com"):
with pytest.warns(iers.IERSWarning) as record:
with iers.conf.set_temp("iers_degraded_accuracy", "ignore"):
(now + 100 * u.day).ut1
assert len(record) == 3
assert str(record[0].message).startswith(
"failed to download FAIL FAIL: Malformed URL"
)
assert str(record[1].message).startswith(
"malformed IERS table from https://google.com"
)
assert str(record[2].message).startswith(
"unable to download valid IERS file, using local IERS-B"
)
|
56afd7938e4d78ec021f03959ebbc280e744b558f993c85baed1749f1a216c52 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
# SYSTEM
import io
import zlib
from astropy.utils.xml.iterparser import _fast_iterparse
# The C-based XML parser for VOTables previously used fixed-sized
# buffers (allocated at __init__() time). This test will
# only pass with the patch that allows a dynamic realloc() of
# the queue. This addresses the bugs:
#
# - "RuntimeError: XML queue overflow"
# https://github.com/astropy/astropy/issues/5824
# (Kudos to Stefan Becker---ARI/ZAH Heidelberg)
#
# - "iterparse.c: add queue_realloc() + move 'buffersize / 2' logic there"
# https://github.com/astropy/astropy/issues/5869
#
# This test code can emulate a combination of network buffering and
# gzip decompression---with different request sizes, it can be used to
# demonstrate both under-reading and over-reading.
#
# Using the 512-tag VOTABLE XML sample input, and various combinations
# of minimum/maximum fetch sizes, the following situations can be
# generated:
#
#    maximum_fetch =   1  (ValueError, no element found) still within gzip headers
#    maximum_fetch =  80  (ValueError, unclosed token) short read
#    maximum_fetch = 217  passes, because decompressed_length > requested
#                         && <512 tags in a single parse
#    maximum_fetch = 218  (RuntimeError, XML queue overflow)
#
# The test provided here covers the over-reading identified in #5824
# (equivalent to the maximum_fetch = 217 case above).
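# A hedged helper sketch (an illustrative addition; the test below does
# not use it): clamping the number of bytes a read callable may return
# is enough to reproduce the maximum_fetch scenarios enumerated above.
def _clamped_reader(fd, maximum_fetch):
    def read(requested_length):
        # Never hand back more than maximum_fetch bytes per call.
        return fd.read(min(requested_length, maximum_fetch))
    return read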
# Firstly, assemble a minimal VOTABLE header, table contents and footer.
# This is done in textual form, as the aim is to only test the parser, not
# the outputter!
HEADER = """<?xml version="1.0" encoding="UTF-8"?>
<VOTABLE>
<RESOURCE type="results">
<TABLE>
<FIELD ID="foo" name="foo" datatype="int" arraysize="1"/>
<DATA>
<TABLEDATA>
"""
ROW = """<TR><TD>0</TD></TR>
"""
FOOTER = """
</TABLEDATA>
</DATA>
</TABLE>
</RESOURCE>
</VOTABLE>
"""
# minimum passable buffer size => 1024
# 1024 / 2 => 512 tags for overflow
# 512 - 7 tags in header, - 5 tags in footer = 500 tags required for overflow
# 500 / 4 tags (<tr><td></td></tr>) per row == 125 rows required for overflow
VOTABLE_XML = HEADER + 125 * ROW + FOOTER
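# A minimal sanity check of the arithmetic above (an illustrative
# addition, not in the original test): every tag as well as the XML
# declaration starts with '<', so 7 header + 125 * 4 row + 5 footer
# markers == 512.
assert VOTABLE_XML.count("<") == 512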
# UngzipFileWrapper() wraps an existing file-like object,
# decompressing the content and returning the plaintext.
# This therefore emulates the behavior of the Python 'requests'
# library when transparently decompressing Gzip HTTP responses.
#
# The critical behavior is that---because of the
# decompression---read() can return considerably more
# bytes than were requested! (But, read() can also return less).
#
# inspiration:
# http://stackoverflow.com/questions/4013843/how-to-wrap-file-object-read-and-write-operation-which-are-readonly
class UngzipFileWrapper:
def __init__(self, fd, **kwargs):
self._file = fd
self._z = zlib.decompressobj(16 + zlib.MAX_WBITS)
def read(self, requested_length):
# emulate network buffering dynamics by clamping the read size
clamped_length = max(1, min(1 << 24, requested_length))
compressed = self._file.read(clamped_length)
plaintext = self._z.decompress(compressed)
# Only for real local files---just for the testcase
if len(compressed) == 0:
self.close()
return plaintext
def __getattr__(self, attr):
return getattr(self._file, attr)
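# A small illustrative sketch (an addition, not part of the original
# test): with highly compressible input, a single read() through the
# wrapper can return far more plaintext than the compressed bytes
# requested, which is exactly the over-read behavior exercised below.
def _demo_ungzip_over_read():
    gzip_style = zlib.compressobj(
        zlib.Z_BEST_COMPRESSION, zlib.DEFLATED, zlib.MAX_WBITS + 16
    )
    payload = gzip_style.compress(b"x" * 4096) + gzip_style.flush()
    wrapper = UngzipFileWrapper(io.BytesIO(payload))
    # Request 64 compressed bytes; ~4096 plaintext bytes typically emerge.
    return len(wrapper.read(64))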
# test_iterparser_over_read_simple() is a much-reduced version of the
# original, more flexible test case, without external dependencies.
# The plaintext is compressed and then decompressed
# to provide a better emulation of the original situation where
# the bug was observed.
#
# If a dependency upon 'zlib' is not desired, it would be possible to
# simplify this testcase by replacing the compress/decompress with a
# read() method emulation that always returned more from a buffer
# than was requested.
def test_iterparser_over_read_simple():
    # Take the plaintext of 512 tags, and compress it with a
# Gzip-style header (+16), to most closely emulate the behavior
# of most HTTP servers.
zlib_GZIP_STYLE_HEADER = 16
compo = zlib.compressobj(
zlib.Z_BEST_COMPRESSION, zlib.DEFLATED, zlib.MAX_WBITS + zlib_GZIP_STYLE_HEADER
)
    # zlib works on bytes, so encode the str payload before compressing.
s = compo.compress(VOTABLE_XML.encode())
s = s + compo.flush()
fd = io.BytesIO(s)
fd.seek(0)
# Finally setup the test of the C-based '_fast_iterparse()' iterator
# and a situation in which it can be called a-la the VOTable Parser.
MINIMUM_REQUESTABLE_BUFFER_SIZE = 1024
uncompressed_fd = UngzipFileWrapper(fd)
iterable = _fast_iterparse(uncompressed_fd.read, MINIMUM_REQUESTABLE_BUFFER_SIZE)
list(iterable)
|
886a393d95015a64cbf5529ade487319feb4c54e0f31d756828d36e6b81130e5 | from astropy.utils.data import get_pkg_data_filename
def get_data_filename():
return get_pkg_data_filename("data/foo.txt")
|
8bd0b0dd174c9b6d9d02325a0554d1af4d8a82eaf65d1e5711b4ac41ce514de0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.table import QTable, hstack, join, vstack
from astropy.utils.compat.optional_deps import HAS_H5PY
from astropy.utils.masked import Masked
from .test_masked import assert_masked_equal
FILE_FORMATS = ["ecsv", "fits"]
if HAS_H5PY:
FILE_FORMATS.append("h5")
class MaskedArrayTableSetup:
@classmethod
def setup_arrays(self):
self.a = np.array([3.0, 5.0, 0.0])
self.mask_a = np.array([True, False, False])
@classmethod
def setup_class(self):
self.setup_arrays()
self.ma = Masked(self.a, mask=self.mask_a)
self.ma.info.format = ".1f"
self.t = QTable([self.ma], names=["ma"])
class MaskedQuantityTableSetup(MaskedArrayTableSetup):
@classmethod
def setup_arrays(self):
self.a = np.array([3.0, 5.0, 0.0]) << u.m
self.mask_a = np.array([True, False, False])
class TestMaskedArrayTable(MaskedArrayTableSetup):
def test_table_initialization(self):
assert_array_equal(self.t["ma"].unmasked, self.a)
assert_array_equal(self.t["ma"].mask, self.mask_a)
assert repr(self.t).splitlines()[-3:] == [
" ———",
" 5.0",
" 0.0",
]
def test_info_basics(self):
assert self.t["ma"].info.name == "ma"
assert "serialize_method" in self.t["ma"].info.attr_names
t2 = self.t.copy()
t2["ma"].info.format = ".2f"
t2["ma"].info.serialize_method["fits"] = "nonsense"
assert repr(t2).splitlines()[-3:] == [
" ———",
" 5.00",
" 0.00",
]
# Check that if we slice, things get copied over correctly.
t3 = t2[:2]
assert t3["ma"].info.name == "ma"
assert t3["ma"].info.format == ".2f"
assert "serialize_method" in t3["ma"].info.attr_names
assert t3["ma"].info.serialize_method["fits"] == "nonsense"
@pytest.mark.parametrize("file_format", FILE_FORMATS)
def test_table_write(self, file_format, tmp_path):
name = tmp_path / f"a.{file_format}"
kwargs = {}
if file_format == "h5":
kwargs["path"] = "trial"
kwargs["serialize_meta"] = True
self.t.write(name, **kwargs)
t2 = QTable.read(name)
assert isinstance(t2["ma"], self.ma.__class__)
assert np.all(t2["ma"] == self.ma)
assert np.all(t2["ma"].mask == self.mask_a)
if file_format == "fits":
# Imperfect roundtrip through FITS native format description.
assert self.t["ma"].info.format in t2["ma"].info.format
else:
assert t2["ma"].info.format == self.t["ma"].info.format
@pytest.mark.parametrize("serialize_method", ["data_mask", "null_value"])
def test_table_write_serialization(self, serialize_method, tmp_path):
name = tmp_path / "test.ecsv"
self.t.write(name, serialize_method=serialize_method)
with open(name) as fh:
lines = fh.readlines()
t2 = QTable.read(name)
assert isinstance(t2["ma"], self.ma.__class__)
if serialize_method == "data_mask":
            # With data_mask, we have data and mask columns and should
            # exactly round-trip.
assert len(lines[-1].split()) == 2
assert_masked_equal(t2["ma"], self.ma)
else:
# With null_value we have just a data column with null values
# marked, so we lost information on the data below the mask.
assert len(lines[-1].split()) == 1
assert np.all(t2["ma"] == self.ma)
assert np.all(t2["ma"].mask == self.mask_a)
def test_non_existing_serialize_method(self, tmp_path):
name = tmp_path / "bad.ecsv"
with pytest.raises(ValueError, match="serialize method must be"):
self.t.write(name, serialize_method="bad_serialize_method")
class TestMaskedQuantityTable(TestMaskedArrayTable, MaskedQuantityTableSetup):
# Runs tests from TestMaskedArrayTable as well as some extra ones.
def test_table_operations_requiring_masking(self):
t1 = self.t
t2 = QTable({"ma2": Masked([1, 2] * u.m)})
t12 = hstack([t1, t2], join_type="outer")
assert np.all(t12["ma"].mask == [True, False, False])
        # 'ma2' is one row shorter, so hstack pads it with one masked
        # (True) entry to make the lengths match.
assert np.all(t12["ma2"].mask == [False, False, True])
t12 = hstack([t1, t2], join_type="inner")
assert np.all(t12["ma"].mask == [True, False])
assert np.all(t12["ma2"].mask == [False, False])
# Vstack tables with different column names. In this case we get masked
# values
t12 = vstack([t1, t2], join_type="outer")
        #  ma   ma2
        #   m     m
        #  ---   ---
        #   ——    ——
        #  5.0    ——
        #  0.0    ——
        #   ——   1.0
        #   ——   2.0
assert np.all(t12["ma"].mask == [True, False, False, True, True])
assert np.all(t12["ma2"].mask == [True, True, True, False, False])
def test_table_operations_requiring_masking_auto_promote(self):
MaskedQuantity = Masked(u.Quantity)
t1 = QTable({"ma1": [1, 2] * u.m})
t2 = QTable({"ma2": [3, 4, 5] * u.m})
t12 = hstack([t1, t2], join_type="outer")
assert isinstance(t12["ma1"], MaskedQuantity)
assert np.all(t12["ma1"].mask == [False, False, True])
assert np.all(t12["ma1"] == [1, 2, 0] * u.m)
assert not isinstance(t12["ma2"], MaskedQuantity)
assert isinstance(t12["ma2"], u.Quantity)
assert np.all(t12["ma2"] == [3, 4, 5] * u.m)
t12 = hstack([t1, t2], join_type="inner")
assert isinstance(t12["ma1"], u.Quantity)
assert not isinstance(t12["ma1"], MaskedQuantity)
assert isinstance(t12["ma2"], u.Quantity)
assert not isinstance(t12["ma2"], MaskedQuantity)
# Vstack tables with different column names. In this case we get masked
# values
t12 = vstack([t1, t2], join_type="outer")
assert np.all(t12["ma1"].mask == [False, False, True, True, True])
assert np.all(t12["ma2"].mask == [True, True, False, False, False])
t1["a"] = [1, 2]
t2["a"] = [1, 3, 4]
t12 = join(t1, t2, join_type="outer")
assert np.all(t12["ma1"].mask == [False, False, True, True])
assert np.all(t12["ma2"].mask == [False, True, False, False])
|
9d33b81c4e2cdad0b8493a201daedd08b87bb74da6e10ad88c116526414adf19 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test numpy functions and ufuncs on Masked arrays and quantities.
The tests here are fairly detailed but do not aim for complete
coverage. Complete coverage of all numpy functions is done
with less detailed tests in test_function_helpers.
"""
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.units import Quantity
from astropy.utils.masked.core import Masked
from .test_masked import (
LongitudeSetup,
MaskedArraySetup,
QuantitySetup,
assert_masked_equal,
)
class MaskedUfuncTests(MaskedArraySetup):
@pytest.mark.parametrize(
"ufunc", (np.add, np.subtract, np.divide, np.arctan2, np.minimum)
)
def test_2op_ufunc(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
expected_data = ufunc(self.a, self.b)
expected_mask = self.ma.mask | self.mb.mask
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
@pytest.mark.parametrize(
"ufunc", (np.add, np.subtract, np.divide, np.arctan2, np.minimum)
)
def test_ufunc_inplace(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
out = Masked(np.zeros_like(ma_mb.unmasked))
result = ufunc(self.ma, self.mb, out=out)
assert result is out
assert_masked_equal(result, ma_mb)
def test_ufunc_inplace_no_masked_input(self):
a_b = np.add(self.a, self.b)
out = Masked(np.zeros_like(a_b))
result = np.add(self.a, self.b, out=out)
assert result is out
assert_array_equal(result.unmasked, a_b)
assert_array_equal(result.mask, np.zeros(a_b.shape, bool))
def test_ufunc_inplace_error(self):
out = np.zeros(self.ma.shape)
with pytest.raises(TypeError):
np.add(self.ma, self.mb, out=out)
@pytest.mark.parametrize("ufunc", (np.add.outer, np.minimum.outer))
def test_2op_ufunc_outer(self, ufunc):
ma_mb = ufunc(self.ma, self.mb)
expected_data = ufunc(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a, self.mask_b)
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
def test_3op_ufunc(self):
ma_mb = np.clip(self.ma, self.b, self.c)
expected_data = np.clip(self.a, self.b, self.c)
expected_mask = self.mask_a
assert_array_equal(ma_mb.unmasked, expected_data)
assert_array_equal(ma_mb.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_add_reduce(self, axis):
ma_reduce = np.add.reduce(self.ma, axis=axis)
expected_data = np.add.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
out = Masked(np.zeros_like(ma_reduce.unmasked), np.ones_like(ma_reduce.mask))
ma_reduce2 = np.add.reduce(self.ma, axis=axis, out=out)
assert ma_reduce2 is out
assert_masked_equal(ma_reduce2, ma_reduce)
def test_add_reduce_no_masked_input(self):
a_reduce = np.add.reduce(self.a, axis=0)
out = Masked(np.zeros_like(a_reduce), np.ones(a_reduce.shape, bool))
result = np.add.reduce(self.a, axis=0, out=out)
assert result is out
assert_array_equal(out.unmasked, a_reduce)
assert_array_equal(out.mask, np.zeros(a_reduce.shape, bool))
@pytest.mark.parametrize("axis", (0, 1, None))
def test_minimum_reduce(self, axis):
ma_reduce = np.minimum.reduce(self.ma, axis=axis)
expected_data = np.minimum.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_maximum_reduce(self, axis):
ma_reduce = np.maximum.reduce(self.ma, axis=axis)
expected_data = np.maximum.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
class TestMaskedArrayUfuncs(MaskedUfuncTests):
    # multiply.reduce does not work with units (each pairwise product
    # would change the unit), so test it only for plain arrays.
@pytest.mark.parametrize("axis", (0, 1, None))
def test_multiply_reduce(self, axis):
ma_reduce = np.multiply.reduce(self.ma, axis=axis)
expected_data = np.multiply.reduce(self.a, axis=axis)
expected_mask = np.logical_or.reduce(self.ma.mask, axis=axis)
assert_array_equal(ma_reduce.unmasked, expected_data)
assert_array_equal(ma_reduce.mask, expected_mask)
def test_ufunc_not_implemented_for_other(self):
"""
If the unmasked operation returns NotImplemented, this
should lead to a TypeError also for the masked version.
"""
a = np.array([1, 2])
b = 3 * u.m
with pytest.raises(TypeError):
a & b
ma = Masked(a)
with pytest.raises(TypeError):
ma & b
class TestMaskedQuantityUfuncs(MaskedUfuncTests, QuantitySetup):
def test_ufunc_inplace_error2(self):
out = Masked(np.zeros(self.ma.shape))
with pytest.raises(TypeError):
np.add(self.ma, self.mb, out=out)
class TestMaskedLongitudeUfuncs(MaskedUfuncTests, LongitudeSetup):
def test_ufunc_inplace_quantity_initial(self):
out = Masked(np.zeros(self.ma.shape) << u.m)
result = np.add(self.ma, self.mb, out=out)
assert result is out
expected = np.add(self.ma, self.mb).view(Quantity)
assert_masked_equal(result, expected)
class TestMaskedArrayConcatenation(MaskedArraySetup):
def test_concatenate(self):
mb = self.mb[np.newaxis]
concat_a_b = np.concatenate((self.ma, mb), axis=0)
expected_data = np.concatenate((self.a, self.b[np.newaxis]), axis=0)
expected_mask = np.concatenate((self.mask_a, self.mask_b[np.newaxis]), axis=0)
assert_array_equal(concat_a_b.unmasked, expected_data)
assert_array_equal(concat_a_b.mask, expected_mask)
def test_concatenate_not_all_masked(self):
mb = self.mb[np.newaxis]
concat_a_b = np.concatenate((self.a, mb), axis=0)
expected_data = np.concatenate((self.a, self.b[np.newaxis]), axis=0)
expected_mask = np.concatenate(
(np.zeros(self.a.shape, bool), self.mask_b[np.newaxis]), axis=0
)
assert_array_equal(concat_a_b.unmasked, expected_data)
assert_array_equal(concat_a_b.mask, expected_mask)
@pytest.mark.parametrize("obj", (1, slice(2, 3)))
def test_insert(self, obj):
mc_in_a = np.insert(self.ma, obj, self.mc, axis=-1)
expected = Masked(
np.insert(self.a, obj, self.c, axis=-1),
np.insert(self.mask_a, obj, self.mask_c, axis=-1),
)
assert_masked_equal(mc_in_a, expected)
def test_insert_masked_obj(self):
with pytest.raises(TypeError):
np.insert(self.ma, Masked(1, mask=False), self.mc, axis=-1)
def test_append(self):
mc_to_a = np.append(self.ma, self.mc, axis=-1)
expected = Masked(
np.append(self.a, self.c, axis=-1),
np.append(self.mask_a, self.mask_c, axis=-1),
)
assert_masked_equal(mc_to_a, expected)
class TestMaskedQuantityConcatenation(TestMaskedArrayConcatenation, QuantitySetup):
pass
class TestMaskedLongitudeConcatenation(TestMaskedArrayConcatenation, LongitudeSetup):
pass
class TestMaskedArrayBroadcast(MaskedArraySetup):
def test_broadcast_to(self):
shape = self.ma.shape
ba = np.broadcast_to(self.mb, shape, subok=True)
assert ba.shape == shape
assert ba.mask.shape == shape
expected = Masked(
np.broadcast_to(self.mb.unmasked, shape, subok=True),
np.broadcast_to(self.mb.mask, shape, subok=True),
)
assert_masked_equal(ba, expected)
def test_broadcast_to_using_apply(self):
# Partially just to ensure we cover the relevant part of _apply.
shape = self.ma.shape
ba = self.mb._apply(np.broadcast_to, shape=shape, subok=True)
assert ba.shape == shape
assert ba.mask.shape == shape
expected = Masked(
np.broadcast_to(self.mb.unmasked, shape, subok=True),
np.broadcast_to(self.mb.mask, shape, subok=True),
)
assert_masked_equal(ba, expected)
def test_broadcast_arrays(self):
mb = np.broadcast_arrays(self.ma, self.mb, self.mc, subok=True)
b = np.broadcast_arrays(self.a, self.b, self.c, subok=True)
bm = np.broadcast_arrays(self.mask_a, self.mask_b, self.mask_c)
for mb_, b_, bm_ in zip(mb, b, bm):
assert_array_equal(mb_.unmasked, b_)
assert_array_equal(mb_.mask, bm_)
def test_broadcast_arrays_not_all_masked(self):
mb = np.broadcast_arrays(self.a, self.mb, self.c, subok=True)
assert_array_equal(mb[0], self.a)
expected1 = np.broadcast_to(self.mb, self.a.shape, subok=True)
assert_masked_equal(mb[1], expected1)
expected2 = np.broadcast_to(self.c, self.a.shape, subok=True)
assert_array_equal(mb[2], expected2)
def test_broadcast_arrays_subok_false(self):
# subok affects ndarray subclasses but not masking itself.
mb = np.broadcast_arrays(self.ma, self.mb, self.mc, subok=False)
assert all(type(mb_.unmasked) is np.ndarray for mb_ in mb)
b = np.broadcast_arrays(self.a, self.b, self.c, subok=False)
mask_b = np.broadcast_arrays(self.mask_a, self.mask_b, self.mask_c, subok=False)
for mb_, b_, mask_ in zip(mb, b, mask_b):
assert_array_equal(mb_.unmasked, b_)
assert_array_equal(mb_.mask, mask_)
class TestMaskedQuantityBroadcast(TestMaskedArrayBroadcast, QuantitySetup):
pass
class TestMaskedLongitudeBroadcast(TestMaskedArrayBroadcast, LongitudeSetup):
pass
class TestMaskedArrayCalculation(MaskedArraySetup):
@pytest.mark.parametrize("n,axis", [(1, -1), (2, -1), (1, 0)])
def test_diff(self, n, axis):
mda = np.diff(self.ma, n=n, axis=axis)
expected_data = np.diff(self.a, n, axis)
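        # Build the expected mask by seeding NaNs at the masked positions
        # and letting np.diff propagate them, then read the mask back off
        # with isnan.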
nan_mask = np.zeros_like(self.a)
nan_mask[self.ma.mask] = np.nan
expected_mask = np.isnan(np.diff(nan_mask, n=n, axis=axis))
assert_array_equal(mda.unmasked, expected_data)
assert_array_equal(mda.mask, expected_mask)
def test_diff_explicit(self):
ma = Masked(
np.arange(8.0), [True, False, False, False, False, True, False, False]
)
mda = np.diff(ma)
assert np.all(mda.unmasked == 1.0)
assert np.all(mda.mask == [True, False, False, False, True, True, False])
mda = np.diff(ma, n=2)
assert np.all(mda.unmasked == 0.0)
assert np.all(mda.mask == [True, False, False, True, True, True])
class TestMaskedQuantityCalculation(TestMaskedArrayCalculation, QuantitySetup):
pass
class TestMaskedLongitudeCalculation(TestMaskedArrayCalculation, LongitudeSetup):
pass
class TestMaskedArraySorting(MaskedArraySetup):
@pytest.mark.parametrize("axis", [-1, 0])
def test_lexsort1(self, axis):
ma_lexsort = np.lexsort((self.ma,), axis=axis)
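        # Premise of this test: masked values sort as if very large, so
        # the expected order follows from filling with a huge number.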
filled = self.a.copy()
filled[self.mask_a] = 9e9
expected_data = filled.argsort(axis)
assert_array_equal(ma_lexsort, expected_data)
@pytest.mark.parametrize("axis", [-1, 0])
def test_lexsort2(self, axis):
mb = np.broadcast_to(-self.mb, self.ma.shape).copy()
mamb_lexsort = np.lexsort((self.ma, mb), axis=axis)
filled_a = self.ma.filled(9e9)
filled_b = mb.filled(9e9)
expected_ab = np.lexsort((filled_a, filled_b), axis=axis)
assert_array_equal(mamb_lexsort, expected_ab)
mbma_lexsort = np.lexsort((mb, self.ma), axis=axis)
expected_ba = np.lexsort((filled_b, filled_a), axis=axis)
assert_array_equal(mbma_lexsort, expected_ba)
mbma_lexsort2 = np.lexsort(np.stack([mb, self.ma], axis=0), axis=axis)
assert_array_equal(mbma_lexsort2, expected_ba)
@pytest.mark.parametrize("axis", [-1, 0])
def test_lexsort_mix(self, axis):
mb = np.broadcast_to(-self.mb, self.ma.shape).copy()
mamb_lexsort = np.lexsort((self.a, mb), axis=axis)
filled_b = mb.filled(9e9)
expected_ab = np.lexsort((self.a, filled_b), axis=axis)
assert_array_equal(mamb_lexsort, expected_ab)
mbma_lexsort = np.lexsort((mb, self.a), axis=axis)
expected_ba = np.lexsort((filled_b, self.a), axis=axis)
assert_array_equal(mbma_lexsort, expected_ba)
mbma_lexsort2 = np.lexsort(np.stack([mb, self.a], axis=0), axis=axis)
assert_array_equal(mbma_lexsort2, expected_ba)
|
67484867e2f1c2771f424ce3324e7d2382788619234053cc788a1d78fdae2408 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test masked class initialization, methods, and operators.
Functions, including ufuncs, are tested in test_functions.py
"""
import operator
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.coordinates import Longitude
from astropy.units import Quantity
from astropy.utils.compat import NUMPY_LT_1_22
from astropy.utils.masked import Masked, MaskedNDArray
def assert_masked_equal(a, b):
assert_array_equal(a.unmasked, b.unmasked)
assert_array_equal(a.mask, b.mask)
VARIOUS_ITEMS = [(1, 1), slice(None, 1), (), 1]
class ArraySetup:
_data_cls = np.ndarray
@classmethod
def setup_class(self):
self.a = np.arange(6.0).reshape(2, 3)
self.mask_a = np.array([[True, False, False], [False, True, False]])
self.b = np.array([-3.0, -2.0, -1.0])
self.mask_b = np.array([False, True, False])
self.c = np.array([[0.25], [0.5]])
self.mask_c = np.array([[False], [True]])
self.sdt = np.dtype([("a", "f8"), ("b", "f8")])
self.mask_sdt = np.dtype([("a", "?"), ("b", "?")])
self.sa = np.array(
[
[(1.0, 2.0), (3.0, 4.0)],
[(11.0, 12.0), (13.0, 14.0)],
],
dtype=self.sdt,
)
self.mask_sa = np.array(
[
[(True, True), (False, False)],
[(False, True), (True, False)],
],
dtype=self.mask_sdt,
)
self.sb = np.array([(1.0, 2.0), (-3.0, 4.0)], dtype=self.sdt)
self.mask_sb = np.array([(True, False), (False, False)], dtype=self.mask_sdt)
self.scdt = np.dtype([("sa", "2f8"), ("sb", "i8", (2, 2))])
self.sc = np.array(
[
([1.0, 2.0], [[1, 2], [3, 4]]),
([-1.0, -2.0], [[-1, -2], [-3, -4]]),
],
dtype=self.scdt,
)
self.mask_scdt = np.dtype([("sa", "2?"), ("sb", "?", (2, 2))])
self.mask_sc = np.array(
[
([True, False], [[False, False], [True, True]]),
([False, True], [[True, False], [False, True]]),
],
dtype=self.mask_scdt,
)
class QuantitySetup(ArraySetup):
_data_cls = Quantity
@classmethod
def setup_class(self):
super().setup_class()
self.a = Quantity(self.a, u.m)
self.b = Quantity(self.b, u.cm)
self.c = Quantity(self.c, u.km)
self.sa = Quantity(self.sa, u.m, dtype=self.sdt)
self.sb = Quantity(self.sb, u.cm, dtype=self.sdt)
class LongitudeSetup(ArraySetup):
_data_cls = Longitude
@classmethod
def setup_class(self):
super().setup_class()
self.a = Longitude(self.a, u.deg)
self.b = Longitude(self.b, u.deg)
self.c = Longitude(self.c, u.deg)
        # Note: Longitude does not work on structured arrays, so
        # leave those as regular arrays (which just reruns some tests).
class TestMaskedArrayInitialization(ArraySetup):
def test_simple(self):
ma = Masked(self.a, mask=self.mask_a)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.a))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.a)
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_structured(self):
ma = Masked(self.sa, mask=self.mask_sa)
assert isinstance(ma, np.ndarray)
assert isinstance(ma, type(self.sa))
assert isinstance(ma, Masked)
assert_array_equal(ma.unmasked, self.sa)
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
def test_masked_ndarray_init():
# Note: as a straight ndarray subclass, MaskedNDArray passes on
# the arguments relevant for np.ndarray, not np.array.
a_in = np.arange(3, dtype=int)
m_in = np.array([True, False, False])
buff = a_in.tobytes()
# Check we're doing things correctly using regular ndarray.
a = np.ndarray(shape=(3,), dtype=int, buffer=buff)
assert_array_equal(a, a_in)
# Check with and without mask.
ma = MaskedNDArray((3,), dtype=int, mask=m_in, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, m_in)
ma = MaskedNDArray((3,), dtype=int, buffer=buff)
assert_array_equal(ma.unmasked, a_in)
assert_array_equal(ma.mask, np.zeros(3, bool))
def test_cannot_initialize_with_masked():
with pytest.raises(ValueError, match="cannot handle np.ma.masked"):
Masked(np.ma.masked)
def test_cannot_just_use_anything_with_a_mask_attribute():
class my_array(np.ndarray):
mask = True
a = np.array([1.0, 2.0]).view(my_array)
with pytest.raises(AttributeError, match="unmasked"):
Masked(a)
class TestMaskedClassCreation:
"""Try creating a MaskedList and subclasses.
By no means meant to be realistic, just to check that the basic
machinery allows it.
"""
@classmethod
def setup_class(self):
self._base_classes_orig = Masked._base_classes.copy()
self._masked_classes_orig = Masked._masked_classes.copy()
class MaskedList(Masked, list, base_cls=list, data_cls=list):
def __new__(cls, *args, mask=None, copy=False, **kwargs):
self = super().__new__(cls)
self._unmasked = self._data_cls(*args, **kwargs)
self.mask = mask
return self
# Need to have shape for basics to work.
@property
def shape(self):
return (len(self._unmasked),)
self.MaskedList = MaskedList
def teardown_class(self):
Masked._base_classes = self._base_classes_orig
Masked._masked_classes = self._masked_classes_orig
def test_setup(self):
assert issubclass(self.MaskedList, Masked)
assert issubclass(self.MaskedList, list)
assert Masked(list) is self.MaskedList
def test_masked_list(self):
ml = self.MaskedList(range(3), mask=[True, False, False])
assert ml.unmasked == [0, 1, 2]
assert_array_equal(ml.mask, np.array([True, False, False]))
ml01 = ml[:2]
assert ml01.unmasked == [0, 1]
assert_array_equal(ml01.mask, np.array([True, False]))
def test_from_list(self):
ml = Masked([1, 2, 3], mask=[True, False, False])
assert ml.unmasked == [1, 2, 3]
assert_array_equal(ml.mask, np.array([True, False, False]))
def test_masked_list_subclass(self):
class MyList(list):
pass
ml = MyList(range(3))
mml = Masked(ml, mask=[False, True, False])
assert isinstance(mml, Masked)
assert isinstance(mml, MyList)
assert isinstance(mml.unmasked, MyList)
assert mml.unmasked == [0, 1, 2]
assert_array_equal(mml.mask, np.array([False, True, False]))
assert Masked(MyList) is type(mml)
class TestMaskedNDArraySubclassCreation:
"""Test that masked subclasses can be created directly and indirectly."""
@classmethod
def setup_class(self):
class MyArray(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.asanyarray(*args, **kwargs).view(cls)
self.MyArray = MyArray
self.a = np.array([1.0, 2.0]).view(self.MyArray)
self.m = np.array([True, False], dtype=bool)
def teardown_method(self, method):
Masked._masked_classes.pop(self.MyArray, None)
def test_direct_creation(self):
assert self.MyArray not in Masked._masked_classes
mcls = Masked(self.MyArray)
assert issubclass(mcls, Masked)
assert issubclass(mcls, self.MyArray)
assert mcls.__name__ == "MaskedMyArray"
assert mcls.__doc__.startswith("Masked version of MyArray")
mms = mcls(self.a, mask=self.m)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
mcls = Masked(self.MyArray)
mms = mcls(self.a)
assert isinstance(mms, mcls)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, np.zeros(mms.shape, bool))
@pytest.mark.parametrize("masked_array", [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
mcls = Masked(self.MyArray)
ma = masked_array(np.asarray(self.a), mask=self.m)
mms = mcls(ma)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_indirect_creation(self):
assert self.MyArray not in Masked._masked_classes
mms = Masked(self.a, mask=self.m)
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
assert self.MyArray in Masked._masked_classes
assert Masked(self.MyArray) is type(mms)
def test_can_initialize_with_masked_values(self):
mcls = Masked(self.MyArray)
mms = mcls(Masked(np.asarray(self.a), mask=self.m))
assert isinstance(mms, Masked)
assert isinstance(mms, self.MyArray)
assert_array_equal(mms.unmasked, self.a)
assert_array_equal(mms.mask, self.m)
def test_viewing(self):
mms = Masked(self.a, mask=self.m)
mms2 = mms.view()
assert type(mms2) is mms.__class__
assert_masked_equal(mms2, mms)
ma = mms.view(np.ndarray)
assert type(ma) is MaskedNDArray
assert_array_equal(ma.unmasked, self.a.view(np.ndarray))
assert_array_equal(ma.mask, self.m)
class TestMaskedQuantityInitialization(TestMaskedArrayInitialization, QuantitySetup):
def test_masked_quantity_class_init(self):
# TODO: class definitions should be more easily accessible.
mcls = Masked._masked_classes[self.a.__class__]
# This is not a very careful test.
mq = mcls([1.0, 2.0], mask=[True, False], unit=u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1.0, 2.0])
assert np.all(mq.value.mask == [True, False])
assert np.all(mq.mask == [True, False])
def test_masked_quantity_getting(self):
mcls = Masked._masked_classes[self.a.__class__]
MQ = Masked(Quantity)
assert MQ is mcls
def test_initialization_without_mask(self):
# Default for not giving a mask should be False.
MQ = Masked(Quantity)
mq = MQ([1.0, 2.0], u.s)
assert mq.unit == u.s
assert np.all(mq.value.unmasked == [1.0, 2.0])
assert np.all(mq.mask == [False, False])
@pytest.mark.parametrize("masked_array", [Masked, np.ma.MaskedArray])
def test_initialization_with_masked_values(self, masked_array):
MQ = Masked(Quantity)
a = np.array([1.0, 2.0])
m = np.array([True, False])
ma = masked_array(a, m)
mq = MQ(ma)
assert isinstance(mq, Masked)
assert isinstance(mq, Quantity)
assert_array_equal(mq.value.unmasked, a)
assert_array_equal(mq.mask, m)
class TestMaskSetting(ArraySetup):
def test_whole_mask_setting_simple(self):
ma = Masked(self.a)
assert ma.mask.shape == ma.shape
assert not ma.mask.any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask.all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, np.array([[True] * 3, [False] * 3]))
ma.mask = self.mask_a
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_whole_mask_setting_structured(self):
ma = Masked(self.sa)
assert ma.mask.shape == ma.shape
assert not ma.mask["a"].any() and not ma.mask["b"].any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask["a"].all() and ma.mask["b"].all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(
ma.mask,
np.array([[(True, True)] * 2, [(False, False)] * 2], dtype=self.mask_sdt),
)
ma.mask = self.mask_sa
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
def test_part_mask_setting(self, item):
ma = Masked(self.a)
ma.mask[item] = True
expected = np.zeros(ma.shape, bool)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, bool))
# Mask propagation
mask = np.zeros(self.a.shape, bool)
ma = Masked(self.a, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
@pytest.mark.parametrize("item", ["a"] + VARIOUS_ITEMS)
def test_part_mask_setting_structured(self, item):
ma = Masked(self.sa)
ma.mask[item] = True
expected = np.zeros(ma.shape, self.mask_sdt)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, self.mask_sdt))
# Mask propagation
mask = np.zeros(self.sa.shape, self.mask_sdt)
ma = Masked(self.sa, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
# The following tests assume that the initializer (tested above) works.
class MaskedArraySetup(ArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
self.mc = Masked(self.c, mask=self.mask_c)
self.msa = Masked(self.sa, mask=self.mask_sa)
self.msb = Masked(self.sb, mask=self.mask_sb)
self.msc = Masked(self.sc, mask=self.mask_sc)
class TestViewing(MaskedArraySetup):
def test_viewing_as_new_type(self):
ma2 = self.ma.view(type(self.ma))
assert_masked_equal(ma2, self.ma)
ma3 = self.ma.view()
assert_masked_equal(ma3, self.ma)
def test_viewing_as_new_dtype(self):
# Not very meaningful, but possible...
ma2 = self.ma.view("c8")
assert_array_equal(ma2.unmasked, self.a.view("c8"))
assert_array_equal(ma2.mask, self.mask_a)
@pytest.mark.parametrize("new_dtype", ["2f4", "f8,f8,f8"])
def test_viewing_as_new_dtype_not_implemented(self, new_dtype):
# But cannot (yet) view in way that would need to create a new mask,
# even though that view is possible for a regular array.
check = self.a.view(new_dtype)
with pytest.raises(NotImplementedError, match="different.*size"):
self.ma.view(check.dtype)
def test_viewing_as_something_impossible(self):
with pytest.raises(TypeError):
# Use intp to ensure it has the same size as object;
# otherwise we get a different error message.
Masked(np.array([1, 2], dtype=np.intp)).view(Masked)
class TestMaskedArrayCopyFilled(MaskedArraySetup):
def test_copy(self):
ma_copy = self.ma.copy()
assert type(ma_copy) is type(self.ma)
assert_array_equal(ma_copy.unmasked, self.ma.unmasked)
assert_array_equal(ma_copy.mask, self.ma.mask)
assert not np.may_share_memory(ma_copy.unmasked, self.ma.unmasked)
assert not np.may_share_memory(ma_copy.mask, self.ma.mask)
@pytest.mark.parametrize("fill_value", (0, 1))
def test_filled(self, fill_value):
fill_value = fill_value * getattr(self.a, "unit", 1)
expected = self.a.copy()
expected[self.ma.mask] = fill_value
result = self.ma.filled(fill_value)
assert_array_equal(expected, result)
def test_filled_no_fill_value(self):
with pytest.raises(TypeError, match="missing 1 required"):
self.ma.filled()
@pytest.mark.parametrize("fill_value", [(0, 1), (-1, -1)])
def test_filled_structured(self, fill_value):
fill_value = np.array(fill_value, dtype=self.sdt)
if hasattr(self.sa, "unit"):
fill_value = fill_value << self.sa.unit
expected = self.sa.copy()
expected["a"][self.msa.mask["a"]] = fill_value["a"]
expected["b"][self.msa.mask["b"]] = fill_value["b"]
result = self.msa.filled(fill_value)
assert_array_equal(expected, result)
def test_flat(self):
ma_copy = self.ma.copy()
ma_flat = ma_copy.flat
# Check that single item keeps class and mask
ma_flat1 = ma_flat[1]
assert ma_flat1.unmasked == self.a.flat[1]
assert ma_flat1.mask == self.mask_a.flat[1]
# As well as getting items via iteration.
assert all(
(ma.unmasked == a and ma.mask == m)
for (ma, a, m) in zip(self.ma.flat, self.a.flat, self.mask_a.flat)
)
# Check that flat works like a view of the real array.
ma_flat[1] = self.b[1]
assert ma_flat[1] == self.b[1]
assert ma_copy[0, 1] == self.b[1]
class TestMaskedQuantityCopyFilled(TestMaskedArrayCopyFilled, QuantitySetup):
pass
class TestMaskedLongitudeCopyFilled(TestMaskedArrayCopyFilled, LongitudeSetup):
pass
class TestMaskedArrayShaping(MaskedArraySetup):
def test_reshape(self):
ma_reshape = self.ma.reshape((6,))
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting(self):
ma_reshape = self.ma.copy()
ma_reshape.shape = (6,)
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting_failure(self):
ma = self.ma.copy()
with pytest.raises(ValueError, match="cannot reshape"):
ma.shape = (5,)
assert ma.shape == self.ma.shape
assert ma.mask.shape == self.ma.shape
# Here, mask can be reshaped but array cannot.
ma2 = Masked(np.broadcast_to([[1.0], [2.0]], self.a.shape), mask=self.mask_a)
with pytest.raises(AttributeError, match="ncompatible shape"):
ma2.shape = (6,)
assert ma2.shape == self.ma.shape
assert ma2.mask.shape == self.ma.shape
# Here, array can be reshaped but mask cannot.
ma3 = Masked(
self.a.copy(), mask=np.broadcast_to([[True], [False]], self.mask_a.shape)
)
with pytest.raises(AttributeError, match="ncompatible shape"):
ma3.shape = (6,)
assert ma3.shape == self.ma.shape
assert ma3.mask.shape == self.ma.shape
def test_ravel(self):
ma_ravel = self.ma.ravel()
expected_data = self.a.ravel()
expected_mask = self.mask_a.ravel()
assert ma_ravel.shape == expected_data.shape
assert_array_equal(ma_ravel.unmasked, expected_data)
assert_array_equal(ma_ravel.mask, expected_mask)
def test_transpose(self):
ma_transpose = self.ma.transpose()
expected_data = self.a.transpose()
expected_mask = self.mask_a.transpose()
assert ma_transpose.shape == expected_data.shape
assert_array_equal(ma_transpose.unmasked, expected_data)
assert_array_equal(ma_transpose.mask, expected_mask)
def test_iter(self):
for ma, d, m in zip(self.ma, self.a, self.mask_a):
assert_array_equal(ma.unmasked, d)
assert_array_equal(ma.mask, m)
class MaskedItemTests(MaskedArraySetup):
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
def test_getitem(self, item):
ma_part = self.ma[item]
expected_data = self.a[item]
expected_mask = self.mask_a[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize("item", ["a"] + VARIOUS_ITEMS)
def test_getitem_structured(self, item):
ma_part = self.msa[item]
expected_data = self.sa[item]
expected_mask = self.mask_sa[item]
assert_array_equal(ma_part.unmasked, expected_data)
assert_array_equal(ma_part.mask, expected_mask)
@pytest.mark.parametrize(
"indices,axis",
[([0, 1], 1), ([0, 1], 0), ([0, 1], None), ([[0, 1], [2, 3]], None)],
)
def test_take(self, indices, axis):
ma_take = self.ma.take(indices, axis=axis)
expected_data = self.a.take(indices, axis=axis)
expected_mask = self.mask_a.take(indices, axis=axis)
assert_array_equal(ma_take.unmasked, expected_data)
assert_array_equal(ma_take.mask, expected_mask)
ma_take2 = np.take(self.ma, indices, axis=axis)
assert_masked_equal(ma_take2, ma_take)
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
@pytest.mark.parametrize("mask", [None, True, False])
def test_setitem(self, item, mask):
base = self.ma.copy()
expected_data = self.a.copy()
expected_mask = self.mask_a.copy()
value = self.a[0, 0] if mask is None else Masked(self.a[0, 0], mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize("item", ["a"] + VARIOUS_ITEMS)
@pytest.mark.parametrize("mask", [None, True, False])
def test_setitem_structured(self, item, mask):
base = self.msa.copy()
expected_data = self.sa.copy()
expected_mask = self.mask_sa.copy()
value = self.sa["b"] if item == "a" else self.sa[0, 0]
if mask is not None:
value = Masked(value, mask)
base[item] = value
expected_data[item] = value if mask is None else value.unmasked
expected_mask[item] = False if mask is None else value.mask
assert_array_equal(base.unmasked, expected_data)
assert_array_equal(base.mask, expected_mask)
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
def test_setitem_np_ma_masked(self, item):
base = self.ma.copy()
expected_mask = self.mask_a.copy()
base[item] = np.ma.masked
expected_mask[item] = True
assert_array_equal(base.unmasked, self.a)
assert_array_equal(base.mask, expected_mask)
class TestMaskedArrayItems(MaskedItemTests):
@classmethod
def setup_class(self):
super().setup_class()
self.d = np.array(["aa", "bb"])
self.mask_d = np.array([True, False])
self.md = Masked(self.d, self.mask_d)
# Quantity, Longitude cannot hold strings.
def test_getitem_strings(self):
md = self.md.copy()
md0 = md[0]
assert md0.unmasked == self.d[0]
assert md0.mask
md_all = md[:]
assert_masked_equal(md_all, md)
def test_setitem_strings_np_ma_masked(self):
md = self.md.copy()
md[1] = np.ma.masked
assert_array_equal(md.unmasked, self.d)
assert_array_equal(md.mask, np.ones(2, bool))
class TestMaskedQuantityItems(MaskedItemTests, QuantitySetup):
pass
class TestMaskedLongitudeItems(MaskedItemTests, LongitudeSetup):
pass
class MaskedOperatorTests(MaskedArraySetup):
@pytest.mark.parametrize("op", (operator.add, operator.sub))
def test_add_subtract(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = self.ma.mask | self.mb.mask
# Note: assert_array_equal also checks type, i.e., that, e.g.,
# Longitude decays into an Angle.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_equality(self, op):
mapmb = op(self.ma, self.mb)
expected_data = op(self.a, self.b)
expected_mask = self.ma.mask | self.mb.mask
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_not_implemented(self):
with pytest.raises(TypeError):
self.ma > "abc"
@pytest.mark.parametrize("different_names", [False, True])
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_structured_equality(self, op, different_names):
msb = self.msb
if different_names:
msb = msb.astype(
[(f"different_{name}", dt) for name, dt in msb.dtype.fields.items()]
)
mapmb = op(self.msa, self.msb)
# Expected is a bit tricky here: only unmasked fields count
expected_data = np.ones(mapmb.shape, bool)
expected_mask = np.ones(mapmb.shape, bool)
for field in self.sdt.names:
fa, mfa = self.sa[field], self.mask_sa[field]
fb, mfb = self.sb[field], self.mask_sb[field]
mfequal = mfa | mfb
fequal = (fa == fb) | mfequal
expected_data &= fequal
expected_mask &= mfequal
if op is operator.ne:
expected_data = ~expected_data
# Note: assert_array_equal also checks type, i.e., that boolean
# output is represented as plain Masked ndarray.
assert_array_equal(mapmb.unmasked, expected_data)
assert_array_equal(mapmb.mask, expected_mask)
def test_matmul(self):
result = self.ma.T @ self.ma
assert_array_equal(result.unmasked, self.a.T @ self.a)
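# In ma.T @ ma, element (i, j) sums over columns i and j of a, so it
# is masked whenever either column contains a masked element.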
mask1 = np.any(self.mask_a, axis=0)
expected_mask = np.logical_or.outer(mask1, mask1)
assert_array_equal(result.mask, expected_mask)
result2 = self.ma.T @ self.a
assert_array_equal(result2.unmasked, self.a.T @ self.a)
expected_mask2 = np.logical_or.outer(mask1, np.zeros(3, bool))
assert_array_equal(result2.mask, expected_mask2)
result3 = self.a.T @ self.ma
assert_array_equal(result3.unmasked, self.a.T @ self.a)
expected_mask3 = np.logical_or.outer(np.zeros(3, bool), mask1)
assert_array_equal(result3.mask, expected_mask3)
def test_matvec(self):
result = self.ma @ self.mb
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.a @ self.b)
# Just using the masked vector still has all elements masked.
result2 = self.a @ self.mb
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.a @ self.b)
new_ma = self.ma.copy()
new_ma.mask[0, 0] = False
result3 = new_ma @ self.b
assert_array_equal(result3.unmasked, self.a @ self.b)
assert_array_equal(result3.mask, new_ma.mask.any(-1))
def test_vecmat(self):
result = self.mb @ self.ma.T
assert np.all(result.mask)
assert_array_equal(result.unmasked, self.b @ self.a.T)
result2 = self.b @ self.ma.T
assert np.all(result2.mask)
assert_array_equal(result2.unmasked, self.b @ self.a.T)
new_ma = self.ma.T.copy()
new_ma.mask[0, 0] = False
result3 = self.b @ new_ma
assert_array_equal(result3.unmasked, self.b @ self.a.T)
assert_array_equal(result3.mask, new_ma.mask.any(0))
def test_vecvec(self):
result = self.mb @ self.mb
assert result.shape == ()
assert result.mask
assert result.unmasked == self.b @ self.b
mb_no_mask = Masked(self.b, False)
result2 = mb_no_mask @ mb_no_mask
assert not result2.mask
class TestMaskedArrayOperators(MaskedOperatorTests):
# Some further tests that use strings, which are not useful for Quantity.
@pytest.mark.parametrize("op", (operator.eq, operator.ne))
def test_equality_strings(self, op):
m1 = Masked(np.array(["a", "b", "c"]), mask=[True, False, False])
m2 = Masked(np.array(["a", "b", "d"]), mask=[False, False, False])
result = op(m1, m2)
assert_array_equal(result.unmasked, op(m1.unmasked, m2.unmasked))
assert_array_equal(result.mask, m1.mask | m2.mask)
result2 = op(m1, m2.unmasked)
assert_masked_equal(result2, result)
def test_not_implemented(self):
with pytest.raises(TypeError):
Masked(["a", "b"]) > object()
class TestMaskedQuantityOperators(MaskedOperatorTests, QuantitySetup):
pass
class TestMaskedLongitudeOperators(MaskedOperatorTests, LongitudeSetup):
pass
class TestMaskedArrayMethods(MaskedArraySetup):
def test_round(self):
# Goes via ufunc, hence easy.
mrc = self.mc.round()
expected = Masked(self.c.round(), self.mask_c)
assert_masked_equal(mrc, expected)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_sum(self, axis):
ma_sum = self.ma.sum(axis)
expected_data = self.a.sum(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_sum_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_sum = self.ma.sum(axis, where=where_final)
expected_data = self.ma.unmasked.sum(axis, where=where_final)
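# where_final never selects masked elements, so the logical_or.reduce
# term below is vacuous here; the result is masked exactly where no
# element was selected at all.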
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_cumsum(self, axis):
ma_sum = self.ma.cumsum(axis)
expected_data = self.a.cumsum(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_mean(self, axis):
ma_mean = self.ma.mean(axis)
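# Reference result: zero-fill the masked elements and divide the sum
# by the number of unmasked elements along the axis.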
filled = self.a.copy()
filled[self.mask_a] = 0.0
count = 1 - self.ma.mask.astype(int)
expected_data = filled.sum(axis) / count.sum(axis)
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
def test_mean_int16(self):
ma = self.ma.astype("i2")
ma_mean = ma.mean()
assert ma_mean.dtype == "f8"
expected = ma.astype("f8").mean()
assert_masked_equal(ma_mean, expected)
def test_mean_float16(self):
ma = self.ma.astype("f2")
ma_mean = ma.mean()
assert ma_mean.dtype == "f2"
expected = self.ma.mean().astype("f2")
assert_masked_equal(ma_mean, expected)
def test_mean_inplace(self):
expected = self.ma.mean(1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.mean(1, out=out)
assert result is out
assert_masked_equal(out, expected)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Mean of empty slice")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_mean_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_mean = self.ma.mean(axis, where=where)
expected_data = self.ma.unmasked.mean(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_mean.unmasked, expected_data)
assert_array_equal(ma_mean.mask, expected_mask)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_var(self, axis):
ma_var = self.ma.var(axis)
filled = (self.a - self.ma.mean(axis, keepdims=True)) ** 2
filled[self.mask_a] = 0.0
count = (1 - self.ma.mask.astype(int)).sum(axis)
expected_data = filled.sum(axis) / count
expected_mask = self.ma.mask.all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
ma_var1 = self.ma.var(axis, ddof=1)
expected_data1 = filled.sum(axis) / (count - 1)
expected_mask1 = self.ma.mask.all(axis) | (count <= 1)
assert_array_equal(ma_var1.unmasked, expected_data1)
assert_array_equal(ma_var1.mask, expected_mask1)
ma_var5 = self.ma.var(axis, ddof=5)
assert np.all(~np.isfinite(ma_var5.unmasked))
assert ma_var5.mask.all()
def test_var_int16(self):
ma = self.ma.astype("i2")
ma_var = ma.var()
assert ma_var.dtype == "f8"
expected = ma.astype("f8").var()
assert_masked_equal(ma_var, expected)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0 for slice")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_var_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_var = self.ma.var(axis, where=where)
expected_data = self.ma.unmasked.var(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_var.unmasked, expected_data)
assert_array_equal(ma_var.mask, expected_mask)
def test_std(self):
ma_std = self.ma.std(1, ddof=1)
ma_var1 = self.ma.var(1, ddof=1)
expected = np.sqrt(ma_var1)
assert_masked_equal(ma_std, expected)
def test_std_inplace(self):
expected = self.ma.std(1, ddof=1)
out = Masked(np.zeros_like(expected.unmasked))
result = self.ma.std(1, ddof=1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide")
@pytest.mark.filterwarnings("ignore:Degrees of freedom <= 0 for slice")
@pytest.mark.parametrize("axis", (0, 1, None))
def test_std_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_std = self.ma.std(axis, where=where)
expected_data = self.ma.unmasked.std(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_std.unmasked, expected_data)
assert_array_equal(ma_std.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_min(self, axis):
ma_min = self.ma.min(axis)
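# Fill masked entries with the global maximum so they can never win
# the minimum.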
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.min(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert not np.any(ma_min.mask)
def test_min_with_masked_nan(self):
ma = Masked([3.0, np.nan, 2.0], mask=[False, True, False])
ma_min = ma.min()
assert_array_equal(ma_min.unmasked, np.array(2.0))
assert not ma_min.mask
@pytest.mark.parametrize("axis", (0, 1, None))
def test_min_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_min = self.ma.min(axis, where=where_final, initial=np.inf)
expected_data = self.ma.unmasked.min(axis, where=where_final, initial=np.inf)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_min.unmasked, expected_data)
assert_array_equal(ma_min.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_max(self, axis):
ma_max = self.ma.max(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.max(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert not np.any(ma_max.mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_max_where(self, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_max = self.ma.max(axis, where=where_final, initial=-np.inf)
expected_data = self.ma.unmasked.max(axis, where=where_final, initial=-np.inf)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_max.unmasked, expected_data)
assert_array_equal(ma_max.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_argmin(self, axis):
ma_argmin = self.ma.argmin(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.max()
expected_data = filled.argmin(axis)
assert_array_equal(ma_argmin, expected_data)
def test_argmin_only_one_unmasked_element(self):
# Regression test for example from @taldcroft at
# https://github.com/astropy/astropy/pull/11127#discussion_r600864559
ma = Masked(data=[1, 2], mask=[True, False])
assert ma.argmin() == 1
if not NUMPY_LT_1_22:
def test_argmin_keepdims(self):
ma = Masked(data=[[1, 2], [3, 4]], mask=[[True, False], [False, False]])
assert_array_equal(ma.argmin(axis=0, keepdims=True), np.array([[1, 0]]))
@pytest.mark.parametrize("axis", (0, 1, None))
def test_argmax(self, axis):
ma_argmax = self.ma.argmax(axis)
filled = self.a.copy()
filled[self.mask_a] = self.a.min()
expected_data = filled.argmax(axis)
assert_array_equal(ma_argmax, expected_data)
if not NUMPY_LT_1_22:
def test_argmax_keepdims(self):
ma = Masked(data=[[1, 2], [3, 4]], mask=[[True, False], [False, False]])
assert_array_equal(ma.argmax(axis=1, keepdims=True), np.array([[1], [1]]))
@pytest.mark.parametrize("axis", (0, 1, None))
def test_argsort(self, axis):
ma_argsort = self.ma.argsort(axis)
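# Fill masked entries with a value beyond the maximum so they sort last.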
filled = self.a.copy()
filled[self.mask_a] = self.a.max() * 1.1
expected_data = filled.argsort(axis)
assert_array_equal(ma_argsort, expected_data)
@pytest.mark.parametrize("order", [None, "a", ("a", "b"), ("b", "a")])
@pytest.mark.parametrize("axis", [0, 1])
def test_structured_argsort(self, axis, order):
ma_argsort = self.msa.argsort(axis, order=order)
filled = self.msa.filled(fill_value=np.array((np.inf, np.inf), dtype=self.sdt))
expected_data = filled.argsort(axis, order=order)
assert_array_equal(ma_argsort, expected_data)
def test_argsort_error(self):
with pytest.raises(ValueError, match="when the array has no fields"):
self.ma.argsort(axis=0, order="a")
@pytest.mark.parametrize("axis", (0, 1))
def test_sort(self, axis):
ma_sort = self.ma.copy()
ma_sort.sort(axis)
indices = self.ma.argsort(axis)
expected_data = np.take_along_axis(self.ma.unmasked, indices, axis)
expected_mask = np.take_along_axis(self.ma.mask, indices, axis)
assert_array_equal(ma_sort.unmasked, expected_data)
assert_array_equal(ma_sort.mask, expected_mask)
@pytest.mark.parametrize("kth", [1, 3])
def test_argpartition(self, kth):
ma = self.ma.ravel()
ma_argpartition = ma.argpartition(kth)
partitioned = ma[ma_argpartition]
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
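# Masked values sort as if larger than any unmasked value, so if the
# kth element is masked, everything beyond it must be masked too.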
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
@pytest.mark.parametrize("kth", [1, 3])
def test_partition(self, kth):
partitioned = self.ma.flatten()
partitioned.partition(kth)
assert (partitioned[:kth] < partitioned[kth]).all()
assert (partitioned[kth:] >= partitioned[kth]).all()
if partitioned[kth].mask:
assert all(partitioned.mask[kth:])
else:
assert not any(partitioned.mask[:kth])
def test_all_explicit(self):
a1 = np.array(
[
[1.0, 2.0],
[3.0, 4.0],
]
)
a2 = np.array(
[
[1.0, 0.0],
[3.0, 4.0],
]
)
if self._data_cls is not np.ndarray:
a1 = self._data_cls(a1, self.a.unit)
a2 = self._data_cls(a2, self.a.unit)
ma1 = Masked(
a1,
mask=[
[False, False],
[True, True],
],
)
ma2 = Masked(
a2,
mask=[
[False, True],
[False, True],
],
)
ma1_eq_ma2 = ma1 == ma2
assert_array_equal(
ma1_eq_ma2.unmasked,
np.array(
[
[True, False],
[True, True],
]
),
)
assert_array_equal(
ma1_eq_ma2.mask,
np.array(
[
[False, True],
[True, True],
]
),
)
assert ma1_eq_ma2.all()
assert not (ma1 != ma2).all()
ma_eq1 = ma1_eq_ma2.all(1)
assert_array_equal(ma_eq1.mask, np.array([False, True]))
assert bool(ma_eq1[0]) is True
assert bool(ma_eq1[1]) is False
ma_eq0 = ma1_eq_ma2.all(0)
assert_array_equal(ma_eq0.mask, np.array([False, True]))
assert bool(ma_eq0[0]) is True
assert bool(ma_eq0[1]) is False
@pytest.mark.parametrize("method", ["any", "all"])
@pytest.mark.parametrize(
"array,axis",
[("a", 0), ("a", 1), ("a", None), ("b", None), ("c", 0), ("c", 1), ("c", None)],
)
def test_all_and_any(self, array, axis, method):
ma = getattr(self, "m" + array)
ma_eq = ma == ma
ma_all_or_any = getattr(ma_eq, method)(axis=axis)
filled = ma_eq.unmasked.copy()
filled[ma_eq.mask] = method == "all"
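# Masked elements are replaced by the reduction's identity (True for
# "all", False for "any") so they cannot affect the unmasked result.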
a_all_or_any = getattr(filled, method)(axis=axis)
all_masked = ma.mask.all(axis)
assert_array_equal(ma_all_or_any.mask, all_masked)
assert_array_equal(ma_all_or_any.unmasked, a_all_or_any)
# interpretation as bool
as_bool = [bool(a) for a in ma_all_or_any.ravel()]
expected = [bool(a) for a in (a_all_or_any & ~all_masked).ravel()]
assert as_bool == expected
def test_any_inplace(self):
ma_eq = self.ma == self.ma
expected = ma_eq.any(1)
out = Masked(np.zeros_like(expected.unmasked))
result = ma_eq.any(1, out=out)
assert result is out
assert_masked_equal(result, expected)
@pytest.mark.parametrize("method", ("all", "any"))
@pytest.mark.parametrize("axis", (0, 1, None))
def test_all_and_any_where(self, method, axis):
where = np.array(
[
[True, False, False],
[True, True, True],
]
)
where_final = ~self.ma.mask & where
ma_eq = self.ma == self.ma
ma_any = getattr(ma_eq, method)(axis, where=where)
expected_data = getattr(ma_eq.unmasked, method)(axis, where=where_final)
expected_mask = np.logical_or.reduce(
self.ma.mask, axis=axis, where=where_final
) | (~where_final).all(axis)
assert_array_equal(ma_any.unmasked, expected_data)
assert_array_equal(ma_any.mask, expected_mask)
@pytest.mark.parametrize("offset", (0, 1))
def test_diagonal(self, offset):
mda = self.ma.diagonal(offset=offset)
expected = Masked(
self.a.diagonal(offset=offset), self.mask_a.diagonal(offset=offset)
)
assert_masked_equal(mda, expected)
@pytest.mark.parametrize("offset", (0, 1))
def test_trace(self, offset):
mta = self.ma.trace(offset=offset)
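# Tracing the boolean mask with dtype=bool amounts to a logical OR
# along the diagonal: the trace is masked if any diagonal element is.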
expected = Masked(
self.a.trace(offset=offset), self.mask_a.trace(offset=offset, dtype=bool)
)
assert_masked_equal(mta, expected)
def test_clip(self):
maclip = self.ma.clip(self.b, self.c)
expected = Masked(self.a.clip(self.b, self.c), self.mask_a)
assert_masked_equal(maclip, expected)
def test_clip_masked_min_max(self):
maclip = self.ma.clip(self.mb, self.mc)
# Need to be careful with min, max because of Longitude, which wraps.
dmax = np.maximum(np.maximum(self.a, self.b), self.c).max()
dmin = np.minimum(np.minimum(self.a, self.b), self.c).min()
expected = Masked(
self.a.clip(self.mb.filled(dmin), self.mc.filled(dmax)), mask=self.mask_a
)
assert_masked_equal(maclip, expected)
class TestMaskedQuantityMethods(TestMaskedArrayMethods, QuantitySetup):
pass
class TestMaskedLongitudeMethods(TestMaskedArrayMethods, LongitudeSetup):
pass
class TestMaskedArrayProductMethods(MaskedArraySetup):
# These cannot work on Quantity, so done separately
@pytest.mark.parametrize("axis", (0, 1, None))
def test_prod(self, axis):
ma_sum = self.ma.prod(axis)
expected_data = self.a.prod(axis)
expected_mask = self.ma.mask.any(axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1, None))
def test_cumprod(self, axis):
ma_sum = self.ma.cumprod(axis)
expected_data = self.a.cumprod(axis)
mask = self.mask_a
if axis is None:
mask = mask.ravel()
expected_mask = np.logical_or.accumulate(mask, axis=axis)
assert_array_equal(ma_sum.unmasked, expected_data)
assert_array_equal(ma_sum.mask, expected_mask)
def test_masked_str_explicit():
sa = np.array([(1.0, 2.0), (3.0, 4.0)], dtype="f8,f8")
msa = Masked(sa, [(False, True), (False, False)])
assert str(msa) == "[(1., ——) (3., 4.)]"
assert str(msa[0]) == "(1., ——)"
assert str(msa[1]) == "(3., 4.)"
with np.printoptions(precision=3, floatmode="fixed"):
assert str(msa) == "[(1.000, ———) (3.000, 4.000)]"
def test_masked_repr_explicit():
# Use explicit endianness to ensure tests pass on all architectures
sa = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=">f8,>f8")
msa = Masked(sa, [(False, True), (False, False)])
assert (
repr(msa)
== "MaskedNDArray([(1., ——), (3., 4.)], dtype=[('f0', '>f8'), ('f1', '>f8')])"
)
assert (
repr(msa[0]) == "MaskedNDArray((1., ——), dtype=[('f0', '>f8'), ('f1', '>f8')])"
)
assert (
repr(msa[1]) == "MaskedNDArray((3., 4.), dtype=[('f0', '>f8'), ('f1', '>f8')])"
)
def test_masked_repr_summary():
ma = Masked(np.arange(15.0), mask=[True] + [False] * 14)
with np.printoptions(threshold=2):
assert repr(ma) == "MaskedNDArray([———, 1., 2., ..., 12., 13., 14.])"
def test_masked_repr_nodata():
assert repr(Masked([])) == "MaskedNDArray([], dtype=float64)"
class TestMaskedArrayRepr(MaskedArraySetup):
def test_array_str(self):
# Very blunt check that they work at all.
str(self.ma)
str(self.mb)
str(self.mc)
str(self.msa)
str(self.msb)
str(self.msc)
def test_scalar_str(self):
assert self.mb[0].shape == ()
str(self.mb[0])
assert self.msb[0].shape == ()
str(self.msb[0])
assert self.msc[0].shape == ()
str(self.msc[0])
def test_array_repr(self):
repr(self.ma)
repr(self.mb)
repr(self.mc)
repr(self.msa)
repr(self.msb)
repr(self.msc)
def test_scalar_repr(self):
repr(self.mb[0])
repr(self.msb[0])
repr(self.msc[0])
class TestMaskedQuantityRepr(TestMaskedArrayRepr, QuantitySetup):
pass
class TestMaskedRecarray(MaskedArraySetup):
@classmethod
def setup_class(self):
super().setup_class()
self.ra = self.sa.view(np.recarray)
self.mra = Masked(self.ra, mask=self.mask_sa)
def test_recarray_setup(self):
assert isinstance(self.mra, Masked)
assert isinstance(self.mra, np.recarray)
assert np.all(self.mra.unmasked == self.ra)
assert np.all(self.mra.mask == self.mask_sa)
assert_array_equal(self.mra.view(np.ndarray), self.sa)
assert isinstance(self.mra.a, Masked)
assert_array_equal(self.mra.a.unmasked, self.sa["a"])
assert_array_equal(self.mra.a.mask, self.mask_sa["a"])
def test_recarray_setting(self):
mra = self.mra.copy()
mra.a = self.msa["b"]
assert_array_equal(mra.a.unmasked, self.msa["b"].unmasked)
assert_array_equal(mra.a.mask, self.msa["b"].mask)
@pytest.mark.parametrize("attr", [0, "a"])
def test_recarray_field_getting(self, attr):
mra_a = self.mra.field(attr)
assert isinstance(mra_a, Masked)
assert_array_equal(mra_a.unmasked, self.sa["a"])
assert_array_equal(mra_a.mask, self.mask_sa["a"])
@pytest.mark.parametrize("attr", [0, "a"])
def test_recarray_field_setting(self, attr):
mra = self.mra.copy()
mra.field(attr, self.msa["b"])
assert_array_equal(mra.a.unmasked, self.msa["b"].unmasked)
assert_array_equal(mra.a.mask, self.msa["b"].mask)
class TestMaskedArrayInteractionWithNumpyMA(MaskedArraySetup):
def test_masked_array_from_masked(self):
"""Check that we can initialize a MaskedArray properly."""
np_ma = np.ma.MaskedArray(self.ma)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
def test_view_as_masked_array(self):
"""Test that we can be viewed as a MaskedArray."""
np_ma = self.ma.view(np.ma.MaskedArray)
assert type(np_ma) is np.ma.MaskedArray
assert type(np_ma.data) is self._data_cls
assert type(np_ma.mask) is np.ndarray
assert_array_equal(np_ma.data, self.a)
assert_array_equal(np_ma.mask, self.mask_a)
class TestMaskedQuantityInteractionWithNumpyMA(
TestMaskedArrayInteractionWithNumpyMA, QuantitySetup
):
pass
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates import representation as r
from astropy.time import Time
from astropy.utils.masked import Masked
class TestRepresentations:
def setup_class(self):
self.x = np.array([3.0, 5.0, 0.0]) << u.m
self.y = np.array([4.0, 12.0, 1.0]) << u.m
self.z = np.array([0.0, 0.0, 1.0]) << u.m
self.c = r.CartesianRepresentation(self.x, self.y, self.z)
self.mask = np.array([False, False, True])
self.mx = Masked(self.x, self.mask)
self.my = Masked(self.y, self.mask)
self.mz = Masked(self.z, self.mask)
self.mc = r.CartesianRepresentation(self.mx, self.my, self.mz)
def test_initialization(self):
check = self.mc.z == self.mz
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
assert_array_equal(self.mc.x, self.mx)
assert_array_equal(self.mc.y, self.my)
assert_array_equal(self.mc.z, self.mz)
def test_norm(self):
# Need stacking and erfa override.
norm = self.mc.norm()
assert_array_equal(norm.unmasked, self.c.norm())
assert_array_equal(norm.mask, self.mask)
def test_transformation(self):
msr = self.mc.represent_as(r.SphericalRepresentation)
sr = self.c.represent_as(r.SphericalRepresentation)
for comp in msr.components:
mc = getattr(msr, comp)
c = getattr(sr, comp)
assert_array_equal(mc.unmasked, c)
assert_array_equal(mc.mask, self.mask)
# Transformation back. This also tests erfa.ufunc.s2p, which
# is special in having a core dimension only in the output.
cr2 = sr.represent_as(r.CartesianRepresentation)
mcr2 = msr.represent_as(r.CartesianRepresentation)
for comp in mcr2.components:
mc = getattr(mcr2, comp)
c = getattr(cr2, comp)
assert_array_equal(mc.unmasked, c)
assert_array_equal(mc.mask, self.mask)
class TestSkyCoord:
def setup_class(self):
self.ra = np.array([3.0, 5.0, 0.0]) << u.hourangle
self.dec = np.array([4.0, 12.0, 1.0]) << u.deg
self.sc = SkyCoord(self.ra, self.dec)
self.mask = np.array([False, False, True])
self.mra = Masked(self.ra, self.mask)
self.mdec = Masked(self.dec, self.mask)
self.msc = SkyCoord(self.mra, self.mdec)
def test_initialization(self):
check = self.msc.dec == self.mdec
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
assert_array_equal(self.msc.data.lon, self.mra)
assert_array_equal(self.msc.data.lat, self.mdec)
def test_transformation(self):
gcrs = self.sc.gcrs
mgcrs = self.msc.gcrs
assert_array_equal(mgcrs.data.lon.mask, self.msc.data.lon.mask)
assert_array_equal(mgcrs.data.lon.unmasked, gcrs.data.lon)
assert_array_equal(mgcrs.data.lat.unmasked, gcrs.data.lat)
class TestTime:
def setup_class(self):
self.s = np.array(
[
"2010-11-12T13:14:15.160",
"2010-11-12T13:14:15.161",
"2011-12-13T14:15:16.170",
]
)
self.t = Time(self.s)
# Time formats will currently strip any ndarray subtypes, so we cannot
# initialize a Time with a Masked version of self.s yet. Instead, we
# work around it, for now only testing that masks are preserved by
# transformations.
self.mask = np.array([False, False, True])
self.mt = self.t._apply(Masked, self.mask)
def test_initialization(self):
assert_array_equal(self.mt.jd1.mask, self.mask)
assert_array_equal(self.mt.jd2.mask, self.mask)
assert_array_equal(self.mt.jd1.unmasked, self.t.jd1)
assert_array_equal(self.mt.jd2.unmasked, self.t.jd2)
@pytest.mark.parametrize("format_", ["jd", "cxcsec", "jyear"])
def test_different_formats(self, format_):
# Formats do not yet work with everything; e.g., isot is not supported
# since the Masked class does not yet support structured arrays.
tfmt = getattr(self.t, format_)
mtfmt = getattr(self.mt, format_)
check = mtfmt == tfmt
assert_array_equal(check.unmasked, np.ones(3, bool))
assert_array_equal(check.mask, self.mask)
@pytest.mark.parametrize("scale", ["tai", "tcb", "ut1"])
def test_transformation(self, scale):
tscl = getattr(self.t, scale)
mtscl = getattr(self.mt, scale)
assert_array_equal(mtscl.jd1.mask, self.mask)
assert_array_equal(mtscl.jd2.mask, self.mask)
assert_array_equal(mtscl.jd1.unmasked, tscl.jd1)
assert_array_equal(mtscl.jd2.unmasked, tscl.jd2)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test all functions covered by __array_function__.
Here, run through all functions, with simple tests just to check the helpers.
More complicated tests of functionality, including with subclasses, are done
in test_functions.
TODO: finish full coverage (see also `~astropy.utils.masked.function_helpers`)
- np.linalg
- np.fft (is there any point?)
- np.lib.nanfunctions
"""
import inspect
import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy.units.tests.test_quantity_non_ufuncs import get_wrapped_functions
from astropy.utils.compat import NUMPY_LT_1_23, NUMPY_LT_1_24, NUMPY_LT_1_25
from astropy.utils.masked import Masked, MaskedNDArray
from astropy.utils.masked.function_helpers import (
APPLY_TO_BOTH_FUNCTIONS,
DISPATCHED_FUNCTIONS,
IGNORED_FUNCTIONS,
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
)
from .test_masked import MaskedArraySetup, assert_masked_equal
all_wrapped_functions = get_wrapped_functions(np)
all_wrapped = set(all_wrapped_functions.values())
class BasicTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = Masked(
func(self.a, *args, **kwargs), mask=func(self.mask_a, *args, **kwargs)
)
assert_masked_equal(out, expected)
def check2(self, func, *args, **kwargs):
out = func(self.ma, self.mb, *args, **kwargs)
expected = Masked(
func(self.a, self.b, *args, **kwargs),
mask=func(self.mask_a, self.mask_b, *args, **kwargs),
)
if isinstance(out, (tuple, list)):
for o, x in zip(out, expected):
assert_masked_equal(o, x)
else:
assert_masked_equal(out, expected)
class NoMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o, expected)
class InvariantMaskTestSetup(MaskedArraySetup):
def check(self, func, *args, **kwargs):
o = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, self.mask_a)
class TestShapeInformation(BasicTestSetup):
def test_shape(self):
assert np.shape(self.ma) == (2, 3)
def test_size(self):
assert np.size(self.ma) == 6
def test_ndim(self):
assert np.ndim(self.ma) == 2
class TestShapeManipulation(BasicTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (6, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
self.check(np.atleast_1d)
o, so = np.atleast_1d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1,)
def test_atleast_2d(self):
self.check(np.atleast_2d)
o, so = np.atleast_2d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1)
def test_atleast_3d(self):
self.check(np.atleast_3d)
o, so = np.atleast_3d(self.mb[0], self.mc[0])
assert o.shape == o.mask.shape == so.shape == so.mask.shape == (1, 1, 1)
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.mc)
assert o.shape == o.mask.shape == (2,)
assert_array_equal(o.unmasked, self.c.squeeze())
assert_array_equal(o.mask, self.mask_c.squeeze())
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
self.check(np.broadcast_to, (3, 2, 3))
self.check(np.broadcast_to, (3, 2, 3), subok=False)
def test_broadcast_arrays(self):
self.check2(np.broadcast_arrays)
self.check2(np.broadcast_arrays, subok=False)
class TestArgFunctions(MaskedArraySetup):
def check(self, function, *args, fill_value=np.nan, **kwargs):
o = function(self.ma, *args, **kwargs)
a_filled = self.ma.filled(fill_value=fill_value)
expected = function(a_filled, *args, **kwargs)
assert_array_equal(o, expected)
def test_argmin(self):
self.check(np.argmin, fill_value=np.inf)
def test_argmax(self):
self.check(np.argmax, fill_value=-np.inf)
def test_argsort(self):
self.check(np.argsort, fill_value=np.nan)
def test_lexsort(self):
self.check(np.lexsort, fill_value=np.nan)
def test_nonzero(self):
self.check(np.nonzero, fill_value=0.0)
@pytest.mark.filterwarnings("ignore:Calling nonzero on 0d arrays is deprecated")
def test_nonzero_0d(self):
res1 = Masked(1, mask=False).nonzero()
assert len(res1) == 1
assert_array_equal(res1[0], np.ones(()).nonzero()[0])
res2 = Masked(1, mask=True).nonzero()
assert len(res2) == 1
assert_array_equal(res2[0], np.zeros(()).nonzero()[0])
def test_argwhere(self):
self.check(np.argwhere, fill_value=0.0)
def test_argpartition(self):
self.check(np.argpartition, 2, fill_value=np.inf)
def test_flatnonzero(self):
self.check(np.flatnonzero, fill_value=0.0)
class TestAlongAxis(MaskedArraySetup):
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
out = np.take_along_axis(self.ma, indices, axis=0)
expected = np.take_along_axis(self.a, indices, axis=0)
expected_mask = np.take_along_axis(self.mask_a, indices, axis=0)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_put_along_axis(self):
ma = self.ma.copy()
indices = np.expand_dims(np.argmax(self.ma, axis=0), axis=0)
np.put_along_axis(ma, indices, axis=0, values=-1)
expected = self.a.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, self.mask_a)
np.put_along_axis(ma, indices, axis=0, values=np.ma.masked)
assert_array_equal(ma.unmasked, expected)
expected_mask = self.mask_a.copy()
np.put_along_axis(expected_mask, indices, axis=0, values=True)
assert_array_equal(ma.mask, expected_mask)
@pytest.mark.parametrize("axis", (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.ma)
expected = np.apply_along_axis(np.square, axis, self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.parametrize("axes", [(1,), 0, (0, -1)])
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.mean(np.square(x), axis)
out = np.apply_over_axes(function, self.ma, axes)
expected = self.ma
for axis in axes if isinstance(axes, tuple) else (axes,):
expected = (expected**2).mean(axis, keepdims=True)
assert_array_equal(out.unmasked, expected.unmasked)
assert_array_equal(out.mask, expected.mask)
def test_apply_over_axes_no_reduction(self):
out = np.apply_over_axes(np.cumsum, self.ma, 0)
expected = self.ma.cumsum(axis=0)
assert_masked_equal(out, expected)
def test_apply_over_axes_wrong_size(self):
with pytest.raises(ValueError, match="not.*correct shape"):
np.apply_over_axes(lambda x, axis: x[..., np.newaxis], self.ma, 0)
class TestIndicesFrom(NoMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.arange(9).reshape(3, 3)
self.mask_a = np.eye(3, dtype=bool)
self.ma = Masked(self.a, self.mask_a)
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantMaskTestSetup):
@classmethod
def setup_class(self):
self.a = np.array([1 + 2j, 3 + 4j])
self.mask_a = np.array([True, False])
self.ma = Masked(self.a, mask=self.mask_a)
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantMaskTestSetup):
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.ma)
assert_array_equal(copy, self.ma)
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.ma)
assert_array_equal(farray, self.ma)
class TestArrayCreation(MaskedArraySetup):
def test_empty_like(self):
o = np.empty_like(self.ma)
assert o.shape == (2, 3)
assert isinstance(o, Masked)
assert isinstance(o, np.ndarray)
o2 = np.empty_like(prototype=self.ma)
assert o2.shape == (2, 3)
assert isinstance(o2, Masked)
assert isinstance(o2, np.ndarray)
o3 = np.empty_like(self.ma, subok=False)
assert type(o3) is MaskedNDArray
def test_zeros_like(self):
o = np.zeros_like(self.ma)
assert_array_equal(o.unmasked, np.zeros_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.zeros_like(a=self.ma)
assert_array_equal(o2.unmasked, np.zeros_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
def test_ones_like(self):
o = np.ones_like(self.ma)
assert_array_equal(o.unmasked, np.ones_like(self.a))
assert_array_equal(o.mask, np.zeros_like(self.mask_a))
o2 = np.ones_like(a=self.ma)
assert_array_equal(o2.unmasked, np.ones_like(self.a))
assert_array_equal(o2.mask, np.zeros_like(self.mask_a))
@pytest.mark.parametrize("value", [0.5, Masked(0.5, mask=True), np.ma.masked])
def test_full_like(self, value):
o = np.full_like(self.ma, value)
if value is np.ma.masked:
expected = Masked(o.unmasked, True)
else:
expected = Masked(np.empty_like(self.a))
expected[...] = value
assert_array_equal(o.unmasked, expected.unmasked)
assert_array_equal(o.mask, expected.mask)
class TestAccessingParts(BasicTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diag_1d_input(self):
ma = self.ma.ravel()
o = np.diag(ma)
assert_array_equal(o.unmasked, np.diag(self.a.ravel()))
assert_array_equal(o.mask, np.diag(self.mask_a.ravel()))
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False], self.ma, axis=0)
expected = np.compress([True, False], self.a, axis=0)
expected_mask = np.compress([True, False], self.mask_a, axis=0)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_extract(self):
o = np.extract([True, False, True], self.ma)
expected = np.extract([True, False, True], self.a)
expected_mask = np.extract([True, False, True], self.mask_a)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(MaskedArraySetup):
def test_put(self):
ma = self.ma.copy()
v = Masked([50, 150], [False, True])
np.put(ma, [0, 2], v)
expected = self.a.copy()
np.put(expected, [0, 2], [50, 150])
expected_mask = self.mask_a.copy()
np.put(expected_mask, [0, 2], [False, True])
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
# Indices cannot be masked.
np.put(ma, Masked([0, 2]), v)
with pytest.raises(TypeError):
# Array to put masked values in must be masked.
np.put(self.a.copy(), [0, 2], v)
def test_putmask(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(
np.arange(100, 650, 100), mask=[False, True, True, True, False, False]
)
np.putmask(ma, mask, values)
expected = self.a.flatten()
np.putmask(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.putmask(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.putmask(self.a.flatten(), mask, values)
def test_place(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked([100, 200], mask=[False, True])
np.place(ma, mask, values)
expected = self.a.flatten()
np.place(expected, mask, values.unmasked)
expected_mask = self.mask_a.flatten()
np.place(expected_mask, mask, values.mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.place(self.a.flatten(), mask, values)
def test_copyto(self):
ma = self.ma.flatten()
mask = [True, False, False, False, True, False]
values = Masked(
np.arange(100, 650, 100), mask=[False, True, True, True, False, False]
)
np.copyto(ma, values, where=mask)
expected = self.a.flatten()
np.copyto(expected, values.unmasked, where=mask)
expected_mask = self.mask_a.flatten()
np.copyto(expected_mask, values.mask, where=mask)
assert_array_equal(ma.unmasked, expected)
assert_array_equal(ma.mask, expected_mask)
with pytest.raises(TypeError):
np.copyto(self.a.flatten(), values, where=mask)
@pytest.mark.parametrize("value", [0.25, np.ma.masked])
def test_fill_diagonal(self, value):
ma = self.ma[:2, :2].copy()
np.fill_diagonal(ma, value)
expected = ma.copy()
expected[np.diag_indices_from(expected)] = value
assert_array_equal(ma.unmasked, expected.unmasked)
assert_array_equal(ma.mask, expected.mask)
class TestRepeat(BasicTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(MaskedArraySetup):
# More tests at TestMaskedArrayConcatenation in test_functions.
def check(self, func, *args, **kwargs):
ma_list = kwargs.pop("ma_list", [self.ma, self.ma])
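# Masked(ma) leaves Masked input as is and wraps plain arrays with an
# all-False mask, so mixed lists are handled uniformly.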
a_list = [Masked(ma).unmasked for ma in ma_list]
m_list = [Masked(ma).mask for ma in ma_list]
o = func(ma_list, *args, **kwargs)
expected = func(a_list, *args, **kwargs)
expected_mask = func(m_list, *args, **kwargs)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
self.check(np.concatenate, ma_list=[self.a, self.ma])
self.check(np.concatenate, dtype="f4")
out = Masked(np.empty((4, 3)))
result = np.concatenate([self.ma, self.ma], out=out)
assert out is result
expected = np.concatenate([self.a, self.a])
expected_mask = np.concatenate([self.mask_a, self.mask_a])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.concatenate([self.ma, self.ma], out=np.empty((4, 3)))
def test_stack(self):
self.check(np.stack)
def test_column_stack(self):
self.check(np.column_stack)
def test_hstack(self):
self.check(np.hstack)
def test_vstack(self):
self.check(np.vstack)
def test_dstack(self):
self.check(np.dstack)
def test_block(self):
self.check(np.block)
out = np.block([[0.0, Masked(1.0, True)], [Masked(1, False), Masked(2, False)]])
expected = np.array([[0, 1.0], [1, 2]])
expected_mask = np.array([[False, True], [False, False]])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_append(self):
out = np.append(self.ma, self.mc, axis=1)
expected = np.append(self.a, self.c, axis=1)
expected_mask = np.append(self.mask_a, self.mask_c, axis=1)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_insert(self):
obj = (1, 1)
values = Masked([50.0, 25.0], mask=[True, False])
out = np.insert(self.ma.flatten(), obj, values)
expected = np.insert(self.a.flatten(), obj, [50.0, 25.0])
expected_mask = np.insert(self.mask_a.flatten(), obj, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(TypeError):
np.insert(self.a.flatten(), obj, values)
with pytest.raises(TypeError):
np.insert(self.ma.flatten(), Masked(obj), values)
class TestSplit:
@classmethod
def setup_class(self):
self.a = np.arange(54.0).reshape(3, 3, 6)
self.mask_a = np.zeros(self.a.shape, dtype=bool)
self.mask_a[1, 1, 1] = True
self.mask_a[0, 1, 4] = True
self.mask_a[1, 2, 5] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, func, *args, **kwargs):
out = func(self.ma, *args, **kwargs)
expected = func(self.a, *args, **kwargs)
expected_mask = func(self.mask_a, *args, **kwargs)
assert len(out) == len(expected)
for o, x, xm in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, xm)
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestMethodLikes(MaskedArraySetup):
def check(self, function, *args, method=None, **kwargs):
if method is None:
method = function.__name__
o = function(self.ma, *args, **kwargs)
x = getattr(self.ma, method)(*args, **kwargs)
assert_masked_equal(o, x)
def test_amax(self):
self.check(np.amax, method="max")
def test_amin(self):
self.check(np.amin, method="min")
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
self.check(np.any)
def test_all(self):
self.check(np.all)
def test_sometrue(self):
self.check(np.sometrue, method="any")
def test_alltrue(self):
self.check(np.alltrue, method="all")
def test_prod(self):
self.check(np.prod)
def test_product(self):
self.check(np.product, method="prod")
def test_cumprod(self):
self.check(np.cumprod)
def test_cumproduct(self):
self.check(np.cumproduct, method="cumprod")
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_, method="round")
def test_around(self):
self.check(np.around, method="round")
def test_clip(self):
self.check(np.clip, 2.0, 4.0)
self.check(np.clip, self.mb, self.mc)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
self.check(np.var)
class TestUfuncLike(InvariantMaskTestSetup):
def test_fix(self):
self.check(np.fix)
def test_angle(self):
a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.angle(ma)
expected = np.angle(ma.unmasked)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_i0(self):
self.check(np.i0)
def test_sinc(self):
self.check(np.sinc)
def test_where(self):
mask = [True, False, True]
out = np.where(mask, self.ma, 1000.0)
expected = np.where(mask, self.a, 1000.0)
expected_mask = np.where(mask, self.mask_a, False)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
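# A masked condition should also propagate its own mask to the result.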
mask2 = Masked(mask, [True, False, False])
out2 = np.where(mask2, self.ma, 1000.0)
expected2 = np.where(mask, self.a, 1000.0)
expected_mask2 = np.where(mask, self.mask_a, False) | mask2.mask
assert_array_equal(out2.unmasked, expected2)
assert_array_equal(out2.mask, expected_mask2)
def test_where_single_arg(self):
m = Masked(np.arange(3), mask=[True, False, False])
out = np.where(m)
expected = m.nonzero()
assert isinstance(out, tuple) and len(out) == 1
assert_array_equal(out[0], expected[0])
def test_where_wrong_number_of_arg(self):
with pytest.raises(ValueError, match="either both or neither"):
np.where([True, False, False], self.a)
def test_choose(self):
a = np.array([0, 1]).reshape((2, 1))
result = np.choose(a, (self.ma, self.mb))
expected = np.choose(a, (self.a, self.b))
expected_mask = np.choose(a, (self.mask_a, self.mask_b))
assert_array_equal(result.unmasked, expected)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.choose(a, (self.ma, self.mb), out=out)
assert result2 is out
assert_array_equal(result2, result)
with pytest.raises(TypeError):
np.choose(a, (self.ma, self.mb), out=np.zeros_like(expected))
def test_choose_masked(self):
ma = Masked(np.array([-1, 1]), mask=[True, False]).reshape((2, 1))
out = ma.choose((self.ma, self.mb))
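# Masked indices are filled with 0 to get a valid lookup; the
# corresponding output elements are then masked via "| ma.mask".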
expected = np.choose(ma.filled(0), (self.a, self.b))
expected_mask = np.choose(ma.filled(0), (self.mask_a, self.mask_b)) | ma.mask
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
with pytest.raises(ValueError):
ma.unmasked.choose((self.ma, self.mb))
@pytest.mark.parametrize("default", [-1.0, np.ma.masked, Masked(-1, mask=True)])
def test_select(self, default):
a, mask_a, ma = self.a, self.mask_a, self.ma
out = np.select([a < 1.5, a > 3.5], [ma, ma + 1], default=default)
expected = np.select(
[a < 1.5, a > 3.5],
[a, a + 1],
default=-1 if default is not np.ma.masked else 0,
)
expected_mask = np.select(
[a < 1.5, a > 3.5],
[mask_a, mask_a],
default=getattr(default, "mask", False),
)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_real_if_close(self):
a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
mask_a = np.array([True, False, True, False])
ma = Masked(a, mask=mask_a)
out = np.real_if_close(ma)
expected = np.real_if_close(a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_a)
def test_tril(self):
self.check(np.tril)
def test_triu(self):
self.check(np.triu)
def test_unwrap(self):
self.check(np.unwrap)
def test_nan_to_num(self):
self.check(np.nan_to_num)
ma = Masked([np.nan, 1.0], mask=[True, False])
o = np.nan_to_num(ma, copy=False)
assert_masked_equal(o, Masked([0.0, 1.0], mask=[True, False]))
assert ma is o
class TestUfuncLikeTests:
@classmethod
def setup_class(self):
self.a = np.array([[-np.inf, +np.inf, np.nan, 3.0, 4.0]] * 2)
self.mask_a = np.array([[False] * 5, [True] * 4 + [False]])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([[3.0001], [3.9999]])
self.mask_b = np.array([[True], [False]])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, func):
out = func(self.ma)
expected = func(self.a)
assert type(out) is MaskedNDArray
assert out.dtype.kind == "b"
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
assert not np.may_share_memory(out.mask, self.mask_a)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
o = np.isreal(Masked([1.0 + 1j], mask=False))
assert not o.unmasked and not o.mask
o = np.isreal(Masked([1.0 + 1j], mask=True))
assert not o.unmasked and o.mask
def test_iscomplex(self):
self.check(np.iscomplex)
o = np.iscomplex(Masked([1.0 + 1j], mask=False))
assert o.unmasked and not o.mask
o = np.iscomplex(Masked([1.0 + 1j], mask=True))
assert o.unmasked and o.mask
def test_isclose(self):
out = np.isclose(self.ma, self.mb, atol=0.01)
        expected = np.isclose(self.a, self.b, atol=0.01)
expected_mask = self.mask_a | self.mask_b
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_allclose(self):
out = np.allclose(self.ma, self.mb, atol=0.01)
expected = np.isclose(self.ma, self.mb, atol=0.01)[
self.mask_a | self.mask_b
].all()
assert_array_equal(out, expected)
def test_array_equal(self):
assert not np.array_equal(self.ma, self.ma)
assert not np.array_equal(self.ma, self.a)
assert np.array_equal(self.ma, self.ma, equal_nan=True)
assert np.array_equal(self.ma, self.a, equal_nan=True)
assert not np.array_equal(self.ma, self.mb)
ma2 = self.ma.copy()
ma2.mask |= np.isnan(self.a)
assert np.array_equal(ma2, self.ma)
def test_array_equiv(self):
assert np.array_equiv(self.mb, self.mb)
assert np.array_equiv(self.mb, self.b)
assert not np.array_equiv(self.ma, self.mb)
assert np.array_equiv(self.mb, np.stack([self.mb, self.mb]))
class TestOuterLikeFunctions(MaskedArraySetup):
def test_outer(self):
result = np.outer(self.ma, self.mb)
expected_data = np.outer(self.a.ravel(), self.b.ravel())
expected_mask = np.logical_or.outer(self.mask_a.ravel(), self.mask_b.ravel())
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
out = np.zeros_like(result)
result2 = np.outer(self.ma, self.mb, out=out)
assert result2 is out
assert result2 is not result
assert_masked_equal(result2, result)
out2 = np.zeros_like(result.unmasked)
with pytest.raises(TypeError):
np.outer(self.ma, self.mb, out=out2)
def test_kron(self):
result = np.kron(self.ma, self.mb)
expected_data = np.kron(self.a, self.b)
expected_mask = np.logical_or.outer(self.mask_a, self.mask_b).reshape(
result.shape
)
assert_array_equal(result.unmasked, expected_data)
assert_array_equal(result.mask, expected_mask)
class TestReductionLikeFunctions(MaskedArraySetup):
def test_average(self):
o = np.average(self.ma)
assert_masked_equal(o, self.ma.mean())
o = np.average(self.ma, weights=self.mb, axis=-1)
expected = np.average(self.a, weights=self.b, axis=-1)
expected_mask = (self.mask_a | self.mask_b).any(-1)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
def test_trace(self):
o = np.trace(self.ma)
expected = np.trace(self.a)
expected_mask = np.trace(self.mask_a).astype(bool)
assert_array_equal(o.unmasked, expected)
assert_array_equal(o.mask, expected_mask)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_count_nonzero(self, axis):
o = np.count_nonzero(self.ma, axis=axis)
expected = np.count_nonzero(self.ma.filled(0), axis=axis)
assert_array_equal(o, expected)
@pytest.mark.filterwarnings("ignore:all-nan")
class TestPartitionLikeFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(36.0).reshape(6, 6)
self.mask_a = np.zeros_like(self.a, bool)
        # Fill the lower triangle on purpose, so some slices are fully masked.
self.mask_a[np.tril_indices_from(self.a)] = True
self.ma = Masked(self.a, mask=self.mask_a)
def check(self, function, *args, **kwargs):
# Check function by comparing to nan-equivalent, with masked
# values set to NaN.
o = function(self.ma, *args, **kwargs)
nanfunc = getattr(np, "nan" + function.__name__)
nanfilled = self.ma.filled(np.nan)
expected = nanfunc(nanfilled, *args, **kwargs)
assert_array_equal(o.filled(np.nan), expected)
assert_array_equal(o.mask, np.isnan(expected))
# Also check that we can give an output MaskedArray.
if NUMPY_LT_1_25 and kwargs.get("keepdims", False):
# numpy bug gh-22714 prevents using out with keepdims=True.
# This is fixed in numpy 1.25.
return
out = np.zeros_like(o)
o2 = function(self.ma, *args, out=out, **kwargs)
assert o2 is out
assert_masked_equal(o2, o)
# But that a regular array cannot be used since it has no mask.
with pytest.raises(TypeError):
function(self.ma, *args, out=np.zeros_like(expected), **kwargs)
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_median(self, axis, keepdims):
self.check(np.median, axis=axis, keepdims=keepdims)
@pytest.mark.parametrize("keepdims", [False, True])
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_quantile(self, axis, keepdims):
self.check(np.quantile, q=[0.25, 0.5], axis=axis, keepdims=keepdims)
def test_quantile_out_of_range(self):
with pytest.raises(ValueError, match="must be in the range"):
np.quantile(self.ma, q=1.5)
@pytest.mark.parametrize("axis", [None, 0, 1])
def test_percentile(self, axis):
self.check(np.percentile, q=50, axis=axis)
class TestIntDiffFunctions(MaskedArraySetup):
def test_diff(self):
out = np.diff(self.ma)
expected = np.diff(self.a)
expected_mask = self.mask_a[:, 1:] | self.mask_a[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_diff_prepend_append(self):
out = np.diff(self.ma, prepend=Masked(-1, mask=True), append=1)
expected = np.diff(self.a, prepend=-1, append=1.0)
mask = np.concatenate(
[np.ones((2, 1), bool), self.mask_a, np.zeros((2, 1), bool)], axis=-1
)
expected_mask = mask[:, 1:] | mask[:, :-1]
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_trapz(self):
ma = self.ma.copy()
ma.mask[1] = False
out = np.trapz(ma)
assert_array_equal(out.unmasked, np.trapz(self.a))
assert_array_equal(out.mask, np.array([True, False]))
def test_gradient(self):
out = np.gradient(self.ma)
expected = np.gradient(self.a)
expected_mask = [
(self.mask_a[1:] | self.mask_a[:-1]).repeat(2, axis=0),
np.stack(
[
self.mask_a[:, 0] | self.mask_a[:, 1],
self.mask_a[:, 0] | self.mask_a[:, 2],
self.mask_a[:, 1] | self.mask_a[:, 2],
],
axis=-1,
),
]
for o, x, m in zip(out, expected, expected_mask):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestSpaceFunctions:
@classmethod
def setup_class(self):
self.a = np.arange(1.0, 7.0).reshape(2, 3)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([2.5, 10.0, 3.0])
self.mask_b = np.array([False, True, False])
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, *args, **kwargs):
out = function(self.ma, self.mb, 5)
expected = function(self.a, self.b, 5)
expected_mask = np.broadcast_to(
self.mask_a | self.mask_b, expected.shape
).copy()
        # TODO: make an implementation that ensures the start-point mask
        # is determined just by the start point (as for geomspace in
        # numpy 1.20)?
expected_mask[-1] = self.mask_b
if function is np.geomspace:
expected_mask[0] = self.mask_a
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_linspace(self):
self.check(np.linspace, 5)
def test_logspace(self):
self.check(np.logspace, 10)
def test_geomspace(self):
self.check(np.geomspace, 5)
class TestInterpolationFunctions(MaskedArraySetup):
def test_interp(self):
xp = np.arange(5.0)
fp = np.array([1.0, 5.0, 6.0, 19.0, 20.0])
mask_fp = np.array([False, False, False, True, False])
mfp = Masked(fp, mask=mask_fp)
x = np.array([1.5, 17.0])
mask_x = np.array([False, True])
mx = Masked(x, mask=mask_x)
out = np.interp(mx, xp, mfp)
expected = np.interp(x, xp[~mask_fp], fp[~mask_fp])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, mask_x)
def test_piecewise(self):
condlist = [self.a < 1, self.a >= 1]
out = np.piecewise(self.ma, condlist, [Masked(-1, mask=True), 1.0])
expected = np.piecewise(self.a, condlist, [-1, 1.0])
expected_mask = np.piecewise(self.mask_a, condlist, [True, False])
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
condlist2 = [self.a < 1, self.a >= 3]
out2 = np.piecewise(
self.ma,
condlist2,
[Masked(-1, True), 1, lambda x: Masked(np.full_like(x, 2.0), mask=~x.mask)],
)
expected = np.piecewise(self.a, condlist2, [-1, 1, 2])
expected_mask = np.piecewise(
self.mask_a, condlist2, [True, False, lambda x: ~x]
)
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
with pytest.raises(ValueError, match="with 2 condition"):
np.piecewise(self.ma, condlist2, [])
def test_regression_12978(self):
"""Regression tests for https://github.com/astropy/astropy/pull/12978"""
# This case produced incorrect results
mask = [False, True, False]
x = np.array([1, 2, 3])
xp = Masked(np.array([1, 2, 3]), mask=mask)
fp = Masked(np.array([1, 2, 3]), mask=mask)
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
# This case raised a ValueError
xp = np.array([1, 3])
fp = Masked(np.array([1, 3]))
result = np.interp(x, xp, fp)
assert_array_equal(result, x)
class TestBincount(MaskedArraySetup):
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
mask_i = np.array([True, False, False, True, False, False])
mi = Masked(i, mask=mask_i)
out = np.bincount(mi)
expected = np.bincount(i[~mask_i])
assert_array_equal(out, expected)
w = np.arange(len(i))
mask_w = np.array([True] + [False] * 5)
mw = Masked(w, mask=mask_w)
out2 = np.bincount(i, mw)
expected = np.bincount(i, w)
expected_mask = np.array([False, True, False, False, False])
assert_array_equal(out2.unmasked, expected)
assert_array_equal(out2.mask, expected_mask)
out3 = np.bincount(mi, mw)
expected = np.bincount(i[~mask_i], w[~mask_i])
expected_mask = np.array([False, False, False, False, False])
assert_array_equal(out3.unmasked, expected)
assert_array_equal(out3.mask, expected_mask)
class TestSortFunctions(MaskedArraySetup):
def test_sort(self):
o = np.sort(self.ma)
expected = self.ma.copy()
expected.sort()
assert_masked_equal(o, expected)
def test_sort_complex(self):
ma = Masked(
np.array([1 + 2j, 0 + 4j, 3 + 0j, -1 - 1j]),
mask=[True, False, False, False],
)
o = np.sort_complex(ma)
indx = np.lexsort((ma.unmasked.imag, ma.unmasked.real, ma.mask))
expected = ma[indx]
assert_masked_equal(o, expected)
@pytest.mark.skipif(not NUMPY_LT_1_24, reason="np.msort is deprecated")
def test_msort(self):
o = np.msort(self.ma)
expected = np.sort(self.ma, axis=0)
assert_masked_equal(o, expected)
def test_partition(self):
o = np.partition(self.ma, 1)
expected = self.ma.copy()
expected.partition(1)
assert_masked_equal(o, expected)
class TestStringFunctions:
# More elaborate tests done in test_masked.py
@classmethod
def setup_class(self):
self.ma = Masked(np.arange(3), mask=[True, False, False])
def test_array2string(self):
out0 = np.array2string(self.ma)
assert out0 == "[— 1 2]"
# Arguments are interpreted as usual.
out1 = np.array2string(self.ma, separator=", ")
assert out1 == "[—, 1, 2]"
# If we do pass in a formatter, though, it should be used.
out2 = np.array2string(self.ma, separator=", ", formatter={"all": hex})
assert out2 == "[———, 0x1, 0x2]"
# Also as positional argument (no, nobody will do this!)
out3 = np.array2string(
self.ma, None, None, None, ", ", "", np._NoValue, {"int": hex}
)
assert out3 == out2
# But not if the formatter is not relevant for us.
out4 = np.array2string(self.ma, separator=", ", formatter={"float": hex})
assert out4 == out1
def test_array_repr(self):
out = np.array_repr(self.ma)
assert out == "MaskedNDArray([—, 1, 2])"
ma2 = self.ma.astype("f4")
out2 = np.array_repr(ma2)
assert out2 == "MaskedNDArray([——, 1., 2.], dtype=float32)"
def test_array_str(self):
out = np.array_str(self.ma)
assert out == "[— 1 2]"
class TestBitFunctions:
@classmethod
def setup_class(self):
self.a = np.array([15, 255, 0], dtype="u1")
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.unpackbits(self.a).reshape(6, 4)
self.mask_b = np.array([False] * 15 + [True, True] + [False] * 7).reshape(6, 4)
self.mb = Masked(self.b, mask=self.mask_b)
@pytest.mark.parametrize("axis", [None, 1, 0])
def test_packbits(self, axis):
out = np.packbits(self.mb, axis=axis)
if axis is None:
expected = self.a
else:
expected = np.packbits(self.b, axis=axis)
expected_mask = np.packbits(self.mask_b, axis=axis) > 0
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, expected_mask)
def test_unpackbits(self):
out = np.unpackbits(self.ma)
mask = np.where(self.mask_a, np.uint8(255), np.uint8(0))
expected_mask = np.unpackbits(mask) > 0
assert_array_equal(out.unmasked, self.b.ravel())
assert_array_equal(out.mask, expected_mask)
class TestIndexFunctions(MaskedArraySetup):
"""Does not seem much sense to support these..."""
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.ma, 3)
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.ma,), 3)
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.ma)
class TestDtypeFunctions(MaskedArraySetup):
def check(self, function, *args, **kwargs):
out = function(self.ma, *args, **kwargs)
expected = function(self.a, *args, **kwargs)
assert out == expected
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.a.dtype)
self.check(np.can_cast, "f4")
def test_min_scalar_type(self):
out = np.min_scalar_type(self.ma[0, 0])
expected = np.min_scalar_type(self.a[0, 0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(MaskedArraySetup):
def test_meshgrid(self):
a = np.arange(1.0, 4.0)
mask_a = np.array([True, False, False])
ma = Masked(a, mask=mask_a)
b = np.array([2.5, 10.0, 3.0, 4.0])
mask_b = np.array([False, True, False, True])
mb = Masked(b, mask=mask_b)
oa, ob = np.meshgrid(ma, mb)
xa, xb = np.broadcast_arrays(a, b[:, np.newaxis])
ma, mb = np.broadcast_arrays(mask_a, mask_b[:, np.newaxis])
for o, x, m in ((oa, xa, ma), (ob, xb, mb)):
assert_array_equal(o.unmasked, x)
assert_array_equal(o.mask, m)
class TestMemoryFunctions(MaskedArraySetup):
def test_shares_memory(self):
assert np.shares_memory(self.ma, self.ma.unmasked)
assert not np.shares_memory(self.ma, self.ma.mask)
def test_may_share_memory(self):
assert np.may_share_memory(self.ma, self.ma.unmasked)
assert not np.may_share_memory(self.ma, self.ma.mask)
class TestDatetimeFunctions:
# Could in principle support np.is_busday, np.busday_count, np.busday_offset.
@classmethod
def setup_class(self):
self.a = np.array(["2020-12-31", "2021-01-01", "2021-01-02"], dtype="M")
self.mask_a = np.array([False, True, False])
self.ma = Masked(self.a, mask=self.mask_a)
self.b = np.array([["2021-01-07"], ["2021-01-31"]], dtype="M")
self.mask_b = np.array([[False], [True]])
self.mb = Masked(self.b, mask=self.mask_b)
def test_datetime_as_string(self):
out = np.datetime_as_string(self.ma)
expected = np.datetime_as_string(self.a)
assert_array_equal(out.unmasked, expected)
assert_array_equal(out.mask, self.mask_a)
@pytest.mark.filterwarnings("ignore:all-nan")
class TestNaNFunctions:
def setup_class(self):
self.a = np.array(
[
[np.nan, np.nan, 3.0],
[4.0, 5.0, 6.0],
]
)
self.mask_a = np.array(
[
[True, False, False],
[False, True, False],
]
)
self.b = np.arange(1, 7).reshape(2, 3)
self.mask_b = self.mask_a
self.ma = Masked(self.a, mask=self.mask_a)
self.mb = Masked(self.b, mask=self.mask_b)
def check(self, function, exact_fill_value=None, masked_result=True, **kwargs):
result = function(self.ma, **kwargs)
expected_data = function(self.ma.filled(np.nan), **kwargs)
expected_mask = np.isnan(expected_data)
if masked_result:
assert isinstance(result, Masked)
assert_array_equal(result.mask, expected_mask)
assert np.all(result == expected_data)
else:
assert not isinstance(result, Masked)
assert_array_equal(result, expected_data)
assert not np.any(expected_mask)
out = np.zeros_like(result)
result2 = function(self.ma, out=out, **kwargs)
assert result2 is out
assert_array_equal(result2, result)
def check_arg(self, function, **kwargs):
# arg functions do not have an 'out' argument, so just test directly.
result = function(self.ma, **kwargs)
assert not isinstance(result, Masked)
expected = function(self.ma.filled(np.nan), **kwargs)
assert_array_equal(result, expected)
def test_nanmin(self):
self.check(np.nanmin)
self.check(np.nanmin, axis=0)
self.check(np.nanmin, axis=1)
resi = np.nanmin(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.array([2, 4]))
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanmax(self):
self.check(np.nanmax)
def test_nanargmin(self):
self.check_arg(np.nanargmin)
self.check_arg(np.nanargmin, axis=1)
def test_nanargmax(self):
self.check_arg(np.nanargmax)
def test_nansum(self):
self.check(np.nansum, masked_result=False)
resi = np.nansum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([5, 10]))
def test_nanprod(self):
self.check(np.nanprod, masked_result=False)
resi = np.nanprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([6, 24]))
def test_nancumsum(self):
self.check(np.nancumsum, masked_result=False)
resi = np.nancumsum(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[0, 2, 5], [4, 4, 10]]))
def test_nancumprod(self):
self.check(np.nancumprod, masked_result=False)
resi = np.nancumprod(self.mb, axis=1)
assert not isinstance(resi, Masked)
assert_array_equal(resi, np.array([[1, 2, 6], [4, 4, 24]]))
def test_nanmean(self):
self.check(np.nanmean)
resi = np.nanmean(self.mb, axis=1)
assert_array_equal(resi.unmasked, np.mean(self.mb, axis=1).unmasked)
assert_array_equal(resi.mask, np.array([False, False]))
def test_nanvar(self):
self.check(np.nanvar)
self.check(np.nanvar, ddof=1)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nanquantile(self):
self.check(np.nanquantile, q=0.5)
def test_nanpercentile(self):
self.check(np.nanpercentile, q=50)
untested_functions = set()
if NUMPY_LT_1_23:
deprecated_functions = {
# Deprecated, removed in numpy 1.23
np.asscalar,
np.alen,
}
else:
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
# Get covered functions
tested_functions = set()
for cov_cls in list(filter(inspect.isclass, locals().values())):
for k, v in cov_cls.__dict__.items():
if inspect.isfunction(v) and k.startswith("test"):
f = k.replace("test_", "")
if f in all_wrapped_functions:
tested_functions.add(all_wrapped_functions[f])
def test_basic_testing_completeness():
assert all_wrapped == (tested_functions | IGNORED_FUNCTIONS | UNSUPPORTED_FUNCTIONS)
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_testing_completeness():
assert not tested_functions.intersection(untested_functions)
assert all_wrapped == (tested_functions | untested_functions)
class TestFunctionHelpersCompleteness:
@pytest.mark.parametrize(
"one, two",
itertools.combinations(
(
MASKED_SAFE_FUNCTIONS,
UNSUPPORTED_FUNCTIONS,
set(APPLY_TO_BOTH_FUNCTIONS.keys()),
set(DISPATCHED_FUNCTIONS.keys()),
),
2,
),
)
def test_no_duplicates(self, one, two):
assert not one.intersection(two)
def test_all_included(self):
included_in_helpers = (
MASKED_SAFE_FUNCTIONS
| UNSUPPORTED_FUNCTIONS
| set(APPLY_TO_BOTH_FUNCTIONS.keys())
| set(DISPATCHED_FUNCTIONS.keys())
)
assert all_wrapped == included_in_helpers
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_ignored_are_untested(self):
assert IGNORED_FUNCTIONS == untested_functions
|
d3118faae84eea9ee080c92c4249ad253c326eec28fecd627b89e1ed3c5205b9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from astropy import log
from astropy.io.fits import getdata
from astropy.visualization.mpl_normalize import simple_norm
__all__ = ["fits2bitmap", "main"]
def fits2bitmap(
filename,
ext=0,
out_fn=None,
stretch="linear",
power=1.0,
asinh_a=0.1,
min_cut=None,
max_cut=None,
min_percent=None,
max_percent=None,
percent=None,
cmap="Greys_r",
):
"""
Create a bitmap file from a FITS image, applying a stretching
transform between minimum and maximum cut levels and a matplotlib
colormap.
Parameters
----------
filename : str
The filename of the FITS file.
    ext : int or str
        FITS extension number or name of the image to convert. The
        default is 0.
out_fn : str
The filename of the output bitmap image. The type of bitmap
is determined by the filename extension (e.g. '.jpg', '.png').
The default is a PNG file with the same name as the FITS file.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}
The stretching function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
    min_cut : float, optional
        The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the
        image. The default is the image minimum. ``min_cut`` overrides
        ``min_percent``.
    max_cut : float, optional
        The pixel value of the maximum cut level. Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching
        the image. The default is the image maximum. ``max_cut``
        overrides ``max_percent``.
    min_percent : float, optional
        The percentile value used to determine the pixel value of the
        minimum cut level. The default is 0.0. ``min_percent`` overrides
        ``percent``.
    max_percent : float, optional
        The percentile value used to determine the pixel value of the
        maximum cut level. The default is 100.0. ``max_percent``
        overrides ``percent``.
    percent : float, optional
        The percentage of the image values used to determine the pixel
        values of the minimum and maximum cut levels. The lower cut
        level will be set at the ``(100 - percent) / 2`` percentile,
        while the upper cut level will be set at the ``(100 + percent) / 2``
        percentile; for example, ``percent=95`` places the cut levels at
        the 2.5 and 97.5 percentiles. The default is 100.0. ``percent``
        is ignored if either ``min_percent`` or ``max_percent`` is input.
cmap : str
The matplotlib color map name. The default is 'Greys_r'.
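    Examples
    --------
    A minimal usage sketch (the filename is illustrative)::
        from astropy.visualization.scripts.fits2bitmap import fits2bitmap
        # Convert the first FITS extension to 'image.png' using a
        # square-root stretch between the 1st and 99th percentiles.
        fits2bitmap('image.fits', stretch='sqrt', min_percent=1.0,
                    max_percent=99.0)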
"""
import matplotlib
import matplotlib.image as mimg
from astropy.utils.introspection import minversion
# __main__ gives ext as a string
try:
ext = int(ext)
except ValueError:
pass
try:
image = getdata(filename, ext)
except Exception as e:
log.critical(e)
return 1
    if image.ndim != 2:
        log.critical(f"data in FITS extension {ext} is not a 2D array")
        return 1
if out_fn is None:
out_fn = os.path.splitext(filename)[0]
if out_fn.endswith(".fits"):
out_fn = os.path.splitext(out_fn)[0]
out_fn += ".png"
# explicitly define the output format
out_format = os.path.splitext(out_fn)[1][1:]
try:
if minversion(matplotlib, "3.5"):
matplotlib.colormaps[cmap]
else:
from matplotlib import cm
cm.get_cmap(cmap)
except (ValueError, KeyError):
log.critical(f"{cmap} is not a valid matplotlib colormap name.")
return 1
norm = simple_norm(
image,
stretch=stretch,
power=power,
asinh_a=asinh_a,
min_cut=min_cut,
max_cut=max_cut,
min_percent=min_percent,
max_percent=max_percent,
percent=percent,
)
mimg.imsave(out_fn, norm(image), cmap=cmap, origin="lower", format=out_format)
log.info(f"Saved file to {out_fn}.")
def main(args=None):
import argparse
parser = argparse.ArgumentParser(
description="Create a bitmap file from a FITS image."
)
parser.add_argument(
"-e",
"--ext",
metavar="hdu",
default=0,
help="Specify the HDU extension number or name (Default is 0).",
)
parser.add_argument(
"-o",
metavar="filename",
type=str,
default=None,
help=(
"Filename for the output image (Default is a "
"PNG file with the same name as the FITS file)."
),
)
parser.add_argument(
"--stretch",
type=str,
default="linear",
help=(
'Type of image stretching ("linear", "sqrt", '
'"power", "log", or "asinh") (Default is "linear").'
),
)
parser.add_argument(
"--power",
type=float,
default=1.0,
help='Power index for "power" stretching (Default is 1.0).',
)
parser.add_argument(
"--asinh_a",
type=float,
default=0.1,
help=(
"The value in normalized image where the asinh "
"curve transitions from linear to logarithmic "
'behavior (used only for "asinh" stretch) '
"(Default is 0.1)."
),
)
parser.add_argument(
"--min_cut",
type=float,
default=None,
help="The pixel value of the minimum cut level (Default is the image minimum).",
)
parser.add_argument(
"--max_cut",
type=float,
default=None,
help="The pixel value of the maximum cut level (Default is the image maximum).",
)
parser.add_argument(
"--min_percent",
type=float,
default=None,
help=(
"The percentile value used to determine the "
"minimum cut level (Default is 0)."
),
)
parser.add_argument(
"--max_percent",
type=float,
default=None,
help=(
"The percentile value used to determine the "
"maximum cut level (Default is 100)."
),
)
parser.add_argument(
"--percent",
type=float,
default=None,
help=(
"The percentage of the image values used to "
"determine the pixel values of the minimum and "
"maximum cut levels (Default is 100)."
),
)
parser.add_argument(
"--cmap",
metavar="colormap_name",
type=str,
default="Greys_r",
help='matplotlib color map name (Default is "Greys_r").',
)
parser.add_argument(
"filename", nargs="+", help="Path to one or more FITS files to convert"
)
args = parser.parse_args(args)
for filename in args.filename:
fits2bitmap(
filename,
ext=args.ext,
out_fn=args.o,
stretch=args.stretch,
min_cut=args.min_cut,
max_cut=args.max_cut,
min_percent=args.min_percent,
max_percent=args.max_percent,
percent=args.percent,
power=args.power,
asinh_a=args.asinh_a,
cmap=args.cmap,
)
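# A minimal command-line sketch of the installed console script (the
# filename is illustrative):
#     fits2bitmap image.fits --stretch sqrt --percent 99.0 -o image.png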
|
6543a76914c37e65b3a89bfd96db1056f7b2fee602cf1c33de8bafe47ae8af8a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Helper functions for different kinds of WCSAxes instances.
"""
import numpy as np
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredEllipse, AnchoredSizeBar
import astropy.units as u
from astropy.wcs.utils import proj_plane_pixel_scales
__all__ = ["add_beam", "add_scalebar"]
CORNERS = {
"top right": 1,
"top left": 2,
"bottom left": 3,
"bottom right": 4,
"right": 5,
"left": 6,
"bottom": 8,
"top": 9,
}
def add_beam(
ax,
header=None,
major=None,
minor=None,
angle=None,
corner="bottom left",
frame=False,
borderpad=0.4,
pad=0.5,
**kwargs,
):
"""
    Display the beam shape and size.
Parameters
----------
ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
        WCSAxes instance in which the beam shape and size are displayed.
        The WCS must be celestial.
header : :class:`~astropy.io.fits.Header`, optional
Header containing the beam parameters. If specified, the ``BMAJ``,
``BMIN``, and ``BPA`` keywords will be searched in the FITS header
to set the major and minor axes and the position angle on the sky.
major : float or :class:`~astropy.units.Quantity`, optional
Major axis of the beam in degrees or an angular quantity.
    minor : float or :class:`~astropy.units.Quantity`, optional
        Minor axis of the beam in degrees or an angular quantity.
    angle : float or :class:`~astropy.units.Quantity`, optional
        Position angle of the beam on the sky, in degrees or as an
        angular quantity, measured in the anticlockwise direction.
    corner : str, optional
        The beam location. Acceptable values are ``'left'``, ``'right'``,
        ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``,
        ``'bottom left'`` (default), and ``'bottom right'``.
frame : bool, optional
Whether to display a frame behind the beam (default is ``False``).
borderpad : float, optional
Border padding, in fraction of the font size. Default is 0.4.
pad : float, optional
Padding around the beam, in fraction of the font size. Default is 0.5.
kwargs
Additional arguments are passed to :class:`matplotlib.patches.Ellipse`.
Notes
-----
This function may be inaccurate when:
- The pixel scales at the reference pixel are different from the pixel scales
within the image extent (e.g., when the reference pixel is well outside of
the image extent and the projection is non-linear)
- The pixel scales in the two directions are very different from each other
(e.g., rectangular pixels)
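    Examples
    --------
    A minimal sketch, assuming ``ax`` is a celestial
    :class:`~astropy.visualization.wcsaxes.WCSAxes` instance (the beam
    parameters are illustrative)::
        import astropy.units as u
        add_beam(ax, major=1.5 * u.arcsec, minor=1.0 * u.arcsec,
                 angle=30 * u.deg, corner='bottom left')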
"""
if header and major:
raise ValueError(
"Either header or major/minor/angle must be specified, not both."
)
if header:
major = header["BMAJ"]
minor = header["BMIN"]
angle = header["BPA"]
if isinstance(major, u.Quantity):
major = major.to(u.degree).value
if isinstance(minor, u.Quantity):
minor = minor.to(u.degree).value
if isinstance(angle, u.Quantity):
angle = angle.to(u.degree).value
if ax.wcs.is_celestial:
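        # Use the geometric mean of the pixel scales along the two axes
        # to convert between degrees and pixels (assumes the pixels are
        # close to square, as noted above).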
pix_scale = proj_plane_pixel_scales(ax.wcs)
sx = pix_scale[0]
sy = pix_scale[1]
degrees_per_pixel = np.sqrt(sx * sy)
else:
raise ValueError("Cannot show beam when WCS is not celestial")
minor /= degrees_per_pixel
major /= degrees_per_pixel
corner = CORNERS[corner]
beam = AnchoredEllipse(
ax.transData,
width=minor,
height=major,
angle=angle,
loc=corner,
pad=pad,
borderpad=borderpad,
frameon=frame,
)
beam.ellipse.set(**kwargs)
ax.add_artist(beam)
def add_scalebar(
ax,
length,
label=None,
corner="bottom right",
frame=False,
borderpad=0.4,
pad=0.5,
**kwargs,
):
"""Add a scale bar
Parameters
----------
ax : :class:`~astropy.visualization.wcsaxes.WCSAxes`
WCSAxes instance in which the scale bar is displayed. The WCS must be
celestial.
    length : float or :class:`~astropy.units.Quantity`
        The length of the scale bar in degrees or an angular quantity.
    label : str, optional
        Label to place below the scale bar.
    corner : str, optional
        Where to place the scale bar. Acceptable values are ``'left'``,
        ``'right'``, ``'top'``, ``'bottom'``, ``'top left'``, ``'top right'``,
        ``'bottom left'``, and ``'bottom right'`` (default).
frame : bool, optional
Whether to display a frame behind the scale bar (default is ``False``)
borderpad : float, optional
Border padding, in fraction of the font size. Default is 0.4.
pad : float, optional
Padding around the scale bar, in fraction of the font size. Default is 0.5.
kwargs
Additional arguments are passed to
:class:`mpl_toolkits.axes_grid1.anchored_artists.AnchoredSizeBar`.
Notes
-----
This function may be inaccurate when:
- The pixel scales at the reference pixel are different from the pixel scales
within the image extent (e.g., when the reference pixel is well outside of
the image extent and the projection is non-linear)
- The pixel scales in the two directions are very different from each other
(e.g., rectangular pixels)
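    Examples
    --------
    A minimal sketch, assuming ``ax`` is a celestial
    :class:`~astropy.visualization.wcsaxes.WCSAxes` instance (the length
    and label are illustrative)::
        import astropy.units as u
        add_scalebar(ax, 1 * u.arcmin, label="1'", corner='bottom right')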
"""
if isinstance(length, u.Quantity):
length = length.to(u.degree).value
if ax.wcs.is_celestial:
pix_scale = proj_plane_pixel_scales(ax.wcs)
sx = pix_scale[0]
sy = pix_scale[1]
degrees_per_pixel = np.sqrt(sx * sy)
else:
raise ValueError("Cannot show scalebar when WCS is not celestial")
length = length / degrees_per_pixel
corner = CORNERS[corner]
scalebar = AnchoredSizeBar(
ax.transData,
length,
label,
corner,
pad=pad,
borderpad=borderpad,
sep=5,
frameon=frame,
**kwargs,
)
ax.add_artist(scalebar)
|
0ab45f25edf8947c2a681492203f71db04d84fd14027c438f35015c55c5a32df | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Note: This file includes code derived from pywcsgrid2
#
# This file contains Matplotlib transformation objects (e.g. from pixel to world
# coordinates, but also world-to-world).
import abc
import numpy as np
from matplotlib.path import Path
from matplotlib.transforms import Transform
from astropy import units as u
from astropy.coordinates import (
BaseCoordinateFrame,
SkyCoord,
UnitSphericalRepresentation,
frame_transform_graph,
)
__all__ = [
"CurvedTransform",
"CoordinateTransform",
"World2PixelTransform",
"Pixel2WorldTransform",
]
class CurvedTransform(Transform, metaclass=abc.ABCMeta):
"""
Abstract base class for non-affine curved transforms
"""
input_dims = 2
output_dims = 2
is_separable = False
def transform_path(self, path):
"""
Transform a Matplotlib Path
Parameters
----------
path : :class:`~matplotlib.path.Path`
The path to transform
Returns
-------
path : :class:`~matplotlib.path.Path`
The resulting path
"""
return Path(self.transform(path.vertices), path.codes)
transform_path_non_affine = transform_path
def transform(self, input):
raise NotImplementedError("")
def inverted(self):
raise NotImplementedError("")
class CoordinateTransform(CurvedTransform):
has_inverse = True
def __init__(self, input_system, output_system):
super().__init__()
self._input_system_name = input_system
self._output_system_name = output_system
if isinstance(self._input_system_name, str):
frame_cls = frame_transform_graph.lookup_name(self._input_system_name)
if frame_cls is None:
raise ValueError(f"Frame {self._input_system_name} not found")
else:
self.input_system = frame_cls()
elif isinstance(self._input_system_name, BaseCoordinateFrame):
self.input_system = self._input_system_name
else:
raise TypeError(
"input_system should be a WCS instance, string, or a coordinate frame"
" instance"
)
if isinstance(self._output_system_name, str):
frame_cls = frame_transform_graph.lookup_name(self._output_system_name)
if frame_cls is None:
raise ValueError(f"Frame {self._output_system_name} not found")
else:
self.output_system = frame_cls()
elif isinstance(self._output_system_name, BaseCoordinateFrame):
self.output_system = self._output_system_name
else:
raise TypeError(
"output_system should be a WCS instance, string, or a coordinate frame"
" instance"
)
if self.output_system == self.input_system:
self.same_frames = True
else:
self.same_frames = False
@property
def same_frames(self):
return self._same_frames
@same_frames.setter
def same_frames(self, same_frames):
self._same_frames = same_frames
def transform(self, input_coords):
"""
Transform one set of coordinates to another
"""
if self.same_frames:
return input_coords
input_coords = input_coords * u.deg
x_in, y_in = input_coords[:, 0], input_coords[:, 1]
c_in = SkyCoord(
UnitSphericalRepresentation(x_in, y_in), frame=self.input_system
)
# We often need to transform arrays that contain NaN values, and filtering
# out the NaN values would have a performance hit, so instead we just pass
# on all values and just ignore Numpy warnings
with np.errstate(all="ignore"):
c_out = c_in.transform_to(self.output_system)
lon = c_out.spherical.lon.deg
lat = c_out.spherical.lat.deg
return np.concatenate((lon[:, np.newaxis], lat[:, np.newaxis]), axis=1)
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return CoordinateTransform(self._output_system_name, self._input_system_name)
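# A minimal usage sketch (illustrative values): CoordinateTransform maps
# Nx2 arrays of (lon, lat) values in degrees between frames, e.g.
#     trans = CoordinateTransform('fk5', 'galactic')
#     lonlat = trans.transform(np.array([[10.68, 41.27]]))
#     roundtrip = trans.inverted().transform(lonlat)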
class World2PixelTransform(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from world to pixel coordinates
"""
has_inverse = True
frame_in = None
@property
@abc.abstractmethod
def input_dims(self):
"""
The number of input world dimensions
"""
@abc.abstractmethod
def transform(self, world):
"""
        Transform world to pixel coordinates. You should pass in an NxM
        array where N is the number of points to transform, and M is the
        number of dimensions. This then returns the (x, y) pixel
        coordinates as an Nx2 array.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform
"""
class Pixel2WorldTransform(CurvedTransform, metaclass=abc.ABCMeta):
"""
Base transformation from pixel to world coordinates
"""
has_inverse = True
frame_out = None
@property
@abc.abstractmethod
def output_dims(self):
"""
The number of output world dimensions
"""
@abc.abstractmethod
def transform(self, pixel):
"""
        Transform pixel to world coordinates. You should pass in an Nx2
        array of (x, y) pixel coordinates to transform to world
        coordinates. This will then return an NxM array where M is the
        number of dimensions.
"""
@abc.abstractmethod
def inverted(self):
"""
Return the inverse of the transform
"""
|
a316dc354112ac651bf4c558b363a733b8a77c3d3d27e03084ecf1e1ac40c73b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
from .axislabels import AxisLabels
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .frame import EllipticalFrame, RectangularFrame1D
from .grid_paths import get_gridline_path, get_lon_lat_path
from .ticklabels import TickLabels
from .ticks import Ticks
__all__ = ["CoordinateHelper"]
# Matplotlib's gridlines use Line2D, but ours use PathPatch.
# Patches take a slightly different format of linestyle argument.
LINES_TO_PATCHES_LINESTYLE = {
"-": "solid",
"--": "dashed",
"-.": "dashdot",
":": "dotted",
"none": "none",
"None": "none",
" ": "none",
"": "none",
}
def wrap_angle_at(values, coord_wrap):
# On ARM processors, np.mod emits warnings if there are NaN values in the
# array, although this doesn't seem to happen on other processors.
with np.errstate(invalid="ignore"):
return np.mod(values - coord_wrap, 360.0) - (360.0 - coord_wrap)
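# For example (illustrative values): wrap_angle_at(np.array([350.0]), 180.0)
# gives array([-10.0]), while wrap_angle_at(np.array([350.0]), 360.0) keeps
# 350.0; results always lie in the interval [coord_wrap - 360, coord_wrap).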
class CoordinateHelper:
"""
Helper class to control one of the coordinates in the
:class:`~astropy.visualization.wcsaxes.WCSAxes`.
Parameters
----------
parent_axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate helper belongs to.
parent_map : :class:`~astropy.visualization.wcsaxes.CoordinatesMap`
The :class:`~astropy.visualization.wcsaxes.CoordinatesMap` object this
coordinate belongs to.
transform : `~matplotlib.transforms.Transform`
The transform corresponding to this coordinate system.
coord_index : int
The index of this coordinate in the
:class:`~astropy.visualization.wcsaxes.CoordinatesMap`.
coord_type : {'longitude', 'latitude', 'scalar'}
The type of this coordinate, which is used to determine the wrapping and
boundary behavior of coordinates. Longitudes wrap at ``coord_wrap``,
latitudes have to be in the range -90 to 90, and scalars are unbounded
and do not wrap.
coord_unit : `~astropy.units.Unit`
The unit that this coordinate is in given the output of transform.
format_unit : `~astropy.units.Unit`, optional
The unit to use to display the coordinates.
coord_wrap : float
The angle at which the longitude wraps (defaults to 360)
frame : `~astropy.visualization.wcsaxes.frame.BaseFrame`
The frame of the :class:`~astropy.visualization.wcsaxes.WCSAxes`.
"""
def __init__(
self,
parent_axes=None,
parent_map=None,
transform=None,
coord_index=None,
coord_type="scalar",
coord_unit=None,
coord_wrap=None,
frame=None,
format_unit=None,
default_label=None,
):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.parent_map = parent_map
self.transform = transform
self.coord_index = coord_index
self.coord_unit = coord_unit
self._format_unit = format_unit
self.frame = frame
self.default_label = default_label or ""
self._auto_axislabel = True
# Disable auto label for elliptical frames as it puts labels in
# annoying places.
if issubclass(self.parent_axes.frame_class, EllipticalFrame):
self._auto_axislabel = False
self.set_coord_type(coord_type, coord_wrap)
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(
self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure(),
)
self.ticks.display_minor_ticks(rcParams["xtick.minor.visible"])
self.minor_frequency = 5
# Initialize axis labels
self.axislabels = AxisLabels(
self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure(),
)
# Initialize container for the grid lines
self.grid_lines = []
# Initialize grid style. Take defaults from matplotlib.rcParams.
# Based on matplotlib.axis.YTick._get_gridline.
self.grid_lines_kwargs = {
"visible": False,
"facecolor": "none",
"edgecolor": rcParams["grid.color"],
"linestyle": LINES_TO_PATCHES_LINESTYLE[rcParams["grid.linestyle"]],
"linewidth": rcParams["grid.linewidth"],
"alpha": rcParams["grid.alpha"],
"transform": self.parent_axes.transData,
}
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
        draw_grid : bool
            Whether to show the grid lines.
        grid_type : {'lines', 'contours'}
            Whether to plot the grid by determining the grid lines in
            world coordinates and then plotting them in pixel coordinates
            (``'lines'``), or by determining the world coordinates at
            many positions in the image and then drawing contours
            (``'contours'``). The first is recommended for 2-d images,
            while for 3-d (or higher dimensional) cubes, the
            ``'contours'`` option is recommended. By default, 'lines' is
            used if the transform has an inverse, otherwise 'contours'
            is used.
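        Examples
        --------
        A minimal sketch, assuming ``ax`` is a WCSAxes instance (the
        style values are illustrative)::
            ax.coords[0].grid(color='white', alpha=0.5, linestyle='solid')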
"""
if grid_type == "lines" and not self.transform.has_inverse:
raise ValueError(
"The specified transform has no inverse, so the "
"grid cannot be drawn using grid_type='lines'"
)
if grid_type is None:
grid_type = "lines" if self.transform.has_inverse else "contours"
if grid_type in ("lines", "contours"):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if "color" in kwargs:
kwargs["edgecolor"] = kwargs.pop("color")
self.grid_lines_kwargs.update(kwargs)
if self.grid_lines_kwargs["visible"]:
if not draw_grid:
self.grid_lines_kwargs["visible"] = False
else:
self.grid_lines_kwargs["visible"] = True
def set_coord_type(self, coord_type, coord_wrap=None):
"""
Set the coordinate type for the axis.
Parameters
----------
coord_type : str
One of 'longitude', 'latitude' or 'scalar'
coord_wrap : float, optional
The value to wrap at for angular coordinates
"""
self.coord_type = coord_type
if coord_type == "longitude" and coord_wrap is None:
self.coord_wrap = 360
elif coord_type != "longitude" and coord_wrap is not None:
raise NotImplementedError(
"coord_wrap is not yet supported for non-longitude coordinates"
)
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == "scalar":
self._coord_scale_to_deg = None
self._formatter_locator = ScalarFormatterLocator(unit=self.coord_unit)
elif coord_type in ["longitude", "latitude"]:
if self.coord_unit is u.deg:
self._coord_scale_to_deg = None
else:
self._coord_scale_to_deg = self.coord_unit.to(u.deg)
self._formatter_locator = AngleFormatterLocator(
unit=self.coord_unit, format_unit=self._format_unit
)
else:
raise ValueError(
"coord_type should be one of 'scalar', 'longitude', or 'latitude'"
)
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or `~matplotlib.ticker.Formatter`
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, str):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter instance")
def format_coord(self, value, format="auto"):
"""
Given the value of a coordinate, will format it according to the
format of the formatter_locator.
Parameters
----------
value : float
The value to format
format : {'auto', 'ascii', 'latex'}, optional
The format to use - by default the formatting will be adjusted
depending on whether Matplotlib is using LaTeX or MathTex. To
get plain ASCII strings, use format='ascii'.
"""
if not hasattr(self, "_fl_spacing"):
return "" # _update_ticks has not been called yet
fl = self._formatter_locator
if isinstance(fl, AngleFormatterLocator):
# Convert to degrees if needed
if self._coord_scale_to_deg is not None:
value *= self._coord_scale_to_deg
if self.coord_type == "longitude":
value = wrap_angle_at(value, self.coord_wrap)
value = value * u.degree
value = value.to_value(fl._unit)
spacing = self._fl_spacing
string = fl.formatter(values=[value] * fl._unit, spacing=spacing, format=format)
return string[0]
def set_separator(self, separator):
"""
Set the separator to use for the angle major tick labels.
Parameters
----------
separator : str or tuple or None
The separator between numbers in sexagesimal representation. Can be
either a string or a tuple (or `None` for default).
"""
if not (self._formatter_locator.__class__ == AngleFormatterLocator):
raise TypeError("Separator can only be specified for angle coordinates")
if isinstance(separator, (str, tuple)) or separator is None:
self._formatter_locator.sep = separator
else:
raise TypeError("separator should be a string, a tuple, or None")
def set_format_unit(self, unit, decimal=None, show_decimal_unit=True):
"""
Set the unit for the major tick labels.
Parameters
----------
        unit : :class:`~astropy.units.Unit`
            The unit to which the tick labels should be converted.
decimal : bool, optional
Whether to use decimal formatting. By default this is `False`
for degrees or hours (which therefore use sexagesimal formatting)
and `True` for all other units.
show_decimal_unit : bool, optional
Whether to include units when in decimal mode.
"""
self._formatter_locator.format_unit = u.Unit(unit)
self._formatter_locator.decimal = decimal
self._formatter_locator.show_decimal_unit = show_decimal_unit
def get_format_unit(self):
"""
Get the unit for the major tick labels.
"""
return self._formatter_locator.format_unit
def set_ticks(
self,
values=None,
spacing=None,
number=None,
size=None,
width=None,
color=None,
alpha=None,
direction=None,
exclude_overlapping=None,
):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points
color : str or tuple, optional
A valid Matplotlib color for the ticks
alpha : float, optional
The alpha value (transparency) for the ticks.
direction : {'in','out'}, optional
Whether the ticks should point inwards or outwards.
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError(
"At most one of values, spacing, or number should be specified"
)
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if width is not None:
self.ticks.set_linewidth(width)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
if direction is not None:
if direction in ("in", "out"):
self.ticks.set_tick_out(direction == "out")
else:
raise ValueError("direction should be 'in' or 'out'")
if exclude_overlapping is not None:
warnings.warn(
"exclude_overlapping= should be passed to "
"set_ticklabel instead of set_ticks",
AstropyDeprecationWarning,
)
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
def set_ticks_position(self, position):
"""
Set where ticks should appear
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
Should be a string containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be
shown on the left and bottom axis.
"""
self.ticks.set_visible_axes(position)
def set_ticks_visible(self, visible):
"""
Set whether ticks are visible or not.
Parameters
----------
visible : bool
The visibility of ticks. Setting as ``False`` will hide ticks
along this coordinate.
"""
self.ticks.set_visible(visible)
def set_ticklabel(
self, color=None, size=None, pad=None, exclude_overlapping=None, **kwargs
):
"""
Set the visual properties for the tick labels.
Parameters
----------
size : float, optional
The size of the ticks labels in points
color : str or tuple, optional
A valid Matplotlib color for the tick labels
pad : float, optional
Distance in points between tick and label.
exclude_overlapping : bool, optional
Whether to exclude tick labels that overlap over each other.
**kwargs
Other keyword arguments are passed to :class:`matplotlib.text.Text`.
"""
if size is not None:
self.ticklabels.set_size(size)
if color is not None:
self.ticklabels.set_color(color)
if pad is not None:
self.ticklabels.set_pad(pad)
if exclude_overlapping is not None:
self.ticklabels.set_exclude_overlapping(exclude_overlapping)
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
tick labels to be shown on the left and bottom axis.
"""
self.ticklabels.set_visible_axes(position)
def set_ticklabel_visible(self, visible):
"""
Set whether the tick labels are visible or not.
Parameters
----------
visible : bool
            The visibility of tick labels. Setting as ``False`` will hide
            this coordinate's tick labels.
"""
self.ticklabels.set_visible(visible)
def set_axislabel(self, text, minpad=1, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
minpad : float, optional
The padding for the label in terms of axis label font size.
**kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
fontdict = kwargs.pop("fontdict", None)
# NOTE: When using plt.xlabel/plt.ylabel, minpad can get set explicitly
# to None so we need to make sure that in that case we change to a
# default numerical value.
if minpad is None:
minpad = 1
self.axislabels.set_text(text)
self.axislabels.set_minpad(minpad)
self.axislabels.set(**kwargs)
if fontdict is not None:
self.axislabels.update(fontdict)
def get_axislabel(self):
"""
Get the text for the axis label
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_auto_axislabel(self, auto_label):
"""
        Set whether a default axis label is rendered if no explicit
        label is provided.
Parameters
----------
auto_label : `bool`
`True` if default labels will be rendered.
"""
self._auto_axislabel = bool(auto_label)
def get_auto_axislabel(self):
"""
        Get whether a default axis label will be rendered if no explicit
        label is provided.
Returns
-------
auto_axislabel : `bool`
`True` if default labels will be rendered.
"""
return self._auto_axislabel
def _get_default_axislabel(self):
unit = self.get_format_unit() or self.coord_unit
if not unit or unit is u.one or self.coord_type in ("longitude", "latitude"):
return f"{self.default_label}"
else:
return f"{self.default_label} [{unit:latex}]"
def set_axislabel_position(self, position):
"""
Set where axis labels should appear
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
axis label to be shown on the left and bottom axis.
"""
self.axislabels.set_visible_axes(position)
def set_axislabel_visibility_rule(self, rule):
"""
Set the rule used to determine when the axis label is drawn.
Parameters
----------
rule : str
If the rule is 'always' axis labels will always be drawn on the
axis. If the rule is 'ticks' the label will only be drawn if ticks
were drawn on that axis. If the rule is 'labels' the axis label
will only be drawn if tick labels were drawn on that axis.
"""
self.axislabels.set_visibility_rule(rule)
    def get_axislabel_visibility_rule(self):
"""
Get the rule used to determine when the axis label is drawn.
"""
return self.axislabels.get_visibility_rule()
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw_grid(self, renderer):
renderer.open_group("grid lines")
self._update_ticks()
if self.grid_lines_kwargs["visible"]:
if isinstance(self.frame, RectangularFrame1D):
self._update_grid_lines_1d()
else:
if self._grid_type == "lines":
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == "lines":
frame_patch = self.frame.patch
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(frame_patch)
p.draw(renderer)
elif self._grid is not None:
for line in self._grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group("grid lines")
def _draw_ticks(self, renderer, bboxes, ticklabels_bbox):
"""
Draw all ticks and ticklabels.
"""
renderer.open_group("ticks")
self.ticks.draw(renderer)
self.ticklabels.draw(
renderer,
bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
tick_out_size=self.ticks.out_size,
)
renderer.close_group("ticks")
def _draw_axislabels(self, renderer, bboxes, ticklabels_bbox, visible_ticks):
# Render the default axis label if no axis label is set.
if self._auto_axislabel and not self.get_axislabel():
self.set_axislabel(self._get_default_axislabel())
renderer.open_group("axis labels")
self.axislabels.draw(
renderer,
bboxes=bboxes,
ticklabels_bbox=ticklabels_bbox,
coord_ticklabels_bbox=ticklabels_bbox[self],
ticks_locs=self.ticks.ticks_locs,
visible_ticks=visible_ticks,
)
renderer.close_group("axis labels")
def _update_ticks(self):
if self.coord_index is None:
return
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_map.get_coord_range()
# First find the ticks we want to show
tick_world_coordinates, self._fl_spacing = self.locator(
*coord_range[self.coord_index]
)
if self.ticks.get_display_minor_ticks():
minor_ticks_w_coordinates = self._formatter_locator.minor_locator(
self._fl_spacing,
self.get_minor_frequency(),
*coord_range[self.coord_index],
)
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
from . import conf
frame = self.frame.sample(conf.frame_boundary_samples)
self.ticks.clear()
self.ticklabels.clear()
self.lblinfo = []
self.lbl_world = []
# Look up parent axes' transform from data to figure coordinates.
#
# See:
# https://matplotlib.org/stable/tutorials/advanced/transforms_tutorial.html#the-transformation-pipeline
transData = self.parent_axes.transData
invertedTransLimits = transData.inverted()
for axis, spine in frame.items():
if spine.data.size == 0:
continue
if not isinstance(self.frame, RectangularFrame1D):
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
pixel0 = spine.data
world0 = spine.world[:, self.coord_index]
if np.isnan(world0).all():
continue
axes0 = transData.transform(pixel0)
# Advance 2 pixels in figure coordinates
pixel1 = axes0.copy()
pixel1[:, 0] += 2.0
pixel1 = invertedTransLimits.transform(pixel1)
with np.errstate(invalid="ignore"):
world1 = self.transform.transform(pixel1)[:, self.coord_index]
# Advance 2 pixels in figure coordinates
pixel2 = axes0.copy()
pixel2[:, 1] += 2.0 if self.frame.origin == "lower" else -2.0
pixel2 = invertedTransLimits.transform(pixel2)
with np.errstate(invalid="ignore"):
world2 = self.transform.transform(pixel2)[:, self.coord_index]
dx = world1 - world0
dy = world2 - world0
# Rotate by 90 degrees
dx, dy = -dy, dx
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
dx *= self._coord_scale_to_deg
dy *= self._coord_scale_to_deg
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.0)
dy = wrap_angle_at(dy, 180.0)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack(
[spine.normal_angle, spine.normal_angle[-1]]
)
with np.errstate(invalid="ignore"):
reset = ((normal_angle_full - tick_angle) % 360 > 90.0) & (
(tick_angle - normal_angle_full) % 360 > 90.0
)
tick_angle[reset] -= 180.0
else:
rotation = 90 if axis == "b" else -90
tick_angle = np.zeros((conf.frame_boundary_samples,)) + rotation
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
w1 = w1 * self._coord_scale_to_deg
w2 = w2 * self._coord_scale_to_deg
w1 = wrap_angle_at(w1, self.coord_wrap)
w2 = wrap_angle_at(w2, self.coord_wrap)
with np.errstate(invalid="ignore"):
w1[w2 - w1 > 180.0] += 360
w2[w1 - w2 > 180.0] += 360
if self._coord_scale_to_deg is not None:
w1 = w1 / self._coord_scale_to_deg
w2 = w2 / self._coord_scale_to_deg
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
self._compute_ticks(tick_world_coordinates, spine, axis, w1, w2, tick_angle)
if self.ticks.get_display_minor_ticks():
self._compute_ticks(
minor_ticks_w_coordinates,
spine,
axis,
w1,
w2,
tick_angle,
ticks="minor",
)
# format tick labels, add to scene
text = self.formatter(
self.lbl_world * tick_world_coordinates.unit, spacing=self._fl_spacing
)
for kwargs, txt in zip(self.lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _compute_ticks(
self, tick_world_coordinates, spine, axis, w1, w2, tick_angle, ticks="major"
):
if self.coord_type == "longitude":
tick_world_coordinates_values = tick_world_coordinates.to_value(u.deg)
tick_world_coordinates_values = np.hstack(
[tick_world_coordinates_values, tick_world_coordinates_values + 360]
)
tick_world_coordinates_values *= u.deg.to(self.coord_unit)
else:
tick_world_coordinates_values = tick_world_coordinates.to_value(
self.coord_unit
)
for t in tick_world_coordinates_values:
# Find steps where a tick is present. We have to check
# separately for the case where the tick falls exactly on the
# frame points, otherwise we'll get two matches, one for w1 and
# one for w2.
with np.errstate(invalid="ignore"):
intersections = np.hstack(
[
np.nonzero((t - w1) == 0)[0],
np.nonzero(((t - w1) * (t - w2)) < 0)[0],
]
)
# But we also need to check for intersection with the last w2
if t - w2[-1] == 0:
intersections = np.append(intersections, len(w2) - 1)
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
if np.allclose(w1[imin], w2[imin], rtol=1.0e-13, atol=1.0e-13):
continue # tick is exactly aligned with frame
else:
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (
spine.data[imax, 0] - spine.data[imin, 0]
)
y_data_i = spine.data[imin, 1] + frac * (
spine.data[imax, 1] - spine.data[imin, 1]
)
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.0:
delta_angle -= 360.0
elif delta_angle < -180.0:
delta_angle += 360.0
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == "longitude":
if self._coord_scale_to_deg is not None:
t *= self._coord_scale_to_deg
world = wrap_angle_at(t, self.coord_wrap)
if self._coord_scale_to_deg is not None:
world /= self._coord_scale_to_deg
else:
world = t
if ticks == "major":
self.ticks.add(
axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac,
)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
self.lblinfo.append(
dict(
axis=axis,
data=(x_data_i, y_data_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac,
)
)
self.lbl_world.append(world)
else:
self.ticks.add_minor(
minor_axis=axis,
minor_pixel=(x_data_i, y_data_i),
minor_world=world,
minor_angle=angle_i,
minor_axis_displacement=imin + frac,
)
def display_minor_ticks(self, display_minor_ticks):
"""
Display minor ticks for this coordinate.
Parameters
----------
display_minor_ticks : bool
Whether or not to display minor ticks.
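
        Examples
        --------
        A minimal usage sketch, assuming ``ax`` is an existing
        `~astropy.visualization.wcsaxes.WCSAxes` instance::

            ax.coords[0].display_minor_ticks(True)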
"""
self.ticks.display_minor_ticks(display_minor_ticks)
def get_minor_frequency(self):
return self.minor_frequency
def set_minor_frequency(self, frequency):
"""
        Set the frequency of minor ticks per major tick.
Parameters
----------
frequency : int
            The number of minor ticks per major tick.
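
        Examples
        --------
        A short sketch, assuming ``ax`` is an existing WCSAxes instance;
        together with ``display_minor_ticks``, this draws five minor ticks
        per major tick::

            ax.coords[0].display_minor_ticks(True)
            ax.coords[0].set_minor_frequency(5)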
"""
self.minor_frequency = frequency
def _update_grid_lines_1d(self):
if self.coord_index is None:
return
x_ticks_pos = [a[0] for a in self.ticks.pixel["b"]]
ymin, ymax = self.parent_axes.get_ylim()
self.grid_lines = []
for x_coord in x_ticks_pos:
pixel = [[x_coord, ymin], [x_coord, ymax]]
self.grid_lines.append(Path(pixel))
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
        # and draw contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
if self.coord_index is None:
return
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
tick_world_coordinates_values = tick_world_coordinates.to_value(self.coord_unit)
n_coord = len(tick_world_coordinates_values)
from . import conf
n_samples = conf.grid_samples
xy_world = np.zeros((n_samples * n_coord, 2))
self.grid_lines = []
for iw, w in enumerate(tick_world_coordinates_values):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
if self.coord_index == 0:
xy_world[subset, 0] = np.repeat(w, n_samples)
xy_world[subset, 1] = np.linspace(
coord_range[1][0], coord_range[1][1], n_samples
)
else:
xy_world[subset, 0] = np.linspace(
coord_range[0][0], coord_range[0][1], n_samples
)
xy_world[subset, 1] = np.repeat(w, n_samples)
# We now convert all the world coordinates to pixel coordinates in a
# single go rather than doing this in the gridline to path conversion
# to fully benefit from vectorized coordinate transformations.
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
for iw in range(n_coord):
subset = slice(iw * n_samples, (iw + 1) * n_samples)
self.grid_lines.append(
self._get_gridline(
xy_world[subset], pixel[subset], xy_world_round[subset]
)
)
def add_tickable_gridline(self, name, constant):
"""
Define a gridline that can be used for ticks and labels.
This gridline is not itself drawn, but instead can be specified in calls to
methods such as
:meth:`~astropy.visualization.wcsaxes.coordinate_helpers.CoordinateHelper.set_ticklabel_position`
for drawing ticks and labels. Since the gridline has a constant value in this
coordinate, and thus would not have any ticks or labels for the same coordinate,
the call to
:meth:`~astropy.visualization.wcsaxes.coordinate_helpers.CoordinateHelper.set_ticklabel_position`
would typically be made on the complementary coordinate.
Parameters
----------
name : str
The name for the gridline, usually a single character, but can be longer
constant : `~astropy.units.Quantity`
The constant coordinate value of the gridline
Notes
-----
A limitation is that the tickable part of the gridline must be contiguous. If
the gridline consists of more than one disconnected segment within the plot
extent, only one of those segments will be made tickable.
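
        Examples
        --------
        A sketch of typical usage, assuming ``ax`` is a WCSAxes showing
        longitude/latitude; the name ``'g'`` and the 30 degree constant are
        illustrative. Ticks and labels for the longitude are then drawn along
        the line of constant latitude::

            import astropy.units as u
            ax.coords[1].add_tickable_gridline('g', 30 * u.deg)
            ax.coords[0].set_ticklabel_position('g')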
"""
if self.coord_index is None:
return
if name in self.frame:
raise ValueError(f"The frame already has a spine with the name '{name}'")
coord_range = self.parent_map.get_coord_range()
constant = constant.to_value(self.coord_unit)
from . import conf
n_samples = conf.grid_samples
# See comment in _update_grid_lines() about a WCS with more than 2 axes
xy_world = np.zeros((n_samples, 2))
xy_world[:, self.coord_index] = np.repeat(constant, n_samples)
# If the complementary coordinate is longitude, we attempt to close the gridline
# If such closure is a discontinuity, it will be filtered out later
if self.parent_map[1 - self.coord_index].coord_type == "longitude":
xy_world[:-1, 1 - self.coord_index] = np.linspace(
coord_range[1 - self.coord_index][0],
coord_range[1 - self.coord_index][1],
n_samples - 1,
)
xy_world[-1, 1 - self.coord_index] = coord_range[1 - self.coord_index][0]
else:
xy_world[:, 1 - self.coord_index] = np.linspace(
coord_range[1 - self.coord_index][0],
coord_range[1 - self.coord_index][1],
n_samples,
)
# Transform line to pixel coordinates
pixel = self.transform.inverted().transform(xy_world)
# Create round-tripped values for checking
xy_world_round = self.transform.transform(pixel)
# Get the path of the gridline, which masks hidden parts
gridline = self._get_gridline(xy_world, pixel, xy_world_round)
def data_for_spine(spine):
vertices = gridline.vertices.copy()
codes = gridline.codes.copy()
# Retain the parts of the gridline within the rectangular plot bounds.
# We ought to use the potentially non-rectangular plot frame, but
# calculating that patch requires updating all spines first, which is a
# catch-22.
xmin, xmax = spine.parent_axes.get_xlim()
ymin, ymax = spine.parent_axes.get_ylim()
keep = (
(vertices[:, 0] >= xmin)
& (vertices[:, 0] <= xmax)
& (vertices[:, 1] >= ymin)
& (vertices[:, 1] <= ymax)
)
codes[~keep] = Path.MOVETO
codes[1:][~keep[:-1]] = Path.MOVETO
# We isolate the last segment (the last run of LINETOs), which must be preceded
# by at least one MOVETO and may be succeeded by MOVETOs.
# We have to account for longitude wrapping as well.
# Bail out if there is no visible segment
lineto = np.flatnonzero(codes == Path.LINETO)
if np.size(lineto) == 0:
return np.zeros((0, 2))
# Find the start of the last segment (the last MOVETO before the LINETOs)
last_segment = np.flatnonzero(codes[: lineto[-1]] == Path.MOVETO)[-1]
# Double the gridline if it is closed (i.e., spans all longitudes)
if vertices[0, 0] == vertices[-1, 0] and vertices[0, 1] == vertices[-1, 1]:
codes = np.concatenate([codes, codes[1:]])
vertices = np.vstack([vertices, vertices[1:, :]])
# Stop the last segment before any trailing MOVETOs
moveto = np.flatnonzero(codes[last_segment + 1 :] == Path.MOVETO)
if np.size(moveto) > 0:
return vertices[last_segment : last_segment + moveto[0] + 1, :]
else:
return vertices[last_segment:n_samples, :]
self.frame[name] = self.frame.spine_class(
self.frame.parent_axes, self.frame.transform, data_func=data_for_spine
)
def _get_gridline(self, xy_world, pixel, xy_world_round):
if self.coord_type == "scalar":
return get_gridline_path(xy_world, pixel)
else:
return get_lon_lat_path(xy_world, pixel, xy_world_round)
def _clear_grid_contour(self):
if hasattr(self, "_grid") and self._grid:
for line in self._grid.collections:
line.remove()
def _update_grid_contour(self):
if self.coord_index is None:
return
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
from . import conf
res = conf.contour_grid_samples
x, y = np.meshgrid(np.linspace(xmin, xmax, res), np.linspace(ymin, ymax, res))
pixel = np.array([x.ravel(), y.ravel()]).T
world = self.transform.transform(pixel)
field = world[:, self.coord_index].reshape(res, res).T
coord_range = self.parent_map.get_coord_range()
tick_world_coordinates, spacing = self.locator(*coord_range[self.coord_index])
        # tick_world_coordinates is a Quantity array and we only need its values
tick_world_coordinates_values = tick_world_coordinates.value
if self.coord_type == "longitude":
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (
tick_world_coordinates_values[0] + tick_world_coordinates_values[1]
)
field = wrap_angle_at(field, mid)
tick_world_coordinates_values = wrap_angle_at(
tick_world_coordinates_values, mid
)
# Replace wraps by NaN
with np.errstate(invalid="ignore"):
reset = (np.abs(np.diff(field[:, :-1], axis=0)) > 180) | (
np.abs(np.diff(field[:-1, :], axis=1)) > 180
)
field[:-1, :-1][reset] = np.nan
field[1:, :-1][reset] = np.nan
field[:-1, 1:][reset] = np.nan
field[1:, 1:][reset] = np.nan
if len(tick_world_coordinates_values) > 0:
with np.errstate(invalid="ignore"):
self._grid = self.parent_axes.contour(
x,
y,
field.transpose(),
levels=np.sort(tick_world_coordinates_values),
)
else:
self._grid = None
def tick_params(self, which="both", **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
which : {'both', 'major', 'minor'}, optional
            Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this will not work correctly if
the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this will not work
correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
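
        Examples
        --------
        A minimal sketch, assuming ``ax`` is an existing WCSAxes instance::

            ax.coords[0].tick_params(direction='in', length=6, colors='white')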
"""
# First do some sanity checking on the keyword arguments
# colors= is a fallback default for color and labelcolor
if "colors" in kwargs:
if "color" not in kwargs:
kwargs["color"] = kwargs["colors"]
if "labelcolor" not in kwargs:
kwargs["labelcolor"] = kwargs["colors"]
# The only property that can be set *specifically* for minor ticks is
# the length. In future we could consider having a separate Ticks instance
# for minor ticks so that e.g. the color can be set separately.
if which == "minor":
if len(set(kwargs) - {"length"}) > 0:
raise ValueError(
"When setting which='minor', the only "
"property that can be set at the moment is "
"'length' (the minor tick length)"
)
else:
if "length" in kwargs:
self.ticks.set_minor_ticksize(kwargs["length"])
return
# At this point, we can now ignore the 'which' argument.
# Set the tick arguments
self.set_ticks(
size=kwargs.get("length"),
width=kwargs.get("width"),
color=kwargs.get("color"),
direction=kwargs.get("direction"),
)
# Set the tick position
position = None
for arg in ("bottom", "left", "top", "right"):
if arg in kwargs and position is None:
position = ""
if kwargs.get(arg):
position += arg[0]
if position is not None:
self.set_ticks_position(position)
# Set the tick label arguments.
self.set_ticklabel(
color=kwargs.get("labelcolor"),
size=kwargs.get("labelsize"),
pad=kwargs.get("pad"),
)
# Set the tick label position
position = None
for arg in ("bottom", "left", "top", "right"):
if "label" + arg in kwargs and position is None:
position = ""
if kwargs.get("label" + arg):
position += arg[0]
if position is not None:
self.set_ticklabel_position(position)
# And the grid settings
if "grid_color" in kwargs:
self.grid_lines_kwargs["edgecolor"] = kwargs["grid_color"]
if "grid_alpha" in kwargs:
self.grid_lines_kwargs["alpha"] = kwargs["grid_alpha"]
if "grid_linewidth" in kwargs:
self.grid_lines_kwargs["linewidth"] = kwargs["grid_linewidth"]
if "grid_linestyle" in kwargs:
if kwargs["grid_linestyle"] in LINES_TO_PATCHES_LINESTYLE:
self.grid_lines_kwargs["linestyle"] = LINES_TO_PATCHES_LINESTYLE[
kwargs["grid_linestyle"]
]
else:
self.grid_lines_kwargs["linestyle"] = kwargs["grid_linestyle"]
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file defines the AngleFormatterLocator class, which provides both a
# formatter method and a locator method for a given label spacing. The
# advantage of keeping the two connected is that we need to make sure that the
# formatter can correctly represent the spacing requested and vice versa. For
# example, a format of dd:mm cannot work with a tick spacing that is not a
# multiple of one arcminute.
import re
import warnings
import numpy as np
from matplotlib import rcParams
from astropy import units as u
from astropy.coordinates import Angle
from astropy.units import UnitsError
DMS_RE = re.compile("^dd(:mm(:ss(.(s)+)?)?)?$")
HMS_RE = re.compile("^hh(:mm(:ss(.(s)+)?)?)?$")
DDEC_RE = re.compile("^d(.(d)+)?$")
DMIN_RE = re.compile("^m(.(m)+)?$")
DSEC_RE = re.compile("^s(.(s)+)?$")
SCAL_RE = re.compile("^x(.(x)+)?$")
# Units with custom representations - see the note where it is used inside
# AngleFormatterLocator.formatter for more details.
CUSTOM_UNITS = {
u.degree: u.def_unit(
"custom_degree",
represents=u.degree,
format={"generic": "\xb0", "latex": r"^\circ", "unicode": "°"},
),
u.arcmin: u.def_unit(
"custom_arcmin",
represents=u.arcmin,
format={"generic": "'", "latex": r"^\prime", "unicode": "′"},
),
u.arcsec: u.def_unit(
"custom_arcsec",
represents=u.arcsec,
format={"generic": '"', "latex": r"^{\prime\prime}", "unicode": "″"},
),
u.hourangle: u.def_unit(
"custom_hourangle",
represents=u.hourangle,
format={
"generic": "h",
"latex": r"^{\mathrm{h}}",
"unicode": r"$\mathregular{^h}$",
},
),
}
class BaseFormatterLocator:
"""
    Base class for joint formatter/locator helpers.
"""
def __init__(
self,
values=None,
number=None,
spacing=None,
format=None,
unit=None,
format_unit=None,
):
if len([x for x in (values, number, spacing) if x is None]) < 2:
raise ValueError("At most one of values/number/spacing can be specified")
self._unit = unit
self._format_unit = format_unit or unit
if values is not None:
self.values = values
elif number is not None:
self.number = number
elif spacing is not None:
self.spacing = spacing
else:
self.number = 5
self.format = format
@property
def values(self):
return self._values
@values.setter
def values(self, values):
        if not isinstance(values, u.Quantity) or values.ndim != 1:
            raise TypeError("values should be an astropy.units.Quantity array")
        if not values.unit.is_equivalent(self._unit):
            raise UnitsError(
                "values should be in units compatible with "
                f"coordinate units ({self._unit}) but found {values.unit}"
            )
self._number = None
self._spacing = None
self._values = values
@property
def number(self):
return self._number
@number.setter
def number(self, number):
self._number = number
self._spacing = None
self._values = None
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
self._number = None
self._spacing = spacing
self._values = None
def minor_locator(self, spacing, frequency, value_min, value_max):
if self.values is not None:
return [] * self._unit
minor_spacing = spacing.value / frequency
values = self._locate_values(value_min, value_max, minor_spacing)
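        # The candidate positions are spaced at spacing/frequency, so every
        # ``frequency``-th candidate coincides with a major tick; drop those.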
index = np.where((values % frequency) == 0)
index = index[0][0]
values = np.delete(values, np.s_[index::frequency])
return values * minor_spacing * self._unit
@property
def format_unit(self):
return self._format_unit
@format_unit.setter
def format_unit(self, unit):
self._format_unit = u.Unit(unit)
@staticmethod
def _locate_values(value_min, value_max, spacing):
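        # E.g. value_min=0.8, value_max=3.3, spacing=1.0 gives imin=1, imax=3,
        # so the returned multipliers are [1, 2, 3] (ticks at 1.0, 2.0, 3.0).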
imin = np.ceil(value_min / spacing)
imax = np.floor(value_max / spacing)
values = np.arange(imin, imax + 1, dtype=int)
return values
class AngleFormatterLocator(BaseFormatterLocator):
"""
    A joint formatter/locator for angular coordinates.
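
    Examples
    --------
    A minimal sketch of standalone use (within WCSAxes this class is driven
    by the coordinate helpers)::

        fl = AngleFormatterLocator(number=4, format='dd:mm')
        ticks, spacing = fl.locator(0, 10)     # tick values between 0 and 10 deg
        labels = fl.formatter(ticks, spacing)  # formatted tick labels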
"""
def __init__(
self,
values=None,
number=None,
spacing=None,
format=None,
unit=None,
decimal=None,
format_unit=None,
show_decimal_unit=True,
):
if unit is None:
unit = u.degree
if format_unit is None:
format_unit = unit
if format_unit not in (u.degree, u.hourangle, u.hour):
if decimal is False:
raise UnitsError(
"Units should be degrees or hours when using non-decimal"
" (sexagesimal) mode"
)
self._decimal = decimal
self._sep = None
self.show_decimal_unit = show_decimal_unit
super().__init__(
values=values,
number=number,
spacing=spacing,
format=format,
unit=unit,
format_unit=format_unit,
)
@property
def decimal(self):
decimal = self._decimal
if self.format_unit not in (u.degree, u.hourangle, u.hour):
if self._decimal is None:
decimal = True
elif self._decimal is False:
raise UnitsError(
"Units should be degrees or hours when using non-decimal"
" (sexagesimal) mode"
)
elif self._decimal is None:
decimal = False
return decimal
@decimal.setter
def decimal(self, value):
self._decimal = value
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and (
not isinstance(spacing, u.Quantity) or spacing.unit.physical_type != "angle"
):
raise TypeError(
"spacing should be an astropy.units.Quantity "
"instance with units of angle"
)
self._number = None
self._spacing = spacing
self._values = None
@property
def sep(self):
return self._sep
@sep.setter
def sep(self, separator):
self._sep = separator
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if DMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.degree
if "." in value:
self._precision = len(value) - value.index(".") - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(":") + 1
elif HMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.hourangle
if "." in value:
self._precision = len(value) - value.index(".") - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(":") + 1
elif DDEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.degree
self._fields = 1
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
elif DMIN_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcmin
self._fields = 1
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
elif DSEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcsec
self._fields = 1
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
else:
raise ValueError(f"Invalid format: {value}")
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.0e-10:
warnings.warn(
"Spacing is not a multiple of base spacing - resetting spacing to"
" match format"
)
self.spacing = self.base_spacing * max(1, round(ratio))
@property
def base_spacing(self):
if self.decimal:
spacing = self._format_unit / (10.0**self._precision)
else:
if self._fields == 1:
spacing = 1.0 * u.degree
elif self._fields == 2:
spacing = 1.0 * u.arcmin
elif self._fields == 3:
if self._precision == 0:
spacing = 1.0 * u.arcsec
else:
spacing = u.arcsec / (10.0**self._precision)
if self._format_unit is u.hourangle:
spacing *= 15
return spacing
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * u.arcsec
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced). We return a
# non-zero spacing in case the caller needs to format a single
            # coordinate, e.g. for mouseover.
if value_min == value_max:
return [] * self._unit, 1 * u.arcsec
if self.spacing is not None:
# spacing was manually specified
spacing_value = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing_value = self.base_spacing.to_value(self._unit)
else:
# otherwise we clip to the nearest 'sensible' spacing
if self.decimal:
from .utils import select_step_scalar
spacing_value = select_step_scalar(
dv.to_value(self._format_unit)
) * self._format_unit.to(self._unit)
else:
if self._format_unit is u.degree:
from .utils import select_step_degree
spacing_value = select_step_degree(dv).to_value(self._unit)
else:
from .utils import select_step_hour
spacing_value = select_step_hour(dv).to_value(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this.
values = self._locate_values(value_min, value_max, spacing_value)
return values * spacing_value * self._unit, spacing_value * self._unit
def formatter(self, values, spacing, format="auto"):
if not isinstance(values, u.Quantity) and values is not None:
raise TypeError("values should be a Quantities array")
if len(values) > 0:
decimal = self.decimal
unit = self._format_unit
if unit is u.hour:
unit = u.hourangle
if self.format is None:
if decimal:
# Here we assume the spacing can be arbitrary, so for example
# 1.000223 degrees, in which case we don't want to have a
# format that rounds to degrees. So we find the number of
# decimal places we get from representing the spacing as a
# string in the desired units. The easiest way to find
# the smallest number of decimal places required is to
# format the number as a decimal float and strip any zeros
# from the end. We do this rather than just trusting e.g.
# str() because str(15.) == 15.0. We format using 10 decimal
# places by default before stripping the zeros since this
                    # corresponds to a resolution of less than a microarcsecond,
# which should be sufficient.
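                    # For example, a spacing of 0.125 deg formats to
                    # "0.1250000000", which strips to "0.125", so precision=3.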
spacing = spacing.to_value(unit)
fields = 0
precision = len(
f"{spacing:.10f}".replace("0", " ").strip().split(".", 1)[1]
)
else:
spacing = spacing.to_value(unit / 3600)
if spacing >= 3600:
fields = 1
precision = 0
elif spacing >= 60:
fields = 2
precision = 0
elif spacing >= 1:
fields = 3
precision = 0
else:
fields = 3
precision = -int(np.floor(np.log10(spacing)))
else:
fields = self._fields
precision = self._precision
is_latex = format == "latex" or (
format == "auto" and rcParams["text.usetex"]
)
if decimal:
if self.show_decimal_unit:
sep = "fromunit"
if is_latex:
fmt = "latex"
else:
if unit is u.hourangle:
fmt = "unicode"
else:
fmt = "generic"
unit = CUSTOM_UNITS.get(unit, unit)
else:
sep = "fromunit"
fmt = None
elif self.sep is not None:
sep = self.sep
fmt = None
else:
sep = "fromunit"
if unit == u.degree:
if is_latex:
fmt = "latex"
else:
sep = ("\xb0", "'", '"')
fmt = None
else:
if format == "ascii":
fmt = None
elif is_latex:
fmt = "latex"
else:
# Here we still use LaTeX but this is for Matplotlib's
# LaTeX engine - we can't use fmt='latex' as this
# doesn't produce LaTeX output that respects the fonts.
sep = (
r"$\mathregular{^h}$",
r"$\mathregular{^m}$",
r"$\mathregular{^s}$",
)
fmt = None
angles = Angle(values)
string = angles.to_string(
unit=unit,
precision=precision,
decimal=decimal,
fields=fields,
sep=sep,
format=fmt,
).tolist()
return string
else:
return []
class ScalarFormatterLocator(BaseFormatterLocator):
"""
    A joint formatter/locator for plain scalar coordinates.
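
    Examples
    --------
    A minimal sketch of standalone use (normally instantiated by WCSAxes for
    scalar coordinates)::

        import astropy.units as u
        fl = ScalarFormatterLocator(number=5, unit=u.km)
        ticks, spacing = fl.locator(10, 100)   # tick values between 10 and 100 km
        labels = fl.formatter(ticks, spacing)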
"""
def __init__(
self,
values=None,
number=None,
spacing=None,
format=None,
unit=None,
format_unit=None,
):
        if unit is not None:
            format_unit = format_unit or unit
elif spacing is not None:
unit = spacing.unit
format_unit = format_unit or spacing.unit
elif values is not None:
unit = values.unit
format_unit = format_unit or values.unit
super().__init__(
values=values,
number=number,
spacing=spacing,
format=format,
unit=unit,
format_unit=format_unit,
)
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and not isinstance(spacing, u.Quantity):
raise TypeError("spacing should be an astropy.units.Quantity instance")
self._number = None
self._spacing = spacing
self._values = None
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if SCAL_RE.match(value) is not None:
if "." in value:
self._precision = len(value) - value.index(".") - 1
else:
self._precision = 0
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn(
"Spacing is too small - resetting spacing to match format"
)
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.0e-10:
warnings.warn(
"Spacing is not a multiple of base spacing - resetting spacing"
" to match format"
)
self.spacing = self.base_spacing * max(1, round(ratio))
elif not value.startswith("%"):
raise ValueError(f"Invalid format: {value}")
@property
def base_spacing(self):
return self._format_unit / (10.0**self._precision)
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * self._unit
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if (
self.format is not None
and (not self.format.startswith("%"))
and dv < self.base_spacing
):
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing = self.base_spacing.to_value(self._unit)
else:
from .utils import select_step_scalar
spacing = select_step_scalar(
dv.to_value(self._format_unit)
) * self._format_unit.to(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this
values = self._locate_values(value_min, value_max, spacing)
return values * spacing * self._unit, spacing * self._unit
def formatter(self, values, spacing, format="auto"):
if len(values) > 0:
if self.format is None:
if spacing.value < 1.0:
precision = -int(np.floor(np.log10(spacing.value)))
else:
precision = 0
elif self.format.startswith("%"):
return [(self.format % x.value) for x in values]
else:
precision = self._precision
return [
("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit))
for x in values
]
else:
return []
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import defaultdict
from functools import partial
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.axes import Axes, subplot_class_factory
from matplotlib.transforms import Affine2D, Bbox, Transform
import astropy.units as u
from astropy.coordinates import BaseCoordinateFrame, SkyCoord
from astropy.utils import minversion
from astropy.utils.compat.optional_deps import HAS_PIL
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, BaseLowLevelWCS
from .coordinates_map import CoordinatesMap
from .frame import RectangularFrame, RectangularFrame1D
from .transforms import CoordinateTransform
from .utils import get_coord_meta, transform_contour_set_inplace
from .wcsapi import IDENTITY, transform_coord_meta_from_wcs
__all__ = ["WCSAxes", "WCSAxesSubplot"]
VISUAL_PROPERTIES = ["facecolor", "edgecolor", "linewidth", "alpha", "linestyle"]
class _WCSAxesArtist(Artist):
"""This is a dummy artist to enforce the correct z-order of axis ticks,
tick labels, and gridlines.
FIXME: This is a bit of a hack. ``Axes.draw`` sorts the artists by zorder
and then renders them in sequence. For normal Matplotlib axes, the ticks,
tick labels, and gridlines are included in this list of artists and hence
are automatically drawn in the correct order. However, ``WCSAxes`` disables
the native ticks, labels, and gridlines. Instead, ``WCSAxes.draw`` renders
ersatz ticks, labels, and gridlines by explicitly calling the functions
``CoordinateHelper._draw_ticks``, ``CoordinateHelper._draw_grid``, etc.
This hack would not be necessary if ``WCSAxes`` drew ticks, tick labels,
    and gridlines in the standard way."""
def draw(self, renderer, *args, **kwargs):
self.axes.draw_wcsaxes(renderer)
class WCSAxes(Axes):
"""
The main axes class that can be used to show world coordinates from a WCS.
Parameters
----------
fig : `~matplotlib.figure.Figure`
The figure to add the axes to
*args
``*args`` can be a single ``(left, bottom, width, height)``
rectangle or a single `matplotlib.transforms.Bbox`. This specifies
the rectangle (in figure coordinates) where the Axes is positioned.
``*args`` can also consist of three numbers or a single three-digit
number; in the latter case, the digits are considered as
independent numbers. The numbers are interpreted as ``(nrows,
ncols, index)``: ``(nrows, ncols)`` specifies the size of an array
of subplots, and ``index`` is the 1-based index of the subplot
being created. Finally, ``*args`` can also directly be a
`matplotlib.gridspec.SubplotSpec` instance.
wcs : :class:`~astropy.wcs.WCS`, optional
The WCS for the data. If this is specified, ``transform`` cannot be
specified.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data. If this is specified, ``wcs`` cannot be
specified.
coord_meta : dict, optional
A dictionary providing additional metadata when ``transform`` is
specified. This should include the keys ``type``, ``wrap``, and
``unit``. Each of these should be a list with as many items as the
dimension of the WCS. The ``type`` entries should be one of
``longitude``, ``latitude``, or ``scalar``, the ``wrap`` entries should
give, for the longitude, the angle at which the coordinate wraps (and
`None` otherwise), and the ``unit`` should give the unit of the
coordinates as :class:`~astropy.units.Unit` instances. This can
optionally also include a ``format_unit`` entry giving the units to use
for the tick labels (if not specified, this defaults to ``unit``).
transData : `~matplotlib.transforms.Transform`, optional
Can be used to override the default data -> pixel mapping.
slices : tuple, optional
For WCS transformations with more than two dimensions, we need to
choose which dimensions are being shown in the 2D image. The slice
should contain one ``x`` entry, one ``y`` entry, and the rest of the
values should be integers indicating the slice through the data. The
order of the items in the slice should be the same as the order of the
dimensions in the :class:`~astropy.wcs.WCS`, and the opposite of the
order of the dimensions in Numpy. For example, ``(50, 'x', 'y')`` means
that the first WCS dimension (last Numpy dimension) will be sliced at
an index of 50, the second WCS and Numpy dimension will be shown on the
x axis, and the final WCS dimension (first Numpy dimension) will be
shown on the y-axis (and therefore the data will be plotted using
``data[:, :, 50].transpose()``)
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
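
    Examples
    --------
    A construction sketch, assuming ``fig`` is an existing Matplotlib figure
    and ``header`` is a FITS header containing WCS information::

        from astropy.wcs import WCS
        wcs = WCS(header)
        ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs)
        fig.add_axes(ax)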
"""
def __init__(
self,
fig,
*args,
wcs=None,
transform=None,
coord_meta=None,
transData=None,
slices=None,
frame_class=None,
**kwargs,
):
""" """
super().__init__(fig, *args, **kwargs)
self._bboxes = []
if frame_class is not None:
self.frame_class = frame_class
elif wcs is not None and (
wcs.pixel_n_dim == 1 or (slices is not None and "y" not in slices)
):
self.frame_class = RectangularFrame1D
else:
self.frame_class = RectangularFrame
        if transData is not None:
# User wants to override the transform for the final
# data->pixel mapping
self.transData = transData
self.reset_wcs(
wcs=wcs, slices=slices, transform=transform, coord_meta=coord_meta
)
self._hide_parent_artists()
self.format_coord = self._display_world_coords
self._display_coords_index = 0
fig.canvas.mpl_connect("key_press_event", self._set_cursor_prefs)
self.patch = self.coords.frame.patch
self._wcsaxesartist = _WCSAxesArtist()
self.add_artist(self._wcsaxesartist)
self._drawn = False
def _display_world_coords(self, x, y):
if not self._drawn:
return ""
if self._display_coords_index == -1:
return f"{x} {y} (pixel)"
pixel = np.array([x, y])
coords = self._all_coords[self._display_coords_index]
world = coords._transform.transform(np.array([pixel]))[0]
coord_strings = []
for idx, coord in enumerate(coords):
if coord.coord_index is not None:
coord_strings.append(
coord.format_coord(world[coord.coord_index], format="ascii")
)
coord_string = " ".join(coord_strings)
if self._display_coords_index == 0:
system = "world"
else:
system = f"world, overlay {self._display_coords_index}"
coord_string = f"{coord_string} ({system})"
return coord_string
def _set_cursor_prefs(self, event, **kwargs):
if event.key == "w":
self._display_coords_index += 1
if self._display_coords_index + 1 > len(self._all_coords):
self._display_coords_index = -1
def _hide_parent_artists(self):
# Turn off spines and current axes
for s in self.spines.values():
s.set_visible(False)
self.xaxis.set_visible(False)
if self.frame_class is not RectangularFrame1D:
self.yaxis.set_visible(False)
# We now overload ``imshow`` because we need to make sure that origin is
# set to ``lower`` for all images, which means that we need to flip RGB
# images.
def imshow(self, X, *args, **kwargs):
"""
Wrapper to Matplotlib's :meth:`~matplotlib.axes.Axes.imshow`.
If an RGB image is passed as a PIL object, it will be flipped
vertically and ``origin`` will be set to ``lower``, since WCS
transformations - like FITS files - assume that the origin is the lower
left pixel of the image (whereas RGB images have the origin in the top
left).
All arguments are passed to :meth:`~matplotlib.axes.Axes.imshow`.
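
        Examples
        --------
        A short sketch, assuming ``ax`` is a WCSAxes instance and ``data`` is
        a 2D Numpy array::

            ax.imshow(data, cmap='gray')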
"""
origin = kwargs.pop("origin", "lower")
# plt.imshow passes origin as None, which we should default to lower.
if origin is None:
origin = "lower"
elif origin == "upper":
raise ValueError("Cannot use images with origin='upper' in WCSAxes.")
if HAS_PIL:
from PIL.Image import Image
if minversion("PIL", "9.1"):
from PIL.Image import Transpose
FLIP_TOP_BOTTOM = Transpose.FLIP_TOP_BOTTOM
else:
from PIL.Image import FLIP_TOP_BOTTOM
if isinstance(X, Image) or hasattr(X, "getpixel"):
X = X.transpose(FLIP_TOP_BOTTOM)
return super().imshow(X, *args, origin=origin, **kwargs)
def contour(self, *args, **kwargs):
"""
Plot contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contour`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contour`.
"""
# In Matplotlib, when calling contour() with a transform, each
# individual path in the contour map is transformed separately. However,
# this is much too slow for us since each call to the transforms results
# in an Astropy coordinate transformation, which has a non-negligible
# overhead - therefore a better approach is to override contour(), call
# the Matplotlib one with no transform, then apply the transform in one
# go to all the segments that make up the contour map.
transform = kwargs.pop("transform", None)
cset = super().contour(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def contourf(self, *args, **kwargs):
"""
Plot filled contours.
This is a custom implementation of :meth:`~matplotlib.axes.Axes.contourf`
which applies the transform (if specified) to all contours in one go for
performance rather than to each contour line individually. All
positional and keyword arguments are the same as for
:meth:`~matplotlib.axes.Axes.contourf`.
"""
# See notes for contour above.
transform = kwargs.pop("transform", None)
cset = super().contourf(*args, **kwargs)
if transform is not None:
# The transform passed to self.contour will normally include
# a transData component at the end, but we can remove that since
# we are already working in data space.
transform = transform - self.transData
transform_contour_set_inplace(cset, transform)
return cset
def _transform_plot_args(self, *args, **kwargs):
"""
Apply transformations to arguments to ``plot_coord`` and
``scatter_coord``
"""
if isinstance(args[0], (SkyCoord, BaseCoordinateFrame)):
# Extract the frame from the first argument.
frame0 = args[0]
if isinstance(frame0, SkyCoord):
frame0 = frame0.frame
native_frame = self._transform_pixel2world.frame_out
# Transform to the native frame of the plot
frame0 = frame0.transform_to(native_frame)
plot_data = []
for coord in self.coords:
if coord.coord_type == "longitude":
plot_data.append(frame0.spherical.lon.to_value(u.deg))
elif coord.coord_type == "latitude":
plot_data.append(frame0.spherical.lat.to_value(u.deg))
else:
raise NotImplementedError(
"Coordinates cannot be plotted with this "
"method because the WCS does not represent longitude/latitude."
)
if "transform" in kwargs.keys():
raise TypeError(
"The 'transform' keyword argument is not allowed,"
" as it is automatically determined by the input coordinate frame."
)
transform = self.get_transform(native_frame)
kwargs.update({"transform": transform})
args = tuple(plot_data) + args[1:]
return args, kwargs
def plot_coord(self, *args, **kwargs):
"""
Plot `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.plot_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.plot`. All other arguments are the same as
`matplotlib.axes.Axes.plot`. If not specified a ``transform`` keyword
argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to plot on the axes. This is converted to the
first two arguments to `matplotlib.axes.Axes.plot`.
See Also
--------
matplotlib.axes.Axes.plot :
This method is called from this function with all arguments passed to it.
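
        Examples
        --------
        A minimal sketch, assuming ``ax`` is a WCSAxes with a celestial WCS::

            import astropy.units as u
            from astropy.coordinates import SkyCoord

            c = SkyCoord(ra=[10, 20] * u.deg, dec=[30, 40] * u.deg)
            ax.plot_coord(c, 'o')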
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().plot(*args, **kwargs)
def scatter_coord(self, *args, **kwargs):
"""
Scatter `~astropy.coordinates.SkyCoord` or
`~astropy.coordinates.BaseCoordinateFrame` objects onto the axes.
The first argument to
:meth:`~astropy.visualization.wcsaxes.WCSAxes.scatter_coord` should be a
coordinate, which will then be converted to the first two parameters to
`matplotlib.axes.Axes.scatter`. All other arguments are the same as
`matplotlib.axes.Axes.scatter`. If not specified a ``transform``
keyword argument will be created based on the coordinate.
Parameters
----------
coordinate : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate object to scatter on the axes. This is converted to
the first two arguments to `matplotlib.axes.Axes.scatter`.
See Also
--------
matplotlib.axes.Axes.scatter : This method is called from this function with all arguments passed to it.
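
        Examples
        --------
        As for ``plot_coord``, a sketch assuming ``ax`` has a celestial WCS
        and ``c`` is a `~astropy.coordinates.SkyCoord`::

            ax.scatter_coord(c, s=20)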
"""
args, kwargs = self._transform_plot_args(*args, **kwargs)
return super().scatter(*args, **kwargs)
def reset_wcs(self, wcs=None, slices=None, transform=None, coord_meta=None):
"""
Reset the current Axes, to use a new WCS object.
"""
# Here determine all the coordinate axes that should be shown.
if wcs is None and transform is None:
self.wcs = IDENTITY
else:
            # We now force a call to 'set', which ensures the WCS object is
# consistent, which will only be important if the WCS has been set
# by hand. For example if the user sets a celestial WCS by hand and
# forgets to set the units, WCS.wcs.set() will do this.
if wcs is not None:
# Check if the WCS object is an instance of `astropy.wcs.WCS`
# This check is necessary as only `astropy.wcs.WCS` supports
# wcs.set() method
if isinstance(wcs, WCS):
wcs.wcs.set()
if isinstance(wcs, BaseHighLevelWCS):
wcs = wcs.low_level_wcs
self.wcs = wcs
# If we are making a new WCS, we need to preserve the path object since
# it may already be used by objects that have been plotted, and we need
# to continue updating it. CoordinatesMap will create a new frame
# instance, but we can tell that instance to keep using the old path.
if hasattr(self, "coords"):
previous_frame = {
"path": self.coords.frame._path,
"color": self.coords.frame.get_color(),
"linewidth": self.coords.frame.get_linewidth(),
}
else:
previous_frame = {"path": None}
if self.wcs is not None:
transform, coord_meta = transform_coord_meta_from_wcs(
self.wcs, self.frame_class, slices=slices
)
self.coords = CoordinatesMap(
self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
previous_frame_path=previous_frame["path"],
)
self._transform_pixel2world = transform
if previous_frame["path"] is not None:
self.coords.frame.set_color(previous_frame["color"])
self.coords.frame.set_linewidth(previous_frame["linewidth"])
self._all_coords = [self.coords]
# Common default settings for Rectangular Frame
for ind, pos in enumerate(
coord_meta.get("default_axislabel_position", ["b", "l"])
):
self.coords[ind].set_axislabel_position(pos)
for ind, pos in enumerate(
coord_meta.get("default_ticklabel_position", ["b", "l"])
):
self.coords[ind].set_ticklabel_position(pos)
for ind, pos in enumerate(
coord_meta.get("default_ticks_position", ["bltr", "bltr"])
):
self.coords[ind].set_ticks_position(pos)
if rcParams["axes.grid"]:
self.grid()
def draw_wcsaxes(self, renderer):
if not self.axison:
return
# Here need to find out range of all coordinates, and update range for
# each coordinate axis. For now, just assume it covers the whole sky.
self._bboxes = []
# This generates a structure like [coords][axis] = [...]
ticklabels_bbox = defaultdict(partial(defaultdict, list))
visible_ticks = []
for coords in self._all_coords:
# Draw grids
coords.frame.update()
for coord in coords:
coord._draw_grid(renderer)
for coords in self._all_coords:
# Draw tick labels
for coord in coords:
coord._draw_ticks(
renderer,
bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox[coord],
)
visible_ticks.extend(coord.ticklabels.get_visible_axes())
for coords in self._all_coords:
# Draw axis labels
for coord in coords:
coord._draw_axislabels(
renderer,
bboxes=self._bboxes,
ticklabels_bbox=ticklabels_bbox,
visible_ticks=visible_ticks,
)
self.coords.frame.draw(renderer)
def draw(self, renderer, **kwargs):
"""Draw the axes."""
# Before we do any drawing, we need to remove any existing grid lines
# drawn with contours, otherwise if we try and remove the contours
# part way through drawing, we end up with the issue mentioned in
# https://github.com/astropy/astropy/issues/12446
for coords in self._all_coords:
for coord in coords:
coord._clear_grid_contour()
# In Axes.draw, the following code can result in the xlim and ylim
        # values changing, so we need to call this here explicitly to make sure that
# the limits are correct before we update the patch.
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.apply_aspect(pos)
else:
self.apply_aspect()
if self._axisbelow is True:
self._wcsaxesartist.set_zorder(0.5)
elif self._axisbelow is False:
self._wcsaxesartist.set_zorder(2.5)
else:
# 'line': above patches, below lines
self._wcsaxesartist.set_zorder(1.5)
        # We need to make sure that the frame path is up to date
self.coords.frame._update_patch_path()
super().draw(renderer, **kwargs)
self._drawn = True
# Matplotlib internally sometimes calls set_xlabel(label=...).
def set_xlabel(self, xlabel=None, labelpad=1, loc=None, **kwargs):
"""Set x-label."""
if xlabel is None:
xlabel = kwargs.pop("label", None)
if xlabel is None:
raise TypeError(
"set_xlabel() missing 1 required positional argument: 'xlabel'"
)
for coord in self.coords:
if (
"b" in coord.axislabels.get_visible_axes()
or "h" in coord.axislabels.get_visible_axes()
):
coord.set_axislabel(xlabel, minpad=labelpad, **kwargs)
break
def set_ylabel(self, ylabel=None, labelpad=1, loc=None, **kwargs):
"""Set y-label"""
if ylabel is None:
ylabel = kwargs.pop("label", None)
if ylabel is None:
raise TypeError(
"set_ylabel() missing 1 required positional argument: 'ylabel'"
)
if self.frame_class is RectangularFrame1D:
return super().set_ylabel(ylabel, labelpad=labelpad, **kwargs)
for coord in self.coords:
if (
"l" in coord.axislabels.get_visible_axes()
or "c" in coord.axislabels.get_visible_axes()
):
coord.set_axislabel(ylabel, minpad=labelpad, **kwargs)
break
def get_xlabel(self):
for coord in self.coords:
if (
"b" in coord.axislabels.get_visible_axes()
or "h" in coord.axislabels.get_visible_axes()
):
return coord.get_axislabel()
def get_ylabel(self):
if self.frame_class is RectangularFrame1D:
return super().get_ylabel()
for coord in self.coords:
if (
"l" in coord.axislabels.get_visible_axes()
or "c" in coord.axislabels.get_visible_axes()
):
return coord.get_axislabel()
def get_coords_overlay(self, frame, coord_meta=None):
"""Get coordinates overlay on given frame.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame`
Frame to get overlay for. If a string must correspond to
one of the coordinate frames registered in the astropy
frame transform graph.
coord_meta : dict
Metadata for the coordinates overlay.
Returns
-------
overlay : `~astropy.visualization.wcsaxes.CoordinatesMap`
Coordinates overlay.
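
        Examples
        --------
        A short sketch, assuming ``ax`` is a WCSAxes with a celestial WCS::

            overlay = ax.get_coords_overlay('galactic')
            overlay.grid(color='white', linestyle='dotted')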
"""
# Here we can't use get_transform because that deals with
# pixel-to-pixel transformations when passing a WCS object.
if isinstance(frame, WCS):
transform, coord_meta = transform_coord_meta_from_wcs(
frame, self.frame_class
)
else:
transform = self._get_transform_no_transdata(frame)
if coord_meta is None:
coord_meta = get_coord_meta(frame)
coords = CoordinatesMap(
self,
transform=transform,
coord_meta=coord_meta,
frame_class=self.frame_class,
)
self._all_coords.append(coords)
# Common settings for overlay
coords[0].set_axislabel_position("t")
coords[1].set_axislabel_position("r")
coords[0].set_ticklabel_position("t")
coords[1].set_ticklabel_position("r")
self.overlay_coords = coords
return coords
def get_transform(self, frame):
"""
Return a transform from the specified frame to display coordinates.
        The returned transform includes the ``transData`` step, so it can be
        passed directly as the ``transform`` keyword argument to Matplotlib
        plotting methods.
Parameters
----------
frame : :class:`~astropy.wcs.WCS` or :class:`~matplotlib.transforms.Transform` or str
The ``frame`` parameter can have several possible types:
* :class:`~astropy.wcs.WCS` instance: assumed to be a
transformation from pixel to world coordinates, where the
world coordinates are the same as those in the WCS
transformation used for this ``WCSAxes`` instance. This is
used for example to show contours, since this involves
plotting an array in pixel coordinates that are not the
final data coordinate and have to be transformed to the
common world coordinate system first.
* :class:`~matplotlib.transforms.Transform` instance: it is
assumed to be a transform to the world coordinates that are
part of the WCS used to instantiate this ``WCSAxes``
instance.
* ``'pixel'`` or ``'world'``: return a transformation that
allows users to plot in pixel/data coordinates (essentially
an identity transform) and ``world`` (the default
world-to-pixel transformation used to instantiate the
``WCSAxes`` instance).
* ``'fk5'`` or ``'galactic'``: return a transformation from
the specified frame to the pixel/data coordinates.
* :class:`~astropy.coordinates.BaseCoordinateFrame` instance.
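
        Examples
        --------
        A sketch of overplotting data given in another celestial frame,
        assuming ``ax`` is a WCSAxes instance and ``lon_deg``/``lat_deg`` are
        arrays of Galactic coordinates in degrees::

            ax.scatter(lon_deg, lat_deg, transform=ax.get_transform('galactic'))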
"""
return self._get_transform_no_transdata(frame).inverted() + self.transData
def _get_transform_no_transdata(self, frame):
"""
Return a transform from data to the specified frame
"""
if isinstance(frame, (BaseLowLevelWCS, BaseHighLevelWCS)):
if isinstance(frame, BaseHighLevelWCS):
frame = frame.low_level_wcs
transform, coord_meta = transform_coord_meta_from_wcs(
frame, self.frame_class
)
transform_world2pixel = transform.inverted()
if self._transform_pixel2world.frame_out == transform_world2pixel.frame_in:
return self._transform_pixel2world + transform_world2pixel
else:
return (
self._transform_pixel2world
+ CoordinateTransform(
self._transform_pixel2world.frame_out,
transform_world2pixel.frame_in,
)
+ transform_world2pixel
)
elif isinstance(frame, str) and frame == "pixel":
return Affine2D()
elif isinstance(frame, Transform):
return self._transform_pixel2world + frame
else:
if isinstance(frame, str) and frame == "world":
return self._transform_pixel2world
else:
coordinate_transform = CoordinateTransform(
self._transform_pixel2world.frame_out, frame
)
if coordinate_transform.same_frames:
return self._transform_pixel2world
else:
return self._transform_pixel2world + coordinate_transform
def get_tightbbox(self, renderer, *args, **kwargs):
# FIXME: we should determine what to do with the extra arguments here.
# Note that the expected signature of this method is different in
# Matplotlib 3.x compared to 2.x, but we only support 3.x now.
if not self.get_visible():
return
# Do a draw to populate the self._bboxes list
self.draw_wcsaxes(renderer)
bb = [b for b in self._bboxes if b and (b.width != 0 or b.height != 0)]
bb.append(super().get_tightbbox(renderer, *args, **kwargs))
if bb:
_bbox = Bbox.union(bb)
return _bbox
else:
return self.get_window_extent(renderer)
def grid(self, b=None, axis="both", *, which="major", **kwargs):
"""
Plot gridlines for both coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
        passed as keyword arguments. This behaves like `matplotlib.axes.Axes.grid`
except that if no arguments are specified, the grid is shown rather
than toggled.
Parameters
----------
b : bool
Whether to show the gridlines.
axis : 'both', 'x', 'y'
Which axis to turn the gridlines on/off for.
which : str
Currently only ``'major'`` is supported.
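
        Examples
        --------
        A minimal sketch, assuming ``ax`` is an existing WCSAxes instance::

            ax.grid(color='white', alpha=0.5, linestyle='solid')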
"""
if not hasattr(self, "coords"):
return
if which != "major":
raise NotImplementedError(
"Plotting the grid for the minor ticks is not supported."
)
if axis == "both":
self.coords.grid(draw_grid=b, **kwargs)
elif axis == "x":
self.coords[0].grid(draw_grid=b, **kwargs)
elif axis == "y":
self.coords[1].grid(draw_grid=b, **kwargs)
else:
raise ValueError("axis should be one of x/y/both")
def tick_params(self, axis="both", **kwargs):
"""
Method to set the tick and tick label parameters in the same way as the
:meth:`~matplotlib.axes.Axes.tick_params` method in Matplotlib.
This is provided for convenience, but the recommended API is to use
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticks_position`,
:meth:`~astropy.visualization.wcsaxes.CoordinateHelper.set_ticklabel_position`,
and :meth:`~astropy.visualization.wcsaxes.CoordinateHelper.grid`.
Parameters
----------
axis : int or str, optional
Which axis to apply the parameters to. This defaults to 'both'
but can also be set to an `int` or `str` that refers to the
axis to apply it to, following the valid values that can index
``ax.coords``. Note that ``'x'`` and ``'y'`` are also accepted in
the case of rectangular axes.
which : {'both', 'major', 'minor'}, optional
Which ticks to apply the settings to. By default, settings are
applied to both major and minor ticks. Note that if ``'minor'`` is
specified, only the length of the ticks can be set currently.
direction : {'in', 'out'}, optional
Puts ticks inside the axes, or outside the axes.
length : float, optional
Tick length in points.
width : float, optional
Tick width in points.
color : color, optional
Tick color (accepts any valid Matplotlib color)
pad : float, optional
Distance in points between tick and label.
labelsize : float or str, optional
Tick label font size in points or as a string (e.g., 'large').
labelcolor : color, optional
Tick label color (accepts any valid Matplotlib color)
colors : color, optional
Changes the tick color and the label color to the same value
(accepts any valid Matplotlib color).
bottom, top, left, right : bool, optional
Where to draw the ticks. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
labelbottom, labeltop, labelleft, labelright : bool, optional
Where to draw the tick labels. Note that this can only be given if a
specific coordinate is specified via the ``axis`` argument, and it
will not work correctly if the frame is not rectangular.
grid_color : color, optional
The color of the grid lines (accepts any valid Matplotlib color).
grid_alpha : float, optional
Transparency of grid lines: 0 (transparent) to 1 (opaque).
grid_linewidth : float, optional
Width of grid lines in points.
grid_linestyle : str, optional
The style of the grid lines (accepts any valid Matplotlib line
style).
"""
if not hasattr(self, "coords"):
# Axes haven't been fully initialized yet, so just ignore, as
# Axes.__init__ calls this method
return
if axis == "both":
for pos in ("bottom", "left", "top", "right"):
if pos in kwargs:
raise ValueError(f"Cannot specify {pos}= when axis='both'")
if "label" + pos in kwargs:
raise ValueError(f"Cannot specify label{pos}= when axis='both'")
for coord in self.coords:
coord.tick_params(**kwargs)
elif axis in self.coords:
self.coords[axis].tick_params(**kwargs)
elif axis in ("x", "y") and self.frame_class is RectangularFrame:
spine = "b" if axis == "x" else "l"
for coord in self.coords:
if spine in coord.axislabels.get_visible_axes():
coord.tick_params(**kwargs)
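# Usage sketch, assuming ``ax`` is a WCSAxes instance: style the ticks of the
# first world coordinate (index 0 into ``ax.coords``), e.g.
#
#     ax.tick_params(axis=0, direction='in', length=6, colors='white')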
# In the following, we put the generated subplot class in a temporary class and
# we then inherit it - if we don't do this, the generated class appears to
# belong in matplotlib, not in WCSAxes, from the API's point of view.
class WCSAxesSubplot(subplot_class_factory(WCSAxes)):
"""
A subclass of WCSAxes for use as a subplot.
"""
pass
|
09c156e7e788ff31ef8832efe4a9e0b7d8aa52d776ae4e917206b0aa8c2d40ce | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# The following few lines skip this module when running tests if matplotlib is
# not available (and will have no impact otherwise)
try:
import pytest
pytest.importorskip("matplotlib")
del pytest
except ImportError:
pass
from astropy import config as _config
from .coordinate_helpers import CoordinateHelper
from .coordinates_map import CoordinatesMap
from .core import *
from .helpers import *
from .patches import *
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.visualization.wcsaxes`.
"""
coordinate_range_samples = _config.ConfigItem(
50,
"The number of samples along each image axis when determining "
"the range of coordinates in a plot.",
)
frame_boundary_samples = _config.ConfigItem(
1000,
"How many points to sample along the axes when determining tick locations.",
)
grid_samples = _config.ConfigItem(
1000, "How many points to sample along grid lines."
)
contour_grid_samples = _config.ConfigItem(
200, "The grid size to use when drawing a grid using contours"
)
conf = Conf()
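# Usage sketch: these configuration items can be read or overridden at
# runtime through the ``conf`` instance, e.g.
#
#     from astropy.visualization.wcsaxes import conf
#     conf.grid_samples = 2000  # sample grid lines more finely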
|
17a2a74878f47d486b0b51e821f00c531604c3acaf0ba90b092f1c4f1bccda64 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from collections import defaultdict
import numpy as np
from matplotlib import rcParams
from matplotlib.text import Text
from astropy.utils.exceptions import AstropyDeprecationWarning
from .frame import RectangularFrame
def sort_using(X, Y):
return [x for (y, x) in sorted(zip(Y, X))]
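# For example, sort_using(['a', 'b', 'c'], [3, 1, 2]) returns ['b', 'c', 'a'],
# i.e. X reordered according to the sort order of Y.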
class TickLabels(Text):
def __init__(self, frame, *args, **kwargs):
self.clear()
self._frame = frame
super().__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes("all")
self.set_pad(rcParams["xtick.major.pad"])
self._exclude_overlapping = False
# Stale if either xy positions haven't been calculated, or if
# something changes that requires recomputing the positions
self._stale = True
# Check rcParams
if "color" not in kwargs:
self.set_color(rcParams["xtick.color"])
if "size" not in kwargs:
self.set_size(rcParams["xtick.labelsize"])
def clear(self):
self.world = defaultdict(list)
self.data = defaultdict(list)
self.angle = defaultdict(list)
self.text = defaultdict(list)
self.disp = defaultdict(list)
def add(
self,
axis=None,
world=None,
pixel=None,
angle=None,
text=None,
axis_displacement=None,
data=None,
):
"""
Add a label.
Parameters
----------
axis : str
Axis to add label to.
world : Quantity
Coordinate value along this axis.
pixel : [float, float]
Pixel coordinates of the label. Deprecated and no longer used.
angle : float
Angle of the label.
text : str
Label text.
axis_displacement : float
Displacement from axis.
data : [float, float]
Data coordinates of the label.
"""
required_args = ["axis", "world", "angle", "text", "axis_displacement", "data"]
if pixel is not None:
warnings.warn(
"Setting the pixel coordinates of a label does nothing and is"
" deprecated, as these can only be accurately calculated when"
" Matplotlib is drawing a figure. To prevent this warning pass the"
f" following arguments as keyword arguments: {required_args}",
AstropyDeprecationWarning,
)
if (
axis is None
or world is None
or angle is None
or text is None
or axis_displacement is None
or data is None
):
raise TypeError(
f"All of the following arguments must be provided: {required_args}"
)
self.world[axis].append(world)
self.data[axis].append(data)
self.angle[axis].append(angle)
self.text[axis].append(text)
self.disp[axis].append(axis_displacement)
self._stale = True
def sort(self):
"""
Sort by axis displacement, which allows us to figure out which parts
of labels to not repeat.
"""
for axis in self.world:
self.world[axis] = sort_using(self.world[axis], self.disp[axis])
self.data[axis] = sort_using(self.data[axis], self.disp[axis])
self.angle[axis] = sort_using(self.angle[axis], self.disp[axis])
self.text[axis] = sort_using(self.text[axis], self.disp[axis])
self.disp[axis] = sort_using(self.disp[axis], self.disp[axis])
self._stale = True
def simplify_labels(self):
"""
Figure out which parts of labels can be dropped to avoid repetition.
"""
self.sort()
for axis in self.world:
t1 = self.text[axis][0]
for i in range(1, len(self.world[axis])):
t2 = self.text[axis][i]
if len(t1) != len(t2):
t1 = self.text[axis][i]
continue
start = 0
# In the following loop, we need to ignore the last character,
# hence the len(t1) - 1. This is because if we have two strings
# like 13d14m15s we want to make sure that we keep the last
# part (15s) even if the two labels are identical.
for j in range(len(t1) - 1):
if t1[j] != t2[j]:
break
if t1[j] not in "-0123456789.":
start = j + 1
t1 = self.text[axis][i]
if start != 0:
starts_dollar = self.text[axis][i].startswith("$")
self.text[axis][i] = self.text[axis][i][start:]
if starts_dollar:
self.text[axis][i] = "$" + self.text[axis][i]
# Remove any empty LaTeX inline math mode string
if self.text[axis][i] == "$$":
self.text[axis][i] = ""
self._stale = True
def set_pad(self, value):
self._pad = value
self._stale = True
def get_pad(self):
return self._pad
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
self._stale = True
def get_visible_axes(self):
if self._visible_axes == "all":
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def set_exclude_overlapping(self, exclude_overlapping):
self._exclude_overlapping = exclude_overlapping
def _set_xy_alignments(self, renderer, tick_out_size):
"""
Compute and set the x, y positions and the horizontal/vertical alignment of
each label.
"""
if not self._stale:
return
self.simplify_labels()
text_size = renderer.points_to_pixels(self.get_size())
visible_axes = self.get_visible_axes()
self.xy = {axis: {} for axis in visible_axes}
self.ha = {axis: {} for axis in visible_axes}
self.va = {axis: {} for axis in visible_axes}
for axis in visible_axes:
for i in range(len(self.world[axis])):
# In the event that the label is empty (which is not expected
# but could happen in unforeseen corner cases), we should just
# skip to the next label.
if self.text[axis][i] == "":
continue
x, y = self._frame.parent_axes.transData.transform(self.data[axis][i])
pad = renderer.points_to_pixels(self.get_pad() + tick_out_size)
if isinstance(self._frame, RectangularFrame):
# This is just to preserve the current results, but can be
# removed next time the reference images are re-generated.
if np.abs(self.angle[axis][i]) < 45.0:
ha = "right"
va = "bottom"
dx = -pad
dy = -text_size * 0.5
elif np.abs(self.angle[axis][i] - 90.0) < 45:
ha = "center"
va = "bottom"
dx = 0
dy = -text_size - pad
elif np.abs(self.angle[axis][i] - 180.0) < 45:
ha = "left"
va = "bottom"
dx = pad
dy = -text_size * 0.5
else:
ha = "center"
va = "bottom"
dx = 0
dy = pad
x = x + dx
y = y + dy
else:
# This is the more general code for arbitrarily oriented
# axes
# Set initial position and find bounding box
self.set_text(self.text[axis][i])
self.set_position((x, y))
bb = super().get_window_extent(renderer)
# Find the width and height of the label, and the angle at which
# we switch which side of the label is used to anchor it.
width = bb.width
height = bb.height
# Project axis angle onto bounding box
ax = np.cos(np.radians(self.angle[axis][i]))
ay = np.sin(np.radians(self.angle[axis][i]))
# Set anchor point for label
if np.abs(self.angle[axis][i]) < 45.0:
dx = width
dy = ay * height
elif np.abs(self.angle[axis][i] - 90.0) < 45:
dx = ax * width
dy = height
elif np.abs(self.angle[axis][i] - 180.0) < 45:
dx = -width
dy = ay * height
else:
dx = ax * width
dy = -height
dx *= 0.5
dy *= 0.5
# Find normalized vector along axis normal, so as to be
# able to nudge the label away by a constant padding factor
dist = np.hypot(dx, dy)
ddx = dx / dist
ddy = dy / dist
dx += ddx * pad
dy += ddy * pad
x = x - dx
y = y - dy
ha = "center"
va = "center"
self.xy[axis][i] = (x, y)
self.ha[axis][i] = ha
self.va[axis][i] = va
self._stale = False
def _get_bb(self, axis, i, renderer):
"""
Get the bounding box of an individual label. N.B. _set_xy_alignments()
must be called before this method.
"""
if self.text[axis][i] == "":
return
self.set_text(self.text[axis][i])
self.set_position(self.xy[axis][i])
self.set_ha(self.ha[axis][i])
self.set_va(self.va[axis][i])
return super().get_window_extent(renderer)
def draw(self, renderer, bboxes, ticklabels_bbox, tick_out_size):
if not self.get_visible():
return
self._set_xy_alignments(renderer, tick_out_size)
for axis in self.get_visible_axes():
for i in range(len(self.world[axis])):
# This implicitly sets the label text, position, alignment
bb = self._get_bb(axis, i, renderer)
if bb is None:
continue
# TODO: the problem here is that we might get rid of a label
# that has a key starting bit such as -0:30 where the -0
# might be dropped from all other labels.
if not self._exclude_overlapping or bb.count_overlaps(bboxes) == 0:
super().draw(renderer)
bboxes.append(bb)
ticklabels_bbox[axis].append(bb)
|
11ca2bb0eaea32fa238f658479563e8fb7ef5cc47eea56a0105b96dc3e94adb9 | # Functions/classes for WCSAxes related to APE14 WCSes
import numpy as np
from astropy import units as u
from astropy.coordinates import ICRS, BaseCoordinateFrame, SkyCoord
from astropy.wcs import WCS
from astropy.wcs.utils import local_partial_pixel_derivatives
from astropy.wcs.wcsapi import SlicedLowLevelWCS
from .frame import EllipticalFrame, RectangularFrame, RectangularFrame1D
from .transforms import CurvedTransform
__all__ = [
"transform_coord_meta_from_wcs",
"WCSWorld2PixelTransform",
"WCSPixel2WorldTransform",
]
IDENTITY = WCS(naxis=2)
IDENTITY.wcs.ctype = ["X", "Y"]
IDENTITY.wcs.crval = [0.0, 0.0]
IDENTITY.wcs.crpix = [1.0, 1.0]
IDENTITY.wcs.cdelt = [1.0, 1.0]
def transform_coord_meta_from_wcs(wcs, frame_class, slices=None):
if slices is not None:
slices = tuple(slices)
if wcs.pixel_n_dim > 2:
if slices is None:
raise ValueError(
"WCS has more than 2 pixel dimensions, so 'slices' should be set"
)
elif len(slices) != wcs.pixel_n_dim:
raise ValueError(
"'slices' should have as many elements as WCS "
"has pixel dimensions (should be {})".format(wcs.pixel_n_dim)
)
is_fits_wcs = isinstance(wcs, WCS) or (
isinstance(wcs, SlicedLowLevelWCS) and isinstance(wcs._wcs, WCS)
)
coord_meta = {}
coord_meta["name"] = []
coord_meta["type"] = []
coord_meta["wrap"] = []
coord_meta["unit"] = []
coord_meta["visible"] = []
coord_meta["format_unit"] = []
for idx in range(wcs.world_n_dim):
axis_type = wcs.world_axis_physical_types[idx]
axis_unit = u.Unit(wcs.world_axis_units[idx])
coord_wrap = None
format_unit = axis_unit
coord_type = "scalar"
if axis_type is not None:
axis_type_split = axis_type.split(".")
if "pos.helioprojective.lon" in axis_type:
coord_wrap = 180.0
format_unit = u.arcsec
coord_type = "longitude"
elif "pos.helioprojective.lat" in axis_type:
format_unit = u.arcsec
coord_type = "latitude"
elif "pos.heliographic.stonyhurst.lon" in axis_type:
coord_wrap = 180.0
format_unit = u.deg
coord_type = "longitude"
elif "pos.heliographic.stonyhurst.lat" in axis_type:
format_unit = u.deg
coord_type = "latitude"
elif "pos.heliographic.carrington.lon" in axis_type:
coord_wrap = 360.0
format_unit = u.deg
coord_type = "longitude"
elif "pos.heliographic.carrington.lat" in axis_type:
format_unit = u.deg
coord_type = "latitude"
elif "pos" in axis_type_split:
if "lon" in axis_type_split:
coord_type = "longitude"
elif "lat" in axis_type_split:
coord_type = "latitude"
elif "ra" in axis_type_split:
coord_type = "longitude"
format_unit = u.hourangle
elif "dec" in axis_type_split:
coord_type = "latitude"
elif "alt" in axis_type_split:
coord_type = "longitude"
elif "az" in axis_type_split:
coord_type = "latitude"
elif "long" in axis_type_split:
coord_type = "longitude"
coord_meta["type"].append(coord_type)
coord_meta["wrap"].append(coord_wrap)
coord_meta["format_unit"].append(format_unit)
coord_meta["unit"].append(axis_unit)
# For FITS-WCS, for backward-compatibility, we need to make sure that we
# provide aliases based on CTYPE for the name.
if is_fits_wcs:
name = []
if isinstance(wcs, WCS):
name.append(wcs.wcs.ctype[idx].lower())
name.append(wcs.wcs.ctype[idx][:4].replace("-", "").lower())
elif isinstance(wcs, SlicedLowLevelWCS):
name.append(wcs._wcs.wcs.ctype[wcs._world_keep[idx]].lower())
name.append(
wcs._wcs.wcs.ctype[wcs._world_keep[idx]][:4]
.replace("-", "")
.lower()
)
if name[0] == name[1]:
name = name[0:1]
if axis_type:
if axis_type not in name:
name.insert(0, axis_type)
if wcs.world_axis_names and wcs.world_axis_names[idx]:
if wcs.world_axis_names[idx] not in name:
name.append(wcs.world_axis_names[idx])
name = tuple(name) if len(name) > 1 else name[0]
else:
name = axis_type or ""
if wcs.world_axis_names:
name = (
(name, wcs.world_axis_names[idx])
if wcs.world_axis_names[idx]
else name
)
coord_meta["name"].append(name)
coord_meta["default_axislabel_position"] = [""] * wcs.world_n_dim
coord_meta["default_ticklabel_position"] = [""] * wcs.world_n_dim
coord_meta["default_ticks_position"] = [""] * wcs.world_n_dim
# If the world axis has a name, use it; otherwise display the world axis physical type.
fallback_labels = [
name[0] if isinstance(name, (list, tuple)) else name
for name in coord_meta["name"]
]
coord_meta["default_axis_label"] = [
wcs.world_axis_names[i] or fallback_label
for i, fallback_label in enumerate(fallback_labels)
]
transform_wcs, invert_xy, world_map = apply_slices(wcs, slices)
transform = WCSPixel2WorldTransform(transform_wcs, invert_xy=invert_xy)
for i in range(len(coord_meta["type"])):
coord_meta["visible"].append(i in world_map)
inv_all_corr = [False] * wcs.world_n_dim
m = transform_wcs.axis_correlation_matrix.copy()
if invert_xy:
inv_all_corr = np.all(m, axis=1)
m = m[:, ::-1]
if frame_class is RectangularFrame:
for i, spine_name in enumerate("bltr"):
pos = np.nonzero(m[:, i % 2])[0]
# If all the axes we have are correlated with each other and we
# have inverted the axes, then we need to reverse the index so we
# put the 'y' on the left.
if inv_all_corr[i % 2]:
pos = pos[::-1]
if len(pos) > 0:
index = world_map[pos[0]]
coord_meta["default_axislabel_position"][index] = spine_name
coord_meta["default_ticklabel_position"][index] = spine_name
coord_meta["default_ticks_position"][index] = spine_name
m[pos[0], :] = 0
# In the special and common case where the frame is rectangular and
# we are dealing with 2-d WCS (after slicing), we show all ticks on
# all axes for backward-compatibility.
if len(world_map) == 2:
for index in world_map:
coord_meta["default_ticks_position"][index] = "bltr"
elif frame_class is RectangularFrame1D:
derivs = np.abs(
local_partial_pixel_derivatives(
transform_wcs,
*[0] * transform_wcs.pixel_n_dim,
normalize_by_world=False,
)
)[:, 0]
for i, spine_name in enumerate("bt"):
# Here we are iterating over the correlated axes in world axis order.
# We want to sort the correlated axes by their partial derivatives,
# so we put the most rapidly changing world axis on the bottom.
pos = np.nonzero(m[:, 0])[0]
order = np.argsort(derivs[pos])[::-1] # Sort largest to smallest
pos = pos[order]
if len(pos) > 0:
index = world_map[pos[0]]
coord_meta["default_axislabel_position"][index] = spine_name
coord_meta["default_ticklabel_position"][index] = spine_name
coord_meta["default_ticks_position"][index] = spine_name
m[pos[0], :] = 0
# In the special and common case where the frame is rectangular and
# we are dealing with 2-d WCS (after slicing), we show all ticks on
# all axes for backward-compatibility.
if len(world_map) == 1:
for index in world_map:
coord_meta["default_ticks_position"][index] = "bt"
elif frame_class is EllipticalFrame:
if "longitude" in coord_meta["type"]:
lon_idx = coord_meta["type"].index("longitude")
coord_meta["default_axislabel_position"][lon_idx] = "h"
coord_meta["default_ticklabel_position"][lon_idx] = "h"
coord_meta["default_ticks_position"][lon_idx] = "h"
if "latitude" in coord_meta["type"]:
lat_idx = coord_meta["type"].index("latitude")
coord_meta["default_axislabel_position"][lat_idx] = "c"
coord_meta["default_ticklabel_position"][lat_idx] = "c"
coord_meta["default_ticks_position"][lat_idx] = "c"
else:
for index in range(len(coord_meta["type"])):
if index in world_map:
coord_meta["default_axislabel_position"][
index
] = frame_class.spine_names
coord_meta["default_ticklabel_position"][
index
] = frame_class.spine_names
coord_meta["default_ticks_position"][index] = frame_class.spine_names
return transform, coord_meta
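# Usage sketch, assuming ``wcs`` is a 2-d celestial astropy.wcs.WCS:
#
#     transform, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
#
# ``transform`` then maps pixel to world coordinates, and for an RA/Dec WCS
# ``coord_meta['type']`` would be ['longitude', 'latitude'].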
def apply_slices(wcs, slices):
"""
Take the input WCS and slices and return a sliced WCS for the transform and
a mapping of world axes in the sliced WCS to the input WCS.
"""
if isinstance(wcs, SlicedLowLevelWCS):
world_keep = list(wcs._world_keep)
else:
world_keep = list(range(wcs.world_n_dim))
# world_map is the index of the world axis in the input WCS for a given
# axis in the transform_wcs
world_map = list(range(wcs.world_n_dim))
transform_wcs = wcs
invert_xy = False
if slices is not None:
wcs_slice = list(slices)
wcs_slice[wcs_slice.index("x")] = slice(None)
if "y" in slices:
wcs_slice[wcs_slice.index("y")] = slice(None)
invert_xy = slices.index("x") > slices.index("y")
transform_wcs = SlicedLowLevelWCS(wcs, wcs_slice[::-1])
world_map = tuple(world_keep.index(i) for i in transform_wcs._world_keep)
return transform_wcs, invert_xy, world_map
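# Illustrative example: for a 3-d WCS with slices=('x', 'y', 2), the third
# pixel dimension is fixed at index 2 and the other two are kept; invert_xy
# is True only when 'x' appears after 'y' in ``slices``.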
def wcsapi_to_celestial_frame(wcs):
for cls, _, kwargs, *_ in wcs.world_axis_object_classes.values():
if issubclass(cls, SkyCoord):
return kwargs.get("frame", ICRS())
elif issubclass(cls, BaseCoordinateFrame):
return cls(**kwargs)
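# Illustrative note: for a typical RA/Dec FITS WCS this returns an ICRS or
# FK5 frame instance; if no celestial world axes are found, None is returned
# implicitly.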
class WCSWorld2PixelTransform(CurvedTransform):
"""
WCS transformation from world to pixel coordinates
"""
has_inverse = True
frame_in = None
def __init__(self, wcs, invert_xy=False):
super().__init__()
if wcs.pixel_n_dim > 2:
raise ValueError("Only pixel_n_dim =< 2 is supported")
self.wcs = wcs
self.invert_xy = invert_xy
self.frame_in = wcsapi_to_celestial_frame(wcs)
def __eq__(self, other):
return (
isinstance(other, type(self))
and self.wcs is other.wcs
and self.invert_xy == other.invert_xy
)
@property
def input_dims(self):
return self.wcs.world_n_dim
def transform(self, world):
# Convert to a list of arrays
world = list(world.T)
if len(world) != self.wcs.world_n_dim:
raise ValueError(
f"Expected {self.wcs.world_n_dim} world coordinates, got {len(world)} "
)
if len(world[0]) == 0:
pixel = np.zeros((0, 2))
else:
pixel = self.wcs.world_to_pixel_values(*world)
if self.invert_xy:
pixel = pixel[::-1]
pixel = np.array(pixel).T
return pixel
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSPixel2WorldTransform(self.wcs, invert_xy=self.invert_xy)
class WCSPixel2WorldTransform(CurvedTransform):
"""
WCS transformation from pixel to world coordinates
"""
has_inverse = True
def __init__(self, wcs, invert_xy=False):
super().__init__()
if wcs.pixel_n_dim > 2:
raise ValueError("Only pixel_n_dim =< 2 is supported")
self.wcs = wcs
self.invert_xy = invert_xy
self.frame_out = wcsapi_to_celestial_frame(wcs)
def __eq__(self, other):
return (
isinstance(other, type(self))
and self.wcs is other.wcs
and self.invert_xy == other.invert_xy
)
@property
def output_dims(self):
return self.wcs.world_n_dim
def transform(self, pixel):
# Convert to a list of arrays
pixel = list(pixel.T)
if len(pixel) != self.wcs.pixel_n_dim:
raise ValueError(
f"Expected {self.wcs.pixel_n_dim} world coordinates, got {len(pixel)} "
)
if self.invert_xy:
pixel = pixel[::-1]
if len(pixel[0]) == 0:
world = np.zeros((0, self.wcs.world_n_dim))
else:
world = self.wcs.pixel_to_world_values(*pixel)
if self.wcs.world_n_dim == 1:
world = [world]
world = np.array(world).T
return world
transform_non_affine = transform
def inverted(self):
"""
Return the inverse of the transform
"""
return WCSWorld2PixelTransform(self.wcs, invert_xy=self.invert_xy)
|
858dac09b8a37ac74d4415734bfa61094c74b5e4635229e777e7108413fc9a84 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import defaultdict
import numpy as np
from matplotlib import rcParams
from matplotlib.lines import Line2D, Path
from matplotlib.transforms import Affine2D
class Ticks(Line2D):
"""
Ticks are derived from `~matplotlib.lines.Line2D`, and the ticks themselves
are markers. Thus, marker methods such as ``set_mec`` and ``set_mew``
should be used to style them. To change the tick size (length), use
``set_ticksize``. To change the direction of the ticks (by default, ticks
point in the opposite direction to the tick labels), use
``set_tick_out(False)``.
Note that Matplotlib's defaults dictionary :data:`~matplotlib.rcParams`
contains default settings (color, size, width) of the form `xtick.*` and
`ytick.*`. In a WCS projection, there may not be a clear relationship
between axes of the projection and 'x' or 'y' axes. For this reason,
we read defaults from `xtick.*`. The following settings affect the
default appearance of ticks:
* `xtick.direction`
* `xtick.major.size`
* `xtick.major.width`
* `xtick.minor.size`
* `xtick.color`
Attributes
----------
ticks_locs : dict
This is set when the ticks are drawn, and is a mapping from axis to
the locations of the ticks for that axis.
"""
def __init__(self, ticksize=None, tick_out=None, **kwargs):
if ticksize is None:
ticksize = rcParams["xtick.major.size"]
self.set_ticksize(ticksize)
self.set_minor_ticksize(rcParams["xtick.minor.size"])
self.set_tick_out(rcParams["xtick.direction"] == "out")
self.clear()
line2d_kwargs = {
"color": rcParams["xtick.color"],
"linewidth": rcParams["xtick.major.width"],
}
line2d_kwargs.update(kwargs)
Line2D.__init__(self, [0.0], [0.0], **line2d_kwargs)
self.set_visible_axes("all")
self._display_minor_ticks = False
def display_minor_ticks(self, display_minor_ticks):
self._display_minor_ticks = display_minor_ticks
def get_display_minor_ticks(self):
return self._display_minor_ticks
def set_tick_out(self, tick_out):
"""
Set to True if the ticks need to be rotated by 180 degrees.
"""
self._tick_out = tick_out
def get_tick_out(self):
"""
Return True if the ticks will be rotated by 180 degrees.
"""
return self._tick_out
def set_ticksize(self, ticksize):
"""
Set the length of the ticks in points.
"""
self._ticksize = ticksize
def get_ticksize(self):
"""
Return the length of the ticks in points.
"""
return self._ticksize
def set_minor_ticksize(self, ticksize):
"""
Set the length of the minor ticks in points.
"""
self._minor_ticksize = ticksize
def get_minor_ticksize(self):
"""
Return the length of the minor ticks in points.
"""
return self._minor_ticksize
@property
def out_size(self):
if self._tick_out:
return self._ticksize
else:
return 0.0
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == "all":
return self.world.keys()
else:
return [x for x in self._visible_axes if x in self.world]
def clear(self):
self.world = defaultdict(list)
self.pixel = defaultdict(list)
self.angle = defaultdict(list)
self.disp = defaultdict(list)
self.minor_world = defaultdict(list)
self.minor_pixel = defaultdict(list)
self.minor_angle = defaultdict(list)
self.minor_disp = defaultdict(list)
def add(self, axis, world, pixel, angle, axis_displacement):
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.disp[axis].append(axis_displacement)
def get_minor_world(self):
return self.minor_world
def add_minor(
self, minor_axis, minor_world, minor_pixel, minor_angle, minor_axis_displacement
):
self.minor_world[minor_axis].append(minor_world)
self.minor_pixel[minor_axis].append(minor_pixel)
self.minor_angle[minor_axis].append(minor_angle)
self.minor_disp[minor_axis].append(minor_axis_displacement)
def __len__(self):
return len(self.world)
_tickvert_path = Path([[0.0, 0.0], [1.0, 0.0]])
def draw(self, renderer):
"""
Draw the ticks.
"""
self.ticks_locs = defaultdict(list)
if not self.get_visible():
return
offset = renderer.points_to_pixels(self.get_ticksize())
self._draw_ticks(renderer, self.pixel, self.angle, offset)
if self._display_minor_ticks:
offset = renderer.points_to_pixels(self.get_minor_ticksize())
self._draw_ticks(renderer, self.minor_pixel, self.minor_angle, offset)
def _draw_ticks(self, renderer, pixel_array, angle_array, offset):
"""
Draw a set of ticks; used for both the major and the minor ticks.
"""
path_trans = self.get_transform()
gc = renderer.new_gc()
gc.set_foreground(self.get_color())
gc.set_alpha(self.get_alpha())
gc.set_linewidth(self.get_linewidth())
marker_scale = Affine2D().scale(offset, offset)
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
initial_angle = 180.0 if self.get_tick_out() else 0.0
for axis in self.get_visible_axes():
if axis not in pixel_array:
continue
for loc, angle in zip(pixel_array[axis], angle_array[axis]):
# Set the rotation for this tick
marker_rotation.rotate_deg(initial_angle + angle)
# Draw the markers
locs = path_trans.transform_non_affine(np.array([loc, loc]))
renderer.draw_markers(
gc,
self._tickvert_path,
marker_transform,
Path(locs),
path_trans.get_affine(),
)
# Reset the tick rotation before moving to the next tick
marker_rotation.clear()
self.ticks_locs[axis].append(locs)
gc.restore()
|
a44e252370d7b6b531e7d43607cb761ada5f96fd6ce12e46d6ffa88f88977de3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from matplotlib.patches import Polygon
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.coordinates.representation import (
SphericalRepresentation,
UnitSphericalRepresentation,
)
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["Quadrangle", "SphericalCircle"]
# Monkey-patch the docstrings to fix the CapStyle and JoinStyle substitutions.
# TODO: delete when matplotlib/matplotlib#19839 is fixed upstream.
Polygon.__init__.__doc__ = Polygon.__init__.__doc__.replace(
"`.CapStyle`", "``matplotlib._enums.CapStyle``"
)
Polygon.__init__.__doc__ = Polygon.__init__.__doc__.replace(
"`.JoinStyle`", "``matplotlib._enums.JoinStyle``"
)
Polygon.set_capstyle.__doc__ = Polygon.set_capstyle.__doc__.replace(
"`.CapStyle`", "``matplotlib._enums.CapStyle``"
)
Polygon.set_joinstyle.__doc__ = Polygon.set_joinstyle.__doc__.replace(
"`.JoinStyle`", "``matplotlib._enums.JoinStyle``"
)
def _rotate_polygon(lon, lat, lon0, lat0):
"""
Given a polygon with vertices defined by (lon, lat), rotate the polygon
such that the North pole of the spherical coordinates is now at (lon0,
lat0). Therefore, to end up with a polygon centered on (lon0, lat0), the
polygon should initially be drawn around the North pole.
"""
# Create a representation object
polygon = UnitSphericalRepresentation(lon=lon, lat=lat)
# Determine rotation matrix to make it so that the circle is centered
# on the correct longitude/latitude.
transform_matrix = rotation_matrix(-lon0, axis="z") @ rotation_matrix(
-(0.5 * np.pi * u.radian - lat0), axis="y"
)
# Apply 3D rotation
polygon = polygon.to_cartesian()
polygon = polygon.transform(transform_matrix)
polygon = UnitSphericalRepresentation.from_cartesian(polygon)
return polygon.lon, polygon.lat
class SphericalCircle(Polygon):
"""
Create a patch representing a spherical circle - that is, a circle that is
formed of all the points that are within a certain angle of the central
coordinates on a sphere. Here we assume that latitude goes from -90 to +90 degrees.
This class is needed in cases where the user wants to add a circular patch
to a celestial image, since otherwise the circle will be distorted, because
a fixed interval in longitude corresponds to a different angle on the sky
depending on the latitude.
Parameters
----------
center : tuple or `~astropy.units.Quantity` ['angle']
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements
or a `~astropy.coordinates.SkyCoord` object.
radius : `~astropy.units.Quantity` ['angle']
The radius of the circle.
resolution : int, optional
The number of points that make up the circle - increase this to get a
smoother circle.
vertex_unit : `~astropy.units.Unit`
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(self, center, radius, resolution=100, vertex_unit=u.degree, **kwargs):
# Extract longitude/latitude, either from a SkyCoord object, or
# from a tuple of two quantities or a single 2-element Quantity.
# The SkyCoord is converted to SphericalRepresentation, if not already.
if isinstance(center, SkyCoord):
rep_type = center.representation_type
if not issubclass(
rep_type, (SphericalRepresentation, UnitSphericalRepresentation)
):
warnings.warn(
f"Received `center` of representation type {rep_type}; it will"
" be converted to SphericalRepresentation.",
AstropyUserWarning,
)
longitude, latitude = center.spherical.lon, center.spherical.lat
else:
longitude, latitude = center
# Start off by generating the circle around the North pole
lon = np.linspace(0.0, 2 * np.pi, resolution + 1)[:-1] * u.radian
lat = np.repeat(0.5 * np.pi - radius.to_value(u.radian), resolution) * u.radian
lon, lat = _rotate_polygon(lon, lat, longitude, latitude)
# Extract new longitude/latitude in the requested units
lon = lon.to_value(vertex_unit)
lat = lat.to_value(vertex_unit)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
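# Usage sketch, assuming ``ax`` is a WCSAxes with a celestial WCS (``u`` is
# astropy.units, imported above):
#
#     circle = SphericalCircle((10 * u.deg, 20 * u.deg), 0.5 * u.deg,
#                              edgecolor='white', facecolor='none',
#                              transform=ax.get_transform('icrs'))
#     ax.add_patch(circle)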
class Quadrangle(Polygon):
"""
Create a patch representing a latitude-longitude quadrangle.
The edges of the quadrangle lie on two lines of constant longitude and two
lines of constant latitude (or the equivalent component names in the
coordinate frame of interest, such as right ascension and declination).
Note that lines of constant latitude are not great circles.
Unlike `matplotlib.patches.Rectangle`, the edges of this patch will render
as curved lines if appropriate for the WCS transformation.
Parameters
----------
anchor : tuple or `~astropy.units.Quantity` ['angle']
This can be either a tuple of two `~astropy.units.Quantity` objects, or
a single `~astropy.units.Quantity` array with two elements.
width : `~astropy.units.Quantity` ['angle']
The width of the quadrangle in longitude (or, e.g., right ascension).
height : `~astropy.units.Quantity` ['angle']
The height of the quadrangle in latitude (or, e.g., declination).
resolution : int, optional
The number of points that make up each side of the quadrangle -
increase this to get a smoother quadrangle.
vertex_unit : `~astropy.units.Unit` ['angle']
The units in which the resulting polygon should be defined - this
should match the unit that the transformation (e.g. the WCS
transformation) expects as input.
Notes
-----
Additional keyword arguments are passed to `~matplotlib.patches.Polygon`
"""
def __init__(
self, anchor, width, height, resolution=100, vertex_unit=u.degree, **kwargs
):
# Extract longitude/latitude, either from a tuple of two quantities, or
# a single 2-element Quantity.
longitude, latitude = u.Quantity(anchor).to_value(vertex_unit)
# Convert the quadrangle dimensions to the appropriate units
width = width.to_value(vertex_unit)
height = height.to_value(vertex_unit)
# Create progressions in longitude and latitude
lon_seq = longitude + np.linspace(0, width, resolution + 1)
lat_seq = latitude + np.linspace(0, height, resolution + 1)
# Trace the path of the quadrangle
lon = np.concatenate(
[
lon_seq[:-1],
np.repeat(lon_seq[-1], resolution),
np.flip(lon_seq[1:]),
np.repeat(lon_seq[0], resolution),
]
)
lat = np.concatenate(
[
np.repeat(lat_seq[0], resolution),
lat_seq[:-1],
np.repeat(lat_seq[-1], resolution),
np.flip(lat_seq[1:]),
]
)
# Create polygon vertices
vertices = np.array([lon, lat]).transpose()
super().__init__(vertices, **kwargs)
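# Usage sketch, assuming ``ax`` is a WCSAxes (``u`` is astropy.units,
# imported above):
#
#     q = Quadrangle((10, 20) * u.deg, 5 * u.deg, 3 * u.deg,
#                    edgecolor='white', facecolor='none',
#                    transform=ax.get_transform('world'))
#     ax.add_patch(q)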
|
5ffc70d730fc75496de08509b9813b1ebe79d696d5697f4c47113dfb8eb53130 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from collections import OrderedDict
from textwrap import indent
from .coordinate_helpers import CoordinateHelper
from .coordinate_range import find_coordinate_range
from .frame import RectangularFrame, RectangularFrame1D
class CoordinatesMap:
"""
A container for coordinate helpers that represents a coordinate system.
This object can be used to access coordinate helpers by index (like a list)
or by name (like a dictionary).
Parameters
----------
axes : :class:`~astropy.visualization.wcsaxes.WCSAxes`
The axes the coordinate map belongs to.
transform : `~matplotlib.transforms.Transform`, optional
The transform for the data.
coord_meta : dict, optional
A dictionary providing additional metadata. This should include the keys
``type``, ``wrap``, and ``unit``. Each of these should be a list with as
many items as the dimension of the coordinate system. The ``type``
entries should be one of ``longitude``, ``latitude``, or ``scalar``, the
``wrap`` entries should give, for the longitude, the angle at which the
coordinate wraps (and `None` otherwise), and the ``unit`` should give
the unit of the coordinates as :class:`~astropy.units.Unit` instances.
This can optionally also include a ``format_unit`` entry giving the
units to use for the tick labels (if not specified, this defaults to
``unit``).
frame_class : type, optional
The class for the frame, which should be a subclass of
:class:`~astropy.visualization.wcsaxes.frame.BaseFrame`. The default is to use a
:class:`~astropy.visualization.wcsaxes.frame.RectangularFrame`
previous_frame_path : `~matplotlib.path.Path`, optional
When changing the WCS of the axes, the frame instance will change but
we might want to keep re-using the same underlying matplotlib
`~matplotlib.path.Path` - in that case, this can be passed to this
keyword argument.
"""
def __init__(
self,
axes,
transform=None,
coord_meta=None,
frame_class=RectangularFrame,
previous_frame_path=None,
):
self._axes = axes
self._transform = transform
self.frame = frame_class(axes, self._transform, path=previous_frame_path)
# Set up coordinates
self._coords = []
self._aliases = {}
visible_count = 0
for index in range(len(coord_meta["type"])):
# Extract coordinate metadata
coord_type = coord_meta["type"][index]
coord_wrap = coord_meta["wrap"][index]
coord_unit = coord_meta["unit"][index]
name = coord_meta["name"][index]
visible = True
if "visible" in coord_meta:
visible = coord_meta["visible"][index]
format_unit = None
if "format_unit" in coord_meta:
format_unit = coord_meta["format_unit"][index]
default_label = name[0] if isinstance(name, (tuple, list)) else name
if "default_axis_label" in coord_meta:
default_label = coord_meta["default_axis_label"][index]
coord_index = None
if visible:
visible_count += 1
coord_index = visible_count - 1
self._coords.append(
CoordinateHelper(
parent_axes=axes,
parent_map=self,
transform=self._transform,
coord_index=coord_index,
coord_type=coord_type,
coord_wrap=coord_wrap,
coord_unit=coord_unit,
format_unit=format_unit,
frame=self.frame,
default_label=default_label,
)
)
# Set up aliases for coordinates
if isinstance(name, tuple):
for nm in name:
nm = nm.lower()
# Do not replace an alias already in the map if we have
# more than one alias for this axis.
if nm not in self._aliases:
self._aliases[nm] = index
else:
self._aliases[name.lower()] = index
def __getitem__(self, item):
if isinstance(item, str):
return self._coords[self._aliases[item.lower()]]
else:
return self._coords[item]
def __contains__(self, item):
if isinstance(item, str):
return item.lower() in self._aliases
else:
return 0 <= item < len(self._coords)
def set_visible(self, visibility):
raise NotImplementedError()
def __iter__(self):
yield from self._coords
def grid(self, draw_grid=True, grid_type=None, **kwargs):
"""
Plot gridlines for all coordinates.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : {'lines', 'contours'}
Whether to plot the gridlines by determining the grid lines in
world coordinates and then transforming them to pixel coordinates
(``'lines'``), or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first option is recommended for 2-d images,
while for 3-d (or higher dimensional) cubes, the ``'contours'``
option is recommended. By default, 'lines' is used if the transform
has an inverse, otherwise 'contours' is used.
"""
for coord in self:
coord.grid(draw_grid=draw_grid, grid_type=grid_type, **kwargs)
def get_coord_range(self):
xmin, xmax = self._axes.get_xlim()
if isinstance(self.frame, RectangularFrame1D):
extent = [xmin, xmax]
else:
ymin, ymax = self._axes.get_ylim()
extent = [xmin, xmax, ymin, ymax]
return find_coordinate_range(
self._transform,
extent,
[coord.coord_type for coord in self if coord.coord_index is not None],
[coord.coord_unit for coord in self if coord.coord_index is not None],
[coord.coord_wrap for coord in self if coord.coord_index is not None],
)
def _as_table(self):
# Import Table here to avoid importing the astropy.table package
# every time astropy.visualization.wcsaxes is imported.
from astropy.table import Table
rows = []
for icoord, coord in enumerate(self._coords):
aliases = [key for key, value in self._aliases.items() if value == icoord]
row = OrderedDict(
[
("index", icoord),
("aliases", " ".join(aliases)),
("type", coord.coord_type),
("unit", coord.coord_unit),
("wrap", coord.coord_wrap),
("format_unit", coord.get_format_unit()),
("visible", "no" if coord.coord_index is None else "yes"),
]
)
rows.append(row)
return Table(rows=rows)
def __repr__(self):
s = f"<CoordinatesMap with {len(self._coords)} world coordinates:\n\n"
table = indent(str(self._as_table()), " ")
return s + table + "\n\n>"
|
33293045c9665329547c5840cbf8909ecaa3d5776b32dcb234519a2c368bd3b4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.coordinates import BaseCoordinateFrame
__all__ = [
"select_step_degree",
"select_step_hour",
"select_step_scalar",
"transform_contour_set_inplace",
]
def select_step_degree(dv):
# Modified from axis_artist, supports astropy.units
if dv > 1.0 * u.arcsec:
degree_limits_ = [1.5, 3, 7, 13, 20, 40, 70, 120, 270, 520]
degree_steps_ = [1, 2, 5, 10, 15, 30, 45, 90, 180, 360]
degree_units = [u.degree] * len(degree_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 8, 11, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 5, 10, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.0
minute_units = [u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.0
second_units = [u.arcsec] * len(second_limits_)
degree_limits = np.concatenate([second_limits_, minute_limits_, degree_limits_])
degree_steps = minsec_steps_ + minsec_steps_ + degree_steps_
degree_units = second_units + minute_units + degree_units
n = degree_limits.searchsorted(dv.to(u.degree))
step = degree_steps[n]
unit = degree_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(u.arcsec)) * u.arcsec
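# For example, select_step_degree(0.7 * u.deg) returns 30 arcmin: 0.7 deg
# (42 arcmin) falls between the 25 and 45 arcmin limits, which map to a
# "nice" step of 30 arcmin.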
def select_step_hour(dv):
if dv > 15.0 * u.arcsec:
hour_limits_ = [1.5, 2.5, 3.5, 5, 7, 10, 15, 21, 36]
hour_steps_ = [1, 2, 3, 4, 6, 8, 12, 18, 24]
hour_units = [u.hourangle] * len(hour_steps_)
minsec_limits_ = [1.5, 2.5, 3.5, 4.5, 5.5, 8, 11, 14, 18, 25, 45]
minsec_steps_ = [1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30]
minute_limits_ = np.array(minsec_limits_) / 60.0
minute_units = [15.0 * u.arcmin] * len(minute_limits_)
second_limits_ = np.array(minsec_limits_) / 3600.0
second_units = [15.0 * u.arcsec] * len(second_limits_)
hour_limits = np.concatenate([second_limits_, minute_limits_, hour_limits_])
hour_steps = minsec_steps_ + minsec_steps_ + hour_steps_
hour_units = second_units + minute_units + hour_units
n = hour_limits.searchsorted(dv.to(u.hourangle))
step = hour_steps[n]
unit = hour_units[n]
return step * unit
else:
return select_step_scalar(dv.to_value(15.0 * u.arcsec)) * (15.0 * u.arcsec)
def select_step_scalar(dv):
log10_dv = np.log10(dv)
base = np.floor(log10_dv)
frac = log10_dv - base
steps = np.log10([1, 2, 5, 10])
imin = np.argmin(np.abs(frac - steps))
return 10.0 ** (base + steps[imin])
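# For example, select_step_scalar(0.033) returns 0.05: the fractional part of
# log10(0.033) is closest to log10(5), so the value snaps to 5 * 10**-2.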
def get_coord_meta(frame):
coord_meta = {}
coord_meta["type"] = ("longitude", "latitude")
coord_meta["wrap"] = (None, None)
coord_meta["unit"] = (u.deg, u.deg)
from astropy.coordinates import frame_transform_graph
if isinstance(frame, str):
initial_frame = frame
frame = frame_transform_graph.lookup_name(frame)
if frame is None:
raise ValueError(f"Unknown frame: {initial_frame}")
if not isinstance(frame, BaseCoordinateFrame):
frame = frame()
names = list(frame.representation_component_names.keys())
coord_meta["name"] = names[:2]
return coord_meta
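# Illustrative example: get_coord_meta('galactic') returns a dict with
# type=('longitude', 'latitude'), wrap=(None, None), unit=(deg, deg) and
# name=['l', 'b'].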
def transform_contour_set_inplace(cset, transform):
"""
Transform a contour set in-place using a specified
:class:`matplotlib.transform.Transform`
Using transforms with the native Matplotlib contour/contourf can be slow if
the transforms have a non-negligible overhead (which is the case for
WCS/SkyCoord transforms) since the transform is called for each individual
contour line. It is more efficient to stack all the contour lines together
temporarily and transform them in one go.
"""
# The contours are represented as paths grouped into levels. Each can have
# one or more paths. The approach we take here is to stack the vertices of
# all paths and transform them in one go. The pos_level list helps us keep
# track of where the set of segments for each overall contour level ends.
# The pos_segments list helps us keep track of where each segment ends for
# each contour level.
all_paths = []
pos_level = []
pos_segments = []
for collection in cset.collections:
paths = collection.get_paths()
if len(paths) == 0:
continue
all_paths.append(paths)
# The last item in pos isn't needed for np.split and in fact causes
# issues if we keep it because it will cause an extra empty array to be
# returned.
pos = np.cumsum([len(x) for x in paths])
pos_segments.append(pos[:-1])
pos_level.append(pos[-1])
# As above the last item isn't needed
pos_level = np.cumsum(pos_level)[:-1]
# Stack all the segments into a single (n, 2) array
vertices = [path.vertices for paths in all_paths for path in paths]
if len(vertices) > 0:
vertices = np.concatenate(vertices)
else:
return
# Transform all coordinates in one go
vertices = transform.transform(vertices)
# Split up into levels again
vertices = np.split(vertices, pos_level)
# Now re-populate the segments in the line collections
for ilevel, vert in enumerate(vertices):
vert = np.split(vert, pos_segments[ilevel])
for iseg, ivert in enumerate(vert):
all_paths[ilevel][iseg].vertices = ivert
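# Usage sketch: ``cset`` is the return value of a matplotlib ``contour`` or
# ``contourf`` call, and ``transform`` maps its coordinates to the target
# system:
#
#     cset = ax.contour(data)
#     transform_contour_set_inplace(cset, transform)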
|
7bd5c3b22572114865ab0163a88e975a331483e859f2bbff6d99f69e2315302d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.transforms as mtransforms
import numpy as np
from matplotlib import rcParams
from matplotlib.text import Text
from .frame import RectangularFrame
class AxisLabels(Text):
def __init__(self, frame, minpad=1, *args, **kwargs):
# Use rcParams if the following parameters were not specified explicitly
if "weight" not in kwargs:
kwargs["weight"] = rcParams["axes.labelweight"]
if "size" not in kwargs:
kwargs["size"] = rcParams["axes.labelsize"]
if "color" not in kwargs:
kwargs["color"] = rcParams["axes.labelcolor"]
self._frame = frame
super().__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes("all")
self.set_ha("center")
self.set_va("center")
self._minpad = minpad
self._visibility_rule = "labels"
def get_minpad(self, axis):
try:
return self._minpad[axis]
except TypeError:
return self._minpad
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == "all":
return self._frame.keys()
else:
return [x for x in self._visible_axes if x in self._frame]
def set_minpad(self, minpad):
self._minpad = minpad
def set_visibility_rule(self, value):
allowed = ["always", "labels", "ticks"]
if value not in allowed:
raise ValueError(
f"Axis label visibility rule must be one of{' / '.join(allowed)}"
)
self._visibility_rule = value
def get_visibility_rule(self):
return self._visibility_rule
def draw(
self,
renderer,
bboxes,
ticklabels_bbox,
coord_ticklabels_bbox,
ticks_locs,
visible_ticks,
):
if not self.get_visible():
return
text_size = renderer.points_to_pixels(self.get_size())
# Flatten the bboxes for all coords and all axes
ticklabels_bbox_list = []
for bbcoord in ticklabels_bbox.values():
for bbaxis in bbcoord.values():
ticklabels_bbox_list += bbaxis
for axis in self.get_visible_axes():
if self.get_visibility_rule() == "ticks":
if not ticks_locs[axis]:
continue
elif self.get_visibility_rule() == "labels":
if not coord_ticklabels_bbox:
continue
padding = text_size * self.get_minpad(axis)
# Find position of the axis label. For now we pick the mid-point
# along the path but in future we could allow this to be a
# parameter.
x, y, normal_angle = self._frame[axis]._halfway_x_y_angle()
label_angle = (normal_angle - 90.0) % 360.0
if 135 < label_angle < 225:
label_angle += 180
self.set_rotation(label_angle)
# Find the label position by looking at the bounding boxes of the
# tick labels and the image. The default padding is one times the
# axis label font size; this can be changed via the minpad parameter.
if isinstance(self._frame, RectangularFrame):
if (
len(ticklabels_bbox_list) > 0
and ticklabels_bbox_list[0] is not None
):
coord_ticklabels_bbox[axis] = [
mtransforms.Bbox.union(ticklabels_bbox_list)
]
else:
coord_ticklabels_bbox[axis] = [None]
visible = (
axis in visible_ticks and coord_ticklabels_bbox[axis][0] is not None
)
if axis == "l":
if visible:
x = coord_ticklabels_bbox[axis][0].xmin
x = x - padding
elif axis == "r":
if visible:
x = coord_ticklabels_bbox[axis][0].x1
x = x + padding
elif axis == "b":
if visible:
y = coord_ticklabels_bbox[axis][0].ymin
y = y - padding
elif axis == "t":
if visible:
y = coord_ticklabels_bbox[axis][0].y1
y = y + padding
else: # arbitrary axis
x = x + np.cos(np.radians(normal_angle)) * (padding + text_size * 1.5)
y = y + np.sin(np.radians(normal_angle)) * (padding + text_size * 1.5)
self.set_position((x, y))
super().draw(renderer)
bb = super().get_window_extent(renderer)
bboxes.append(bb)
|
50c05d74e38c8611ad3d398ea2ffdb41387fea5ae24cc5f01f6669c38b2cb643 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy import units as u
# Algorithm inspired by PGSBOX from WCSLIB by M. Calabretta
LONLAT = {"longitude", "latitude"}
def wrap_180(values):
values_new = values % 360.0
with np.errstate(invalid="ignore"):
values_new[values_new > 180.0] -= 360
return values_new
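# For example, wrap_180(np.array([10.0, 190.0, 350.0])) returns
# array([10., -170., -10.]), mapping angles into the range (-180, 180].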
def find_coordinate_range(transform, extent, coord_types, coord_units, coord_wraps):
"""
Find the range of coordinates to use for ticks/grids
Parameters
----------
transform : `~matplotlib.transforms.Transform`
The transform from pixel to world coordinates. Its ``transform``
method is called with an array of pixel coordinates and should
return the corresponding world coordinates.
extent : iterable
The range of the image viewport in pixel coordinates, given as [xmin,
xmax, ymin, ymax].
coord_types : list of str
Whether each coordinate is a ``'longitude'``, ``'latitude'``, or
``'scalar'`` value.
coord_units : list of `astropy.units.Unit`
The units for each coordinate.
coord_wraps : list of float
The wrap angles for longitudes.
"""
# Sample coordinates on an NX x NY grid.
from . import conf
if len(extent) == 4:
nx = ny = conf.coordinate_range_samples
x = np.linspace(extent[0], extent[1], nx + 1)
y = np.linspace(extent[2], extent[3], ny + 1)
xp, yp = np.meshgrid(x, y)
with np.errstate(invalid="ignore"):
world = transform.transform(np.vstack([xp.ravel(), yp.ravel()]).transpose())
else:
nx = conf.coordinate_range_samples
xp = np.linspace(extent[0], extent[1], nx + 1)[None]
with np.errstate(invalid="ignore"):
world = transform.transform(xp.T)
ranges = []
for coord_index, coord_type in enumerate(coord_types):
xw = world[:, coord_index].reshape(xp.shape)
if coord_type in LONLAT:
unit = coord_units[coord_index]
xw = xw * unit.to(u.deg)
# Iron out coordinates along first row
wjump = xw[0, 1:] - xw[0, :-1]
with np.errstate(invalid="ignore"):
reset = np.abs(wjump) > 180.0
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.0
wjump = 360.0 * (wjump / 360.0).astype(int)
xw[0, 1:][reset] -= wjump[reset]
# Now iron out coordinates along all columns, starting with first row.
wjump = xw[1:] - xw[:1]
with np.errstate(invalid="ignore"):
reset = np.abs(wjump) > 180.0
if np.any(reset):
wjump = wjump + np.sign(wjump) * 180.0
wjump = 360.0 * (wjump / 360.0).astype(int)
xw[1:][reset] -= wjump[reset]
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min = np.nanmin(xw)
xw_max = np.nanmax(xw)
# Check if range is smaller when normalizing to the range 0 to 360
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(xw % 360.0)
xw_max_check = np.nanmax(xw % 360.0)
if xw_max_check - xw_min_check <= xw_max - xw_min < 360.0:
xw_min = xw_min_check
xw_max = xw_max_check
# Check if range is smaller when normalizing to the range -180 to 180
if coord_type in LONLAT:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
xw_min_check = np.nanmin(wrap_180(xw))
xw_max_check = np.nanmax(wrap_180(xw))
if (
xw_max_check - xw_min_check < 360.0
and xw_max - xw_min >= xw_max_check - xw_min_check
):
xw_min = xw_min_check
xw_max = xw_max_check
x_range = xw_max - xw_min
if coord_type == "longitude":
if x_range > 300.0:
xw_min = coord_wraps[coord_index] - 360
xw_max = coord_wraps[coord_index] - np.spacing(360.0)
elif xw_min < 0.0:
xw_min = max(-180.0, xw_min - 0.1 * x_range)
xw_max = min(+180.0, xw_max + 0.1 * x_range)
else:
xw_min = max(0.0, xw_min - 0.1 * x_range)
xw_max = min(360.0, xw_max + 0.1 * x_range)
elif coord_type == "latitude":
xw_min = max(-90.0, xw_min - 0.1 * x_range)
xw_max = min(+90.0, xw_max + 0.1 * x_range)
if coord_type in LONLAT:
xw_min *= u.deg.to(unit)
xw_max *= u.deg.to(unit)
ranges.append((xw_min, xw_max))
return ranges
|
d2184ec8d05c4515446ef9d69a1d02419af71ed58d281fd31f5b9042d49b2864 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import abc
from collections import OrderedDict
import numpy as np
from matplotlib import rcParams
from matplotlib.lines import Line2D, Path
from matplotlib.patches import PathPatch
__all__ = [
"RectangularFrame1D",
"Spine",
"BaseFrame",
"RectangularFrame",
"EllipticalFrame",
]
class Spine:
"""
A single side of an axes.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
Parameters
----------
parent_axes : `~astropy.visualization.wcsaxes.WCSAxes`
The parent axes
transform : `~matplotlib.transforms.Transform`
The transform from data to world
data_func : callable
If not ``None``, it should be a function that returns the appropriate spine
data when called with this object as the sole argument. If ``None``, the
spine data must be manually updated in ``update_spines()``.
"""
def __init__(self, parent_axes, transform, *, data_func=None):
self.parent_axes = parent_axes
self.transform = transform
self.data_func = data_func
self._data = None
self._pixel = None
self._world = None
@property
def data(self):
if self._data is None and self.data_func:
self.data = self.data_func(self)
return self._data
@data.setter
def data(self, value):
self._data = value
if value is None:
self._pixel = None
self._world = None
else:
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid="ignore"):
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
self._pixel = value
if value is None:
self._data = None
self._world = None
else:
self._data = self.parent_axes.transData.inverted().transform(self._pixel)
self._world = self.transform.transform(self._data)
self._update_normal()
@property
def world(self):
return self._world
@world.setter
def world(self, value):
self._world = value
if value is None:
self._data = None
self._pixel = None
else:
self._data = self.transform.transform(value)
self._pixel = self.parent_axes.transData.transform(self._data)
self._update_normal()
def _update_normal(self):
# Find angle normal to border and inwards, in display coordinate
dx = self.pixel[1:, 0] - self.pixel[:-1, 0]
dy = self.pixel[1:, 1] - self.pixel[:-1, 1]
self.normal_angle = np.degrees(np.arctan2(dx, -dy))
def _halfway_x_y_angle(self):
"""
Return the x, y, normal_angle values halfway along the spine
"""
x_disp, y_disp = self.pixel[:, 0], self.pixel[:, 1]
# Get distance along the path
d = np.hstack(
[0.0, np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))]
)
xcen = np.interp(d[-1] / 2.0, d, x_disp)
ycen = np.interp(d[-1] / 2.0, d, y_disp)
# Find segment along which the mid-point lies
imin = np.searchsorted(d, d[-1] / 2.0) - 1
# Find normal of the axis label facing outwards on that segment
normal_angle = self.normal_angle[imin] + 180.0
return xcen, ycen, normal_angle
class SpineXAligned(Spine):
"""
A single side of an axes, aligned with the X data axis.
This does not need to be a straight line, but represents a 'side' when
determining which part of the frame to put labels and ticks on.
"""
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
if value is None:
self._pixel = None
self._world = None
else:
self._pixel = self.parent_axes.transData.transform(self._data)
with np.errstate(invalid="ignore"):
self._world = self.transform.transform(self._data[:, 0:1])
self._update_normal()
@property
def pixel(self):
return self._pixel
@pixel.setter
def pixel(self, value):
self._pixel = value
if value is None:
self._data = None
self._world = None
else:
self._data = self.parent_axes.transData.inverted().transform(self._pixel)
self._world = self.transform.transform(self._data[:, 0:1])
self._update_normal()
class BaseFrame(OrderedDict, metaclass=abc.ABCMeta):
"""
Base class for frames, which are collections of
:class:`~astropy.visualization.wcsaxes.frame.Spine` instances.
"""
spine_class = Spine
def __init__(self, parent_axes, transform, path=None):
super().__init__()
self.parent_axes = parent_axes
self._transform = transform
self._linewidth = rcParams["axes.linewidth"]
self._color = rcParams["axes.edgecolor"]
self._path = path
for axis in self.spine_names:
self[axis] = self.spine_class(parent_axes, transform)
@property
def origin(self):
ymin, ymax = self.parent_axes.get_ylim()
return "lower" if ymin < ymax else "upper"
@property
def transform(self):
return self._transform
@transform.setter
def transform(self, value):
self._transform = value
for axis in self:
self[axis].transform = value
def _update_patch_path(self):
self.update_spines()
x, y = [], []
for axis in self.spine_names:
x.append(self[axis].data[:, 0])
y.append(self[axis].data[:, 1])
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
@property
def patch(self):
self._update_patch_path()
return PathPatch(
self._path,
transform=self.parent_axes.transData,
facecolor=rcParams["axes.facecolor"],
edgecolor="white",
)
def draw(self, renderer):
for axis in self.spine_names:
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(
x, y, linewidth=self._linewidth, color=self._color, zorder=1000
)
line.draw(renderer)
def sample(self, n_samples):
self.update_spines()
spines = OrderedDict()
for axis in self:
data = self[axis].data
spines[axis] = self.spine_class(self.parent_axes, self.transform)
if data.size > 0:
p = np.linspace(0.0, 1.0, data.shape[0])
p_new = np.linspace(0.0, 1.0, n_samples)
spines[axis].data = np.array(
[np.interp(p_new, p, d) for d in data.T]
).transpose()
else:
spines[axis].data = data
return spines
def set_color(self, color):
"""
Sets the color of the frame.
Parameters
----------
color : str
The color of the frame.
"""
self._color = color
def get_color(self):
return self._color
def set_linewidth(self, linewidth):
"""
Sets the linewidth of the frame.
Parameters
----------
linewidth : float
The linewidth of the frame in points.
"""
self._linewidth = linewidth
def get_linewidth(self):
return self._linewidth
def update_spines(self):
for spine in self.values():
if spine.data_func:
spine.data = spine.data_func(spine)
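# Illustrative sketch (assumption: ``ax`` is an existing WCSAxes, whose
# ``ax.coords.frame`` attribute holds a BaseFrame subclass instance):
# ``sample`` interpolates each spine to a fixed number of points, which
# is how smooth curved frame outlines are obtained.
def _example_frame_sample(ax, n_samples=100):
    frame = ax.coords.frame
    spines = frame.sample(n_samples)
    # Each value is a Spine whose ``data`` now has ``n_samples`` rows.
    return {axis: spine.data.shape for axis, spine in spines.items()}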
class RectangularFrame1D(BaseFrame):
"""
A classic rectangular frame for 1D axes, with only bottom and top spines.
"""
spine_names = "bt"
spine_class = SpineXAligned
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self["b"].data = np.array(([xmin, ymin], [xmax, ymin]))
self["t"].data = np.array(([xmax, ymax], [xmin, ymax]))
super().update_spines()
def _update_patch_path(self):
self.update_spines()
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose()
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
x = [xmin, xmax, xmax, xmin, xmin]
y = [ymin, ymin, ymax, ymax, ymin]
line = Line2D(
x,
y,
linewidth=self._linewidth,
color=self._color,
zorder=1000,
transform=self.parent_axes.transData,
)
line.draw(renderer)
class RectangularFrame(BaseFrame):
"""
A classic rectangular frame.
"""
spine_names = "brtl"
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
self["b"].data = np.array(([xmin, ymin], [xmax, ymin]))
self["r"].data = np.array(([xmax, ymin], [xmax, ymax]))
self["t"].data = np.array(([xmax, ymax], [xmin, ymax]))
self["l"].data = np.array(([xmin, ymax], [xmin, ymin]))
super().update_spines()
class EllipticalFrame(BaseFrame):
"""
An elliptical frame.
"""
spine_names = "chv"
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
xmid = 0.5 * (xmax + xmin)
ymid = 0.5 * (ymax + ymin)
dx = xmid - xmin
dy = ymid - ymin
theta = np.linspace(0.0, 2 * np.pi, 1000)
self["c"].data = np.array(
[xmid + dx * np.cos(theta), ymid + dy * np.sin(theta)]
).transpose()
self["h"].data = np.array(
[np.linspace(xmin, xmax, 1000), np.repeat(ymid, 1000)]
).transpose()
self["v"].data = np.array(
[np.repeat(xmid, 1000), np.linspace(ymin, ymax, 1000)]
).transpose()
super().update_spines()
def _update_patch_path(self):
"""Override path patch to include only the outer ellipse,
not the major and minor axes in the middle."""
self.update_spines()
vertices = self["c"].data
if self._path is None:
self._path = Path(vertices)
else:
self._path.vertices = vertices
def draw(self, renderer):
"""Override to draw only the outer ellipse,
not the major and minor axes in the middle.
FIXME: we may want to add a general method to give the user control
over which spines are drawn."""
axis = "c"
x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1]
line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000)
line.draw(renderer)
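# Illustrative sketch of selecting a frame class from user code; the
# ``frame_class`` keyword is part of the WCSAxes API, and ``wcs`` is an
# assumed astropy.wcs.WCS instance (e.g. built from an all-sky Mollweide
# header, where an elliptical boundary is appropriate).
def _example_elliptical_frame(wcs):
    import matplotlib.pyplot as plt
    ax = plt.subplot(projection=wcs, frame_class=EllipticalFrame)
    return ax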
|
c11fe6e9a00a639a7829ca22d14c7ac4be7737e8a514fb55cb7606d2b1f84af9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.path import Path
from astropy.coordinates.angle_utilities import angular_separation
# Tolerance for WCS round-tripping, relative to the scale size
ROUND_TRIP_RTOL = 1.0
# Tolerance for discontinuities relative to the median
DISCONT_FACTOR = 10.0
def get_lon_lat_path(lon_lat, pixel, lon_lat_check):
"""
Draw a curve, taking into account discontinuities.
Parameters
----------
lon_lat : ndarray
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : ndarray
The pixel coordinates corresponding to ``lon_lat``
lon_lat_check : ndarray
The world coordinates derived from converting from ``pixel``, which is
used to ensure round-tripping.
"""
# In some spherical projections, some parts of the curve are 'behind' or
# 'in front of' the plane of the image, so we find those by reversing the
# transformation and finding points where the result is not consistent.
sep = angular_separation(
np.radians(lon_lat[:, 0]),
np.radians(lon_lat[:, 1]),
np.radians(lon_lat_check[:, 0]),
np.radians(lon_lat_check[:, 1]),
)
# Define the relevant scale size using the separation between the first two points
scale_size = angular_separation(
*np.radians(lon_lat[0, :]), *np.radians(lon_lat[1, :])
)
with np.errstate(invalid="ignore"):
sep[sep > np.pi] -= 2.0 * np.pi
mask = np.abs(sep) > ROUND_TRIP_RTOL * scale_size
# Mask values with invalid pixel positions
mask = mask | np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(lon_lat.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# We now go through and search for discontinuities in the curve that would
# be due to the curve going outside the field of view, invalid WCS values,
# or due to discontinuities in the projection.
# We start off by pre-computing the step in pixel coordinates from one
# point to the next. The idea is to look for large jumps that might indicate
# discontinuities.
step = np.sqrt(
(pixel[1:, 0] - pixel[:-1, 0]) ** 2 + (pixel[1:, 1] - pixel[:-1, 1]) ** 2
)
# We search for discontinuities by looking for places where the step
# is larger by more than a given factor compared to the previous step.
discontinuous = step[1:] > DISCONT_FACTOR * step[:-1]
# Skip over discontinuities
codes[2:][discontinuous] = Path.MOVETO
# The above missed the first step, so check that too
if step[0] > DISCONT_FACTOR * step[1]:
codes[1] = Path.MOVETO
# Create the path
path = Path(pixel, codes=codes)
return path
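# Minimal sketch of the pen-lifting idea used above, with hypothetical
# inputs: points flagged as hidden become MOVETO codes, so matplotlib
# lifts the pen instead of drawing a segment through the discontinuity.
def _example_moveto_masking(pixel, hidden):
    codes = np.zeros(len(pixel), dtype=np.uint8)
    codes[:] = Path.LINETO
    codes[0] = Path.MOVETO
    codes[hidden] = Path.MOVETO
    # Also lift the pen on the point *after* a hidden one.
    codes[1:][hidden[:-1]] = Path.MOVETO
    return Path(pixel, codes=codes)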
def get_gridline_path(world, pixel):
"""
Draw a grid line.
Parameters
----------
world : ndarray
The longitude and latitude values along the curve, given as a (n,2)
array.
pixel : ndarray
The pixel coordinates corresponding to ``world``
"""
# Mask values with invalid pixel positions
mask = np.isnan(pixel[:, 0]) | np.isnan(pixel[:, 1])
# We can now start to set up the codes for the Path.
codes = np.zeros(world.shape[0], dtype=np.uint8)
codes[:] = Path.LINETO
codes[0] = Path.MOVETO
codes[mask] = Path.MOVETO
# Also need to move to point *after* a hidden value
codes[1:][mask[:-1]] = Path.MOVETO
# Create the path
path = Path(pixel, codes=codes)
return path
|
38fa46d27f53da5d5e8446ddd8a5c9f4b2d334fee7556aa812034d8498421f5d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_allclose
from astropy.utils.compat.optional_deps import HAS_PLT, HAS_SCIPY
if HAS_PLT:
import matplotlib.pyplot as plt
import numpy as np
import pytest
from astropy.stats import histogram
from astropy.visualization import hist
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_hist_basic(rseed=0):
rng = np.random.default_rng(rseed)
x = rng.standard_normal(100)
for range in [None, (-2, 2)]:
n1, bins1, patches1 = plt.hist(x, 10, range=range)
n2, bins2, patches2 = hist(x, 10, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_hist_specify_ax(rseed=0):
rng = np.random.default_rng(rseed)
x = rng.standard_normal(100)
fig, ax = plt.subplots(2)
n1, bins1, patches1 = hist(x, 10, ax=ax[0])
assert patches1[0].axes is ax[0]
n2, bins2, patches2 = hist(x, 10, ax=ax[1])
assert patches2[0].axes is ax[1]
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_hist_autobin(rseed=0):
rng = np.random.default_rng(rseed)
x = rng.standard_normal(100)
# 'knuth' bintype depends on scipy that is optional dependency
if HAS_SCIPY:
bintypes = [10, np.arange(-3, 3, 10), "knuth", "scott", "freedman", "blocks"]
else:
bintypes = [10, np.arange(-3, 3, 10), "scott", "freedman", "blocks"]
for bintype in bintypes:
for range in [None, (-3, 3)]:
n1, bins1 = histogram(x, bintype, range=range)
n2, bins2, patches = hist(x, bintype, range=range)
assert_allclose(n1, n2)
assert_allclose(bins1, bins2)
def test_histogram_pathological_input():
# Regression test for https://github.com/astropy/astropy/issues/7758
# The key feature of the data below is that one of the points is very,
# very different than the rest. That leads to a large number of bins.
data = [
9.99999914e05,
-8.31312483e-03,
6.52755852e-02,
1.43104653e-03,
-2.26311017e-02,
2.82660007e-03,
1.80307521e-02,
9.26294279e-03,
5.06606026e-02,
2.05418011e-03,
]
with pytest.raises(ValueError):
hist(data, bins="freedman", max_bins=10000)
|
6bda045ebc507635195eecb593b663737fff391a43fff585b95de9e901c7d958 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.utils import NumpyRNGContext
from astropy.visualization.interval import (
AsymmetricPercentileInterval,
ManualInterval,
MinMaxInterval,
PercentileInterval,
ZScaleInterval,
)
class TestInterval:
data = np.linspace(-20.0, 60.0, 100)
def test_manual(self):
interval = ManualInterval(-10.0, +15.0)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -10.0)
np.testing.assert_allclose(vmax, +15.0)
def test_manual_defaults(self):
interval = ManualInterval(vmin=-10.0)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -10.0)
np.testing.assert_allclose(vmax, np.max(self.data))
interval = ManualInterval(vmax=15.0)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, np.min(self.data))
np.testing.assert_allclose(vmax, 15.0)
def test_manual_zero_limit(self):
# Regression test for a bug that caused ManualInterval to compute the
# limit (min or max) if it was set to zero.
interval = ManualInterval(vmin=0, vmax=0)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, 0)
np.testing.assert_allclose(vmax, 0)
def test_manual_defaults_with_nan(self):
interval = ManualInterval()
data = np.copy(self.data)
data[0] = np.nan
vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, np.nanmin(data))
np.testing.assert_allclose(vmax, np.nanmax(data))
def test_minmax(self):
interval = MinMaxInterval()
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -20.0)
np.testing.assert_allclose(vmax, +60.0)
def test_percentile(self):
interval = PercentileInterval(62.2)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -4.88)
np.testing.assert_allclose(vmax, 44.88)
def test_asymmetric_percentile(self):
interval = AsymmetricPercentileInterval(10.5, 70.5)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -11.6)
np.testing.assert_allclose(vmax, 36.4)
def test_asymmetric_percentile_nsamples(self):
with NumpyRNGContext(12345):
interval = AsymmetricPercentileInterval(10.5, 70.5, n_samples=20)
vmin, vmax = interval.get_limits(self.data)
np.testing.assert_allclose(vmin, -14.367676767676768)
np.testing.assert_allclose(vmax, 40.266666666666666)
class TestIntervalList(TestInterval):
# Make sure intervals work with lists
data = np.linspace(-20.0, 60.0, 100).tolist()
class TestInterval2D(TestInterval):
# Make sure intervals work with 2d arrays
data = np.linspace(-20.0, 60.0, 100).reshape(100, 1)
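# Illustrative sketch: every interval exposes get_limits() for the
# (vmin, vmax) pair and is itself callable, returning values rescaled
# to [0, 1] (the callable form is exercised by test_integers below).
def _example_interval_call():
    data = np.linspace(-20.0, 60.0, 100)
    interval = PercentileInterval(95.0)
    vmin, vmax = interval.get_limits(data)
    scaled = interval(data)  # clipped to [0, 1] by default
    return vmin, vmax, scaled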
def test_zscale():
np.random.seed(42)
data = np.random.randn(100, 100) * 5 + 10
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, -9.6, atol=0.1)
np.testing.assert_allclose(vmax, 25.4, atol=0.1)
data = list(range(1000)) + [np.nan]
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, 0, atol=0.1)
np.testing.assert_allclose(vmax, 999, atol=0.1)
data = list(range(100))
interval = ZScaleInterval()
vmin, vmax = interval.get_limits(data)
np.testing.assert_allclose(vmin, 0, atol=0.1)
np.testing.assert_allclose(vmax, 99, atol=0.1)
def test_zscale_npoints():
"""
Regression test to ensure ZScaleInterval returns the minimum and
maximum of the data if the number of data points is less than
``min_npixels``.
"""
data = np.arange(4).reshape((2, 2))
interval = ZScaleInterval(min_npixels=5)
vmin, vmax = interval.get_limits(data)
assert vmin == 0
assert vmax == 3
def test_integers():
# Need to make sure integers get cast to float
interval = MinMaxInterval()
values = interval([1, 3, 4, 5, 6])
np.testing.assert_allclose(values, [0.0, 0.4, 0.6, 0.8, 1.0])
# Don't accept integer array in output
out = np.zeros(5, dtype=int)
with pytest.raises(
TypeError, match=r"Can only do in-place scaling for floating-point arrays"
):
values = interval([1, 3, 4, 5, 6], out=out)
# But integer input and floating point output is fine
out = np.zeros(5, dtype=float)
interval([1, 3, 4, 5, 6], out=out)
np.testing.assert_allclose(out, [0.0, 0.4, 0.6, 0.8, 1.0])
def test_constant_data():
"""Test intervals with constant data (avoiding divide-by-zero)."""
shape = (10, 10)
data = np.ones(shape)
interval = MinMaxInterval()
limits = interval.get_limits(data)
values = interval(data)
np.testing.assert_allclose(limits, (1.0, 1.0))
np.testing.assert_allclose(values, np.zeros(shape))
|
6888a489cb12d8b9e5f99d39c16784d871ec6b6e0ecdd5f4bc7bc77c1c6e7c85 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_equal
from astropy.visualization.stretch import (
AsinhStretch,
ContrastBiasStretch,
HistEqStretch,
InvertedHistEqStretch,
InvertedLogStretch,
InvertedPowerDistStretch,
LinearStretch,
LogStretch,
PowerDistStretch,
PowerStretch,
SinhStretch,
SqrtStretch,
SquaredStretch,
)
DATA = np.array([0.00, 0.25, 0.50, 0.75, 1.00])
RESULTS = {}
RESULTS[LinearStretch()] = np.array([0.00, 0.25, 0.50, 0.75, 1.00])
RESULTS[LinearStretch(intercept=0.5) + LinearStretch(slope=0.5)] = np.array(
[0.5, 0.625, 0.75, 0.875, 1.0]
)
RESULTS[SqrtStretch()] = np.array([0.0, 0.5, 0.70710678, 0.8660254, 1.0])
RESULTS[SquaredStretch()] = np.array([0.0, 0.0625, 0.25, 0.5625, 1.0])
RESULTS[PowerStretch(0.5)] = np.array([0.0, 0.5, 0.70710678, 0.8660254, 1.0])
RESULTS[PowerDistStretch()] = np.array([0.0, 0.004628, 0.030653, 0.177005, 1.0])
RESULTS[LogStretch()] = np.array([0.0, 0.799776, 0.899816, 0.958408, 1.0])
RESULTS[AsinhStretch()] = np.array([0.0, 0.549402, 0.77127, 0.904691, 1.0])
RESULTS[SinhStretch()] = np.array([0.0, 0.082085, 0.212548, 0.46828, 1.0])
RESULTS[ContrastBiasStretch(contrast=2.0, bias=0.4)] = np.array(
[-0.3, 0.2, 0.7, 1.2, 1.7]
)
RESULTS[HistEqStretch(DATA)] = DATA
RESULTS[HistEqStretch(DATA[::-1])] = DATA
RESULTS[HistEqStretch(DATA**0.5)] = np.array([0.0, 0.125, 0.25, 0.5674767, 1.0])
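# Illustrative sketch (deliberately not added to RESULTS): stretches
# compose with ``+`` and expose ``.inverse``, which is what the chaining
# and round-trip tests below rely on.
def _example_composition():
    composed = SqrtStretch() + SqrtStretch()  # equivalent to PowerStretch(0.25)
    values = composed(DATA, clip=False)
    recovered = composed.inverse(values, clip=False)
    return values, recovered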
class TestStretch:
@pytest.mark.parametrize("stretch", RESULTS.keys())
def test_no_clip(self, stretch):
np.testing.assert_allclose(
stretch(DATA, clip=False), RESULTS[stretch], atol=1.0e-6
)
@pytest.mark.parametrize("ndim", [2, 3])
@pytest.mark.parametrize("stretch", RESULTS.keys())
def test_clip_ndimensional(self, stretch, ndim):
new_shape = DATA.shape + (1,) * ndim
np.testing.assert_allclose(
stretch(DATA.reshape(new_shape), clip=True).ravel(),
np.clip(RESULTS[stretch], 0.0, 1),
atol=1.0e-6,
)
@pytest.mark.parametrize("stretch", RESULTS.keys())
def test_clip(self, stretch):
np.testing.assert_allclose(
stretch(DATA, clip=True), np.clip(RESULTS[stretch], 0.0, 1), atol=1.0e-6
)
@pytest.mark.parametrize("stretch", RESULTS.keys())
def test_inplace(self, stretch):
data_in = DATA.copy()
result = np.zeros(DATA.shape)
stretch(data_in, out=result, clip=False)
np.testing.assert_allclose(result, RESULTS[stretch], atol=1.0e-6)
np.testing.assert_allclose(data_in, DATA)
@pytest.mark.parametrize("stretch", RESULTS.keys())
def test_round_trip(self, stretch):
np.testing.assert_allclose(
stretch.inverse(stretch(DATA, clip=False), clip=False), DATA
)
@pytest.mark.parametrize("stretch", RESULTS.keys())
def test_inplace_roundtrip(self, stretch):
result = np.zeros(DATA.shape)
stretch(DATA, out=result, clip=False)
stretch.inverse(result, out=result, clip=False)
np.testing.assert_allclose(result, DATA)
@pytest.mark.parametrize("stretch", RESULTS.keys())
def test_double_inverse(self, stretch):
np.testing.assert_allclose(
stretch.inverse.inverse(DATA), stretch(DATA), atol=1.0e-6
)
def test_inverted(self):
stretch_1 = SqrtStretch().inverse
stretch_2 = PowerStretch(2)
np.testing.assert_allclose(stretch_1(DATA), stretch_2(DATA))
def test_chaining(self):
stretch_1 = SqrtStretch() + SqrtStretch()
stretch_2 = PowerStretch(0.25)
stretch_3 = PowerStretch(4.0)
np.testing.assert_allclose(stretch_1(DATA), stretch_2(DATA))
np.testing.assert_allclose(stretch_1.inverse(DATA), stretch_3(DATA))
def test_clip_invalid():
stretch = SqrtStretch()
values = stretch([-1.0, 0.0, 0.5, 1.0, 1.5])
np.testing.assert_allclose(values, [0.0, 0.0, 0.70710678, 1.0, 1.0])
values = stretch([-1.0, 0.0, 0.5, 1.0, 1.5], clip=False)
np.testing.assert_allclose(values, [np.nan, 0.0, 0.70710678, 1.0, 1.2247448])
@pytest.mark.parametrize("a", [-2.0, -1, 1.0])
def test_invalid_powerdist_a(a):
match = "a must be >= 0, but cannot be set to 1"
with pytest.raises(ValueError, match=match):
PowerDistStretch(a=a)
with pytest.raises(ValueError, match=match):
InvertedPowerDistStretch(a=a)
@pytest.mark.parametrize("a", [-2.0, -1, 0.0])
def test_invalid_power_log_a(a):
match = "a must be > 0"
with pytest.raises(ValueError, match=match):
PowerStretch(a=a)
with pytest.raises(ValueError, match=match):
LogStretch(a=a)
with pytest.raises(ValueError, match=match):
InvertedLogStretch(a=a)
@pytest.mark.parametrize("a", [-2.0, -1, 0.0, 1.5])
def test_invalid_sinh_a(a):
match = "a must be > 0 and <= 1"
with pytest.raises(ValueError, match=match):
AsinhStretch(a=a)
with pytest.raises(ValueError, match=match):
SinhStretch(a=a)
def test_histeqstretch_invalid():
data = np.array([-np.inf, 0.00, 0.25, 0.50, 0.75, 1.00, np.inf])
result = np.array([0.0, 0.0, 0.25, 0.5, 0.75, 1.0, 1.0])
assert_equal(HistEqStretch(data)(data), result)
assert_equal(InvertedHistEqStretch(data)(data), result)
|
26672a68b638e9c49a783a8ece920c8e54b844c24d93a973600133b61d2cdec2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from astropy.utils.compat.optional_deps import HAS_PLT
if HAS_PLT:
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.coordinates import Angle
from astropy.visualization.units import quantity_support
def teardown_function(function):
plt.close("all")
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_units():
plt.figure()
with quantity_support():
buff = io.BytesIO()
plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg, label="label")
plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g)
plt.legend()
# Also test fill_between, which requires actual conversion to ndarray
# with numpy >=1.10 (#4654).
plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g)
plt.savefig(buff, format="svg")
assert plt.gca().xaxis.get_units() == u.m
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_units_errorbar():
pytest.importorskip("matplotlib")
plt.figure()
with quantity_support():
x = [1, 2, 3] * u.s
y = [1, 2, 3] * u.m
yerr = [3, 2, 1] * u.cm
fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=yerr)
assert ax.xaxis.get_units() == u.s
assert ax.yaxis.get_units() == u.m
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_incompatible_units():
# NOTE: minversion check does not work properly for matplotlib dev.
try:
# https://github.com/matplotlib/matplotlib/pull/13005
from matplotlib.units import ConversionError
except ImportError:
err_type = u.UnitConversionError
else:
err_type = ConversionError
plt.figure()
with quantity_support():
plt.plot([1, 2, 3] * u.m)
with pytest.raises(err_type):
plt.plot([105, 210, 315] * u.kg)
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_quantity_subclass():
"""Check that subclasses are recognized.
This sadly is not done by matplotlib.units itself, though
there is a PR to change it:
https://github.com/matplotlib/matplotlib/pull/13536
"""
plt.figure()
with quantity_support():
plt.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
plt.scatter([105, 210, 315] * u.arcsec, [3050, 3025, 3010] * u.g)
plt.plot(Angle([105, 210, 315], u.arcsec), [3050, 3025, 3010] * u.g)
assert plt.gca().xaxis.get_units() == u.deg
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_nested():
with quantity_support():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
assert ax.xaxis.get_units() == u.deg
assert ax.yaxis.get_units() == u.kg
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.arcsec), [3, 4, 5] * u.pc)
assert ax.xaxis.get_units() == u.arcsec
assert ax.yaxis.get_units() == u.pc
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_empty_hist():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.hist([1, 2, 3, 4] * u.mmag, bins=100)
# The second call results in an empty list being passed to the
# unit converter in matplotlib >= 3.1
ax.hist([] * u.mmag, bins=100)
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_radian_formatter():
with quantity_support():
fig, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3] * u.rad * np.pi)
fig.canvas.draw()
labels = [tl.get_text() for tl in ax.yaxis.get_ticklabels()]
assert labels == ["π/2", "π", "3π/2", "2π", "5π/2", "3π", "7π/2"]
|
fdd4fe3353206b8a847501f3e832afa548a110c5a7c30d716aff8813c674b43b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for RGB Images
"""
import os
import sys
import tempfile
import numpy as np
import pytest
from numpy.testing import assert_equal
from astropy.convolution import Gaussian2DKernel, convolve
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB
from astropy.visualization import lupton_rgb
# Set display=True to get matplotlib imshow windows to help with debugging.
display = False
def display_rgb(rgb, title=None):
"""Display an rgb image using matplotlib (useful for debugging)"""
import matplotlib.pyplot as plt
plt.imshow(rgb, interpolation="nearest", origin="lower")
if title:
plt.title(title)
plt.show()
return plt
def saturate(image, satValue):
"""
Return image with all points above satValue set to NaN.
Simulates saturation on an image, so we can test 'replace_saturated_pixels'
"""
result = image.copy()
saturated = image > satValue
result[saturated] = np.nan
return result
def random_array(dtype, N=100):
return np.array(np.random.random(N) * 100, dtype=dtype)
def test_compute_intensity_1_float():
image_r = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_1_uint():
image_r = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r)
assert image_r.dtype == intensity.dtype
assert_equal(image_r, intensity)
def test_compute_intensity_3_float():
image_r = random_array(np.float64)
image_g = random_array(np.float64)
image_b = random_array(np.float64)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r + image_g + image_b) / 3.0)
def test_compute_intensity_3_uint():
image_r = random_array(np.uint8)
image_g = random_array(np.uint8)
image_b = random_array(np.uint8)
intensity = lupton_rgb.compute_intensity(image_r, image_g, image_b)
assert image_r.dtype == intensity.dtype
assert_equal(intensity, (image_r + image_g + image_b) // 3)
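# Illustrative sketch of the high-level entry point exercised by the
# class below: make_lupton_rgb combines three images into an (N, M, 3)
# uint8 RGB array using the Lupton et al. (2004) asinh stretch. The
# minimum/stretch/Q values here are arbitrary example numbers.
def _example_make_lupton_rgb():
    rng = np.random.RandomState(42)
    r, g, b = (rng.random_sample((16, 16)) * 100 for _ in range(3))
    return lupton_rgb.make_lupton_rgb(r, g, b, minimum=0, stretch=5, Q=8)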
class TestLuptonRgb:
"""A test case for Rgb"""
def setup_method(self, method):
np.random.seed(1000) # so we always get the same images.
self.min_, self.stretch_, self.Q = 0, 5, 20 # asinh
width, height = 85, 75
self.width = width
self.height = height
shape = (width, height)
image_r = np.zeros(shape)
image_g = np.zeros(shape)
image_b = np.zeros(shape)
# pixel locations, values and colors
points = [[15, 15], [50, 45], [30, 30], [45, 15]]
values = [1000, 5500, 600, 20000]
g_r = [1.0, -1.0, 1.0, 1.0]
r_i = [2.0, -0.5, 2.5, 1.0]
# Put pixels in the images.
for p, v, gr, ri in zip(points, values, g_r, r_i):
image_r[p[0], p[1]] = v * pow(10, 0.4 * ri)
image_g[p[0], p[1]] = v * pow(10, 0.4 * gr)
image_b[p[0], p[1]] = v
# convolve the image with a reasonable PSF, and add Gaussian background noise
def convolve_with_noise(image, psf):
convolvedImage = convolve(
image, psf, boundary="extend", normalize_kernel=True
)
randomImage = np.random.normal(0, 2, image.shape)
return randomImage + convolvedImage
psf = Gaussian2DKernel(2.5)
self.image_r = convolve_with_noise(image_r, psf)
self.image_g = convolve_with_noise(image_g, psf)
self.image_b = convolve_with_noise(image_b, psf)
def test_Asinh(self):
"""Test creating an RGB image using an asinh stretch"""
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscale(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensity(self):
"""
Test creating an RGB image using an asinh stretch estimated using zscale on the intensity
"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r, self.image_g, self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityPedestal(self):
"""Test creating an RGB image using an asinh stretch estimated using zscale on the intensity
where the images each have a pedestal added"""
pedestal = [100, 400, -400]
self.image_r += pedestal[0]
self.image_g += pedestal[1]
self.image_b += pedestal[2]
map = lupton_rgb.AsinhZScaleMapping(
self.image_r, self.image_g, self.image_b, pedestal=pedestal
)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_AsinhZscaleIntensityBW(self):
"""Test creating a black-and-white image using an asinh stretch estimated
using zscale on the intensity"""
map = lupton_rgb.AsinhZScaleMapping(self.image_r)
rgbImage = map.make_rgb_image(self.image_r, self.image_r, self.image_r)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="requires matplotlib")
def test_make_rgb(self):
"""Test the function that does it all"""
satValue = 1000.0
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(
red, green, blue, self.min_, self.stretch_, self.Q, filename=temp
)
assert os.path.exists(temp.name)
def test_make_rgb_saturated_fix(self):
pytest.skip("saturation correction is not implemented")
satValue = 1000.0
# TODO: Cannot test with these options yet, as that part of the code is not implemented.
with tempfile.NamedTemporaryFile(suffix=".png") as temp:
red = saturate(self.image_r, satValue)
green = saturate(self.image_g, satValue)
blue = saturate(self.image_b, satValue)
lupton_rgb.make_lupton_rgb(
red,
green,
blue,
self.min_,
self.stretch_,
self.Q,
saturated_border_width=1,
saturated_pixel_value=2000,
filename=temp,
)
def test_linear(self):
"""Test using a specified linear stretch"""
map = lupton_rgb.LinearMapping(-8.45, 13.44)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_linear_min_max(self):
"""Test using a min/max linear stretch determined from one image"""
map = lupton_rgb.LinearMapping(image=self.image_b)
rgbImage = map.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_saturated(self):
"""Test interpolationolating saturated pixels"""
pytest.skip("replaceSaturatedPixels is not implemented in astropy yet")
satValue = 1000.0
self.image_r = saturate(self.image_r, satValue)
self.image_g = saturate(self.image_g, satValue)
self.image_b = saturate(self.image_b, satValue)
lupton_rgb.replaceSaturatedPixels(
self.image_r, self.image_g, self.image_b, 1, 2000
)
# Check that we replaced those NaNs with some reasonable value
assert np.isfinite(self.image_r.getImage().getArray()).all()
assert np.isfinite(self.image_g.getImage().getArray()).all()
assert np.isfinite(self.image_b.getImage().getArray()).all()
# Prepare for generating an output file
self.imagesR = self.imagesR.getImage()
self.imagesG = self.imagesG.getImage()
self.imagesB = self.imagesB.getImage()
asinhMap = lupton_rgb.AsinhMapping(self.min_, self.stretch_, self.Q)
rgbImage = asinhMap.make_rgb_image(self.image_r, self.image_g, self.image_b)
if display:
display_rgb(rgbImage, title=sys._getframe().f_code.co_name)
def test_different_shapes_asserts(self):
with pytest.raises(ValueError, match=r"shapes must match"):
# just swap the dimensions to get a differently-shaped 'r'
image_r = self.image_r.reshape(self.height, self.width)
lupton_rgb.make_lupton_rgb(image_r, self.image_g, self.image_b)
|
b6ab9ebf95a3b613acd0879ada5e8b42a876b2be05f68a2ee7619b5d52cf1395 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy import ma
from numpy.testing import assert_allclose, assert_equal
from packaging.version import Version
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB, HAS_PLT
from astropy.visualization.interval import ManualInterval, PercentileInterval
from astropy.visualization.mpl_normalize import ImageNormalize, imshow_norm, simple_norm
from astropy.visualization.stretch import LogStretch, PowerStretch, SqrtStretch
if HAS_MATPLOTLIB:
import matplotlib
MATPLOTLIB_LT_32 = Version(matplotlib.__version__) < Version("3.2")
DATA = np.linspace(0.0, 15.0, 6)
DATA2 = np.arange(3)
DATA2SCL = 0.5 * DATA2
DATA3 = np.linspace(-3.0, 3.0, 7)
STRETCHES = (SqrtStretch(), PowerStretch(0.5), LogStretch())
INVALID = (None, -np.inf, -1)
@pytest.mark.skipif(HAS_MATPLOTLIB, reason="matplotlib is installed")
def test_normalize_error_message():
with pytest.raises(
ImportError, match=r"matplotlib is required in order to use this class."
):
ImageNormalize()
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="requires matplotlib")
class TestNormalize:
def test_invalid_interval(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2.0, vmax=10.0, interval=ManualInterval, clip=True)
def test_invalid_stretch(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2.0, vmax=10.0, stretch=SqrtStretch, clip=True)
def test_stretch_none(self):
with pytest.raises(ValueError):
ImageNormalize(vmin=2.0, vmax=10.0, stretch=None)
def test_scalar(self):
norm = ImageNormalize(vmin=2.0, vmax=10.0, stretch=SqrtStretch(), clip=True)
norm2 = ImageNormalize(
data=6, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=True
)
assert_allclose(norm(6), 0.70710678)
assert_allclose(norm(6), norm2(6))
def test_clip(self):
norm = ImageNormalize(vmin=2.0, vmax=10.0, stretch=SqrtStretch(), clip=True)
norm2 = ImageNormalize(
DATA, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=True
)
output = norm(DATA)
expected = [0.0, 0.35355339, 0.70710678, 0.93541435, 1.0, 1.0]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(DATA))
def test_noclip(self):
norm = ImageNormalize(
vmin=2.0, vmax=10.0, stretch=SqrtStretch(), clip=False, invalid=None
)
norm2 = ImageNormalize(
DATA,
interval=ManualInterval(2, 10),
stretch=SqrtStretch(),
clip=False,
invalid=None,
)
output = norm(DATA)
expected = [np.nan, 0.35355339, 0.70710678, 0.93541435, 1.11803399, 1.27475488]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(DATA))
def test_implicit_autoscale(self):
norm = ImageNormalize(vmin=None, vmax=10.0, stretch=SqrtStretch(), clip=False)
norm2 = ImageNormalize(
DATA, interval=ManualInterval(None, 10), stretch=SqrtStretch(), clip=False
)
output = norm(DATA)
assert norm.vmin == np.min(DATA)
assert norm.vmax == 10.0
assert_allclose(output, norm2(DATA))
norm = ImageNormalize(vmin=2.0, vmax=None, stretch=SqrtStretch(), clip=False)
norm2 = ImageNormalize(
DATA, interval=ManualInterval(2, None), stretch=SqrtStretch(), clip=False
)
output = norm(DATA)
assert norm.vmin == 2.0
assert norm.vmax == np.max(DATA)
assert_allclose(output, norm2(DATA))
def test_call_clip(self):
"""Test that the clip keyword is used when calling the object."""
data = np.arange(5)
norm = ImageNormalize(vmin=1.0, vmax=3.0, clip=False)
output = norm(data, clip=True)
assert_equal(output.data, [0, 0, 0.5, 1.0, 1.0])
assert np.all(~output.mask)
output = norm(data, clip=False)
assert_equal(output.data, [-0.5, 0, 0.5, 1.0, 1.5])
assert np.all(~output.mask)
def test_masked_clip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2.0, vmax=10.0, stretch=SqrtStretch(), clip=True)
norm2 = ImageNormalize(
mdata, interval=ManualInterval(2, 10), stretch=SqrtStretch(), clip=True
)
output = norm(mdata)
expected = [0.0, 0.35355339, 1.0, 0.93541435, 1.0, 1.0]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(mdata))
def test_masked_noclip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(
vmin=2.0, vmax=10.0, stretch=SqrtStretch(), clip=False, invalid=None
)
norm2 = ImageNormalize(
mdata,
interval=ManualInterval(2, 10),
stretch=SqrtStretch(),
clip=False,
invalid=None,
)
output = norm(mdata)
expected = [np.nan, 0.35355339, -10, 0.93541435, 1.11803399, 1.27475488]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 1, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(mdata))
def test_invalid_data(self):
data = np.arange(25.0).reshape((5, 5))
data[2, 2] = np.nan
data[1, 2] = np.inf
percent = 85.0
interval = PercentileInterval(percent)
# initialized without data
norm = ImageNormalize(interval=interval)
norm(data) # sets vmin/vmax
assert_equal((norm.vmin, norm.vmax), (1.65, 22.35))
# initialized with data
norm2 = ImageNormalize(data, interval=interval)
assert_equal((norm2.vmin, norm2.vmax), (norm.vmin, norm.vmax))
norm3 = simple_norm(data, "linear", percent=percent)
assert_equal((norm3.vmin, norm3.vmax), (norm.vmin, norm.vmax))
assert_allclose(norm(data), norm2(data))
assert_allclose(norm(data), norm3(data))
norm4 = ImageNormalize()
norm4(data) # sets vmin/vmax
assert_equal((norm4.vmin, norm4.vmax), (0, 24))
norm5 = ImageNormalize(data)
assert_equal((norm5.vmin, norm5.vmax), (norm4.vmin, norm4.vmax))
@pytest.mark.parametrize("stretch", STRETCHES)
def test_invalid_keyword(self, stretch):
norm1 = ImageNormalize(
stretch=stretch, vmin=-1, vmax=1, clip=False, invalid=None
)
norm2 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False)
norm3 = ImageNormalize(
DATA3, stretch=stretch, vmin=-1, vmax=1, clip=False, invalid=-1.0
)
result1 = norm1(DATA3)
result2 = norm2(DATA3)
result3 = norm3(DATA3)
assert_equal(result1[0:2], (np.nan, np.nan))
assert_equal(result2[0:2], (-1.0, -1.0))
assert_equal(result1[2:], result2[2:])
assert_equal(result2, result3)
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="requires matplotlib")
class TestImageScaling:
def test_linear(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch="linear")
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.0e-5)
def test_sqrt(self):
"""Test sqrt scaling."""
norm1 = simple_norm(DATA2, stretch="sqrt")
assert_allclose(norm1(DATA2), np.sqrt(DATA2SCL), atol=0, rtol=1.0e-5)
@pytest.mark.parametrize("invalid", INVALID)
def test_sqrt_invalid_kw(self, invalid):
stretch = SqrtStretch()
norm1 = simple_norm(
DATA3, stretch="sqrt", min_cut=-1, max_cut=1, clip=False, invalid=invalid
)
norm2 = ImageNormalize(
stretch=stretch, vmin=-1, vmax=1, clip=False, invalid=invalid
)
assert_equal(norm1(DATA3), norm2(DATA3))
def test_power(self):
"""Test power scaling."""
power = 3.0
norm = simple_norm(DATA2, stretch="power", power=power)
assert_allclose(norm(DATA2), DATA2SCL**power, atol=0, rtol=1.0e-5)
def test_log(self):
"""Test log10 scaling."""
norm = simple_norm(DATA2, stretch="log")
ref = np.log10(1000 * DATA2SCL + 1.0) / np.log10(1001.0)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.0e-5)
def test_log_with_log_a(self):
"""Test log10 scaling with a custom log_a."""
log_a = 100
norm = simple_norm(DATA2, stretch="log", log_a=log_a)
ref = np.log10(log_a * DATA2SCL + 1.0) / np.log10(log_a + 1)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.0e-5)
def test_asinh(self):
"""Test asinh scaling."""
norm = simple_norm(DATA2, stretch="asinh")
ref = np.arcsinh(10 * DATA2SCL) / np.arcsinh(10)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.0e-5)
def test_asinh_with_asinh_a(self):
"""Test asinh scaling with a custom asinh_a."""
asinh_a = 0.5
norm = simple_norm(DATA2, stretch="asinh", asinh_a=asinh_a)
ref = np.arcsinh(DATA2SCL / asinh_a) / np.arcsinh(1.0 / asinh_a)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.0e-5)
def test_sinh(self):
"""Test sinh scaling."""
sinh_a = 0.5
norm = simple_norm(DATA2, stretch="sinh", sinh_a=sinh_a)
ref = np.sinh(DATA2SCL / sinh_a) / np.sinh(1 / sinh_a)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.0e-5)
def test_min(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch="linear", min_cut=1.0, clip=True)
assert_allclose(norm(DATA2), [0.0, 0.0, 1.0], atol=0, rtol=1.0e-5)
def test_percent(self):
"""Test percent keywords."""
norm = simple_norm(DATA2, stretch="linear", percent=99.0, clip=True)
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.0e-5)
norm2 = simple_norm(
DATA2, stretch="linear", min_percent=0.5, max_percent=99.5, clip=True
)
assert_allclose(norm(DATA2), norm2(DATA2), atol=0, rtol=1.0e-5)
def test_invalid_stretch(self):
"""Test invalid stretch keyword."""
with pytest.raises(ValueError):
simple_norm(DATA2, stretch="invalid")
@pytest.mark.skipif(not HAS_PLT, reason="requires matplotlib.pyplot")
def test_imshow_norm():
import matplotlib.pyplot as plt
image = np.random.randn(10, 10)
plt.clf()
ax = plt.subplot(label="test_imshow_norm")
imshow_norm(image, ax=ax)
with pytest.raises(ValueError):
# X and data are the same, can't give both
imshow_norm(image, X=image, ax=ax)
with pytest.raises(ValueError):
# illegal to manually pass in normalization since that defeats the point
imshow_norm(image, ax=ax, norm=ImageNormalize())
plt.clf()
imshow_norm(image, ax=ax, vmin=0, vmax=1)
# make sure the pyplot version works
plt.clf()
imres, norm = imshow_norm(image, ax=None)
assert isinstance(norm, ImageNormalize)
plt.close("all")
|
ea737c446dfab7356a06146219061473d8d039e991661081efac95e3f677b19c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
pytest.importorskip("matplotlib")
from contextlib import nullcontext
import matplotlib.dates
import matplotlib.pyplot as plt
from erfa import ErfaWarning
from astropy.time import Time
from astropy.visualization.time import time_support
# Matplotlib 3.3 added a settable epoch for plot dates and changed the default
# from 0000-12-31 to 1970-01-01. This can be checked by the existence of
# get_epoch() in matplotlib.dates.
MPL_EPOCH_1970 = hasattr(matplotlib.dates, "get_epoch")
# Since some of the examples below use times/dates in the future, we use the
# TAI time scale to avoid ERFA warnings about dubious years.
DEFAULT_SCALE = "tai"
def get_ticklabels(axis):
axis.figure.canvas.draw()
return [x.get_text() for x in axis.get_ticklabels()]
def teardown_function(function):
plt.close("all")
# We first check that we get the expected labels for different time intervals
# for standard ISO formatting. This is a way to check both the locator and
# formatter code.
RANGE_CASES = [
# Interval of many years
(
("2014-03-22T12:30:30.9", "2077-03-22T12:30:32.1"),
["2020-01-01", "2040-01-01", "2060-01-01"],
),
# Interval of a few years
(
("2014-03-22T12:30:30.9", "2017-03-22T12:30:32.1"),
["2015-01-01", "2016-01-01", "2017-01-01"],
),
# Interval of just under a year
(("2014-03-22T12:30:30.9", "2015-01-22T12:30:32.1"), ["2014-05-01", "2014-10-01"]),
# Interval of a few months
(
("2014-11-22T12:30:30.9", "2015-02-22T12:30:32.1"),
["2014-12-01", "2015-01-01", "2015-02-01"],
),
# Interval of just over a month
(("2014-03-22T12:30:30.9", "2014-04-23T12:30:32.1"), ["2014-04-01"]),
# Interval of just under a month
(
("2014-03-22T12:30:30.9", "2014-04-21T12:30:32.1"),
["2014-03-24", "2014-04-03", "2014-04-13"],
),
# Interval of just over an hour
(
("2014-03-22T12:30:30.9", "2014-03-22T13:31:30.9"),
[
"2014-03-22T12:40:00.000",
"2014-03-22T13:00:00.000",
"2014-03-22T13:20:00.000",
],
),
# Interval of just under an hour
(
("2014-03-22T12:30:30.9", "2014-03-22T13:28:30.9"),
[
"2014-03-22T12:40:00.000",
"2014-03-22T13:00:00.000",
"2014-03-22T13:20:00.000",
],
),
# Interval of a few minutes
(
("2014-03-22T12:30:30.9", "2014-03-22T12:38:30.9"),
["2014-03-22T12:33:00.000", "2014-03-22T12:36:00.000"],
),
# Interval of a few seconds
(
("2014-03-22T12:30:30.9", "2014-03-22T12:30:40.9"),
[
"2014-03-22T12:30:33.000",
"2014-03-22T12:30:36.000",
"2014-03-22T12:30:39.000",
],
),
# Interval of a couple of seconds
(
("2014-03-22T12:30:30.9", "2014-03-22T12:30:32.1"),
[
"2014-03-22T12:30:31.000",
"2014-03-22T12:30:31.500",
"2014-03-22T12:30:32.000",
],
),
# Interval of under a second
(
("2014-03-22T12:30:30.89", "2014-03-22T12:30:31.19"),
[
"2014-03-22T12:30:30.900",
"2014-03-22T12:30:31.000",
"2014-03-22T12:30:31.100",
],
),
]
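# Minimal sketch of what time_support() enables (mirrors the cases above;
# the format and scale values are example choices): inside the context
# manager, Time objects can be passed straight to matplotlib, and the
# locator and formatter produce tick labels like those in RANGE_CASES.
def _example_time_support():
    with time_support(format="isot", scale=DEFAULT_SCALE):
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(Time(["2020-01-01", "2021-01-01"], scale=DEFAULT_SCALE), [1, 2])
        return fig, ax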
@pytest.mark.parametrize(("interval", "expected"), RANGE_CASES)
def test_formatter_locator(interval, expected):
# Check that the ticks and labels returned for the above cases are correct.
with time_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(
Time(interval[0], scale=DEFAULT_SCALE),
Time(interval[1], scale=DEFAULT_SCALE),
)
assert get_ticklabels(ax.xaxis) == expected
FORMAT_CASES = [
("byear", ["2020", "2040", "2060"]),
("byear_str", ["B2020.000", "B2040.000", "B2060.000"]),
("cxcsec", ["1000000000", "1500000000", "2000000000", "2500000000"]),
("decimalyear", ["2020", "2040", "2060"]),
(
"fits",
[
"2020-01-01T00:00:00.000",
"2040-01-01T00:00:00.000",
"2060-01-01T00:00:00.000",
],
),
("gps", ["1500000000", "2000000000", "2500000000", "3000000000"]),
(
"iso",
[
"2020-01-01 00:00:00.000",
"2040-01-01 00:00:00.000",
"2060-01-01 00:00:00.000",
],
),
(
"isot",
[
"2020-01-01T00:00:00.000",
"2040-01-01T00:00:00.000",
"2060-01-01T00:00:00.000",
],
),
("jd", ["2458000", "2464000", "2470000", "2476000"]),
("jyear", ["2020", "2040", "2060"]),
("jyear_str", ["J2020.000", "J2040.000", "J2060.000"]),
("mjd", ["60000", "66000", "72000", "78000"]),
(
"plot_date",
(
["18000", "24000", "30000", "36000"]
if MPL_EPOCH_1970
else ["738000", "744000", "750000", "756000"]
),
),
("unix", ["1500000000", "2000000000", "2500000000", "3000000000"]),
(
"yday",
["2020:001:00:00:00.000", "2040:001:00:00:00.000", "2060:001:00:00:00.000"],
),
]
@pytest.mark.parametrize(("format", "expected"), FORMAT_CASES)
def test_formats(format, expected):
# Check that the locators/formatters work fine for all time formats
with time_support(format=format, simplify=False):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Getting unix time and plot_date requires going through a scale for
# which ERFA emits a warning about the date being dubious
with pytest.warns(ErfaWarning) if format in [
"unix",
"plot_date",
] else nullcontext():
ax.set_xlim(
Time("2014-03-22T12:30:30.9", scale=DEFAULT_SCALE),
Time("2077-03-22T12:30:32.1", scale=DEFAULT_SCALE),
)
assert get_ticklabels(ax.xaxis) == expected
assert ax.get_xlabel() == f"Time ({format})"
@pytest.mark.parametrize(("format", "expected"), FORMAT_CASES)
def test_auto_formats(format, expected):
# Check that the format/scale is taken from the first time used.
with time_support(simplify=False):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Getting unix time and plot_date requires going through a scale for
# which ERFA emits a warning about the date being dubious
with pytest.warns(ErfaWarning) if format in [
"unix",
"plot_date",
] else nullcontext():
ax.set_xlim(
Time(Time("2014-03-22T12:30:30.9", scale=DEFAULT_SCALE), format=format),
Time("2077-03-22T12:30:32.1", scale=DEFAULT_SCALE),
)
assert get_ticklabels(ax.xaxis) == expected
assert ax.get_xlabel() == f"Time ({format})"
FORMAT_CASES_SIMPLIFY = [
("fits", ["2020-01-01", "2040-01-01", "2060-01-01"]),
("iso", ["2020-01-01", "2040-01-01", "2060-01-01"]),
("isot", ["2020-01-01", "2040-01-01", "2060-01-01"]),
("yday", ["2020", "2040", "2060"]),
]
@pytest.mark.parametrize(("format", "expected"), FORMAT_CASES_SIMPLIFY)
def test_formats_simplify(format, expected):
# Check the use of the simplify= option
with time_support(format=format, simplify=True):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(
Time("2014-03-22T12:30:30.9", scale=DEFAULT_SCALE),
Time("2077-03-22T12:30:32.1", scale=DEFAULT_SCALE),
)
assert get_ticklabels(ax.xaxis) == expected
def test_plot():
# Make sure that plot() works properly
with time_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(
Time("2014-03-22T12:30:30.9", scale=DEFAULT_SCALE),
Time("2077-03-22T12:30:32.1", scale=DEFAULT_SCALE),
)
ax.plot(
Time(
[
"2015-03-22T12:30:30.9",
"2018-03-22T12:30:30.9",
"2021-03-22T12:30:30.9",
],
scale=DEFAULT_SCALE,
)
)
def test_nested():
with time_support(format="iso", simplify=False):
with time_support(format="yday", simplify=True):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(
Time("2014-03-22T12:30:30.9", scale=DEFAULT_SCALE),
Time("2077-03-22T12:30:32.1", scale=DEFAULT_SCALE),
)
assert get_ticklabels(ax.xaxis) == ["2020", "2040", "2060"]
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(
Time("2014-03-22T12:30:30.9", scale=DEFAULT_SCALE),
Time("2077-03-22T12:30:32.1", scale=DEFAULT_SCALE),
)
assert get_ticklabels(ax.xaxis) == [
"2020-01-01 00:00:00.000",
"2040-01-01 00:00:00.000",
"2060-01-01 00:00:00.000",
]
|
e5dff3111595192eb15a7e444ca02eda1e6571d6eda1fc23700f85ade58fc0f9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.io import fits
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB
if HAS_MATPLOTLIB:
import matplotlib.image as mpimg
from astropy.visualization.scripts.fits2bitmap import fits2bitmap, main
@pytest.mark.skipif(not HAS_MATPLOTLIB, reason="requires matplotlib")
class TestFits2Bitmap:
def setup_class(self):
self.filename = "test.fits"
self.array = np.arange(16384).reshape((128, 128))
@pytest.mark.openfiles_ignore
def test_function(self, tmp_path):
filename = tmp_path / self.filename
fits.writeto(filename, self.array)
fits2bitmap(filename)
@pytest.mark.openfiles_ignore
def test_script(self, tmp_path):
filename = str(tmp_path / self.filename)
fits.writeto(filename, self.array)
main([filename, "-e", "0"])
@pytest.mark.openfiles_ignore
def test_exten_num(self, tmp_path):
filename = str(tmp_path / self.filename)
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(self.array)
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, "-e", "1"])
@pytest.mark.openfiles_ignore
def test_exten_name(self, tmp_path):
filename = str(tmp_path / self.filename)
hdu1 = fits.PrimaryHDU()
extname = "SCI"
hdu2 = fits.ImageHDU(self.array)
hdu2.header["EXTNAME"] = extname
hdulist = fits.HDUList([hdu1, hdu2])
hdulist.writeto(filename)
main([filename, "-e", extname])
@pytest.mark.parametrize("file_exten", [".gz", ".bz2"])
def test_compressed_fits(self, tmp_path, file_exten):
filename = str(tmp_path / f"test.fits{file_exten}")
fits.writeto(filename, self.array)
main([filename, "-e", "0"])
@pytest.mark.openfiles_ignore
def test_orientation(self, tmp_path):
"""
Regression test to check the image vertical orientation/origin.
"""
filename = str(tmp_path / self.filename)
out_filename = "fits2bitmap_test.png"
out_filename = str(tmp_path / out_filename)
data = np.zeros((32, 32))
data[0:16, :] = 1.0
fits.writeto(filename, data)
main([filename, "-e", "0", "-o", out_filename])
img = mpimg.imread(out_filename)
assert img[0, 0, 0] == 0
assert img[31, 31, 0] == 1
|
8e0bbf5ab82acba814436d4112636886fb224480a363089335516679c082c399 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.contour import QuadContourSet
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.visualization.wcsaxes.frame import (
EllipticalFrame,
RectangularFrame,
RectangularFrame1D,
)
from astropy.visualization.wcsaxes.transforms import CurvedTransform
from astropy.visualization.wcsaxes.utils import get_coord_meta
from astropy.wcs import WCS
from astropy.wcs.wcsapi import HighLevelWCSWrapper, SlicedLowLevelWCS
ft_version = Version(matplotlib.ft2font.__freetype_version__)
FREETYPE_261 = ft_version == Version("2.6.1")
# We cannot use matplotlib.checkdep_usetex() anymore, see
# https://github.com/matplotlib/matplotlib/issues/23244
TEX_UNAVAILABLE = True
MATPLOTLIB_DEV = Version(matplotlib.__version__).is_devrelease
def teardown_function(function):
plt.close("all")
def test_grid_regression(ignore_matplotlibrc):
# Regression test for a bug that meant that if the rc parameter
# axes.grid was set to True, WCSAxes would crash upon initialization.
plt.rc("axes", grid=True)
fig = plt.figure(figsize=(3, 3))
WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
def test_format_coord_regression(ignore_matplotlibrc, tmp_path):
# Regression test for a bug that meant that if format_coord was called by
# Matplotlib before the axes were drawn, an error occurred.
fig = plt.figure(figsize=(3, 3))
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
assert ax.format_coord(10, 10) == ""
assert ax.coords[0].format_coord(10) == ""
assert ax.coords[1].format_coord(10) == ""
fig.savefig(tmp_path / "nothing")
assert ax.format_coord(10, 10) == "10.0 10.0 (world)"
assert ax.coords[0].format_coord(10) == "10.0"
assert ax.coords[1].format_coord(10) == "10.0"
TARGET_HEADER = fits.Header.fromstring(
"""
NAXIS = 2
NAXIS1 = 200
NAXIS2 = 100
CTYPE1 = 'RA---MOL'
CRPIX1 = 500
CRVAL1 = 180.0
CDELT1 = -0.4
CUNIT1 = 'deg '
CTYPE2 = 'DEC--MOL'
CRPIX2 = 400
CRVAL2 = 0.0
CDELT2 = 0.4
CUNIT2 = 'deg '
COORDSYS= 'icrs '
""",
sep="\n",
)
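# Illustrative sketch (mirrors what many tests below do): a WCSAxes is
# typically created through the ``projection`` keyword with a WCS built
# from a FITS header such as TARGET_HEADER.
def _example_axes_from_header():
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
    return fig, ax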
@pytest.mark.parametrize("grid_type", ["lines", "contours"])
def test_no_numpy_warnings(ignore_matplotlibrc, tmp_path, grid_type):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.imshow(np.zeros((100, 200)))
ax.coords.grid(color="white", grid_type=grid_type)
# There should be no warnings raised if some pixels are outside WCS
# (since this is normal).
# BUT our own catch_warning was ignoring some warnings before, so now we
# have to catch it. Otherwise, the pytest filterwarnings=error
# setting in setup.cfg will fail this test.
# There are actually multiple warnings but they are all similar.
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=r".*converting a masked element to nan.*"
)
warnings.filterwarnings(
"ignore", message=r".*No contour levels were found within the data range.*"
)
warnings.filterwarnings(
"ignore", message=r".*np\.asscalar\(a\) is deprecated since NumPy v1\.16.*"
)
warnings.filterwarnings(
"ignore", message=r".*PY_SSIZE_T_CLEAN will be required.*"
)
fig.savefig(tmp_path / "test.png")
def test_invalid_frame_overlay(ignore_matplotlibrc):
# Make sure a nice error is returned if a frame doesn't exist
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
with pytest.raises(ValueError, match=r"Frame banana not found"):
ax.get_coords_overlay("banana")
with pytest.raises(ValueError, match=r"Unknown frame: banana"):
get_coord_meta("banana")
def test_plot_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8], projection=WCS(twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223 * u.deg, 0.26876217 * u.deg)
with pytest.raises(TypeError):
ax.plot_coord(c, "o", transform=ax.get_transform("galactic"))
def test_scatter_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8], projection=WCS(twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223 * u.deg, 0.26876217 * u.deg)
with pytest.raises(TypeError):
ax.scatter_coord(c, marker="o", transform=ax.get_transform("galactic"))
def test_set_label_properties(ignore_matplotlibrc):
# Regression test to make sure that arguments passed to
# set_xlabel/set_ylabel are passed to the underlying coordinate helpers
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.set_xlabel("Test x label", labelpad=2, color="red")
ax.set_ylabel("Test y label", labelpad=3, color="green")
assert ax.coords[0].axislabels.get_text() == "Test x label"
assert ax.coords[0].axislabels.get_minpad("b") == 2
assert ax.coords[0].axislabels.get_color() == "red"
assert ax.coords[1].axislabels.get_text() == "Test y label"
assert ax.coords[1].axislabels.get_minpad("l") == 3
assert ax.coords[1].axislabels.get_color() == "green"
assert ax.get_xlabel() == "Test x label"
assert ax.get_ylabel() == "Test y label"
GAL_HEADER = fits.Header.fromstring(
"""
SIMPLE = T / conforms to FITS standard
BITPIX = -32 / array data type
NAXIS = 3 / number of array dimensions
NAXIS1 = 31
NAXIS2 = 2881
NAXIS3 = 480
EXTEND = T
CTYPE1 = 'DISTMOD '
CRVAL1 = 3.5
CDELT1 = 0.5
CRPIX1 = 1.0
CTYPE2 = 'GLON-CAR'
CRVAL2 = 180.0
CDELT2 = -0.125
CRPIX2 = 1.0
CTYPE3 = 'GLAT-CAR'
CRVAL3 = 0.0
CDELT3 = 0.125
CRPIX3 = 241.0
""",
sep="\n",
)
def test_slicing_warnings(ignore_matplotlibrc, tmp_path):
# Regression test to make sure that no warnings are emitted by the tick
# locator for the sliced axis when slicing a cube.
# Scalar case
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
wcs3d.wcs.cunit = ["deg", "deg", "km/s"]
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0.0, 0.0, 1.0]
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings("ignore", message=r".*PY_SSIZE_T_CLEAN.*")
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
plt.savefig(tmp_path / "test.png")
# Angle case
wcs3d = WCS(GAL_HEADER)
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings("ignore", message=r".*PY_SSIZE_T_CLEAN.*")
plt.clf()
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 2))
plt.savefig(tmp_path / "test.png")
def test_plt_xlabel_ylabel(tmp_path):
# Regression test for a bug that happened when using plt.xlabel
# and plt.ylabel with Matplotlib 3.0
plt.subplot(projection=WCS())
plt.xlabel("Galactic Longitude")
plt.ylabel("Galactic Latitude")
plt.savefig(tmp_path / "test.png")
def test_grid_type_contours_transform(tmp_path):
# Regression test for a bug that caused grid_type='contours' to not work
# with custom transforms
class CustomTransform(CurvedTransform):
# We deliberately don't define the inverse, and has_inverse should
# default to False.
def transform(self, values):
return values * 1.3
transform = CustomTransform()
coord_meta = {
"type": ("scalar", "scalar"),
"unit": (u.m, u.s),
"wrap": (None, None),
"name": ("x", "y"),
}
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], transform=transform, coord_meta=coord_meta)
fig.add_axes(ax)
ax.grid(grid_type="contours")
fig.savefig(tmp_path / "test.png")
def test_plt_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# plt.imshow was called.
ax = plt.subplot(projection=WCS())
plt.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_ax_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# ax.imshow was called with no origin
ax = plt.subplot(projection=WCS())
ax.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_grid_contour_large_spacing(tmp_path):
    # Regression test for a bug that caused a crash when grid was called,
    # produced no grid lines (e.g. because the spacing was too large), and
    # was then called again.
filename = tmp_path / "test.png"
ax = plt.subplot(projection=WCS())
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
ax.coords[0].set_ticks(values=[] * u.one)
ax.coords[0].grid(grid_type="contours")
plt.savefig(filename)
ax.coords[0].grid(grid_type="contours")
plt.savefig(filename)
def test_contour_return():
# Regression test for a bug that caused contour and contourf to return None
# instead of the contour object.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform("world"))
assert isinstance(cset, QuadContourSet)
cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform("world"))
assert isinstance(cset, QuadContourSet)
def test_contour_empty():
# Regression test for a bug that caused contour to crash if no contours
# were present.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
with pytest.warns(
UserWarning, match="No contour levels were found within the data range"
):
ax.contour(np.zeros((4, 4)), transform=ax.get_transform("world"))
def test_iterate_coords(ignore_matplotlibrc):
# Regression test for a bug that caused ax.coords to return too few axes
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
wcs3d.wcs.cunit = ["deg", "deg", "km/s"]
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0.0, 0.0, 1.0]
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
x, y, z = ax.coords
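    # The unpacking above is itself the assertion: it raises ValueError if
    # ax.coords yields anything other than three coordinate helpers.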
def test_invalid_slices_errors(ignore_matplotlibrc):
# Make sure that users get a clear message when specifying a WCS with
# >2 dimensions without giving the 'slices' argument, or if the 'slices'
# argument has too many/few elements.
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ["x", "y", "z"]
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
with pytest.raises(
ValueError,
match=r"WCS has more than 2 pixel dimensions, so 'slices' should be set",
):
plt.subplot(1, 1, 1, projection=wcs3d)
with pytest.raises(
ValueError,
match=(
r"'slices' should have as many elements as WCS has pixel dimensions .should"
r" be 3."
),
):
plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1, 2))
wcs2d = WCS(naxis=2)
wcs2d.wcs.ctype = ["x", "y"]
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d)
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=("x", "y"))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=("y", "x"))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=["x", "y"])
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, "x"))
assert ax.frame_class is RectangularFrame1D
wcs1d = WCS(naxis=1)
wcs1d.wcs.ctype = ["x"]
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs1d)
assert ax.frame_class is RectangularFrame1D
with pytest.raises(ValueError):
plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, "y"))
EXPECTED_REPR_1 = """
<CoordinatesMap with 3 world coordinates:
index aliases type unit wrap format_unit visible
----- ------------------------------ --------- ---- ---- ----------- -------
0 distmod dist scalar None no
1 pos.galactic.lon glon-car glon longitude deg 360 deg yes
2 pos.galactic.lat glat-car glat latitude deg None deg yes
>
""".strip()
EXPECTED_REPR_2 = """
<CoordinatesMap with 3 world coordinates:
index aliases type unit wrap format_unit visible
----- ------------------------------ --------- ---- ---- ----------- -------
0 distmod dist scalar None yes
1 pos.galactic.lon glon-car glon longitude deg 360 deg yes
2 pos.galactic.lat glat-car glat latitude deg None deg yes
>
""".strip()
def test_repr(ignore_matplotlibrc):
# Unit test to make sure __repr__ looks as expected
wcs3d = WCS(GAL_HEADER)
# Cube header has world coordinates as distance, lon, lat, so start off
# by slicing in a way that we select just lon,lat:
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=(1, "x", "y"))
assert repr(ax.coords) == EXPECTED_REPR_1
# Now slice in a way that all world coordinates are still present:
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=("x", "y", 1))
assert repr(ax.coords) == EXPECTED_REPR_2
@pytest.fixture
def time_spectral_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["FREQ", "TIME"]
wcs.wcs.set()
return wcs
def test_time_wcs(time_spectral_wcs_2d):
# Regression test for a bug that caused WCSAxes to error when using a WCS
# with a time axis.
plt.subplot(projection=time_spectral_wcs_2d)
@pytest.mark.skipif(TEX_UNAVAILABLE, reason="TeX is unavailable")
def test_simplify_labels_usetex(ignore_matplotlibrc, tmp_path):
"""Regression test for https://github.com/astropy/astropy/issues/8004."""
plt.rc("text", usetex=True)
header = {
"NAXIS": 2,
"NAXIS1": 360,
"NAXIS2": 180,
"CRPIX1": 180.5,
"CRPIX2": 90.5,
"CRVAL1": 180.0,
"CRVAL2": 0.0,
"CDELT1": -2 * np.sqrt(2) / np.pi,
"CDELT2": 2 * np.sqrt(2) / np.pi,
"CTYPE1": "RA---MOL",
"CTYPE2": "DEC--MOL",
"RADESYS": "ICRS",
}
wcs = WCS(header)
fig, ax = plt.subplots(subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs))
ax.set_xlim(-0.5, header["NAXIS1"] - 0.5)
ax.set_ylim(-0.5, header["NAXIS2"] - 0.5)
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[0].set_ticks(spacing=45 * u.deg)
ax.coords[1].set_ticks(spacing=30 * u.deg)
ax.grid()
fig.savefig(tmp_path / "plot.png")
@pytest.mark.parametrize("frame_class", [RectangularFrame, EllipticalFrame])
def test_set_labels_with_coords(ignore_matplotlibrc, frame_class):
"""Test if ``axis.set_xlabel()`` calls the correct ``coords[i]_set_axislabel()`` in a
WCS plot. Regression test for https://github.com/astropy/astropy/issues/10435.
"""
labels = ["RA", "Declination"]
header = {
"NAXIS": 2,
"NAXIS1": 360,
"NAXIS2": 180,
"CRPIX1": 180.5,
"CRPIX2": 90.5,
"CRVAL1": 180.0,
"CRVAL2": 0.0,
"CDELT1": -2 * np.sqrt(2) / np.pi,
"CDELT2": 2 * np.sqrt(2) / np.pi,
"CTYPE1": "RA---AIT",
"CTYPE2": "DEC--AIT",
}
wcs = WCS(header)
fig, ax = plt.subplots(subplot_kw=dict(frame_class=frame_class, projection=wcs))
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
assert ax.get_xlabel() == labels[0]
assert ax.get_ylabel() == labels[1]
for i in range(2):
assert ax.coords[i].get_axislabel() == labels[i]
@pytest.mark.parametrize("atol", [0.2, 1.0e-8])
def test_bbox_size(atol):
    # Test for the size of a WCSAxes bbox (we only support Matplotlib >= 3.0 now)
extents = [11.38888888888889, 3.5, 576.0, 432.0]
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
fig.canvas.draw()
renderer = fig.canvas.renderer
ax_bbox = ax.get_tightbbox(renderer)
# Enforce strict test only with reference Freetype version
if atol < 0.1 and not FREETYPE_261:
pytest.xfail(
"Exact BoundingBox dimensions are only ensured with FreeType 2.6.1"
)
assert np.allclose(ax_bbox.extents, extents, atol=atol)
def test_wcs_type_transform_regression():
wcs = WCS(TARGET_HEADER)
sliced_wcs = SlicedLowLevelWCS(wcs, np.s_[1:-1, 1:-1])
ax = plt.subplot(1, 1, 1, projection=wcs)
ax.get_transform(sliced_wcs)
high_wcs = HighLevelWCSWrapper(sliced_wcs)
    ax.get_transform(high_wcs)
def test_multiple_draws_grid_contours(tmp_path):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
ax.grid(color="black", grid_type="contours")
fig.savefig(tmp_path / "plot.png")
fig.savefig(tmp_path / "plot.png")
|
c2a0cc762362d7f3897b8d4855299df99bb75517ca96e74211c9d68d3eaca7f6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_almost_equal
from astropy import units as u
from astropy.tests.helper import (
assert_quantity_allclose as assert_almost_equal_quantity,
)
from astropy.visualization.wcsaxes.utils import (
select_step_degree,
select_step_hour,
select_step_scalar,
)
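# These tests pin down the "nice step" selection used by the WCSAxes tick
# locators: given an approximate desired spacing, each helper snaps it to a
# nearby round value in the relevant system (sexagesimal degrees, hours, or
# plain scalars).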
def test_select_step_degree():
assert_almost_equal_quantity(select_step_degree(127 * u.deg), 180.0 * u.deg)
assert_almost_equal_quantity(select_step_degree(44 * u.deg), 45.0 * u.deg)
assert_almost_equal_quantity(select_step_degree(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(2 * u.arcmin), 2 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_degree(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(2.2 * u.arcsec), 2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.8 * u.arcsec), 1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.2 * u.arcsec), 0.2 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.11 * u.arcsec), 0.1 * u.arcsec)
assert_almost_equal_quantity(select_step_degree(0.022 * u.arcsec), 0.02 * u.arcsec)
assert_almost_equal_quantity(
select_step_degree(0.0043 * u.arcsec), 0.005 * u.arcsec
)
assert_almost_equal_quantity(
select_step_degree(0.00083 * u.arcsec), 0.001 * u.arcsec
)
assert_almost_equal_quantity(
select_step_degree(0.000027 * u.arcsec), 0.00002 * u.arcsec
)
def test_select_step_hour():
assert_almost_equal_quantity(select_step_hour(127 * u.deg), 8.0 * u.hourangle)
assert_almost_equal_quantity(select_step_hour(44 * u.deg), 3.0 * u.hourangle)
assert_almost_equal_quantity(select_step_hour(18 * u.arcmin), 15 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(3.4 * u.arcmin), 3 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(2 * u.arcmin), 1.5 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(59 * u.arcsec), 1 * u.arcmin)
assert_almost_equal_quantity(select_step_hour(33 * u.arcsec), 30 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(2.2 * u.arcsec), 3.0 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.8 * u.arcsec), 0.75 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.2 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.11 * u.arcsec), 0.15 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.022 * u.arcsec), 0.03 * u.arcsec)
assert_almost_equal_quantity(select_step_hour(0.0043 * u.arcsec), 0.003 * u.arcsec)
assert_almost_equal_quantity(
select_step_hour(0.00083 * u.arcsec), 0.00075 * u.arcsec
)
assert_almost_equal_quantity(
select_step_hour(0.000027 * u.arcsec), 0.00003 * u.arcsec
)
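# For plain scalars the selection appears to snap to the nearest 1, 2 or 5
# times a power of ten (compared in log space); the cases below exercise
# that across roughly ten orders of magnitude.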
def test_select_step_scalar():
assert_almost_equal(select_step_scalar(33122.0), 50000.0)
assert_almost_equal(select_step_scalar(433.0), 500.0)
assert_almost_equal(select_step_scalar(12.3), 10)
assert_almost_equal(select_step_scalar(3.3), 5.0)
assert_almost_equal(select_step_scalar(0.66), 0.5)
assert_almost_equal(select_step_scalar(0.0877), 0.1)
assert_almost_equal(select_step_scalar(0.00577), 0.005)
assert_almost_equal(select_step_scalar(0.00022), 0.0002)
assert_almost_equal(select_step_scalar(0.000012), 0.00001)
assert_almost_equal(select_step_scalar(0.000000443), 0.0000005)
|
00afe13a94db72822b57e6e2583f4e99eb6c4da656fd40626b185a8c008632dc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.pyplot as plt
import numpy as np
from astropy.tests.figures import figure_test
from astropy.visualization.wcsaxes import WCSAxes
from astropy.visualization.wcsaxes.frame import BaseFrame
from astropy.wcs import WCS
from .test_images import BaseImageTests
class HexagonalFrame(BaseFrame):
spine_names = "abcdef"
def update_spines(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
ymid = 0.5 * (ymin + ymax)
xmid1 = (xmin + xmax) / 4.0
xmid2 = (xmin + xmax) * 3.0 / 4.0
self["a"].data = np.array(([xmid1, ymin], [xmid2, ymin]))
self["b"].data = np.array(([xmid2, ymin], [xmax, ymid]))
self["c"].data = np.array(([xmax, ymid], [xmid2, ymax]))
self["d"].data = np.array(([xmid2, ymax], [xmid1, ymax]))
self["e"].data = np.array(([xmid1, ymax], [xmin, ymid]))
self["f"].data = np.array(([xmin, ymid], [xmid1, ymin]))
class TestFrame(BaseImageTests):
@figure_test
def test_custom_frame(self):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=wcs, frame_class=HexagonalFrame)
fig.add_axes(ax)
ax.coords.grid(color="white")
im = ax.imshow(
np.ones((149, 149)),
vmin=0.0,
vmax=2.0,
origin="lower",
cmap=plt.cm.gist_heat,
)
minpad = {}
minpad["a"] = minpad["d"] = 1
minpad["b"] = minpad["c"] = minpad["e"] = minpad["f"] = 2.75
ax.coords["glon"].set_axislabel("Longitude", minpad=minpad)
ax.coords["glon"].set_axislabel_position("ad")
ax.coords["glat"].set_axislabel("Latitude", minpad=minpad)
ax.coords["glat"].set_axislabel_position("bcef")
ax.coords["glon"].set_ticklabel_position("ad")
ax.coords["glat"].set_ticklabel_position("bcef")
# Set limits so that no labels overlap
ax.set_xlim(5.5, 100.5)
ax.set_ylim(5.5, 110.5)
# Clip the image to the frame
im.set_clip_path(ax.coords.frame.patch)
return fig
@figure_test
def test_update_clip_path_rectangular(self, tmp_path):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect="equal")
fig.add_axes(ax)
ax.set_xlim(0.0, 2.0)
ax.set_ylim(0.0, 2.0)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmp_path / "nothing")
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
return fig
@figure_test
def test_update_clip_path_nonrectangular(self, tmp_path):
fig = plt.figure()
ax = WCSAxes(
fig, [0.1, 0.1, 0.8, 0.8], aspect="equal", frame_class=HexagonalFrame
)
fig.add_axes(ax)
ax.set_xlim(0.0, 2.0)
ax.set_ylim(0.0, 2.0)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmp_path / "nothing")
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
return fig
@figure_test
def test_update_clip_path_change_wcs(self, tmp_path):
# When WCS is changed, a new frame is created, so we need to make sure
# that the path is carried over to the new frame.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect="equal")
fig.add_axes(ax)
ax.set_xlim(0.0, 2.0)
ax.set_ylim(0.0, 2.0)
# Force drawing, which freezes the clip path returned by WCSAxes
fig.savefig(tmp_path / "nothing")
ax.reset_wcs()
ax.imshow(np.zeros((12, 4)))
ax.set_xlim(-0.5, 3.5)
ax.set_ylim(-0.5, 11.5)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
return fig
def test_copy_frame_properties_change_wcs(self):
# When WCS is changed, a new frame is created, so we need to make sure
# that the color and linewidth are transferred over
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
ax.coords.frame.set_linewidth(5)
ax.coords.frame.set_color("purple")
ax.reset_wcs()
assert ax.coords.frame.get_linewidth() == 5
assert ax.coords.frame.get_color() == "purple"
|
5a15f1b4c9bc87f122297579b34a7c357b878c1d517467d9a2bbf7103dcf9daf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.tests.figures import figure_test
from astropy.visualization.wcsaxes import WCSAxes
from astropy.visualization.wcsaxes.transforms import CurvedTransform
from astropy.wcs import WCS
from .test_images import BaseImageTests
# Create fake transforms that roughly mimic a polar projection
class DistanceToLonLat(CurvedTransform):
has_inverse = True
def __init__(self, R=6e3):
super().__init__()
self.R = R
def transform(self, xy):
x, y = xy[:, 0], xy[:, 1]
lam = np.degrees(np.arctan2(y, x))
phi = 90.0 - np.degrees(np.hypot(x, y) / self.R)
return np.array((lam, phi)).transpose()
transform_non_affine = transform
def inverted(self):
return LonLatToDistance(R=self.R)
class LonLatToDistance(CurvedTransform):
def __init__(self, R=6e3):
super().__init__()
self.R = R
def transform(self, lamphi):
lam, phi = lamphi[:, 0], lamphi[:, 1]
r = np.radians(90 - phi) * self.R
x = r * np.cos(np.radians(lam))
y = r * np.sin(np.radians(lam))
return np.array((x, y)).transpose()
transform_non_affine = transform
def inverted(self):
return DistanceToLonLat(R=self.R)
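# Minimal round-trip sanity check for the fake transforms above. This is an
# illustrative sketch added alongside the original figure tests; the sample
# points are arbitrary.
def test_fake_transform_roundtrip():
    s = DistanceToLonLat(R=6378.273)
    xy = np.array([[1000.0, 0.0], [0.0, 2000.0]])
    # Forward to (lon, lat) in degrees, then back through the inverse
    lonlat = s.transform(xy)
    np.testing.assert_allclose(s.inverted().transform(lonlat), xy, atol=1e-9)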
class TestTransformCoordMeta(BaseImageTests):
@figure_test
def test_coords_overlay(self):
# Set up a simple WCS that maps pixels to non-projected distances
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["x", "y"]
wcs.wcs.cunit = ["km", "km"]
wcs.wcs.crpix = [614.5, 856.5]
wcs.wcs.cdelt = [6.25, 6.25]
wcs.wcs.crval = [0.0, 0.0]
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=wcs)
fig.add_axes(ax)
s = DistanceToLonLat(R=6378.273)
ax.coords["x"].set_ticklabel_position("")
ax.coords["y"].set_ticklabel_position("")
coord_meta = {}
coord_meta["type"] = ("longitude", "latitude")
coord_meta["wrap"] = (360.0, None)
coord_meta["unit"] = (u.deg, u.deg)
coord_meta["name"] = "lon", "lat"
overlay = ax.get_coords_overlay(s, coord_meta=coord_meta)
overlay.grid(color="red")
overlay["lon"].grid(color="red", linestyle="solid", alpha=0.3)
overlay["lat"].grid(color="blue", linestyle="solid", alpha=0.3)
overlay["lon"].set_ticklabel(size=7, exclude_overlapping=True)
overlay["lat"].set_ticklabel(size=7, exclude_overlapping=True)
overlay["lon"].set_ticklabel_position("brtl")
overlay["lat"].set_ticklabel_position("brtl")
overlay["lon"].set_ticks(spacing=10.0 * u.deg)
overlay["lat"].set_ticks(spacing=10.0 * u.deg)
ax.set_xlim(-0.5, 1215.5)
ax.set_ylim(-0.5, 1791.5)
return fig
@figure_test
def test_coords_overlay_auto_coord_meta(self):
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], wcs=WCS(self.msx_header))
fig.add_axes(ax)
ax.grid(color="red", alpha=0.5, linestyle="solid")
overlay = ax.get_coords_overlay("fk5") # automatically sets coord_meta
overlay.grid(color="black", alpha=0.5, linestyle="solid")
overlay["ra"].set_ticks(color="black")
overlay["dec"].set_ticks(color="black")
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
return fig
@figure_test
def test_direct_init(self):
s = DistanceToLonLat(R=6378.273)
coord_meta = {}
coord_meta["type"] = ("longitude", "latitude")
coord_meta["wrap"] = (360.0, None)
coord_meta["unit"] = (u.deg, u.deg)
coord_meta["name"] = "lon", "lat"
fig = plt.figure(figsize=(4, 4))
ax = WCSAxes(fig, [0.15, 0.15, 0.7, 0.7], transform=s, coord_meta=coord_meta)
fig.add_axes(ax)
ax.coords["lon"].grid(color="red", linestyle="solid", alpha=0.3)
ax.coords["lat"].grid(color="blue", linestyle="solid", alpha=0.3)
ax.coords["lon"].set_auto_axislabel(False)
ax.coords["lat"].set_auto_axislabel(False)
ax.coords["lon"].set_ticklabel(size=7, exclude_overlapping=True)
ax.coords["lat"].set_ticklabel(size=7, exclude_overlapping=True)
ax.coords["lon"].set_ticklabel_position("brtl")
ax.coords["lat"].set_ticklabel_position("brtl")
ax.coords["lon"].set_ticks(spacing=10.0 * u.deg)
ax.coords["lat"].set_ticks(spacing=10.0 * u.deg)
ax.set_xlim(-400.0, 500.0)
ax.set_ylim(-300.0, 400.0)
return fig
|
17b4305211d96e7b2becee265fd3c34d7d5e77c888fba8490c336ea9d3c7d3a8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from matplotlib import rc_context
from numpy.testing import assert_almost_equal
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
from astropy.visualization.wcsaxes.formatter_locator import (
AngleFormatterLocator,
ScalarFormatterLocator,
)
class TestAngleFormatterLocator:
def test_no_options(self):
fl = AngleFormatterLocator()
assert fl.values is None
assert fl.number == 5
assert fl.spacing is None
def test_too_many_options(self):
MESSAGE = r"At most one of values/number/spacing can be specified"
with pytest.raises(ValueError, match=MESSAGE):
AngleFormatterLocator(values=[1.0, 2.0], number=5)
with pytest.raises(ValueError, match=MESSAGE):
AngleFormatterLocator(values=[1.0, 2.0], spacing=5.0 * u.deg)
with pytest.raises(ValueError, match=MESSAGE):
AngleFormatterLocator(number=5, spacing=5.0 * u.deg)
with pytest.raises(ValueError, match=MESSAGE):
AngleFormatterLocator(values=[1.0, 2.0], number=5, spacing=5.0 * u.deg)
def test_values(self):
fl = AngleFormatterLocator(values=[0.1, 1.0, 14.0] * u.degree)
assert fl.values.to_value(u.degree).tolist() == [0.1, 1.0, 14.0]
assert fl.number is None
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [0.1, 1.0, 14.0])
def test_number(self):
fl = AngleFormatterLocator(number=7)
assert fl.values is None
assert fl.number == 7
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.to_value(u.degree), [35.0, 40.0, 45.0, 50.0, 55.0])
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(
values.to_value(u.degree), [34.5, 34.75, 35.0, 35.25, 35.5, 35.75, 36.0]
)
fl.format = "dd"
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [35.0, 36.0])
def test_spacing(self):
with pytest.raises(
TypeError,
match=(
r"spacing should be an astropy.units.Quantity instance with units of"
r" angle"
),
):
AngleFormatterLocator(spacing=3.0)
fl = AngleFormatterLocator(spacing=3.0 * u.degree)
assert fl.values is None
assert fl.number is None
assert fl.spacing == 3.0 * u.degree
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(
values.to_value(u.degree), [36.0, 39.0, 42.0, 45.0, 48.0, 51.0, 54.0]
)
fl.spacing = 30.0 * u.arcmin
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [34.5, 35.0, 35.5, 36.0])
with pytest.warns(UserWarning, match=r"Spacing is too small"):
fl.format = "dd"
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.to_value(u.degree), [35.0, 36.0])
def test_minor_locator(self):
fl = AngleFormatterLocator()
values, spacing = fl.locator(34.3, 55.4)
minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
assert_almost_equal(
minor_values.to_value(u.degree),
[
36.0,
37.0,
38.0,
39.0,
41.0,
42.0,
43.0,
44.0,
46.0,
47.0,
48.0,
49.0,
51.0,
52.0,
53.0,
54.0,
],
)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [37.5, 42.5, 47.5, 52.5])
fl.values = [0.1, 1.0, 14.0] * u.degree
values, spacing = fl.locator(34.3, 36.1)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.to_value(u.degree), [])
@pytest.mark.parametrize(
("format", "string"),
[
("dd", "15\xb0"),
("dd:mm", "15\xb024'"),
("dd:mm:ss", "15\xb023'32\""),
("dd:mm:ss.s", "15\xb023'32.0\""),
("dd:mm:ss.ssss", "15\xb023'32.0316\""),
("hh", "1h"),
("hh:mm", "1h02m"),
("hh:mm:ss", "1h01m34s"),
("hh:mm:ss.s", "1h01m34.1s"),
("hh:mm:ss.ssss", "1h01m34.1354s"),
("d", "15\xb0"),
("d.d", "15.4\xb0"),
("d.dd", "15.39\xb0"),
("d.ddd", "15.392\xb0"),
("m", "924'"),
("m.m", "923.5'"),
("m.mm", "923.53'"),
("s", '55412"'),
("s.s", '55412.0"'),
("s.ss", '55412.03"'),
],
)
def test_format(self, format, string):
fl = AngleFormatterLocator(number=5, format=format)
assert fl.formatter([15.392231] * u.degree, None, format="ascii")[0] == string
@pytest.mark.parametrize(
("separator", "format", "string"),
[
(("deg", "'", '"'), "dd", "15deg"),
(("deg", "'", '"'), "dd:mm", "15deg24'"),
(("deg", "'", '"'), "dd:mm:ss", "15deg23'32\""),
((":", "-", "s"), "dd:mm:ss.s", "15:23-32.0s"),
(":", "dd:mm:ss.s", "15:23:32.0"),
((":", ":", "s"), "hh", "1:"),
(("-", "-", "s"), "hh:mm:ss.ssss", "1-01-34.1354s"),
(("d", ":", '"'), "d", "15\xb0"),
(("d", ":", '"'), "d.d", "15.4\xb0"),
],
)
def test_separator(self, separator, format, string):
fl = AngleFormatterLocator(number=5, format=format)
fl.sep = separator
assert fl.formatter([15.392231] * u.degree, None)[0] == string
def test_latex_format(self):
fl = AngleFormatterLocator(number=5, format="dd:mm:ss")
assert fl.formatter([15.392231] * u.degree, None)[0] == "15\xb023'32\""
with rc_context(rc={"text.usetex": True}):
assert (
fl.formatter([15.392231] * u.degree, None)[0]
== "$15^\\circ23{}^\\prime32{}^{\\prime\\prime}$"
)
@pytest.mark.parametrize("format", ["x.xxx", "dd.ss", "dd:ss", "mdd:mm:ss"])
def test_invalid_formats(self, format):
fl = AngleFormatterLocator(number=5)
with pytest.raises(ValueError, match=f"Invalid format: {format}"):
fl.format = format
@pytest.mark.parametrize(
("format", "base_spacing"),
[
("dd", 1.0 * u.deg),
("dd:mm", 1.0 * u.arcmin),
("dd:mm:ss", 1.0 * u.arcsec),
("dd:mm:ss.ss", 0.01 * u.arcsec),
("hh", 15.0 * u.deg),
("hh:mm", 15.0 * u.arcmin),
("hh:mm:ss", 15.0 * u.arcsec),
("hh:mm:ss.ss", 0.15 * u.arcsec),
("d", 1.0 * u.deg),
("d.d", 0.1 * u.deg),
("d.dd", 0.01 * u.deg),
("d.ddd", 0.001 * u.deg),
("m", 1.0 * u.arcmin),
("m.m", 0.1 * u.arcmin),
("m.mm", 0.01 * u.arcmin),
("s", 1.0 * u.arcsec),
("s.s", 0.1 * u.arcsec),
("s.ss", 0.01 * u.arcsec),
],
)
def test_base_spacing(self, format, base_spacing):
fl = AngleFormatterLocator(number=5, format=format)
assert fl.base_spacing == base_spacing
def test_incorrect_spacing(self):
fl = AngleFormatterLocator()
fl.spacing = 0.032 * u.deg
with pytest.warns(
UserWarning, match=r"Spacing is not a multiple of base spacing"
):
fl.format = "dd:mm:ss"
assert_almost_equal(fl.spacing.to_value(u.arcsec), 115.0)
def test_decimal_values(self):
# Regression test for a bug that meant that the spacing was not
# determined correctly for decimal coordinates
fl = AngleFormatterLocator()
fl.format = "d.dddd"
assert_quantity_allclose(
fl.locator(266.9730, 266.9750)[0],
[266.9735, 266.9740, 266.9745, 266.9750] * u.deg,
)
fl = AngleFormatterLocator(decimal=True, format_unit=u.hourangle, number=4)
assert_quantity_allclose(
fl.locator(266.9730, 266.9750)[0], [17.79825, 17.79830] * u.hourangle
)
def test_values_unit(self):
# Make sure that the intrinsic unit and format unit are correctly
# taken into account when using the locator
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.arcsec, decimal=True)
assert_quantity_allclose(
fl.locator(850, 2150)[0],
[1000.0, 1200.0, 1400.0, 1600.0, 1800.0, 2000.0] * u.arcsec,
)
fl = AngleFormatterLocator(unit=u.arcsec, format_unit=u.degree, decimal=False)
assert_quantity_allclose(
fl.locator(850, 2150)[0], [15.0, 20.0, 25.0, 30.0, 35.0] * u.arcmin
)
fl = AngleFormatterLocator(
unit=u.arcsec, format_unit=u.hourangle, decimal=False
)
assert_quantity_allclose(
fl.locator(850, 2150)[0],
[60.0, 75.0, 90.0, 105.0, 120.0, 135.0] * (15 * u.arcsec),
)
fl = AngleFormatterLocator(unit=u.arcsec)
fl.format = "dd:mm:ss"
assert_quantity_allclose(fl.locator(0.9, 1.1)[0], [1] * u.arcsec)
fl = AngleFormatterLocator(unit=u.arcsec, spacing=0.2 * u.arcsec)
assert_quantity_allclose(fl.locator(0.3, 0.9)[0], [0.4, 0.6, 0.8] * u.arcsec)
@pytest.mark.parametrize(
("spacing", "string"),
[
(2 * u.deg, "15\xb0"),
(2 * u.arcmin, "15\xb024'"),
(2 * u.arcsec, "15\xb023'32\""),
(0.1 * u.arcsec, "15\xb023'32.0\""),
],
)
def test_formatter_no_format(self, spacing, string):
fl = AngleFormatterLocator()
assert fl.formatter([15.392231] * u.degree, spacing)[0] == string
@pytest.mark.parametrize(
("format_unit", "decimal", "show_decimal_unit", "spacing", "ascii", "latex"),
[
(u.degree, False, True, 2 * u.degree, "15\xb0", r"$15^\circ$"),
(
u.degree,
False,
True,
2 * u.arcmin,
"15\xb024'",
r"$15^\circ24{}^\prime$",
),
(
u.degree,
False,
True,
2 * u.arcsec,
"15\xb023'32\"",
r"$15^\circ23{}^\prime32{}^{\prime\prime}$",
),
(
u.degree,
False,
True,
0.1 * u.arcsec,
"15\xb023'32.0\"",
r"$15^\circ23{}^\prime32.0{}^{\prime\prime}$",
),
(u.hourangle, False, True, 15 * u.degree, "1h", r"$1^{\mathrm{h}}$"),
(
u.hourangle,
False,
True,
15 * u.arcmin,
"1h02m",
r"$1^{\mathrm{h}}02^{\mathrm{m}}$",
),
(
u.hourangle,
False,
True,
15 * u.arcsec,
"1h01m34s",
r"$1^{\mathrm{h}}01^{\mathrm{m}}34^{\mathrm{s}}$",
),
(
u.hourangle,
False,
True,
1.5 * u.arcsec,
"1h01m34.1s",
r"$1^{\mathrm{h}}01^{\mathrm{m}}34.1^{\mathrm{s}}$",
),
(u.degree, True, True, 15 * u.degree, "15\xb0", r"$15\mathrm{^\circ}$"),
(
u.degree,
True,
True,
0.12 * u.degree,
"15.39\xb0",
r"$15.39\mathrm{^\circ}$",
),
(
u.degree,
True,
True,
0.0036 * u.arcsec,
"15.392231\xb0",
r"$15.392231\mathrm{^\circ}$",
),
(u.arcmin, True, True, 15 * u.degree, "924'", r"$924\mathrm{^\prime}$"),
(
u.arcmin,
True,
True,
0.12 * u.degree,
"923.5'",
r"$923.5\mathrm{^\prime}$",
),
(
u.arcmin,
True,
True,
0.1 * u.arcmin,
"923.5'",
r"$923.5\mathrm{^\prime}$",
),
(
u.arcmin,
True,
True,
0.0002 * u.arcmin,
"923.5339'",
r"$923.5339\mathrm{^\prime}$",
),
(
u.arcsec,
True,
True,
0.01 * u.arcsec,
'55412.03"',
r"$55412.03\mathrm{^{\prime\prime}}$",
),
(
u.arcsec,
True,
True,
0.001 * u.arcsec,
'55412.032"',
r"$55412.032\mathrm{^{\prime\prime}}$",
),
(
u.mas,
True,
True,
0.001 * u.arcsec,
"55412032mas",
r"$55412032\mathrm{mas}$",
),
(u.degree, True, False, 15 * u.degree, "15", "15"),
(u.degree, True, False, 0.12 * u.degree, "15.39", "15.39"),
(u.degree, True, False, 0.0036 * u.arcsec, "15.392231", "15.392231"),
(u.arcmin, True, False, 15 * u.degree, "924", "924"),
(u.arcmin, True, False, 0.12 * u.degree, "923.5", "923.5"),
(u.arcmin, True, False, 0.1 * u.arcmin, "923.5", "923.5"),
(u.arcmin, True, False, 0.0002 * u.arcmin, "923.5339", "923.5339"),
(u.arcsec, True, False, 0.01 * u.arcsec, "55412.03", "55412.03"),
(u.arcsec, True, False, 0.001 * u.arcsec, "55412.032", "55412.032"),
(u.mas, True, False, 0.001 * u.arcsec, "55412032", "55412032"),
# Make sure that specifying None defaults to
# decimal for non-degree or non-hour angles
(
u.arcsec,
None,
True,
0.01 * u.arcsec,
'55412.03"',
r"$55412.03\mathrm{^{\prime\prime}}$",
),
],
)
def test_formatter_no_format_with_units(
self, format_unit, decimal, show_decimal_unit, spacing, ascii, latex
):
# Check the formatter works when specifying the default units and
# decimal behavior to use.
fl = AngleFormatterLocator(
unit=u.degree,
format_unit=format_unit,
decimal=decimal,
show_decimal_unit=show_decimal_unit,
)
assert fl.formatter([15.392231] * u.degree, spacing, format="ascii")[0] == ascii
assert fl.formatter([15.392231] * u.degree, spacing, format="latex")[0] == latex
def test_incompatible_unit_decimal(self):
with pytest.raises(
UnitsError,
match=(
r"Units should be degrees or hours when using non-decimal .sexagesimal."
r" mode"
),
):
AngleFormatterLocator(unit=u.arcmin, decimal=False)
class TestScalarFormatterLocator:
def test_no_options(self):
fl = ScalarFormatterLocator(unit=u.m)
assert fl.values is None
assert fl.number == 5
assert fl.spacing is None
def test_too_many_options(self):
MESSAGE = r"At most one of values/number/spacing can be specified"
with pytest.raises(ValueError, match=MESSAGE):
ScalarFormatterLocator(values=[1.0, 2.0] * u.m, number=5)
with pytest.raises(ValueError, match=MESSAGE):
ScalarFormatterLocator(values=[1.0, 2.0] * u.m, spacing=5.0 * u.m)
with pytest.raises(ValueError, match=MESSAGE):
ScalarFormatterLocator(number=5, spacing=5.0 * u.m)
with pytest.raises(ValueError, match=MESSAGE):
ScalarFormatterLocator(values=[1.0, 2.0] * u.m, number=5, spacing=5.0 * u.m)
def test_values(self):
fl = ScalarFormatterLocator(values=[0.1, 1.0, 14.0] * u.m, unit=u.m)
assert fl.values.value.tolist() == [0.1, 1.0, 14.0]
assert fl.number is None
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, [0.1, 1.0, 14.0])
def test_number(self):
fl = ScalarFormatterLocator(number=7, unit=u.m)
assert fl.values is None
assert fl.number == 7
assert fl.spacing is None
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, np.linspace(36.0, 54.0, 10))
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, np.linspace(34.4, 36, 9))
fl.format = "x"
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [35.0, 36.0])
def test_spacing(self):
fl = ScalarFormatterLocator(spacing=3.0 * u.m)
assert fl.values is None
assert fl.number is None
assert fl.spacing == 3.0 * u.m
values, spacing = fl.locator(34.3, 55.4)
assert_almost_equal(values.value, [36.0, 39.0, 42.0, 45.0, 48.0, 51.0, 54.0])
fl.spacing = 0.5 * u.m
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [34.5, 35.0, 35.5, 36.0])
with pytest.warns(UserWarning, match=r"Spacing is too small"):
fl.format = "x"
values, spacing = fl.locator(34.3, 36.1)
assert_almost_equal(values.value, [35.0, 36.0])
def test_minor_locator(self):
fl = ScalarFormatterLocator(unit=u.m)
values, spacing = fl.locator(34.3, 55.4)
minor_values = fl.minor_locator(spacing, 5, 34.3, 55.4)
assert_almost_equal(
minor_values.value,
[
36.0,
37.0,
38.0,
39.0,
41.0,
42.0,
43.0,
44.0,
46.0,
47.0,
48.0,
49.0,
51.0,
52.0,
53.0,
54.0,
],
)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.value, [37.5, 42.5, 47.5, 52.5])
fl.values = [0.1, 1.0, 14.0] * u.m
values, spacing = fl.locator(34.3, 36.1)
minor_values = fl.minor_locator(spacing, 2, 34.3, 55.4)
assert_almost_equal(minor_values.value, [])
@pytest.mark.parametrize(
("format", "string"),
[
("x", "15"),
("x.x", "15.4"),
("x.xx", "15.39"),
("x.xxx", "15.392"),
("%g", "15.3922"),
("%f", "15.392231"),
("%.2f", "15.39"),
("%.3f", "15.392"),
],
)
def test_format(self, format, string):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
assert fl.formatter([15.392231] * u.m, None)[0] == string
@pytest.mark.parametrize(
("format", "string"),
[("x", "1539"), ("x.x", "1539.2"), ("x.xx", "1539.22"), ("x.xxx", "1539.223")],
)
def test_format_unit(self, format, string):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
fl.format_unit = u.cm
assert fl.formatter([15.392231] * u.m, None)[0] == string
@pytest.mark.parametrize("format", ["dd", "dd:mm", "xx:mm", "mx.xxx"])
def test_invalid_formats(self, format):
fl = ScalarFormatterLocator(number=5, unit=u.m)
with pytest.raises(ValueError, match=f"Invalid format: {format}"):
fl.format = format
@pytest.mark.parametrize(
("format", "base_spacing"),
[("x", 1.0 * u.m), ("x.x", 0.1 * u.m), ("x.xxx", 0.001 * u.m)],
)
def test_base_spacing(self, format, base_spacing):
fl = ScalarFormatterLocator(number=5, format=format, unit=u.m)
assert fl.base_spacing == base_spacing
def test_incorrect_spacing(self):
fl = ScalarFormatterLocator(unit=u.m)
fl.spacing = 0.032 * u.m
with pytest.warns(
UserWarning, match=r"Spacing is not a multiple of base spacing"
):
fl.format = "x.xx"
assert_almost_equal(fl.spacing.to_value(u.m), 0.03)
def test_values_unit(self):
# Make sure that the intrinsic unit and format unit are correctly
# taken into account when using the locator
fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
assert_quantity_allclose(
fl.locator(850, 2150)[0],
[1000.0, 1200.0, 1400.0, 1600.0, 1800.0, 2000.0] * u.cm,
)
fl = ScalarFormatterLocator(unit=u.cm, format_unit=u.m)
fl.format = "x.x"
assert_quantity_allclose(fl.locator(1, 19)[0], [10] * u.cm)
|
3db5a938e009bde186d950deadd74f7c82152d9b5ba07eceb7f656473449078f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from textwrap import dedent
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib.transforms import Affine2D, IdentityTransform
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.tests.figures import figure_test
from astropy.time import Time
from astropy.units import Quantity
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.frame import RectangularFrame, RectangularFrame1D
from astropy.visualization.wcsaxes.wcsapi import (
WCSWorld2PixelTransform,
apply_slices,
transform_coord_meta_from_wcs,
)
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseLowLevelWCS, SlicedLowLevelWCS
@pytest.fixture
def plt_close():
yield
plt.close("all")
WCS2D = WCS(naxis=2)
WCS2D.wcs.ctype = ["x", "y"]
WCS2D.wcs.cunit = ["km", "km"]
WCS2D.wcs.crpix = [614.5, 856.5]
WCS2D.wcs.cdelt = [6.25, 6.25]
WCS2D.wcs.crval = [0.0, 0.0]
WCS3D = WCS(naxis=3)
WCS3D.wcs.ctype = ["x", "y", "z"]
WCS3D.wcs.cunit = ["km", "km", "km"]
WCS3D.wcs.crpix = [614.5, 856.5, 333]
WCS3D.wcs.cdelt = [6.25, 6.25, 23]
WCS3D.wcs.crval = [0.0, 0.0, 1.0]
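# WCS3D is sliced down to two dimensions before use (e.g. WCS3D[:, 0, :] in
# test_3d below) so that it can be driven through the 2D pixel transform.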
@pytest.fixture
def wcs_4d():
header = dedent(
"""\
WCSAXES = 4 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CRPIX3 = 0.0 / Pixel coordinate of reference point
CRPIX4 = 5.0 / Pixel coordinate of reference point
CDELT1 = 0.4 / [min] Coordinate increment at reference point
CDELT2 = 2E-11 / [m] Coordinate increment at reference point
CDELT3 = 0.0027777777777778 / [deg] Coordinate increment at reference point
CDELT4 = 0.0013888888888889 / [deg] Coordinate increment at reference point
CUNIT1 = 'min' / Units of coordinate increment and value
CUNIT2 = 'm' / Units of coordinate increment and value
CUNIT3 = 'deg' / Units of coordinate increment and value
CUNIT4 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'TIME' / Coordinate type code
CTYPE2 = 'WAVE' / Vacuum wavelength (linear)
        CTYPE3 = 'HPLT-TAN' / Coordinate type code: gnomonic projection
        CTYPE4 = 'HPLN-TAN' / Coordinate type code: gnomonic projection
CRVAL1 = 0.0 / [min] Coordinate value at reference point
CRVAL2 = 0.0 / [m] Coordinate value at reference point
CRVAL3 = 0.0 / [deg] Coordinate value at reference point
CRVAL4 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
"""
)
return WCS(header=fits.Header.fromstring(header, sep="\n"))
@pytest.fixture
def cube_wcs():
cube_header = get_pkg_data_filename("data/cube_header")
header = fits.Header.fromtextfile(cube_header)
return WCS(header=header)
def test_shorthand_inversion():
"""
Test that the Matplotlib subtraction shorthand for composing and inverting
transformations works.
"""
w1 = WCS(naxis=2)
w1.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w1.wcs.crpix = [256.0, 256.0]
w1.wcs.cdelt = [-0.05, 0.05]
w1.wcs.crval = [120.0, -19.0]
w2 = WCS(naxis=2)
w2.wcs.ctype = ["RA---SIN", "DEC--SIN"]
w2.wcs.crpix = [256.0, 256.0]
w2.wcs.cdelt = [-0.05, 0.05]
w2.wcs.crval = [235.0, +23.7]
t1 = WCSWorld2PixelTransform(w1)
t2 = WCSWorld2PixelTransform(w2)
assert t1 - t2 == t1 + t2.inverted()
assert t1 - t2 != t2.inverted() + t1
assert t1 - t1 == IdentityTransform()
# We compose with Affine2D because, in Matplotlib, a composite transform can
# impose stricter dimensionality requirements; this catches regressions there.
def test_2d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS2D) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world, world_2)
def test_3d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS3D[:, 0, :]) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world[:, 0], world_2[:, 0])
np.testing.assert_allclose(world[:, 1], world_2[:, 1])
def test_coord_type_from_ctype(cube_wcs):
_, coord_meta = transform_coord_meta_from_wcs(
cube_wcs, RectangularFrame, slices=(50, "y", "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ["l", "r", "b"]
assert ticklabel_position == ["l", "r", "b"]
assert ticks_position == ["l", "r", "b"]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cname = ["Longitude", ""]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["default_axis_label"] == ["Longitude", "pos.galactic.lat"]
assert coord_meta["name"] == [
("pos.galactic.lon", "glon-tan", "glon", "Longitude"),
("pos.galactic.lat", "glat-tan", "glat"),
]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.arcsec, u.arcsec]
assert coord_meta["wrap"] == [180.0, None]
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame, slices=("y", "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes should be swapped because of slices
assert axislabel_position == ["l", "b"]
assert ticklabel_position == ["l", "b"]
assert ticks_position == ["bltr", "bltr"]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HGLN-TAN", "HGLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [180.0, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["CRLN-TAN", "CRLT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [360.0, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.hourangle, u.deg]
assert coord_meta["wrap"] == [None, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["spam", "spam"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta["type"] == ["scalar", "scalar"]
assert coord_meta["format_unit"] == [u.one, u.one]
assert coord_meta["wrap"] == [None, None]
def test_coord_type_1d_1d_wcs():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["WAVE"]
wcs.wcs.crpix = [256.0]
wcs.wcs.cdelt = [-0.05]
wcs.wcs.crval = [50.0]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame1D)
assert coord_meta["type"] == ["scalar"]
assert coord_meta["format_unit"] == [u.m]
assert coord_meta["wrap"] == [None]
def test_coord_type_1d_2d_wcs_correlated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame1D, slices=("x", 0)
)
assert coord_meta["type"] == ["longitude", "latitude"]
assert coord_meta["format_unit"] == [u.deg, u.deg]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["visible"] == [True, True]
def test_coord_type_1d_2d_wcs_uncorrelated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["WAVE", "UTC"]
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cunit = ["nm", "s"]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame1D, slices=("x", 0)
)
assert coord_meta["type"] == ["scalar", "scalar"]
assert coord_meta["format_unit"] == [u.m, u.s]
assert coord_meta["wrap"] == [None, None]
assert coord_meta["visible"] == [True, False]
def test_coord_meta_4d(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(
wcs_4d, RectangularFrame, slices=(0, 0, "x", "y")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
assert axislabel_position == ["", "", "b", "l"]
assert ticklabel_position == ["", "", "b", "l"]
assert ticks_position == ["", "", "bltr", "bltr"]
def test_coord_meta_4d_line_plot(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(
wcs_4d, RectangularFrame1D, slices=(0, 0, 0, "x")
)
axislabel_position = coord_meta["default_axislabel_position"]
ticklabel_position = coord_meta["default_ticklabel_position"]
ticks_position = coord_meta["default_ticks_position"]
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ["", "", "t", "b"]
assert ticklabel_position == ["", "", "t", "b"]
assert ticks_position == ["", "", "t", "b"]
@pytest.fixture
def sub_wcs(wcs_4d, wcs_slice):
return SlicedLowLevelWCS(wcs_4d, wcs_slice)
@pytest.mark.parametrize(
("wcs_slice", "wcsaxes_slices", "world_map", "ndim"),
[
(np.s_[...], [0, 0, "x", "y"], (2, 3), 2),
(np.s_[...], [0, "x", 0, "y"], (1, 2, 3), 3),
(np.s_[...], ["x", 0, 0, "y"], (0, 2, 3), 3),
(np.s_[...], ["x", "y", 0, 0], (0, 1), 2),
(np.s_[:, :, 0, :], [0, "x", "y"], (1, 2), 2),
(np.s_[:, :, 0, :], ["x", 0, "y"], (0, 1, 2), 3),
(np.s_[:, :, 0, :], ["x", "y", 0], (0, 1, 2), 3),
(np.s_[:, 0, :, :], ["x", "y", 0], (0, 1), 2),
],
)
def test_apply_slices(sub_wcs, wcs_slice, wcsaxes_slices, world_map, ndim):
transform_wcs, _, out_world_map = apply_slices(sub_wcs, wcsaxes_slices)
assert transform_wcs.world_n_dim == ndim
assert out_world_map == world_map
# parametrize here to pass to the fixture
@pytest.mark.parametrize("wcs_slice", [np.s_[:, :, 0, :]])
def test_sliced_ND_input(wcs_4d, sub_wcs, wcs_slice, plt_close):
slices_wcsaxes = [0, "x", "y"]
for sub_wcs in (sub_wcs, SlicedLowLevelWCS(wcs_4d, wcs_slice)):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
_, coord_meta = transform_coord_meta_from_wcs(
sub_wcs, RectangularFrame, slices=slices_wcsaxes
)
assert all(len(x) == 3 for x in coord_meta.values())
assert coord_meta["name"] == [
"time",
("custom:pos.helioprojective.lat", "hplt-tan", "hplt"),
("custom:pos.helioprojective.lon", "hpln-tan", "hpln"),
]
assert coord_meta["type"] == ["scalar", "latitude", "longitude"]
assert coord_meta["wrap"] == [None, None, 180.0]
assert coord_meta["unit"] == [u.Unit("min"), u.Unit("deg"), u.Unit("deg")]
assert coord_meta["visible"] == [False, True, True]
assert coord_meta["format_unit"] == [
u.Unit("min"),
u.Unit("arcsec"),
u.Unit("arcsec"),
]
assert coord_meta["default_axislabel_position"] == ["", "b", "l"]
assert coord_meta["default_ticklabel_position"] == ["", "b", "l"]
assert coord_meta["default_ticks_position"] == ["", "bltr", "bltr"]
# Validate the axes initialize correctly
plt.clf()
plt.subplot(projection=sub_wcs, slices=slices_wcsaxes)
class LowLevelWCS5D(BaseLowLevelWCS):
pixel_dim = 2
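    # Defaults to a 2-pixel-dimension WCS; individual tests may override
    # pixel_dim (e.g. test_coord_meta_wcsapi sets it to 5 so that the pixel
    # and world dimensionalities match).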
@property
def pixel_n_dim(self):
return self.pixel_dim
@property
def world_n_dim(self):
return 5
@property
def world_axis_physical_types(self):
return [
"em.freq",
"time",
"pos.eq.ra",
"pos.eq.dec",
"phys.polarization.stokes",
]
@property
def world_axis_units(self):
return ["Hz", "day", "deg", "deg", ""]
@property
def world_axis_names(self):
return ["Frequency", "", "RA", "DEC", ""]
def pixel_to_world_values(self, *pixel_arrays):
pixel_arrays = (list(pixel_arrays) * 3)[:-1] # make list have 5 elements
return [
np.asarray(pix) * scale
for pix, scale in zip(pixel_arrays, [10, 0.2, 0.4, 0.39, 2])
]
def world_to_pixel_values(self, *world_arrays):
world_arrays = world_arrays[:2] # make list have 2 elements
return [
np.asarray(world) / scale for world, scale in zip(world_arrays, [10, 0.2])
]
@property
def world_axis_object_components(self):
return [
("freq", 0, "value"),
("time", 0, "mjd"),
("celestial", 0, "spherical.lon.degree"),
("celestial", 1, "spherical.lat.degree"),
("stokes", 0, "value"),
]
@property
def world_axis_object_classes(self):
return {
"celestial": (SkyCoord, (), {"unit": "deg"}),
"time": (Time, (), {"format": "mjd"}),
"freq": (Quantity, (), {"unit": "Hz"}),
"stokes": (Quantity, (), {"unit": "one"}),
}
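# Note that by default LowLevelWCS5D has more world (5) than pixel (2)
# dimensions, so WCSAxes must cope with world axes that have no
# corresponding pixel axis.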
def test_edge_axes():
    # Check that axes on the edge of a spherical projection are shown properly
# (see https://github.com/astropy/astropy/issues/10441)
shape = [180, 360]
data = np.random.rand(*shape)
header = {
"wcsaxes": 2,
"crpix1": 180.5,
"crpix2": 90.5,
"cdelt1": 1.0,
"cdelt2": 1.0,
"cunit1": "deg",
"cunit2": "deg",
"ctype1": "CRLN-CAR",
"ctype2": "CRLT-CAR",
"crval1": 0.0,
"crval2": 0.0,
"lonpole": 0.0,
"latpole": 90.0,
}
wcs = WCS(header)
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=wcs)
ax.imshow(data, origin="lower")
# By default the x- and y- axes should be drawn
lon = ax.coords[0]
lat = ax.coords[1]
fig.canvas.draw()
np.testing.assert_equal(
lon.ticks.world["b"], np.array([90.0, 180.0, 180.0, 270.0, 0.0])
)
np.testing.assert_equal(
lat.ticks.world["l"], np.array([-90.0, -60.0, -30.0, 0.0, 30.0, 60.0, 90.0])
)
def test_coord_meta_wcsapi():
wcs = LowLevelWCS5D()
wcs.pixel_dim = 5
_, coord_meta = transform_coord_meta_from_wcs(
wcs, RectangularFrame, slices=[0, 0, "x", "y", 0]
)
assert coord_meta["name"] == [
("em.freq", "Frequency"),
"time",
("pos.eq.ra", "RA"),
("pos.eq.dec", "DEC"),
"phys.polarization.stokes",
]
assert coord_meta["type"] == ["scalar", "scalar", "longitude", "latitude", "scalar"]
assert coord_meta["wrap"] == [None, None, None, None, None]
assert coord_meta["unit"] == [
u.Unit("Hz"),
u.Unit("d"),
u.Unit("deg"),
u.Unit("deg"),
u.one,
]
assert coord_meta["visible"] == [True, True, True, True, True]
assert coord_meta["format_unit"] == [
u.Unit("Hz"),
u.Unit("d"),
u.Unit("hourangle"),
u.Unit("deg"),
u.one,
]
assert coord_meta["default_axislabel_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_ticklabel_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_ticks_position"] == ["b", "l", "t", "r", ""]
assert coord_meta["default_axis_label"] == [
"Frequency",
"time",
"RA",
"DEC",
"phys.polarization.stokes",
]
@figure_test
def test_wcsapi_5d_with_names(plt_close):
    # Test plotting with a custom low-level WCS whose world axes carry names
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=LowLevelWCS5D())
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
return fig
|
86fe3abcd13663b36c31eb01b993b1a560fcd46ae85f6e034fe112303f05829e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backend_bases import KeyEvent
import astropy.units as u
from astropy.coordinates import FK5, SkyCoord, galactocentric_frame_defaults
from astropy.time import Time
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.wcs import WCS
from .test_images import BaseImageTests
class TestDisplayWorldCoordinate(BaseImageTests):
def teardown_method(self, method):
plt.close("all")
def test_overlay_coords(self, ignore_matplotlibrc, tmp_path):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs)
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmp_path / "test1.png")
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == "0\xb029'45\" -0\xb029'20\" (world)"
# Test pixel coordinates
event1 = KeyEvent("test_pixel_coords", canvas, "w")
fig.canvas.callbacks.process("key_press_event", event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
event3 = KeyEvent("test_pixel_coords", canvas, "w")
fig.canvas.callbacks.process("key_press_event", event3)
# Test that it still displays world coords when there are no overlay coords
string_world2 = ax._display_world_coords(0.523412, 0.518311)
assert string_world2 == "0\xb029'45\" -0\xb029'20\" (world)"
overlay = ax.get_coords_overlay("fk5")
        # Regression test for a bug that caused the format to always be taken
        # from the main world coordinates.
overlay[0].set_major_formatter("d.ddd")
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmp_path / "test2.png")
event4 = KeyEvent("test_pixel_coords", canvas, "w")
fig.canvas.callbacks.process("key_press_event", event4)
# Test that it displays the overlay world coordinates
string_world3 = ax._display_world_coords(0.523412, 0.518311)
assert string_world3 == "267.176\xb0 -28\xb045'56\" (world, overlay 1)"
overlay = ax.get_coords_overlay(FK5())
        # Regression test for a bug that caused the format to always be taken
        # from the main world coordinates.
overlay[0].set_major_formatter("d.ddd")
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmp_path / "test3.png")
event5 = KeyEvent("test_pixel_coords", canvas, "w")
fig.canvas.callbacks.process("key_press_event", event5)
# Test that it displays the overlay world coordinates
string_world4 = ax._display_world_coords(0.523412, 0.518311)
assert string_world4 == "267.176\xb0 -28\xb045'56\" (world, overlay 2)"
overlay = ax.get_coords_overlay(FK5(equinox=Time("J2030")))
        # Regression test for a bug that caused the format to always be taken
        # from the main world coordinates.
overlay[0].set_major_formatter("d.ddd")
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmp_path / "test4.png")
event6 = KeyEvent("test_pixel_coords", canvas, "w")
fig.canvas.callbacks.process("key_press_event", event6)
# Test that it displays the overlay world coordinates
string_world5 = ax._display_world_coords(0.523412, 0.518311)
assert string_world5 == "267.652\xb0 -28\xb046'23\" (world, overlay 3)"
def test_cube_coords(self, ignore_matplotlibrc, tmp_path):
wcs = WCS(self.cube_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=("y", 50, "x"))
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmp_path / "test.png")
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == "3h26m52.0s 30\xb037'17\" 2563 (world)"
# Test pixel coordinates
event1 = KeyEvent("test_pixel_coords", canvas, "w")
fig.canvas.callbacks.process("key_press_event", event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
def test_cube_coords_uncorr_slicing(self, ignore_matplotlibrc, tmp_path):
# Regression test for a bug that occurred with coordinate formatting if
# some dimensions were uncorrelated and sliced out.
wcs = WCS(self.cube_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=("x", "y", 2))
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmp_path / "test.png")
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == "3h26m56.6s 30\xb018'19\" (world)"
# Test pixel coordinates
event1 = KeyEvent("test_pixel_coords", canvas, "w")
fig.canvas.callbacks.process("key_press_event", event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
def test_plot_coord_3d_transform(self):
wcs = WCS(self.msx_header)
with galactocentric_frame_defaults.set("latest"):
coord = SkyCoord(0 * u.kpc, 0 * u.kpc, 0 * u.kpc, frame="galactocentric")
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wcs)
(point,) = ax.plot_coord(coord, "ro")
np.testing.assert_allclose(point.get_xydata()[0], [0, 0], atol=1e-4)
|
88ab470783a06c1e6f2c224b5c82040a3d6b3df951489cfbb729b718492362bf | import numpy as np
import pytest
from matplotlib.lines import Path
from astropy.visualization.wcsaxes.grid_paths import get_lon_lat_path
@pytest.mark.parametrize("step_in_degrees", [10, 1, 0.01])
def test_round_trip_visibility(step_in_degrees):
zero = np.zeros(100)
# The pixel values are irrelevant for this test
pixel = np.stack([zero, zero]).T
# Create a grid line of constant latitude with a point every step
line = np.stack([np.arange(100), zero]).T * step_in_degrees
# Create a modified grid line where the point spacing is larger by 5%
    # From point 20 onwards, the discrepancy between `line` and `line_round`
    # is at least `step_in_degrees`
line_round = line * 1.05
# Perform the round-trip check
path = get_lon_lat_path(line, pixel, line_round)
    # The grid line should be visible only for the initial part of the line
    # (19 points, since the starting point is always a MOVETO)
codes_check = np.full(100, Path.MOVETO)
codes_check[line_round[:, 0] - line[:, 0] < step_in_degrees] = Path.LINETO
assert np.all(path.codes[1:] == codes_check[1:])
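# A minimal companion sketch (an addition, not part of the original suite)
# spelling out the discrepancy rule checked above: a point stays connected
# (LINETO) only while the round-trip error is below the step size. The helper
# name is an assumption for illustration.
def _expected_codes(line, line_round, step_in_degrees):
    # Pen up (MOVETO) once the error reaches the step, pen down (LINETO) before
    codes = np.full(len(line), Path.MOVETO)
    codes[np.abs(line_round[:, 0] - line[:, 0]) < step_in_degrees] = Path.LINETO
    return codes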
|
10a96f89397b0da396e11344d19fde44b994b438af5ddc71ea0298a7571043d6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from unittest.mock import patch
import matplotlib.pyplot as plt
import pytest
from astropy import units as u
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.wcs import WCS
MSX_HEADER = fits.Header.fromtextfile(get_pkg_data_filename("data/msx_header"))
def teardown_function(function):
plt.close("all")
def test_getaxislabel(ignore_matplotlibrc):
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect="equal")
ax.coords[0].set_axislabel("X")
ax.coords[1].set_axislabel("Y")
assert ax.coords[0].get_axislabel() == "X"
assert ax.coords[1].get_axislabel() == "Y"
@pytest.fixture
def ax():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect="equal")
fig.add_axes(ax)
return ax
def assert_label_draw(ax, x_label, y_label):
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
with patch.object(ax.coords[0].axislabels, "set_position") as pos1:
with patch.object(ax.coords[1].axislabels, "set_position") as pos2:
ax.figure.canvas.draw()
assert pos1.call_count == x_label
assert pos2.call_count == y_label
def test_label_visibility_rules_default(ignore_matplotlibrc, ax):
assert_label_draw(ax, True, True)
def test_label_visibility_rules_label(ignore_matplotlibrc, ax):
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999] * u.one)
assert_label_draw(ax, False, False)
def test_label_visibility_rules_ticks(ignore_matplotlibrc, ax):
ax.coords[0].set_axislabel_visibility_rule("ticks")
ax.coords[1].set_axislabel_visibility_rule("ticks")
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999] * u.one)
assert_label_draw(ax, True, False)
def test_label_visibility_rules_always(ignore_matplotlibrc, ax):
ax.coords[0].set_axislabel_visibility_rule("always")
ax.coords[1].set_axislabel_visibility_rule("always")
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticks(values=[-9999] * u.one)
assert_label_draw(ax, True, True)
def test_format_unit():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=WCS(MSX_HEADER))
fig.add_axes(ax)
# Force a draw which is required for format_coord to work
ax.figure.canvas.draw()
ori_fu = ax.coords[1].get_format_unit()
assert ori_fu == "deg"
ax.coords[1].set_format_unit("arcsec")
fu = ax.coords[1].get_format_unit()
assert fu == "arcsec"
def test_set_separator():
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=WCS(MSX_HEADER))
fig.add_axes(ax)
# Force a draw which is required for format_coord to work
ax.figure.canvas.draw()
ax.coords[1].set_format_unit("deg")
assert ax.coords[1].format_coord(4) == "4\xb000'00\""
ax.coords[1].set_separator((":", ":", ""))
assert ax.coords[1].format_coord(4) == "4:00:00"
ax.coords[1].set_separator("abc")
assert ax.coords[1].format_coord(4) == "4a00b00c"
ax.coords[1].set_separator(None)
assert ax.coords[1].format_coord(4) == "4\xb000'00\""
|
2fcede79e7a128384c5e5dd6e63382120512b87f62846f250b46452d762829c8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.lines
import matplotlib.pyplot as plt
import numpy as np
import pytest
from matplotlib import rc_context
from matplotlib.figure import Figure
from matplotlib.patches import Circle, Rectangle
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.tests.figures import figure_test
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from astropy.visualization.wcsaxes import WCSAxes, add_beam, add_scalebar
from astropy.visualization.wcsaxes.frame import EllipticalFrame
from astropy.visualization.wcsaxes.patches import Quadrangle, SphericalCircle
from astropy.wcs import WCS
class BaseImageTests:
@classmethod
def setup_class(cls):
msx_header = get_pkg_data_filename("data/msx_header")
cls.msx_header = fits.Header.fromtextfile(msx_header)
rosat_header = get_pkg_data_filename("data/rosat_header")
cls.rosat_header = fits.Header.fromtextfile(rosat_header)
twoMASS_k_header = get_pkg_data_filename("data/2MASS_k_header")
cls.twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
cube_header = get_pkg_data_filename("data/cube_header")
cls.cube_header = fits.Header.fromtextfile(cube_header)
slice_header = get_pkg_data_filename("data/slice_header")
cls.slice_header = fits.Header.fromtextfile(slice_header)
def teardown_method(self, method):
plt.close("all")
class TestBasic(BaseImageTests):
@figure_test
def test_tight_layout(self):
# Check that tight_layout works on a WCSAxes.
fig = plt.figure(figsize=(8, 6))
for i in (1, 2):
fig.add_subplot(2, 1, i, projection=WCS(self.msx_header))
fig.tight_layout()
return fig
@figure_test
def test_image_plot(self):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0.0, 0.20] * u.degree, size=5, width=1)
return fig
@figure_test
def test_axes_off(self):
# Test for turning the axes off
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header))
ax.imshow(np.arange(12).reshape((3, 4)))
ax.set_axis_off()
return fig
@figure_test
@pytest.mark.parametrize("axisbelow", [True, False, "line"])
def test_axisbelow(self, axisbelow):
# Test that tick marks, labels, and gridlines are drawn with the
# correct zorder controlled by the axisbelow property.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_axisbelow(axisbelow)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0.0, 0.20] * u.degree, size=5, width=1)
ax.grid()
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# Add an image (default zorder=0).
ax.imshow(np.zeros((64, 64)))
# Add a patch (default zorder=1).
r = Rectangle((30.0, 50.0), 60.0, 50.0, facecolor="green", edgecolor="red")
ax.add_patch(r)
# Add a line (default zorder=2).
ax.plot([32, 128], [32, 128], linewidth=10)
return fig
@figure_test
def test_contour_overlay(self):
# Test for overlaying contours on images
path = get_pkg_data_filename("galactic_center/gc_msx_e.fits")
with fits.open(path) as pf:
data = pf[0].data
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contour(
data,
transform=ax.get_transform(wcs_msx),
colors="orange",
levels=[2.5e-5, 5e-5, 1.0e-4],
)
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0.0, 720.0)
ax.set_ylim(0.0, 720.0)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_contourf_overlay(self):
# Test for overlaying contours on images
path = get_pkg_data_filename("galactic_center/gc_msx_e.fits")
with fits.open(path) as pf:
data = pf[0].data
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contourf(
data, transform=ax.get_transform(wcs_msx), levels=[2.5e-5, 5e-5, 1.0e-4]
)
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0.0, 720.0)
ax.set_ylim(0.0, 720.0)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_overlay_features_image(self):
# Test for overlaying grid, changing format of ticks, setting spacing
# and number of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.25, 0.25, 0.65, 0.65], projection=WCS(self.msx_header), aspect="equal"
)
# Change the format of the ticks
ax.coords[0].set_major_formatter("dd:mm:ss")
ax.coords[1].set_major_formatter("dd:mm:ss.ssss")
# Overlay grid on image
ax.grid(color="red", alpha=1.0, lw=1, linestyle="dashed")
# Set the spacing of ticks on the 'glon' axis to 4 arcsec
ax.coords["glon"].set_ticks(spacing=4 * u.arcsec, size=5, width=1)
# Set the number of ticks on the 'glat' axis to 9
ax.coords["glat"].set_ticks(number=9, size=5, width=1)
# Set labels on axes
ax.coords["glon"].set_axislabel("Galactic Longitude", minpad=1.6)
ax.coords["glat"].set_axislabel("Galactic Latitude", minpad=-0.75)
# Change the frame linewidth and color
ax.coords.frame.set_color("red")
ax.coords.frame.set_linewidth(2)
assert ax.coords.frame.get_color() == "red"
assert ax.coords.frame.get_linewidth() == 2
return fig
@figure_test
def test_curvilinear_grid_patches_image(self):
# Overlay curvilinear grid and patches on image
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8], projection=WCS(self.rosat_header), aspect="equal"
)
ax.set_xlim(-0.5, 479.5)
ax.set_ylim(-0.5, 239.5)
ax.grid(color="black", alpha=1.0, lw=1, linestyle="dashed")
p = Circle((300, 100), radius=40, ec="yellow", fc="none")
ax.add_patch(p)
p = Circle(
(30.0, 20.0),
radius=20.0,
ec="orange",
fc="none",
transform=ax.get_transform("world"),
)
ax.add_patch(p)
p = Circle(
(60.0, 50.0),
radius=20.0,
ec="red",
fc="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(p)
p = Circle(
(40.0, 60.0),
radius=20.0,
ec="green",
fc="none",
transform=ax.get_transform("galactic"),
)
ax.add_patch(p)
return fig
@figure_test
def test_cube_slice_image(self):
# Test for cube slicing
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_axislabel("Velocity m/s")
ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1)
ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[0].grid(grid_type="contours", color="purple", linestyle="solid")
ax.coords[1].grid(grid_type="contours", color="orange", linestyle="solid")
ax.coords[2].grid(grid_type="contours", color="red", linestyle="solid")
return fig
@figure_test
def test_cube_slice_image_lonlat(self):
# Test for cube slicing. Here we test with longitude and latitude since
# there is some longitude-specific code in _update_grid_contour.
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=("x", "y", 50),
aspect="equal",
)
ax.set_xlim(-0.5, 106.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].grid(grid_type="contours", color="blue", linestyle="solid")
ax.coords[1].grid(grid_type="contours", color="red", linestyle="solid")
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_plot_coord(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
lines = ax.plot_coord(c, "o")
# Test that plot_coord returns the results from ax.plot
assert isinstance(lines, list)
assert isinstance(lines[0], matplotlib.lines.Line2D)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_scatter_coord(self):
from matplotlib.collections import PathCollection
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
sc = ax.scatter_coord(c, marker="o")
        # Test that scatter_coord returns the results from ax.scatter
assert isinstance(sc, PathCollection)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_plot_line(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes(
[0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect="equal",
)
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg)
ax.plot_coord(c)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_changed_axis_units(self):
# Test to see if changing the units of axis works
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].set_ticks_position("")
ax.coords[0].set_ticklabel_position("")
ax.coords[0].set_axislabel_position("")
ax.coords[1].set_ticks_position("lr")
ax.coords[1].set_ticklabel_position("l")
ax.coords[1].set_axislabel_position("l")
ax.coords[2].set_ticks_position("bt")
ax.coords[2].set_ticklabel_position("b")
ax.coords[2].set_axislabel_position("b")
ax.coords[2].set_major_formatter("x.xx")
ax.coords[2].set_format_unit(u.km / u.s)
ax.coords[2].set_axislabel("Velocity km/s")
ax.coords[1].set_ticks(width=1)
ax.coords[2].set_ticks(width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_minor_ticks(self):
# Test for drawing minor ticks
fig = plt.figure()
ax = fig.add_axes(
[0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, "y", "x"),
aspect="equal",
)
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].set_ticks_position("")
ax.coords[0].set_ticklabel_position("")
ax.coords[0].set_axislabel_position("")
ax.coords[1].set_ticks_position("lr")
ax.coords[1].set_ticklabel_position("l")
ax.coords[1].set_axislabel_position("l")
ax.coords[2].set_ticks_position("bt")
ax.coords[2].set_ticklabel_position("b")
ax.coords[2].set_axislabel_position("b")
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].display_minor_ticks(True)
ax.coords[1].display_minor_ticks(True)
ax.coords[2].set_minor_frequency(3)
ax.coords[1].set_minor_frequency(10)
return fig
@figure_test
def test_ticks_labels(self):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.coords[0].set_ticks(size=10, color="blue", alpha=0.2, width=1)
ax.coords[1].set_ticks(size=20, color="red", alpha=0.9, width=1)
ax.coords[0].set_ticks_position("all")
ax.coords[1].set_ticks_position("all")
ax.coords[0].set_axislabel("X-axis", size=20)
ax.coords[1].set_axislabel(
"Y-axis",
color="green",
size=25,
weight="regular",
style="normal",
family="cmtt10",
)
ax.coords[0].set_axislabel_position("t")
ax.coords[1].set_axislabel_position("r")
ax.coords[0].set_ticklabel(
color="purple",
size=15,
alpha=1,
weight="light",
style="normal",
family="cmss10",
)
ax.coords[1].set_ticklabel(
color="black", size=18, alpha=0.9, weight="bold", family="cmr10"
)
ax.coords[0].set_ticklabel_position("all")
ax.coords[1].set_ticklabel_position("r")
return fig
@figure_test
def test_rcparams(self):
# Test custom rcParams
with rc_context(
{
"axes.labelcolor": "purple",
"axes.labelsize": 14,
"axes.labelweight": "bold",
"axes.linewidth": 3,
"axes.facecolor": "0.5",
"axes.edgecolor": "green",
"xtick.color": "red",
"xtick.labelsize": 8,
"xtick.direction": "in",
"xtick.minor.visible": True,
"xtick.minor.size": 5,
"xtick.major.size": 20,
"xtick.major.width": 3,
"xtick.major.pad": 10,
"grid.color": "blue",
"grid.linestyle": ":",
"grid.linewidth": 1,
"grid.alpha": 0.5,
}
):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.15, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.grid()
ax.set_xlabel("X label")
ax.set_ylabel("Y label")
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_tick_angles(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels. Addresses #45, #46.
w = WCS()
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = "ICRS"
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color="gray", alpha=0.5, linestyle="solid")
ax.coords["ra"].set_ticks(color="red", size=20)
ax.coords["dec"].set_ticks(color="red", size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_tick_angles_non_square_axes(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels, and the axes are
# non-square.
w = WCS()
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = "ICRS"
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(6, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color="gray", alpha=0.5, linestyle="solid")
ax.coords["ra"].set_ticks(color="red", size=20)
ax.coords["dec"].set_ticks(color="red", size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@figure_test
def test_set_coord_type(self):
# Test for setting coord_type
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.2, 0.2, 0.6, 0.6], projection=WCS(self.msx_header), aspect="equal"
)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_coord_type("scalar")
ax.coords[1].set_coord_type("scalar")
ax.coords[0].set_major_formatter("x.xxx")
ax.coords[1].set_major_formatter("x.xxx")
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@figure_test
def test_ticks_regression(self):
# Regression test for a bug that caused ticks aligned exactly with a
# sampled frame point to not appear. This also checks that tick labels
# don't get added more than once, and that no error occurs when e.g.
# the top part of the frame is all at the same coordinate as one of the
# potential ticks (which causes the tick angle calculation to return
# NaN).
wcs = WCS(self.slice_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="auto")
limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1]
ax.set_ylim(*limits)
ax.coords[0].set_ticks(spacing=0.002 * u.deg)
ax.coords[1].set_ticks(spacing=5 * u.km / u.s)
ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels
ax.coords[1].set_ticklabel(alpha=0.5)
ax.coords[0].set_ticklabel_position("all")
ax.coords[1].set_ticklabel_position("all")
return fig
@figure_test
def test_axislabels_regression(self):
# Regression test for a bug that meant that if tick labels were made
# invisible with ``set_visible(False)``, they were still added to the
# list of bounding boxes for tick labels, but with default values of 0
# to 1, which caused issues.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="auto")
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
ax.coords[1].set_axislabel_visibility_rule("always")
ax.coords[1].ticklabels.set_visible(False)
return fig
@figure_test(savefig_kwargs={"bbox_inches": "tight"})
def test_noncelestial_angular(self, tmp_path):
        # Regression test for a bug that meant that, when passing a WCS that
        # had angular axes and using set_coord_type to set the coordinates to
        # longitude/latitude but where the WCS wasn't recognized as celestial,
        # the WCS units were not converted to deg, so we can't assume that
        # transform will always return degrees.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["solar-x", "solar-y"]
wcs.wcs.cunit = ["arcsec", "arcsec"]
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.zeros([1024, 1024]), origin="lower")
ax.coords[0].set_coord_type("longitude", coord_wrap=180)
ax.coords[1].set_coord_type("latitude")
ax.coords[0].set_major_formatter("s.s")
ax.coords[1].set_major_formatter("s.s")
ax.coords[0].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.coords[1].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.grid(color="white", ls="solid")
# Force drawing (needed for format_coord)
fig.savefig(tmp_path / "nothing")
assert ax.format_coord(512, 512) == "513.0 513.0 (world)"
return fig
@figure_test
def test_patches_distortion(self, tmp_path):
# Check how patches get distorted (and make sure that scatter markers
# and SphericalCircle don't)
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="equal")
# Pixel coordinates
r = Rectangle((30.0, 50.0), 60.0, 50.0, edgecolor="green", facecolor="none")
ax.add_patch(r)
# FK5 coordinates
r = Rectangle(
(266.4, -28.9),
0.3,
0.3,
edgecolor="cyan",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(r)
# FK5 coordinates
c = Circle(
(266.4, -29.1),
0.15,
edgecolor="magenta",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(c)
# Pixel coordinates
ax.scatter(
[40, 100, 130],
[30, 130, 60],
s=100,
edgecolor="red",
facecolor=(1, 0, 0, 0.5),
)
# World coordinates (should not be distorted)
ax.scatter(
266.78238,
-28.769255,
transform=ax.get_transform("fk5"),
s=300,
edgecolor="red",
facecolor="none",
)
# World coordinates (should not be distorted)
r1 = SphericalCircle(
(266.4 * u.deg, -29.1 * u.deg),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.add_patch(r1)
r2 = SphericalCircle(
SkyCoord(266.4 * u.deg, -29.1 * u.deg),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
with pytest.warns(
AstropyUserWarning,
match="Received `center` of representation type "
"<class 'astropy.coordinates.representation.CartesianRepresentation'> "
"will be converted to SphericalRepresentation",
):
r3 = SphericalCircle(
SkyCoord(
x=-0.05486461,
y=-0.87204803,
z=-0.48633538,
representation_type="cartesian",
),
0.15 * u.degree,
edgecolor="purple",
facecolor="none",
transform=ax.get_transform("fk5"),
)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
        # Test to verify that SphericalCircle works irrespective of whether
        # the input (``center``) is a tuple or a SkyCoord object.
assert (r1.get_xy() == r2.get_xy()).all()
assert np.allclose(r1.get_xy(), r3.get_xy())
assert np.allclose(r2.get_xy()[0], [266.4, -29.25])
return fig
@figure_test
def test_quadrangle(self, tmp_path):
# Test that Quadrangle can have curved edges while Rectangle does not
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect="equal")
ax.set_xlim(0, 10000)
ax.set_ylim(-10000, 0)
# Add a quadrangle patch (100 degrees by 20 degrees)
q = Quadrangle(
(255, -70) * u.deg,
100 * u.deg,
20 * u.deg,
label="Quadrangle",
edgecolor="blue",
facecolor="none",
transform=ax.get_transform("icrs"),
)
ax.add_patch(q)
# Add a rectangle patch (100 degrees by 20 degrees)
r = Rectangle(
(255, -70),
100,
20,
label="Rectangle",
edgecolor="red",
facecolor="none",
linestyle="--",
transform=ax.get_transform("icrs"),
)
ax.add_patch(r)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
return fig
@figure_test
def test_beam_shape_from_args(self, tmp_path):
# Test for adding the beam shape with the beam parameters as arguments
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_beam(
ax,
major=2 * u.arcmin,
minor=1 * u.arcmin,
angle=-30 * u.degree,
corner="bottom right",
frame=True,
borderpad=0.0,
pad=1.0,
color="black",
)
return fig
@figure_test
def test_beam_shape_from_header(self, tmp_path):
# Test for adding the beam shape with the beam parameters from a header
hdr = self.msx_header
hdr["BMAJ"] = (2 * u.arcmin).to(u.degree).value
hdr["BMIN"] = (1 * u.arcmin).to(u.degree).value
hdr["BPA"] = 30.0
wcs = WCS(hdr)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_beam(ax, header=hdr)
return fig
@figure_test
def test_scalebar(self, tmp_path):
# Test for adding a scale bar
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, aspect="equal")
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
add_scalebar(
ax,
2 * u.arcmin,
label="2'",
corner="top right",
borderpad=1.0,
label_top=True,
)
return fig
@figure_test
def test_elliptical_frame(self):
# Regression test for a bug (astropy/astropy#6063) that caused labels to
# be incorrectly simplified.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(5, 3))
fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame)
return fig
@figure_test
def test_hms_labels(self):
        # This tests the appearance of the hms superscripts in tick labels
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.3, 0.2, 0.65, 0.6], projection=WCS(self.twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@figure_test(style={"text.usetex": True})
def test_latex_labels(self):
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes(
[0.3, 0.2, 0.65, 0.6], projection=WCS(self.twoMASS_k_header), aspect="equal"
)
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@figure_test
def test_tick_params(self):
# This is a test to make sure that tick_params works correctly. We try
# and test as much as possible with a single reference image.
wcs = WCS()
wcs.wcs.ctype = ["lon", "lat"]
fig = plt.figure(figsize=(6, 6))
# The first subplot tests:
# - that plt.tick_params works
# - that by default both axes are changed
# - changing the tick direction and appearance, the label appearance and padding
ax = fig.add_subplot(2, 2, 1, projection=wcs)
plt.tick_params(
direction="in",
length=20,
width=5,
pad=6,
labelsize=6,
color="red",
labelcolor="blue",
)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The second subplot tests:
# - that specifying grid parameters doesn't actually cause the grid to
# be shown (as expected)
# - that axis= can be given integer coordinates or their string name
# - that the tick positioning works (bottom/left/top/right)
# Make sure that we can pass things that can index coords
ax = fig.add_subplot(2, 2, 2, projection=wcs)
plt.tick_params(
axis=0,
direction="in",
length=20,
width=5,
pad=4,
labelsize=6,
color="red",
labelcolor="blue",
bottom=True,
grid_color="purple",
)
plt.tick_params(
axis="lat",
direction="out",
labelsize=8,
color="blue",
labelcolor="purple",
left=True,
right=True,
grid_color="red",
)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The third subplot tests:
# - that ax.tick_params works
# - that the grid has the correct settings once shown explicitly
# - that we can use axis='x' and axis='y'
ax = fig.add_subplot(2, 2, 3, projection=wcs)
ax.tick_params(
axis="x",
direction="in",
length=20,
width=5,
pad=20,
labelsize=6,
color="red",
labelcolor="blue",
bottom=True,
grid_color="purple",
)
ax.tick_params(
axis="y",
direction="out",
labelsize=8,
color="blue",
labelcolor="purple",
left=True,
right=True,
grid_color="red",
)
plt.grid()
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
# The final subplot tests:
# - that we can use tick_params on a specific coordinate
# - that the label positioning can be customized
# - that the colors argument works
# - that which='minor' works
ax = fig.add_subplot(2, 2, 4, projection=wcs)
ax.coords[0].tick_params(
length=4,
pad=2,
colors="orange",
labelbottom=True,
labeltop=True,
labelsize=10,
)
ax.coords[1].display_minor_ticks(True)
ax.coords[1].tick_params(which="minor", length=6)
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
return fig
@pytest.fixture
def wave_wcs_1d():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ["WAVE"]
wcs.wcs.cunit = ["m"]
wcs.wcs.crpix = [1]
wcs.wcs.cdelt = [5]
wcs.wcs.crval = [45]
wcs.wcs.set()
return wcs
@figure_test
def test_1d_plot_1d_wcs(wave_wcs_1d):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
(lines,) = ax.plot([10, 12, 14, 12, 10])
ax.set_xlabel("this is the x-axis")
ax.set_ylabel("this is the y-axis")
return fig
@figure_test
def test_1d_plot_1d_wcs_format_unit(wave_wcs_1d):
"""
This test ensures that the format unit is updated and displayed for both
the axis ticks and default axis labels.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wave_wcs_1d)
(lines,) = ax.plot([10, 12, 14, 12, 10])
ax.coords[0].set_format_unit("nm")
return fig
@pytest.fixture
def spatial_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["GLON-TAN", "GLAT-TAN"]
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [15] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
return wcs
@figure_test
def test_1d_plot_2d_wcs_correlated(spatial_wcs_2d):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d, slices=("x", 0))
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
ax.coords["glon"].set_ticks(color="red")
ax.coords["glon"].set_ticklabel(color="red")
ax.coords["glon"].grid(color="red")
ax.coords["glat"].set_ticks(color="blue")
ax.coords["glat"].set_ticklabel(color="blue")
ax.coords["glat"].grid(color="blue")
return fig
@pytest.fixture
def spatial_wcs_2d_small_angle():
"""
This WCS has an almost linear correlation between the pixel and world axes
close to the reference pixel.
"""
wcs = WCS(naxis=2)
wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]
wcs.wcs.crpix = [3.0] * 2
wcs.wcs.cdelt = [10 / 3600, 5 / 3600]
wcs.wcs.crval = [0] * 2
wcs.wcs.set()
return wcs
@pytest.mark.parametrize(
"slices, bottom_axis",
[
# Remember SLLWCS takes slices in array order
(np.s_[0, :], "custom:pos.helioprojective.lon"),
(np.s_[:, 0], "custom:pos.helioprojective.lat"),
],
)
@figure_test
def test_1d_plot_1d_sliced_low_level_wcs(
spatial_wcs_2d_small_angle, slices, bottom_axis
):
"""
    Test that a SLLWCS through a coupled 2D WCS plots correctly as a line.
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle[slices])
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
# Draw to trigger rendering the ticks.
plt.draw()
assert ax.coords[bottom_axis].ticks.get_visible_axes() == ["b"]
return fig
@pytest.mark.parametrize(
"slices, bottom_axis", [(("x", 0), "hpln"), ((0, "x"), "hplt")]
)
@figure_test
def test_1d_plot_put_varying_axis_on_bottom_lon(
spatial_wcs_2d_small_angle, slices, bottom_axis
):
"""
When we plot a 1D slice through spatial axes, we want to put the axis which
actually changes on the bottom.
    For example, with an aligned WCS and pixel grid, if you plot a lon slice
    through a lat axis, you would end up with no ticks on the bottom (the lon
    doesn't change) and a set of lat ticks on the top (the lat does change,
    but it is the correlated axis, not the one you are actually plotting
    against).
"""
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=spatial_wcs_2d_small_angle, slices=slices)
(lines,) = ax.plot([10, 12, 14, 12, 10], "-o", color="orange")
# Draw to trigger rendering the ticks.
plt.draw()
assert ax.coords[bottom_axis].ticks.get_visible_axes() == ["b"]
return fig
@figure_test
def test_allsky_labels_wrap():
# Regression test for a bug that caused some tick labels to not be shown
# when looking at all-sky maps in the case where coord_wrap < 360
fig = plt.figure(figsize=(4, 4))
icen = 0
for ctype in [("GLON-CAR", "GLAT-CAR"), ("HGLN-CAR", "HGLT-CAR")]:
for cen in [0, 90, 180, 270]:
icen += 1
wcs = WCS(naxis=2)
wcs.wcs.ctype = ctype
wcs.wcs.crval = cen, 0
wcs.wcs.crpix = 360.5, 180.5
wcs.wcs.cdelt = -0.5, 0.5
ax = fig.add_subplot(8, 1, icen, projection=wcs)
ax.set_xlim(-0.5, 719.5)
ax.coords[0].set_ticks(spacing=50 * u.deg)
ax.coords[0].set_ticks_position("b")
ax.coords[0].set_auto_axislabel(False)
ax.coords[1].set_auto_axislabel(False)
ax.coords[1].set_ticklabel_visible(False)
ax.coords[1].set_ticks_visible(False)
fig.subplots_adjust(hspace=2, left=0.05, right=0.95, bottom=0.1, top=0.95)
return fig
@figure_test
def test_tickable_gridlines():
wcs = WCS(
{
"naxis": 2,
"naxis1": 360,
"naxis2": 180,
"crpix1": 180.5,
"crpix2": 90.5,
"cdelt1": -1,
"cdelt2": 1,
"ctype1": "RA---CAR",
"ctype2": "DEC--CAR",
}
)
fig = Figure()
ax = fig.add_subplot(projection=wcs)
ax.set_xlim(-0.5, 360 - 0.5)
ax.set_ylim(-0.5, 150 - 0.5)
lon, lat = ax.coords
lon.grid()
lat.grid()
overlay = ax.get_coords_overlay("galactic")
overlay[0].set_ticks(spacing=30 * u.deg)
overlay[1].set_ticks(spacing=30 * u.deg)
# Test both single-character and multi-character names
overlay[1].add_tickable_gridline("g", -30 * u.deg)
overlay[0].add_tickable_gridline("const-glon", 30 * u.deg)
overlay[0].grid(color="magenta")
overlay[0].set_ticklabel_position("gt")
overlay[0].set_ticklabel(color="magenta")
overlay[0].set_axislabel("Galactic longitude", color="magenta")
overlay[1].grid(color="blue")
overlay[1].set_ticklabel_position(("const-glon", "r"))
overlay[1].set_ticklabel(color="blue")
overlay[1].set_axislabel("Galactic latitude", color="blue")
return fig
|
09e956d3a90a054ba8721f36fd981cefbc2738f54031c1d821235c4b612961a7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Slicing mixin to the NDData class.
from astropy import log
from astropy.wcs.wcsapi import (
BaseHighLevelWCS,
BaseLowLevelWCS,
HighLevelWCSWrapper,
SlicedLowLevelWCS,
)
__all__ = ["NDSlicingMixin"]
class NDSlicingMixin:
"""Mixin to provide slicing on objects using the `NDData`
interface.
The ``data``, ``mask``, ``uncertainty`` and ``wcs`` will be sliced, if
set and sliceable. The ``unit`` and ``meta`` will be untouched. The return
will be a reference and not a copy, if possible.
Examples
--------
Using this Mixin with `~astropy.nddata.NDData`:
>>> from astropy.nddata import NDData, NDSlicingMixin
>>> class NDDataSliceable(NDSlicingMixin, NDData):
... pass
Slicing an instance containing data::
>>> nd = NDDataSliceable([1,2,3,4,5])
>>> nd[1:3]
NDDataSliceable([2, 3])
    The other attributes are also sliced, for example the ``mask``::
>>> import numpy as np
>>> mask = np.array([True, False, True, True, False])
>>> nd2 = NDDataSliceable(nd, mask=mask)
>>> nd2slc = nd2[1:3]
>>> nd2slc[nd2slc.mask]
NDDataSliceable([3])
Be aware that changing values of the sliced instance will change the values
of the original::
>>> nd3 = nd2[1:3]
>>> nd3.data[0] = 100
>>> nd2
NDDataSliceable([ 1, 100, 3, 4, 5])
    See Also
--------
NDDataRef
NDDataArray
"""
def __getitem__(self, item):
# Abort slicing if the data is a single scalar.
if self.data.shape == ():
raise TypeError("scalars cannot be sliced.")
# Let the other methods handle slicing.
kwargs = self._slice(item)
return self.__class__(**kwargs)
def _slice(self, item):
"""Collects the sliced attributes and passes them back as `dict`.
It passes uncertainty, mask and wcs to their appropriate ``_slice_*``
method, while ``meta`` and ``unit`` are simply taken from the original.
The data is assumed to be sliceable and is sliced directly.
When possible the return should *not* be a copy of the data but a
reference.
Parameters
----------
item : slice
The slice passed to ``__getitem__``.
Returns
-------
        dict :
            Contains all the attributes after slicing, ready to be used to
            create a new instance via ``self.__class__(**kwargs)`` in
            ``__getitem__``.
"""
kwargs = {}
kwargs["data"] = self.data[item]
# Try to slice some attributes
kwargs["uncertainty"] = self._slice_uncertainty(item)
kwargs["mask"] = self._slice_mask(item)
kwargs["wcs"] = self._slice_wcs(item)
# Attributes which are copied and not intended to be sliced
kwargs["unit"] = self.unit
kwargs["meta"] = self.meta
return kwargs
def _slice_uncertainty(self, item):
if self.uncertainty is None:
return None
try:
return self.uncertainty[item]
except TypeError:
# Catching TypeError in case the object has no __getitem__ method.
# But let IndexError raise.
log.info("uncertainty cannot be sliced.")
return self.uncertainty
def _slice_mask(self, item):
if self.mask is None:
return None
try:
return self.mask[item]
except TypeError:
log.info("mask cannot be sliced.")
return self.mask
def _slice_wcs(self, item):
if self.wcs is None:
return None
try:
llwcs = SlicedLowLevelWCS(self.wcs.low_level_wcs, item)
return HighLevelWCSWrapper(llwcs)
except Exception as err:
self._handle_wcs_slicing_error(err, item)
# Implement this in a method to allow subclasses to customise the error.
def _handle_wcs_slicing_error(self, err, item):
raise ValueError(
f"Slicing the WCS object with the slice '{item}' "
"failed, if you want to slice the NDData object without the WCS, you "
"can remove by setting `NDData.wcs = None` and then retry."
) from err
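# Hedged usage sketch (an addition, not part of the original module): slicing
# an NDData-like object that carries a WCS should return a HighLevelWCSWrapper
# around a SlicedLowLevelWCS, as _slice_wcs implements. NDDataRef is used here
# because it includes this mixin.
#     >>> import numpy as np
#     >>> from astropy.nddata import NDDataRef
#     >>> from astropy.wcs import WCS
#     >>> nd = NDDataRef(np.arange(12).reshape(3, 4), wcs=WCS(naxis=2))
#     >>> type(nd[1:3, 1:3].wcs).__name__
#     'HighLevelWCSWrapper'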
|
1663bab7cf14eb698630cd30a32a2b709d77fbed205b9f25ebd5ea3ab0baf707 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the Arithmetic mixin to the NDData class.
import warnings
from copy import deepcopy
import numpy as np
from astropy.nddata.nduncertainty import NDUncertainty
from astropy.units import dimensionless_unscaled
from astropy.utils import format_doc, sharedmethod
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ["NDArithmeticMixin"]
# Global so it doesn't pollute the class dict unnecessarily:
# Docstring templates for add, subtract, multiply, divide methods.
_arit_doc = """
Performs {name} by evaluating ``self`` {op} ``operand``.
Parameters
----------
operand, operand2 : `NDData`-like instance
If ``operand2`` is ``None`` or not given it will perform the operation
``self`` {op} ``operand``.
If ``operand2`` is given it will perform ``operand`` {op} ``operand2``.
If the method was called on a class rather than on the instance
``operand2`` must be given.
propagate_uncertainties : `bool` or ``None``, optional
If ``None`` the result will have no uncertainty. If ``False`` the
result will have a copied version of the first operand that has an
uncertainty. If ``True`` the result will have a correctly propagated
uncertainty from the uncertainties of the operands but this assumes
that the uncertainties are `NDUncertainty`-like. Default is ``True``.
.. versionchanged:: 1.2
        This parameter must be given as a keyword parameter. Using it as a
        positional parameter is deprecated.
``None`` was added as valid parameter value.
handle_mask : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no mask. If ``'first_found'`` the
result will have a copied version of the first operand that has a
    mask. If it is a callable then the specified callable must
    create the result's ``mask`` and if necessary provide a copy.
Default is `numpy.logical_or`.
.. versionadded:: 1.2
handle_meta : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no meta. If ``'first_found'`` the
result will have a copied version of the first operand that has a
    (not empty) meta. If it is a callable then the specified callable must
    create the result's ``meta`` and if necessary provide a copy.
Default is ``None``.
.. versionadded:: 1.2
compare_wcs : callable, ``'first_found'`` or ``None``, optional
If ``None`` the result will have no wcs and no comparison between
the wcs of the operands is made. If ``'first_found'`` the
result will have a copied version of the first operand that has a
    wcs. If it is a callable then the specified callable must
    compare the ``wcs``. If the comparison succeeds, the result gets a copy
    of the first operand's ``wcs``; if it does not, a ``ValueError`` is
    raised. Default is ``'first_found'``.
.. versionadded:: 1.2
uncertainty_correlation : number or `~numpy.ndarray`, optional
The correlation between the two operands is used for correct error
propagation for correlated data as given in:
https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
Default is 0.
.. versionadded:: 1.2
kwargs :
Any other parameter that should be passed to the callables used.
Returns
-------
result : `~astropy.nddata.NDData`-like
The resulting dataset
Notes
-----
If a ``callable`` is used for ``mask``, ``wcs`` or ``meta`` the
callable must accept the corresponding attributes as its first two
parameters. If the callable also needs additional parameters these can be
given as ``kwargs`` and must be prefixed with ``"mask_"`` (for the mask
callable), ``"wcs_"`` (for the wcs callable) or ``"meta_"`` (for the meta
callable). This prefix is removed before the callable is called.
``"first_found"`` can also be abbreviated with ``"ff"``.
"""
class NDArithmeticMixin:
"""
Mixin class to add arithmetic to an NDData object.
When subclassing, be sure to list the superclasses in the correct order
so that the subclass sees NDData as the main superclass. See
`~astropy.nddata.NDDataArray` for an example.
Notes
-----
This class only aims at covering the most common cases so there are certain
restrictions on the saved attributes::
- ``uncertainty`` : has to be something that has a `NDUncertainty`-like
interface for uncertainty propagation
- ``mask`` : has to be something that can be used by a bitwise ``or``
operation.
        - ``wcs`` : has to implement a way of comparing with ``==`` to allow
          the operation.
    But there is a workaround that allows disabling the handling of a specific
    attribute and instead either setting the result's attribute to ``None`` or
    copying the existing attribute (neglecting the other operand's).
    For example, for uncertainties that do not follow an `NDUncertainty`-like
    interface, you can alter the ``propagate_uncertainties`` parameter in
:meth:`NDArithmeticMixin.add`. ``None`` means that the result will have no
uncertainty, ``False`` means it takes the uncertainty of the first operand
(if this does not exist from the second operand) as the result's
uncertainty. This behavior is also explained in the docstring for the
different arithmetic operations.
Decomposing the units is not attempted, mainly due to the internal mechanics
of `~astropy.units.Quantity`, so the resulting data might have units like
    ``km/m`` if, for example, you divide 100 km by 5 m. This Mixin simply
    adopts that behavior.
Examples
--------
Using this Mixin with `~astropy.nddata.NDData`:
>>> from astropy.nddata import NDData, NDArithmeticMixin
>>> class NDDataWithMath(NDArithmeticMixin, NDData):
... pass
Using it with one operand on an instance::
>>> ndd = NDDataWithMath(100)
>>> ndd.add(20)
NDDataWithMath(120)
    Using it with two operands on an instance::
>>> ndd = NDDataWithMath(-4)
>>> ndd.divide(1, ndd)
NDDataWithMath(-0.25)
    Using it as a classmethod requires two operands::
>>> NDDataWithMath.subtract(5, 4)
NDDataWithMath(1)
"""
def _arithmetic(
self,
operation,
operand,
propagate_uncertainties=True,
handle_mask=np.logical_or,
handle_meta=None,
uncertainty_correlation=0,
compare_wcs="first_found",
**kwds,
):
"""
Base method which calculates the result of the arithmetic operation.
This method determines the result of the arithmetic operation on the
``data`` including their units and then forwards to other methods
to calculate the other properties for the result (like uncertainty).
Parameters
----------
operation : callable
The operation that is performed on the `NDData`. Supported are
`numpy.add`, `numpy.subtract`, `numpy.multiply` and
`numpy.true_divide`.
operand : same type (class) as self
see :meth:`NDArithmeticMixin.add`
propagate_uncertainties : `bool` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
handle_mask : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
handle_meta : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
compare_wcs : callable, ``'first_found'`` or ``None``, optional
see :meth:`NDArithmeticMixin.add`
uncertainty_correlation : ``Number`` or `~numpy.ndarray`, optional
see :meth:`NDArithmeticMixin.add`
kwargs :
Any other parameter that should be passed to the
different :meth:`NDArithmeticMixin._arithmetic_mask` (or wcs, ...)
methods.
Returns
-------
result : ndarray or `~astropy.units.Quantity`
The resulting data as array (in case both operands were without
unit) or as quantity if at least one had a unit.
kwargs : `dict`
The kwargs should contain all the other attributes (besides data
and unit) needed to create a new instance for the result. Creating
the new instance is up to the calling method, for example
:meth:`NDArithmeticMixin.add`.
"""
# Find the appropriate keywords for the appropriate method (not sure
# if data and uncertainty are ever used ...)
kwds2 = {"mask": {}, "meta": {}, "wcs": {}, "data": {}, "uncertainty": {}}
for i in kwds:
splitted = i.split("_", 1)
try:
kwds2[splitted[0]][splitted[1]] = kwds[i]
except KeyError:
raise KeyError(f"Unknown prefix {splitted[0]} for parameter {i}")
kwargs = {}
# First check that the WCS allows the arithmetic operation
if compare_wcs is None:
kwargs["wcs"] = None
elif compare_wcs in ["ff", "first_found"]:
if self.wcs is None:
kwargs["wcs"] = deepcopy(operand.wcs)
else:
kwargs["wcs"] = deepcopy(self.wcs)
else:
kwargs["wcs"] = self._arithmetic_wcs(
operation, operand, compare_wcs, **kwds2["wcs"]
)
        # Then calculate the resulting data (which can, but does not need to,
        # be a quantity)
result = self._arithmetic_data(operation, operand, **kwds2["data"])
# Determine the other properties
if propagate_uncertainties is None:
kwargs["uncertainty"] = None
elif not propagate_uncertainties:
if self.uncertainty is None:
kwargs["uncertainty"] = deepcopy(operand.uncertainty)
else:
kwargs["uncertainty"] = deepcopy(self.uncertainty)
else:
kwargs["uncertainty"] = self._arithmetic_uncertainty(
operation,
operand,
result,
uncertainty_correlation,
**kwds2["uncertainty"],
)
        # If both psf attributes are None, there is nothing to do.
if self.psf is not None or operand.psf is not None:
warnings.warn(
f"Not setting psf attribute during {operation.__name__}.",
AstropyUserWarning,
)
if handle_mask is None:
kwargs["mask"] = None
elif handle_mask in ["ff", "first_found"]:
if self.mask is None:
kwargs["mask"] = deepcopy(operand.mask)
else:
kwargs["mask"] = deepcopy(self.mask)
else:
kwargs["mask"] = self._arithmetic_mask(
operation, operand, handle_mask, **kwds2["mask"]
)
if handle_meta is None:
kwargs["meta"] = None
elif handle_meta in ["ff", "first_found"]:
if not self.meta:
kwargs["meta"] = deepcopy(operand.meta)
else:
kwargs["meta"] = deepcopy(self.meta)
else:
kwargs["meta"] = self._arithmetic_meta(
operation, operand, handle_meta, **kwds2["meta"]
)
# Wrap the individual results into a new instance of the same class.
return result, kwargs
def _arithmetic_data(self, operation, operand, **kwds):
"""
Calculate the resulting data
Parameters
----------
operation : callable
see `NDArithmeticMixin._arithmetic` parameter description.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
kwds :
Additional parameters.
Returns
-------
result_data : ndarray or `~astropy.units.Quantity`
If both operands had no unit the resulting data is a simple numpy
array, but if any of the operands had a unit the return is a
Quantity.
"""
# Do the calculation with or without units
if self.unit is None and operand.unit is None:
result = operation(self.data, operand.data)
elif self.unit is None:
result = operation(
self.data << dimensionless_unscaled, operand.data << operand.unit
)
elif operand.unit is None:
result = operation(
self.data << self.unit, operand.data << dimensionless_unscaled
)
else:
result = operation(self.data << self.unit, operand.data << operand.unit)
return result
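    # Illustrative check (an addition) of the unit behaviour described above:
    # no decomposition is attempted, so dividing km data by m data keeps km / m.
    #     >>> from astropy import units as u
    #     >>> from astropy.nddata import NDDataRef
    #     >>> NDDataRef(100, unit=u.km).divide(NDDataRef(5, unit=u.m)).unit
    #     Unit("km / m")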
def _arithmetic_uncertainty(self, operation, operand, result, correlation, **kwds):
"""
Calculate the resulting uncertainty.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
result : `~astropy.units.Quantity` or `~numpy.ndarray`
The result of :meth:`NDArithmeticMixin._arithmetic_data`.
correlation : number or `~numpy.ndarray`
see :meth:`NDArithmeticMixin.add` parameter description.
kwds :
Additional parameters.
Returns
-------
result_uncertainty : `NDUncertainty` subclass instance or None
The resulting uncertainty already saved in the same `NDUncertainty`
subclass that ``self`` had (or ``operand`` if self had no
uncertainty). ``None`` only if both had no uncertainty.
"""
# Make sure these uncertainties are NDUncertainties so this kind of
# propagation is possible.
if self.uncertainty is not None and not isinstance(
self.uncertainty, NDUncertainty
):
raise TypeError(
"Uncertainty propagation is only defined for "
"subclasses of NDUncertainty."
)
if operand.uncertainty is not None and not isinstance(
operand.uncertainty, NDUncertainty
):
raise TypeError(
"Uncertainty propagation is only defined for "
"subclasses of NDUncertainty."
)
# Now do the uncertainty propagation
# TODO: There is no enforced requirement that actually forbids the
# uncertainty to have negative entries but with correlation the
# sign of the uncertainty DOES matter.
if self.uncertainty is None and operand.uncertainty is None:
# Neither has uncertainties so the result should have none.
return None
elif self.uncertainty is None:
# Create a temporary uncertainty to allow uncertainty propagation
# to yield the correct results. (issue #4152)
self.uncertainty = operand.uncertainty.__class__(None)
result_uncert = self.uncertainty.propagate(
operation, operand, result, correlation
)
# Delete the temporary uncertainty again.
self.uncertainty = None
return result_uncert
elif operand.uncertainty is None:
            # As in the ``self.uncertainty is None`` case, but the other way
            # around.
operand.uncertainty = self.uncertainty.__class__(None)
result_uncert = self.uncertainty.propagate(
operation, operand, result, correlation
)
operand.uncertainty = None
return result_uncert
else:
# Both have uncertainties so just propagate.
return self.uncertainty.propagate(operation, operand, result, correlation)
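    # Minimal sketch (an illustrative addition) of ``uncertainty_correlation``:
    # with correlation 1 the standard deviations of a sum add linearly instead
    # of in quadrature.
    #     >>> from astropy.nddata import NDDataRef, StdDevUncertainty
    #     >>> a = NDDataRef(1.0, uncertainty=StdDevUncertainty(0.3))
    #     >>> b = NDDataRef(1.0, uncertainty=StdDevUncertainty(0.4))
    #     >>> a.add(b).uncertainty.array                             # ~0.5
    #     >>> a.add(b, uncertainty_correlation=1).uncertainty.array  # ~0.7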
def _arithmetic_mask(self, operation, operand, handle_mask, **kwds):
"""
        Calculate the resulting mask.
        This is implemented as the elementwise ``or`` operation if both
        operands have a mask.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
handle_mask : callable
see :meth:`NDArithmeticMixin.add`
kwds :
Additional parameters given to ``handle_mask``.
Returns
-------
result_mask : any type
If only one mask was present this mask is returned.
If neither had a mask ``None`` is returned. Otherwise
``handle_mask`` must create (and copy) the returned mask.
"""
# If only one mask is present we need not bother about any type checks
if self.mask is None and operand.mask is None:
return None
elif self.mask is None:
# Make a copy so there is no reference in the result.
return deepcopy(operand.mask)
elif operand.mask is None:
return deepcopy(self.mask)
else:
            # Now let's calculate the resulting mask (the operation enforces a
            # copy)
return handle_mask(self.mask, operand.mask, **kwds)
def _arithmetic_wcs(self, operation, operand, compare_wcs, **kwds):
"""
Calculate the resulting wcs.
        There is actually no calculation involved, but it is a good place to
        compare the wcs information of both operands. This is currently not
        working properly with `~astropy.wcs.WCS` (which is the suggested class
        for storing as the wcs property), but it will not break it either.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData` instance or subclass
The second operand wrapped in an instance of the same class as
self.
compare_wcs : callable
see :meth:`NDArithmeticMixin.add` parameter description.
kwds :
Additional parameters given to ``compare_wcs``.
Raises
------
ValueError
If ``compare_wcs`` returns ``False``.
Returns
-------
result_wcs : any type
The ``wcs`` of the first operand is returned.
"""
        # OK, not really arithmetic, but we need to check which wcs makes sense
        # for the result, and this is an ideal place to compare the two WCS,
        # too.
        # Assume that the comparison returns None or False in case they
        # are not equal.
if not compare_wcs(self.wcs, operand.wcs, **kwds):
raise ValueError("WCS are not equal.")
return deepcopy(self.wcs)
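    # Sketch of a ``compare_wcs`` callable (an illustrative addition): the
    # strictest possible check is object identity; any falsy return makes
    # _arithmetic_wcs raise a ValueError.
    #     >>> def same_wcs(wcs1, wcs2):
    #     ...     return wcs1 is wcs2
    #     >>> # ndd1.add(ndd2, compare_wcs=same_wcs)  # ndd1, ndd2 hypothetical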
def _arithmetic_meta(self, operation, operand, handle_meta, **kwds):
"""
Calculate the resulting meta.
Parameters
----------
operation : callable
see :meth:`NDArithmeticMixin._arithmetic` parameter description.
By default, the ``operation`` will be ignored.
operand : `NDData`-like instance
The second operand wrapped in an instance of the same class as
self.
handle_meta : callable
see :meth:`NDArithmeticMixin.add`
kwds :
Additional parameters given to ``handle_meta``.
Returns
-------
result_meta : any type
The result of ``handle_meta``.
"""
# Just return what handle_meta does with both of the metas.
return handle_meta(self.meta, operand.meta, **kwds)
@sharedmethod
@format_doc(_arit_doc, name="addition", op="+")
def add(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(np.add, operand, operand2, **kwargs)
@sharedmethod
@format_doc(_arit_doc, name="subtraction", op="-")
def subtract(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.subtract, operand, operand2, **kwargs
)
@sharedmethod
@format_doc(_arit_doc, name="multiplication", op="*")
def multiply(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.multiply, operand, operand2, **kwargs
)
@sharedmethod
@format_doc(_arit_doc, name="division", op="/")
def divide(self, operand, operand2=None, **kwargs):
return self._prepare_then_do_arithmetic(
np.true_divide, operand, operand2, **kwargs
)
@sharedmethod
def _prepare_then_do_arithmetic(
self_or_cls, operation, operand, operand2, **kwargs
):
"""Intermediate method called by public arithmetics (i.e. ``add``)
before the processing method (``_arithmetic``) is invoked.
.. warning::
Do not override this method in subclasses.
This method checks if it was called as instance or as class method and
then wraps the operands and the result from ``_arithmetics`` in the
appropriate subclass.
Parameters
----------
self_or_cls : instance or class
``sharedmethod`` behaves like a normal method if called on the
instance (then this parameter is ``self``) but like a classmethod
when called on the class (then this parameter is ``cls``).
operation : callable
The operation (normally a numpy-ufunc) that represents the
appropriate action.
operand, operand2, kwargs :
See for example ``add``.
Returns
-------
result : `~astropy.nddata.NDData`-like
Depending on how this method was called, either ``self_or_cls``
(called on the class) or ``self_or_cls.__class__`` (called on an
instance) is the NDData subclass used as the wrapper for the result.
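Notes
-----
Illustrative only (assuming a ``CCDData``-like subclass)::

    ccd.add(other)           # instance call; operand2 may be omitted
    CCDData.add(ccd, other)  # class call; both operands are required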
"""
# DO NOT OVERRIDE THIS METHOD IN SUBCLASSES.
if isinstance(self_or_cls, NDArithmeticMixin):
# True means it was called on the instance, so self_or_cls is
# a reference to self
cls = self_or_cls.__class__
if operand2 is None:
# Only one operand was given. Set operand2 to operand and
# operand to self so that we call the appropriate method of the
# operand.
operand2 = operand
operand = self_or_cls
else:
# Convert the first operand to the class of this method.
# This is important so that the correct ``_arithmetic`` is always
# called later.
operand = cls(operand)
else:
# It was used as classmethod so self_or_cls represents the cls
cls = self_or_cls
# It was called on the class so we expect two operands!
if operand2 is None:
raise TypeError(
"operand2 must be given when the method isn't "
"called on an instance."
)
# Convert to this class. See above comment why.
operand = cls(operand)
# At this point operand, operand2, kwargs and cls are determined.
# Let's try to convert operand2 to the class of operand to allow for
# arithmetic operations with numbers, lists, numpy arrays, numpy masked
# arrays, astropy quantities, masked quantities and other subclasses
# of NDData.
operand2 = cls(operand2)
# Now call the _arithmetic method to do the arithmetic.
result, init_kwds = operand._arithmetic(operation, operand2, **kwargs)
# Return a new instance of the class based on the result.
return cls(result, **init_kwds)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the I/O mixin for the NDData class.
from astropy.io import registry
__all__ = ["NDIOMixin"]
__doctest_skip__ = ["NDDataRead", "NDDataWrite"]
class NDDataRead(registry.UnifiedReadWrite):
"""Read and parse gridded N-dimensional data and return as an NDData-derived
object.
This function provides the NDDataBase interface to the astropy unified I/O
layer. This allows easily reading a file in the supported data formats,
for example::
>>> from astropy.nddata import CCDData
>>> dat = CCDData.read('image.fits')
Get help on the available readers for ``CCDData`` using the ``help()`` method::
>>> CCDData.read.help() # Get help reading CCDData and list supported formats
>>> CCDData.read.help('fits') # Get detailed help on CCDData FITS reader
>>> CCDData.read.list_formats() # Print list of available formats
See also:
- https://docs.astropy.org/en/stable/nddata
- https://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data reader. If supplied the
first argument is the input filename.
format : str, optional
File format specifier.
cache : bool, optional
Caching behavior if file is a URL.
**kwargs : dict, optional
Keyword arguments passed through to data reader.
Returns
-------
out : `NDData` subclass
NDData-based object corresponding to the file contents
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, "read", registry=None)
# uses default global registry
def __call__(self, *args, **kwargs):
return self.registry.read(self._cls, *args, **kwargs)
class NDDataWrite(registry.UnifiedReadWrite):
"""Write this CCDData object out in the specified format.
This function provides the NDData interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.nddata import CCDData
>>> dat = CCDData(np.zeros((12, 12)), unit='adu') # 12x12 image of zeros
>>> dat.write('zeros.fits')
Get help on the available writers for ``CCDData`` using the ``help()`` method::
>>> CCDData.write.help() # Get help writing CCDData and list supported formats
>>> CCDData.write.help('fits') # Get detailed help on CCDData FITS writer
>>> CCDData.write.list_formats() # Print list of available formats
See also:
- https://docs.astropy.org/en/stable/nddata
- https://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data writer. If supplied the
first argument is the output filename.
format : str, optional
File format specifier.
**kwargs : dict, optional
Keyword arguments passed through to data writer.
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, "write", registry=None)
# uses default global registry
def __call__(self, *args, **kwargs):
self.registry.write(self._instance, *args, **kwargs)
class NDIOMixin:
"""
Mixin class to connect NDData to the astropy input/output registry.
This mixin adds two methods to its subclasses, ``read`` and ``write``.
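For example (illustrative; ``MyData`` is hypothetical, and reading
requires a format registered for it in the unified I/O registry)::

    class MyData(NDIOMixin, NDData):
        pass

    obj = MyData.read('observation.fits', format='fits')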
"""
read = registry.UnifiedReadWriteMethod(NDDataRead)
write = registry.UnifiedReadWriteMethod(NDDataWrite)
|
2de25808385032793ca8f27275531bc9e0cf9a94a055568d1fb5622f42e46b02 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import textwrap
import numpy as np
import pytest
from astropy import log
from astropy import units as u
from astropy.io import fits
from astropy.nddata import _testing as nd_testing
from astropy.nddata.ccddata import CCDData
from astropy.nddata.nduncertainty import (
InverseVariance,
MissingDataAssociationException,
StdDevUncertainty,
VarianceUncertainty,
)
from astropy.table import Table
from astropy.utils import NumpyRNGContext
from astropy.utils.data import (
get_pkg_data_contents,
get_pkg_data_filename,
get_pkg_data_filenames,
)
from astropy.utils.exceptions import AstropyWarning
from astropy.wcs import WCS, FITSFixedWarning
DEFAULT_DATA_SIZE = 100
with NumpyRNGContext(123):
_random_array = np.random.normal(size=[DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE])
_random_psf = np.random.normal(size=(20, 20))
@pytest.fixture
def home_is_tmpdir(tmp_path, monkeypatch, request):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables will be temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path))
def create_ccd_data():
"""
Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE
with units of ADU.
"""
data = _random_array.copy()
fake_meta = {"my_key": 42, "your_key": "not 42"}
ccd = CCDData(data, unit=u.adu)
ccd.header = fake_meta
return ccd
def test_ccddata_empty():
with pytest.raises(TypeError):
CCDData() # empty initializer should fail
def test_ccddata_must_have_unit():
with pytest.raises(ValueError):
CCDData(np.zeros([2, 2]))
def test_ccddata_unit_cannot_be_set_to_none():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
ccd_data.unit = None
def test_ccddata_meta_header_conflict():
with pytest.raises(ValueError, match=".*can't have both header and meta.*"):
CCDData([1, 2, 3], unit="", meta={1: 1}, header={2: 2})
def test_ccddata_simple():
ccd_data = create_ccd_data()
assert ccd_data.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
assert ccd_data.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
assert ccd_data.dtype == np.dtype(float)
def test_ccddata_init_with_string_electron_unit():
ccd = CCDData(np.zeros([2, 2]), unit="electron")
assert ccd.unit is u.electron
def test_initialize_from_FITS(tmp_path):
ccd_data = create_ccd_data()
hdu = fits.PrimaryHDU(ccd_data)
hdulist = fits.HDUList([hdu])
filename = str(tmp_path / "afile.fits")
hdulist.writeto(filename)
cd = CCDData.read(filename, unit=u.electron)
assert cd.shape == (DEFAULT_DATA_SIZE, DEFAULT_DATA_SIZE)
assert cd.size == DEFAULT_DATA_SIZE * DEFAULT_DATA_SIZE
assert np.issubdtype(cd.data.dtype, np.floating)
for k, v in hdu.header.items():
assert cd.meta[k] == v
def test_initialize_from_fits_with_unit_in_header(tmp_path):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header["bunit"] = u.adu.to_string()
filename = str(tmp_path / "afile.fits")
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
# An explicit unit in the read overrides any unit in the FITS file
ccd2 = CCDData.read(filename, unit="photon")
assert ccd2.unit is u.photon
def test_initialize_from_fits_with_ADU_in_header(tmp_path):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header["bunit"] = "ADU"
filename = str(tmp_path / "afile.fits")
hdu.writeto(filename)
ccd = CCDData.read(filename)
# ccd should pick up the unit adu from the fits header...did it?
assert ccd.unit is u.adu
def test_initialize_from_fits_with_invalid_unit_in_header(tmp_path):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header["bunit"] = "definetely-not-a-unit"
filename = str(tmp_path / "afile.fits")
hdu.writeto(filename)
with pytest.raises(ValueError):
CCDData.read(filename)
def test_initialize_from_fits_with_technically_invalid_but_not_really(tmp_path):
hdu = fits.PrimaryHDU(np.ones((2, 2)))
hdu.header["bunit"] = "ELECTRONS/S"
filename = str(tmp_path / "afile.fits")
hdu.writeto(filename)
ccd = CCDData.read(filename)
assert ccd.unit == u.electron / u.s
def test_initialize_from_fits_with_data_in_different_extension(tmp_path):
fake_img = np.arange(4).reshape(2, 2)
hdu1 = fits.PrimaryHDU()
hdu2 = fits.ImageHDU(fake_img)
hdus = fits.HDUList([hdu1, hdu2])
filename = str(tmp_path / "afile.fits")
hdus.writeto(filename)
ccd = CCDData.read(filename, unit="adu")
# ccd should contain the data from the image extension...does it?
np.testing.assert_array_equal(ccd.data, fake_img)
# check that the header is the combined header
assert hdu2.header + hdu1.header == ccd.header
def test_initialize_from_fits_with_extension(tmp_path):
fake_img1 = np.zeros([2, 2])
fake_img2 = np.arange(4).reshape(2, 2)
hdu0 = fits.PrimaryHDU()
hdu1 = fits.ImageHDU(fake_img1, name="first", ver=1)
hdu2 = fits.ImageHDU(fake_img2, name="second", ver=1)
hdus = fits.HDUList([hdu0, hdu1, hdu2])
filename = str(tmp_path / "afile.fits")
hdus.writeto(filename)
ccd = CCDData.read(filename, hdu=2, unit="adu")
# ccd should contain the data from the requested extension...does it?
np.testing.assert_array_equal(ccd.data, fake_img2)
# check hdu string parameter
ccd = CCDData.read(filename, hdu="second", unit="adu")
np.testing.assert_array_equal(ccd.data, fake_img2)
# check hdu tuple parameter
ccd = CCDData.read(filename, hdu=("second", 1), unit="adu")
np.testing.assert_array_equal(ccd.data, fake_img2)
def test_write_unit_to_hdu():
ccd_data = create_ccd_data()
ccd_unit = ccd_data.unit
hdulist = ccd_data.to_hdu()
assert "bunit" in hdulist[0].header
assert hdulist[0].header["bunit"] == ccd_unit.to_string()
def test_initialize_from_FITS_bad_keyword_raises_error(tmp_path):
# There are two fits.open keywords that are not permitted in ccdproc:
# do_not_scale_image_data and scale_back
ccd_data = create_ccd_data()
filename = str(tmp_path / "test.fits")
ccd_data.write(filename)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit, do_not_scale_image_data=True)
with pytest.raises(TypeError):
CCDData.read(filename, unit=ccd_data.unit, scale_back=True)
def test_ccddata_writer(tmp_path):
ccd_data = create_ccd_data()
filename = str(tmp_path / "test.fits")
ccd_data.write(filename)
ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
def test_ccddata_writer_as_imagehdu(tmp_path):
ccd_data = create_ccd_data()
filename = str(tmp_path / "test.fits")
ccd_data.write(filename, as_image_hdu=False)
with fits.open(filename) as hdus:
assert len(hdus) == 1
filename = str(tmp_path / "test2.fits")
ccd_data.write(filename, as_image_hdu=True)
with fits.open(filename) as hdus:
assert len(hdus) == 2
assert isinstance(hdus[1], fits.ImageHDU)
def test_ccddata_meta_is_case_sensitive():
ccd_data = create_ccd_data()
key = "SoMeKEY"
ccd_data.meta[key] = 10
assert key.lower() not in ccd_data.meta
assert key.upper() not in ccd_data.meta
assert key in ccd_data.meta
def test_ccddata_meta_is_not_fits_header():
ccd_data = create_ccd_data()
ccd_data.meta = {"OBSERVER": "Edwin Hubble"}
assert not isinstance(ccd_data.meta, fits.Header)
def test_fromMEF(tmp_path):
ccd_data = create_ccd_data()
hdu = fits.PrimaryHDU(ccd_data)
hdu2 = fits.PrimaryHDU(2 * ccd_data.data)
hdulist = fits.HDUList(hdu)
hdulist.append(hdu2)
filename = str(tmp_path / "afile.fits")
hdulist.writeto(filename)
# by default, we read from the first extension
cd = CCDData.read(filename, unit=u.electron)
np.testing.assert_array_equal(cd.data, ccd_data.data)
# but reading from the second should work too
cd = CCDData.read(filename, hdu=1, unit=u.electron)
np.testing.assert_array_equal(cd.data, 2 * ccd_data.data)
def test_metafromheader():
hdr = fits.header.Header()
hdr["observer"] = "Edwin Hubble"
hdr["exptime"] = "3600"
d1 = CCDData(np.ones((5, 5)), meta=hdr, unit=u.electron)
assert d1.meta["OBSERVER"] == "Edwin Hubble"
assert d1.header["OBSERVER"] == "Edwin Hubble"
def test_metafromdict():
dic = {"OBSERVER": "Edwin Hubble", "EXPTIME": 3600}
d1 = CCDData(np.ones((5, 5)), meta=dic, unit=u.electron)
assert d1.meta["OBSERVER"] == "Edwin Hubble"
def test_header2meta():
hdr = fits.header.Header()
hdr["observer"] = "Edwin Hubble"
hdr["exptime"] = "3600"
d1 = CCDData(np.ones((5, 5)), unit=u.electron)
d1.header = hdr
assert d1.meta["OBSERVER"] == "Edwin Hubble"
assert d1.header["OBSERVER"] == "Edwin Hubble"
def test_metafromstring_fail():
hdr = "this is not a valid header"
with pytest.raises(TypeError):
CCDData(np.ones((5, 5)), meta=hdr, unit=u.adu)
def test_setting_bad_uncertainty_raises_error():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
# Uncertainty is supposed to be an instance of NDUncertainty
ccd_data.uncertainty = 10
def test_setting_uncertainty_with_array():
ccd_data = create_ccd_data()
ccd_data.uncertainty = None
fake_uncertainty = np.sqrt(np.abs(ccd_data.data))
ccd_data.uncertainty = fake_uncertainty.copy()
np.testing.assert_array_equal(ccd_data.uncertainty.array, fake_uncertainty)
def test_setting_uncertainty_wrong_shape_raises_error():
ccd_data = create_ccd_data()
with pytest.raises(ValueError):
ccd_data.uncertainty = np.zeros([3, 4])
def test_to_hdu():
ccd_data = create_ccd_data()
ccd_data.meta = {"observer": "Edwin Hubble"}
fits_hdulist = ccd_data.to_hdu()
assert isinstance(fits_hdulist, fits.HDUList)
for k, v in ccd_data.meta.items():
assert fits_hdulist[0].header[k] == v
np.testing.assert_array_equal(fits_hdulist[0].data, ccd_data.data)
def test_to_hdu_as_imagehdu():
ccd_data = create_ccd_data()
fits_hdulist = ccd_data.to_hdu(as_image_hdu=False)
assert isinstance(fits_hdulist[0], fits.PrimaryHDU)
fits_hdulist = ccd_data.to_hdu(as_image_hdu=True)
assert isinstance(fits_hdulist[0], fits.ImageHDU)
def test_copy():
ccd_data = create_ccd_data()
ccd_copy = ccd_data.copy()
np.testing.assert_array_equal(ccd_copy.data, ccd_data.data)
assert ccd_copy.unit == ccd_data.unit
assert ccd_copy.meta == ccd_data.meta
@pytest.mark.parametrize(
"operation,affects_uncertainty",
[
("multiply", True),
("divide", True),
],
)
@pytest.mark.parametrize(
"operand",
[
2.0,
2 * u.dimensionless_unscaled,
2 * u.photon / u.adu,
],
)
@pytest.mark.parametrize("with_uncertainty", [True, False])
def test_mult_div_overload(operand, with_uncertainty, operation, affects_uncertainty):
ccd_data = create_ccd_data()
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = getattr(ccd_data, operation)
np_method = getattr(np, operation)
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert result.uncertainty is None or isinstance(
result.uncertainty, StdDevUncertainty
)
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(
result.uncertainty.array,
np_method(ccd_data.uncertainty.array, op_value),
)
else:
np.testing.assert_array_equal(
result.uncertainty.array, ccd_data.uncertainty.array
)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
# Need the "1 *" below to force arguments to be Quantity to work around
# astropy/astropy#2377
expected_unit = np_method(1 * ccd_data.unit, 1 * operand.unit).unit
assert result.unit == expected_unit
else:
assert result.unit == ccd_data.unit
@pytest.mark.parametrize(
"operation,affects_uncertainty",
[
("add", False),
("subtract", False),
],
)
@pytest.mark.parametrize(
"operand,expect_failure",
[
(2.0, u.UnitsError), # fail--units don't match image
(2 * u.dimensionless_unscaled, u.UnitsError), # same
(2 * u.adu, False),
],
)
@pytest.mark.parametrize("with_uncertainty", [True, False])
def test_add_sub_overload(
operand, expect_failure, with_uncertainty, operation, affects_uncertainty
):
ccd_data = create_ccd_data()
if with_uncertainty:
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
method = getattr(ccd_data, operation)
np_method = getattr(np, operation)
if expect_failure:
with pytest.raises(expect_failure):
result = method(operand)
return
else:
result = method(operand)
assert result is not ccd_data
assert isinstance(result, CCDData)
assert result.uncertainty is None or isinstance(
result.uncertainty, StdDevUncertainty
)
try:
op_value = operand.value
except AttributeError:
op_value = operand
np.testing.assert_array_equal(result.data, np_method(ccd_data.data, op_value))
if with_uncertainty:
if affects_uncertainty:
np.testing.assert_array_equal(
result.uncertainty.array,
np_method(ccd_data.uncertainty.array, op_value),
)
else:
np.testing.assert_array_equal(
result.uncertainty.array, ccd_data.uncertainty.array
)
else:
assert result.uncertainty is None
if isinstance(operand, u.Quantity):
assert result.unit == ccd_data.unit and result.unit == operand.unit
else:
assert result.unit == ccd_data.unit
def test_arithmetic_overload_fails():
ccd_data = create_ccd_data()
with pytest.raises(TypeError):
ccd_data.multiply("five")
with pytest.raises(TypeError):
ccd_data.divide("five")
with pytest.raises(TypeError):
ccd_data.add("five")
with pytest.raises(TypeError):
ccd_data.subtract("five")
def test_arithmetic_no_wcs_compare():
ccd = CCDData(np.ones((10, 10)), unit="")
assert ccd.add(ccd, compare_wcs=None).wcs is None
assert ccd.subtract(ccd, compare_wcs=None).wcs is None
assert ccd.multiply(ccd, compare_wcs=None).wcs is None
assert ccd.divide(ccd, compare_wcs=None).wcs is None
def test_arithmetic_with_wcs_compare():
def return_true(_, __):
return True
wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=2)
ccd1 = CCDData(np.ones((10, 10)), unit="", wcs=wcs1)
ccd2 = CCDData(np.ones((10, 10)), unit="", wcs=wcs2)
nd_testing.assert_wcs_seem_equal(ccd1.add(ccd2, compare_wcs=return_true).wcs, wcs1)
nd_testing.assert_wcs_seem_equal(
ccd1.subtract(ccd2, compare_wcs=return_true).wcs, wcs1
)
nd_testing.assert_wcs_seem_equal(
ccd1.multiply(ccd2, compare_wcs=return_true).wcs, wcs1
)
nd_testing.assert_wcs_seem_equal(
ccd1.divide(ccd2, compare_wcs=return_true).wcs, wcs1
)
def test_arithmetic_with_wcs_compare_fail():
def return_false(_, __):
return False
ccd1 = CCDData(np.ones((10, 10)), unit="", wcs=WCS())
ccd2 = CCDData(np.ones((10, 10)), unit="", wcs=WCS())
with pytest.raises(ValueError):
ccd1.add(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.subtract(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.multiply(ccd2, compare_wcs=return_false)
with pytest.raises(ValueError):
ccd1.divide(ccd2, compare_wcs=return_false)
def test_arithmetic_overload_ccddata_operand():
ccd_data = create_ccd_data()
ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
operand = ccd_data.copy()
result = ccd_data.add(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data, 2 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.subtract(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data, 0 * ccd_data.data)
np.testing.assert_array_almost_equal_nulp(
result.uncertainty.array, np.sqrt(2) * ccd_data.uncertainty.array
)
result = ccd_data.multiply(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data, ccd_data.data**2)
expected_uncertainty = (
np.sqrt(2) * np.abs(ccd_data.data) * ccd_data.uncertainty.array
)
np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty)
result = ccd_data.divide(operand)
assert len(result.meta) == 0
np.testing.assert_array_equal(result.data, np.ones_like(ccd_data.data))
expected_uncertainty = (
np.sqrt(2) / np.abs(ccd_data.data) * ccd_data.uncertainty.array
)
np.testing.assert_allclose(result.uncertainty.array, expected_uncertainty)
def test_arithmetic_overload_differing_units():
a = np.array([1, 2, 3]) * u.m
b = np.array([1, 2, 3]) * u.cm
ccddata = CCDData(a)
# TODO: Could also be parametrized.
res = ccddata.add(b)
np.testing.assert_array_almost_equal(res.data, np.add(a, b).value)
assert res.unit == np.add(a, b).unit
res = ccddata.subtract(b)
np.testing.assert_array_almost_equal(res.data, np.subtract(a, b).value)
assert res.unit == np.subtract(a, b).unit
res = ccddata.multiply(b)
np.testing.assert_array_almost_equal(res.data, np.multiply(a, b).value)
assert res.unit == np.multiply(a, b).unit
res = ccddata.divide(b)
np.testing.assert_array_almost_equal(res.data, np.divide(a, b).value)
assert res.unit == np.divide(a, b).unit
def test_arithmetic_add_with_array():
ccd = CCDData(np.ones((3, 3)), unit="")
res = ccd.add(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 2, 3]] * 3)
ccd = CCDData(np.ones((3, 3)), unit="adu")
with pytest.raises(ValueError):
ccd.add(np.arange(3))
def test_arithmetic_subtract_with_array():
ccd = CCDData(np.ones((3, 3)), unit="")
res = ccd.subtract(np.arange(3))
np.testing.assert_array_equal(res.data, [[1, 0, -1]] * 3)
ccd = CCDData(np.ones((3, 3)), unit="adu")
with pytest.raises(ValueError):
ccd.subtract(np.arange(3))
def test_arithmetic_multiply_with_array():
ccd = CCDData(np.ones((3, 3)) * 3, unit=u.m)
res = ccd.multiply(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[6, 6, 6]] * 3)
assert res.unit == ccd.unit
def test_arithmetic_divide_with_array():
ccd = CCDData(np.ones((3, 3)), unit=u.m)
res = ccd.divide(np.ones((3, 3)) * 2)
np.testing.assert_array_equal(res.data, [[0.5, 0.5, 0.5]] * 3)
assert res.unit == ccd.unit
def test_history_preserved_if_metadata_is_fits_header(tmp_path):
fake_img = np.zeros([2, 2])
hdu = fits.PrimaryHDU(fake_img)
hdu.header["history"] = "one"
hdu.header["history"] = "two"
hdu.header["history"] = "three"
assert len(hdu.header["history"]) == 3
tmp_file = str(tmp_path / "temp.fits")
hdu.writeto(tmp_file)
ccd_read = CCDData.read(tmp_file, unit="adu")
assert ccd_read.header["history"] == hdu.header["history"]
def test_info_logged_if_unit_in_fits_header(tmp_path):
ccd_data = create_ccd_data()
tmpfile = str(tmp_path / "temp.fits")
ccd_data.write(tmpfile)
log.setLevel("INFO")
explicit_unit_name = "photon"
with log.log_to_list() as log_list:
_ = CCDData.read(tmpfile, unit=explicit_unit_name)
assert explicit_unit_name in log_list[0].message
def test_wcs_attribute(tmp_path):
"""
Check that the WCS attribute gets added to the header, and that if a
CCDData object is created from a FITS file with a header and the WCS
attribute is modified, then when the CCDData object is turned back into
an HDU, the WCS object overwrites the old WCS information in the header.
"""
ccd_data = create_ccd_data()
tmpfile = str(tmp_path / "temp.fits")
# This wcs example is taken from the astropy.wcs docs.
wcs = WCS(naxis=2)
wcs.wcs.crpix = np.array(ccd_data.shape) / 2
wcs.wcs.cdelt = np.array([-0.066667, 0.066667])
wcs.wcs.crval = [0, -90]
wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"]
wcs.wcs.set_pv([(2, 1, 45.0)])
ccd_data.header = ccd_data.to_hdu()[0].header
ccd_data.header.extend(wcs.to_header(), useblanks=False)
ccd_data.write(tmpfile)
# Get the header length after it has been extended by the WCS keywords
original_header_length = len(ccd_data.header)
ccd_new = CCDData.read(tmpfile)
# WCS attribute should be set for ccd_new
assert ccd_new.wcs is not None
# WCS attribute should be equal to wcs above.
assert ccd_new.wcs.wcs == wcs.wcs
# Converting CCDData object with wcs to an hdu shouldn't
# create duplicate wcs-related entries in the header.
ccd_new_hdu = ccd_new.to_hdu()[0]
assert len(ccd_new_hdu.header) == original_header_length
# Making a CCDData with WCS (but not WCS in the header) should lead to
# WCS information in the header when it is converted to an HDU.
ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu")
hdu = ccd_wcs_not_in_header.to_hdu()[0]
wcs_header = wcs.to_header()
for k in wcs_header.keys():
# Skip these keywords if they are in the WCS header because they are
# not WCS-specific.
if k in ["", "COMMENT", "HISTORY"]:
continue
# No keyword from the WCS should be in the header.
assert k not in ccd_wcs_not_in_header.header
# Every keyword in the WCS should be in the header of the HDU
assert hdu.header[k] == wcs_header[k]
# Now check that if the WCS of a CCDData is modified and the CCDData is
# then converted to an HDU, the WCS keywords in the header are
# overwritten with the appropriate keywords from the modified WCS.
#
# ccd_new has a WCS and WCS keywords in the header, so try modifying
# the WCS.
ccd_new.wcs.wcs.cdelt *= 2
ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0]
assert ccd_new_hdu_mod_wcs.header["CDELT1"] == ccd_new.wcs.wcs.cdelt[0]
assert ccd_new_hdu_mod_wcs.header["CDELT2"] == ccd_new.wcs.wcs.cdelt[1]
def test_wcs_keywords_removed_from_header():
"""
Test, for the file included with the nddata tests, that WCS keywords are
properly removed from the header.
"""
from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
data_file = get_pkg_data_filename("data/sip-wcs.fits")
ccd = CCDData.read(data_file)
with pytest.warns(
AstropyWarning, match=r"Some non-standard WCS keywords were excluded"
):
wcs_header = ccd.wcs.to_header()
assert not (set(wcs_header) & set(ccd.meta) - keepers)
# Make sure that exceptions are not raised when trying to remove missing
# keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'.
data_file1 = get_pkg_data_filename("../../io/fits/tests/data/o4sp040b0_raw.fits")
with pytest.warns(FITSFixedWarning, match=r"'unitfix' made the change"):
ccd = CCDData.read(data_file1, unit="count")
def test_wcs_SIP_coefficient_keywords_removed():
# If SIP polynomials are present, check that no more polynomial
# coefficients remain in the header. See #8598
# The SIP paper is ambiguous as to whether keywords like
# A_0_0 can appear in the header for a 2nd order or higher
# polynomial. The paper clearly says that the corrections
# are only for quadratic or higher order, so A_0_0 and the like
# should be zero if they are present, but they apparently can be
# there (or at least astrometry.net produces them).
# astropy WCS does not write those coefficients, so they were
# not being removed from the header even though they are WCS-related.
data_file = get_pkg_data_filename("data/sip-wcs.fits")
test_keys = ["A_0_0", "B_0_1"]
# Make sure the keywords added to this file for testing are there
with fits.open(data_file) as hdu:
for key in test_keys:
assert key in hdu[0].header
ccd = CCDData.read(data_file)
# Now the test...the two keywords above should have been removed.
for key in test_keys:
assert key not in ccd.header
@pytest.mark.filterwarnings("ignore")
def test_wcs_keyword_removal_for_wcs_test_files():
"""
Test, for the WCS test files, that keyword removal works as
expected. Those cover a much broader range of WCS types than
test_wcs_keywords_removed_from_header.
Includes a regression test for #8597.
"""
from astropy.nddata.ccddata import (
_KEEP_THESE_KEYWORDS_IN_HEADER,
_CDs,
_generate_wcs_and_update_header,
_PCs,
)
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
wcs_headers = get_pkg_data_filenames("../../wcs/tests/data", pattern="*.hdr")
for hdr in wcs_headers:
# Skip the files that are expected to be bad...
if (
"invalid" in hdr
or "nonstandard" in hdr
or "segfault" in hdr
or "chandra-pixlist-wcs" in hdr
):
continue
header_string = get_pkg_data_contents(hdr)
header = fits.Header.fromstring(header_string)
wcs = WCS(header_string)
header_from_wcs = wcs.to_header(relax=True)
new_header, new_wcs = _generate_wcs_and_update_header(header)
new_wcs_header = new_wcs.to_header(relax=True)
# Make sure all of the WCS-related keywords generated by astropy
# have been removed.
assert not (set(new_header) & set(new_wcs_header) - keepers)
# Check that new_header contains no remaining WCS information.
# Specifically, check that
# 1. The combination of new_header and new_wcs does not contain
# both PCi_j and CDi_j keywords. See #8597.
# Check for 1
final_header = new_header + new_wcs_header
final_header_set = set(final_header)
if _PCs & final_header_set:
assert not (_CDs & final_header_set)
elif _CDs & final_header_set:
assert not (_PCs & final_header_set)
# Check that the new wcs is the same as the old.
for k, v in new_wcs_header.items():
if isinstance(v, str):
assert header_from_wcs[k] == v
else:
np.testing.assert_almost_equal(header_from_wcs[k], v)
def test_read_wcs_not_creatable(tmp_path):
# The following Header can't be converted to a WCS object. See also #6499.
hdr_txt_example_WCS = textwrap.dedent(
"""
SIMPLE = T / Fits standard
BITPIX = 16 / Bits per pixel
NAXIS = 2 / Number of axes
NAXIS1 = 1104 / Axis length
NAXIS2 = 4241 / Axis length
CRVAL1 = 164.98110962 / Physical value of the reference pixel X
CRVAL2 = 44.34089279 / Physical value of the reference pixel Y
CRPIX1 = -34.0 / Reference pixel in X (pixel)
CRPIX2 = 2041.0 / Reference pixel in Y (pixel)
CDELT1 = 0.10380000 / X Scale projected on detector (#/pix)
CDELT2 = 0.10380000 / Y Scale projected on detector (#/pix)
CTYPE1 = 'RA---TAN' / Pixel coordinate system
CTYPE2 = 'WAVELENGTH' / Pixel coordinate system
CUNIT1 = 'degree ' / Units used in both CRVAL1 and CDELT1
CUNIT2 = 'nm ' / Units used in both CRVAL2 and CDELT2
CD1_1 = 0.20760000 / Pixel Coordinate translation matrix
CD1_2 = 0.00000000 / Pixel Coordinate translation matrix
CD2_1 = 0.00000000 / Pixel Coordinate translation matrix
CD2_2 = 0.10380000 / Pixel Coordinate translation matrix
C2YPE1 = 'RA---TAN' / Pixel coordinate system
C2YPE2 = 'DEC--TAN' / Pixel coordinate system
C2NIT1 = 'degree ' / Units used in both C2VAL1 and C2ELT1
C2NIT2 = 'degree ' / Units used in both C2VAL2 and C2ELT2
RADECSYS= 'FK5 ' / The equatorial coordinate system
"""
)
hdr = fits.Header.fromstring(hdr_txt_example_WCS, sep="\n")
hdul = fits.HDUList([fits.PrimaryHDU(np.ones((4241, 1104)), header=hdr)])
filename = str(tmp_path / "afile.fits")
hdul.writeto(filename)
# The hdr cannot be converted to a WCS object because of an
# InconsistentAxisTypesError but it should still open the file
ccd = CCDData.read(filename, unit="adu")
assert ccd.wcs is None
def test_header():
ccd_data = create_ccd_data()
a = {"Observer": "Hubble"}
ccd = CCDData(ccd_data, header=a)
assert ccd.meta == a
def test_wcs_arithmetic():
ccd_data = create_ccd_data()
wcs = WCS(naxis=2)
ccd_data.wcs = wcs
result = ccd_data.multiply(1.0)
nd_testing.assert_wcs_seem_equal(result.wcs, wcs)
@pytest.mark.parametrize("operation", ["multiply", "divide", "add", "subtract"])
def test_wcs_arithmetic_ccd(operation):
ccd_data = create_ccd_data()
ccd_data2 = ccd_data.copy()
ccd_data.wcs = WCS(naxis=2)
method = getattr(ccd_data, operation)
result = method(ccd_data2)
nd_testing.assert_wcs_seem_equal(result.wcs, ccd_data.wcs)
assert ccd_data2.wcs is None
def test_wcs_sip_handling():
"""
Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive
a roundtrip unchanged.
"""
data_file = get_pkg_data_filename("data/sip-wcs.fits")
def check_wcs_ctypes(header):
expected_wcs_ctypes = {"CTYPE1": "RA---TAN-SIP", "CTYPE2": "DEC--TAN-SIP"}
return [header[k] == v for k, v in expected_wcs_ctypes.items()]
ccd_original = CCDData.read(data_file)
# After initialization the keywords should be in the WCS, not in the
# meta.
with fits.open(data_file) as raw:
good_ctype = check_wcs_ctypes(raw[0].header)
assert all(good_ctype)
ccd_new = ccd_original.to_hdu()
good_ctype = check_wcs_ctypes(ccd_new[0].header)
assert all(good_ctype)
# Try converting to header with wcs_relax=False and
# the header should contain the CTYPE keywords without
# the -SIP
ccd_no_relax = ccd_original.to_hdu(wcs_relax=False)
good_ctype = check_wcs_ctypes(ccd_no_relax[0].header)
assert not any(good_ctype)
assert ccd_no_relax[0].header["CTYPE1"] == "RA---TAN"
assert ccd_no_relax[0].header["CTYPE2"] == "DEC--TAN"
@pytest.mark.parametrize("operation", ["multiply", "divide", "add", "subtract"])
def test_mask_arithmetic_ccd(operation):
ccd_data = create_ccd_data()
ccd_data2 = ccd_data.copy()
ccd_data.mask = ccd_data.data > 0
method = getattr(ccd_data, operation)
result = method(ccd_data2)
np.testing.assert_equal(result.mask, ccd_data.mask)
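# Illustrative sketch (not part of the original suite): the mask handler
# can be overridden per call; np.logical_and keeps only pixels masked in
# both operands.
def test_mask_arithmetic_custom_handler_sketch():
    mask1 = np.array([[True, True], [False, False]])
    mask2 = np.array([[True, False], [True, False]])
    ccd1 = CCDData(np.ones((2, 2)), unit="adu", mask=mask1)
    ccd2 = CCDData(np.ones((2, 2)), unit="adu", mask=mask2)
    result = ccd1.add(ccd2, handle_mask=np.logical_and)
    np.testing.assert_array_equal(result.mask, np.logical_and(mask1, mask2))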
def test_write_read_multiextensionfits_mask_default(tmp_path):
# Test that if a mask is present the mask is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
filename = str(tmp_path / "afile.fits")
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
@pytest.mark.parametrize(
"uncertainty_type", [StdDevUncertainty, VarianceUncertainty, InverseVariance]
)
def test_write_read_multiextensionfits_uncertainty_default(tmp_path, uncertainty_type):
# Test that if an uncertainty is present it is saved and loaded by default.
ccd_data = create_ccd_data()
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = str(tmp_path / "afile.fits")
ccd_data.write(filename)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(
ccd_data.uncertainty.array, ccd_after.uncertainty.array
)
@pytest.mark.parametrize(
"uncertainty_type", [StdDevUncertainty, VarianceUncertainty, InverseVariance]
)
def test_write_read_multiextensionfits_uncertainty_different_uncertainty_key(
tmp_path, uncertainty_type
):
# Test that an uncertainty is saved and loaded when a custom key for the
# uncertainty type is used.
ccd_data = create_ccd_data()
ccd_data.uncertainty = uncertainty_type(ccd_data.data * 10)
filename = str(tmp_path / "afile.fits")
ccd_data.write(filename, key_uncertainty_type="Blah")
ccd_after = CCDData.read(filename, key_uncertainty_type="Blah")
assert ccd_after.uncertainty is not None
assert type(ccd_after.uncertainty) is uncertainty_type
np.testing.assert_array_equal(
ccd_data.uncertainty.array, ccd_after.uncertainty.array
)
def test_write_read_multiextensionfits_not(tmp_path):
# Test that writing mask and uncertainty can be disabled
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = str(tmp_path / "afile.fits")
ccd_data.write(filename, hdu_mask=None, hdu_uncertainty=None)
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
def test_write_read_multiextensionfits_custom_ext_names(tmp_path):
# Test writing mask and uncertainty to extensions other than the default
ccd_data = create_ccd_data()
ccd_data.mask = ccd_data.data > 10
ccd_data.uncertainty = StdDevUncertainty(ccd_data.data * 10)
filename = str(tmp_path / "afile.fits")
ccd_data.write(filename, hdu_mask="Fun", hdu_uncertainty="NoFun")
# Try reading with the default extension names
ccd_after = CCDData.read(filename)
assert ccd_after.uncertainty is None
assert ccd_after.mask is None
# Try reading with custom extension names
ccd_after = CCDData.read(filename, hdu_mask="Fun", hdu_uncertainty="NoFun")
assert ccd_after.uncertainty is not None
assert ccd_after.mask is not None
np.testing.assert_array_equal(ccd_data.mask, ccd_after.mask)
np.testing.assert_array_equal(
ccd_data.uncertainty.array, ccd_after.uncertainty.array
)
def test_read_old_style_multiextensionfits(tmp_path):
# Regression test for https://github.com/astropy/ccdproc/issues/664
#
# Prior to astropy 3.1 there was no uncertainty type saved
# in the multiextension fits files generated by CCDData
# because the uncertainty had to be a StdDevUncertainty.
#
# Current version should be able to read those in.
#
size = 4
# Values of the variables below are not important to the test.
data = np.zeros([size, size])
mask = data > 0.9
uncert = np.sqrt(data)
ccd = CCDData(data=data, mask=mask, uncertainty=uncert, unit="adu")
# We'll create the file manually to ensure we have the
# right extension names and no uncertainty type.
hdulist = ccd.to_hdu()
del hdulist[2].header["UTYPE"]
file_name = str(tmp_path / "old_ccddata_mef.fits")
hdulist.writeto(file_name)
ccd = CCDData.read(file_name)
assert isinstance(ccd.uncertainty, StdDevUncertainty)
def test_wcs():
ccd_data = create_ccd_data()
wcs = WCS(naxis=2)
ccd_data.wcs = wcs
assert ccd_data.wcs is wcs
def test_recognized_fits_formats_for_read_write(tmp_path):
# These are the extensions that are supposed to be supported.
ccd_data = create_ccd_data()
supported_extensions = ["fit", "fits", "fts"]
for ext in supported_extensions:
path = str(tmp_path / f"test.{ext}")
ccd_data.write(path)
from_disk = CCDData.read(path)
assert (ccd_data.data == from_disk.data).all()
def test_stddevuncertainty_compat_descriptor_no_parent():
with pytest.raises(MissingDataAssociationException):
StdDevUncertainty(np.ones((10, 10))).parent_nddata
def test_stddevuncertainty_compat_descriptor_no_weakref():
# TODO: Remove this test if astropy 1.0 isn't supported anymore
# This test might create a memory leak on purpose, so the last lines after
# the assert are IMPORTANT cleanup.
ccd = CCDData(np.ones((10, 10)), unit="")
uncert = StdDevUncertainty(np.ones((10, 10)))
uncert._parent_nddata = ccd
assert uncert.parent_nddata is ccd
uncert._parent_nddata = None
# https://github.com/astropy/astropy/issues/7595
def test_read_returns_image(tmp_path):
# Test that CCDData.read returns an image when reading a FITS file
# containing a table and an image, in that order.
tbl = Table(np.ones(10).reshape(5, 2))
img = np.ones((5, 5))
hdul = fits.HDUList(
hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()), fits.ImageHDU(img)]
)
filename = str(tmp_path / "table_image.fits")
hdul.writeto(filename)
ccd = CCDData.read(filename, unit="adu")
# Expecting to get (5, 5), the size of the image
assert ccd.data.shape == (5, 5)
# https://github.com/astropy/astropy/issues/9664
def test_sliced_ccdata_to_hdu():
wcs = WCS(naxis=2)
wcs.wcs.crpix = 10, 10
ccd = CCDData(np.ones((10, 10)), wcs=wcs, unit="pixel")
trimmed = ccd[2:-2, 2:-2]
hdul = trimmed.to_hdu()
assert isinstance(hdul, fits.HDUList)
assert hdul[0].header["CRPIX1"] == 8
assert hdul[0].header["CRPIX2"] == 8
def test_read_write_tilde_paths(home_is_tmpdir):
# Test for reading and writing to tilde-prefixed paths without errors
ccd_data = create_ccd_data()
filename = os.path.join("~", "test.fits")
ccd_data.write(filename)
ccd_disk = CCDData.read(filename, unit=ccd_data.unit)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
# Ensure the unexpanded path doesn't exist (e.g. no directory whose name is
# a literal ~ was created)
assert not os.path.exists(filename)
def test_ccddata_with_psf():
psf = _random_psf.copy()
ccd = CCDData(_random_array.copy(), unit=u.adu, psf=psf)
assert (ccd.psf == psf).all()
# cannot pass in non-ndarray
with pytest.raises(TypeError, match="The psf must be a numpy array."):
CCDData(_random_array.copy(), unit=u.adu, psf="something")
def test_psf_setter():
psf = _random_psf.copy()
ccd = CCDData(_random_array.copy(), unit=u.adu)
ccd.psf = psf
assert (ccd.psf == psf).all()
# cannot set with non-ndarray
with pytest.raises(TypeError, match="The psf must be a numpy array."):
ccd.psf = 5
def test_write_read_psf(tmp_path):
"""Test that we can round-trip a CCDData with an attached PSF image."""
ccd_data = create_ccd_data()
ccd_data.psf = _random_psf
filename = tmp_path / "test_write_read_psf.fits"
ccd_data.write(filename)
ccd_disk = CCDData.read(filename)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
np.testing.assert_array_equal(ccd_data.psf, ccd_disk.psf)
# Try a different name for the PSF HDU.
filename = tmp_path / "test_write_read_psf_hdu.fits"
ccd_data.write(filename, hdu_psf="PSFOTHER")
# psf will be None if we don't supply the new HDU name to the reader.
ccd_disk = CCDData.read(filename)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
assert ccd_disk.psf is None
# psf will round-trip if we do supply the new HDU name.
ccd_disk = CCDData.read(filename, hdu_psf="PSFOTHER")
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
np.testing.assert_array_equal(ccd_data.psf, ccd_disk.psf)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Tests of NDDataBase
from astropy.nddata.nddata_base import NDDataBase
class MinimalSubclass(NDDataBase):
def __init__(self):
super().__init__()
@property
def data(self):
return None
@property
def mask(self):
return super().mask
@property
def unit(self):
return super().unit
@property
def wcs(self):
return super().wcs
@property
def meta(self):
return super().meta
@property
def uncertainty(self):
return super().uncertainty
@property
def psf(self):
return super().psf
class MinimalSubclassNoPSF(NDDataBase):
def __init__(self):
super().__init__()
@property
def data(self):
return None
@property
def mask(self):
return super().mask
@property
def unit(self):
return super().unit
@property
def wcs(self):
return super().wcs
@property
def meta(self):
return super().meta
@property
def uncertainty(self):
return super().uncertainty
def test_nddata_base_subclass():
a = MinimalSubclass()
assert a.meta is None
assert a.data is None
assert a.mask is None
assert a.unit is None
assert a.wcs is None
assert a.uncertainty is None
assert a.psf is None
def test_omitting_psf_is_ok():
# Make sure that psf does not need to be overridden when creating a subclass
b = MinimalSubclassNoPSF()
assert b.psf is None
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from packaging.version import Version
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.nddata import (
CCDData,
Cutout2D,
NoOverlapError,
PartialOverlapError,
add_array,
extract_array,
overlap_slices,
subpixel_indices,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs import WCS, Sip
from astropy.wcs.utils import proj_plane_pixel_area
test_positions = [
(10.52, 3.12),
(5.62, 12.97),
(31.33, 31.77),
(0.46, 0.94),
(20.45, 12.12),
(42.24, 24.42),
]
test_position_indices = [(0, 3), (0, 2), (4, 1), (4, 2), (4, 3), (3, 4)]
test_slices = [
slice(10.52, 3.12),
slice(5.62, 12.97),
slice(31.33, 31.77),
slice(0.46, 0.94),
slice(20.45, 12.12),
slice(42.24, 24.42),
]
subsampling = 5
test_pos_bad = [(-1, -4), (-2, 0), (6, 2), (6, 6)]
test_nonfinite_positions = [
(np.nan, np.nan),
(np.inf, np.inf),
(1, np.nan),
(np.nan, 2),
(2, -np.inf),
(-np.inf, 3),
]
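# Illustrative sketch (comment only): overlap_slices returns a pair of
# slice tuples, one indexing the large array and one the small array,
# e.g. (matching test_slices_partial_overlap below):
#
#     slc_lg, slc_sm = overlap_slices((5,), (3,), (0,))
#     # slc_lg == (slice(0, 2),) and slc_sm == (slice(1, 3),)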
def test_slices_different_dim():
"""Overlap from arrays with different number of dim is undefined."""
with pytest.raises(ValueError, match=".*the same number of dimensions.*"):
overlap_slices((4, 5, 6), (1, 2), (0, 0))
def test_slices_pos_different_dim():
"""Position must have same dim as arrays."""
with pytest.raises(ValueError, match=".*the same number of dimensions.*"):
overlap_slices((4, 5), (1, 2), (0, 0, 3))
@pytest.mark.parametrize("pos", test_pos_bad)
def test_slices_no_overlap(pos):
"""If there is no overlap between arrays, an error should be raised."""
with pytest.raises(NoOverlapError):
overlap_slices((5, 5), (2, 2), pos)
def test_slices_partial_overlap():
"""Compute a slice for partially overlapping arrays."""
temp = overlap_slices((5,), (3,), (0,))
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
temp = overlap_slices((5,), (3,), (0,), mode="partial")
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
for pos in [0, 4]:
with pytest.raises(
PartialOverlapError, match=".*Arrays overlap only partially.*"
):
temp = overlap_slices((5,), (3,), (pos,), mode="strict")
def test_slices_edges():
"""
Test overlap_slices when extracting along edges.
"""
slc_lg, slc_sm = overlap_slices((10, 10), (3, 3), (1, 1), mode="strict")
assert slc_lg[0].start == slc_lg[1].start == 0
assert slc_lg[0].stop == slc_lg[1].stop == 3
assert slc_sm[0].start == slc_sm[1].start == 0
assert slc_sm[0].stop == slc_sm[1].stop == 3
slc_lg, slc_sm = overlap_slices((10, 10), (3, 3), (8, 8), mode="strict")
assert slc_lg[0].start == slc_lg[1].start == 7
assert slc_lg[0].stop == slc_lg[1].stop == 10
assert slc_sm[0].start == slc_sm[1].start == 0
assert slc_sm[0].stop == slc_sm[1].stop == 3
# test (0, 0) shape
slc_lg, slc_sm = overlap_slices((10, 10), (0, 0), (0, 0))
assert slc_lg[0].start == slc_lg[0].stop == 0
assert slc_lg[1].start == slc_lg[1].stop == 0
assert slc_sm[0].start == slc_sm[0].stop == 0
assert slc_sm[1].start == slc_sm[1].stop == 0
slc_lg, slc_sm = overlap_slices((10, 10), (0, 0), (5, 5))
assert slc_lg[0].start == slc_lg[0].stop == 5
assert slc_lg[1].start == slc_lg[1].stop == 5
assert slc_sm[0].start == slc_sm[0].stop == 0
assert slc_sm[1].start == slc_sm[1].stop == 0
def test_slices_overlap_wrong_mode():
"""Call overlap_slices with non-existing mode."""
with pytest.raises(ValueError, match="^Mode can be only.*"):
overlap_slices((5,), (3,), (0,), mode="full")
@pytest.mark.parametrize("position", test_nonfinite_positions)
def test_slices_nonfinite_position(position):
"""
A ValueError should be raised if position contains a non-finite
value.
"""
with pytest.raises(ValueError):
overlap_slices((7, 7), (3, 3), position)
def test_extract_array_even_shape_rounding():
"""
Test overlap_slices (via extract_array) for rounding with an
even-shaped extraction.
"""
data = np.arange(10)
shape = (2,)
positions_expected = [
(1.49, (1, 2)),
(1.5, (1, 2)),
(1.501, (1, 2)),
(1.99, (1, 2)),
(2.0, (1, 2)),
(2.01, (2, 3)),
(2.49, (2, 3)),
(2.5, (2, 3)),
(2.501, (2, 3)),
(2.99, (2, 3)),
(3.0, (2, 3)),
(3.01, (3, 4)),
]
for pos, exp in positions_expected:
out = extract_array(data, shape, (pos,), mode="partial")
assert_array_equal(out, exp)
# test negative positions
positions = (-0.99, -0.51, -0.5, -0.49, -0.01, 0)
exp1 = (-99, 0)
exp2 = (0, 1)
expected = [exp1] * 6 + [exp2]
for pos, exp in zip(positions, expected):
out = extract_array(data, shape, (pos,), mode="partial", fill_value=-99)
assert_array_equal(out, exp)
def test_extract_array_odd_shape_rounding():
"""
Test overlap_slices (via extract_array) for rounding with an
odd-shaped extraction.
"""
data = np.arange(10)
shape = (3,)
positions_expected = [
(1.49, (0, 1, 2)),
(1.5, (0, 1, 2)),
(1.501, (1, 2, 3)),
(1.99, (1, 2, 3)),
(2.0, (1, 2, 3)),
(2.01, (1, 2, 3)),
(2.49, (1, 2, 3)),
(2.5, (1, 2, 3)),
(2.501, (2, 3, 4)),
(2.99, (2, 3, 4)),
(3.0, (2, 3, 4)),
(3.01, (2, 3, 4)),
]
for pos, exp in positions_expected:
out = extract_array(data, shape, (pos,), mode="partial")
assert_array_equal(out, exp)
# test negative positions
positions = (-0.99, -0.51, -0.5, -0.49, -0.01, 0)
exp1 = (-99, -99, 0)
exp2 = (-99, 0, 1)
expected = [exp1] * 3 + [exp2] * 4
for pos, exp in zip(positions, expected):
out = extract_array(data, shape, (pos,), mode="partial", fill_value=-99)
assert_array_equal(out, exp)
def test_extract_array_wrong_mode():
"""Call extract_array with non-existing mode."""
with pytest.raises(ValueError) as e:
extract_array(np.arange(4), (2,), (0,), mode="full")
assert "Valid modes are 'partial', 'trim', and 'strict'." == str(e.value)
def test_extract_array_1d_even():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
"""
assert np.all(
extract_array(np.arange(4), (2,), (0,), fill_value=-99) == np.array([-99, 0])
)
for i in [1, 2, 3]:
assert np.all(extract_array(np.arange(4), (2,), (i,)) == np.array([i - 1, i]))
assert np.all(
extract_array(np.arange(4.0), (2,), (4,), fill_value=np.inf)
== np.array([3, np.inf])
)
def test_extract_array_1d_odd():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
The first few lines test the most error-prone part: Extraction of an
array on the boundaries.
Additional tests (e.g. dtype of return array) are done for the last
case only.
"""
assert np.all(
extract_array(np.arange(4), (3,), (-1,), fill_value=-99)
== np.array([-99, -99, 0])
)
assert np.all(
extract_array(np.arange(4), (3,), (0,), fill_value=-99) == np.array([-99, 0, 1])
)
for i in [1, 2]:
assert np.all(
extract_array(np.arange(4), (3,), (i,)) == np.array([i - 1, i, i + 1])
)
assert np.all(
extract_array(np.arange(4), (3,), (3,), fill_value=-99) == np.array([2, 3, -99])
)
arrayin = np.arange(4.0)
extracted = extract_array(arrayin, (3,), (4,))
assert extracted[0] == 3
assert np.isnan(extracted[1]) # since I cannot use `==` to test for nan
assert extracted.dtype == arrayin.dtype
def test_extract_array_1d():
"""In 1d, shape can be int instead of tuple"""
assert np.all(
extract_array(np.arange(4), 3, (-1,), fill_value=-99) == np.array([-99, -99, 0])
)
assert np.all(
extract_array(np.arange(4), 3, -1, fill_value=-99) == np.array([-99, -99, 0])
)
def test_extract_Array_float():
"""integer is at bin center"""
for a in np.arange(2.51, 3.49, 0.1):
assert np.all(extract_array(np.arange(5), 3, a) == np.array([2, 3, 4]))
def test_extract_array_1d_trim():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
"""
assert np.all(extract_array(np.arange(4), (2,), (0,), mode="trim") == np.array([0]))
for i in [1, 2, 3]:
assert np.all(
extract_array(np.arange(4), (2,), (i,), mode="trim") == np.array([i - 1, i])
)
assert np.all(
extract_array(np.arange(4.0), (2,), (4,), mode="trim") == np.array([3])
)
@pytest.mark.parametrize("mode", ["partial", "trim", "strict"])
def test_extract_array_easy(mode):
"""
Test extract_array utility function.
Test by extracting an array of ones out of an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array[3:8, 3:8] = small_test_array
extracted_array = extract_array(large_test_array, (5, 5), (5, 5), mode=mode)
assert np.all(extracted_array == small_test_array)
def test_extract_array_return_pos():
"""Check that the return position is calculated correctly.
The result will differ by mode. All test here are done in 1d because it's
easier to construct correct test cases.
"""
large_test_array = np.arange(5, dtype=float)
for i in np.arange(-1, 6):
extracted, new_pos = extract_array(
large_test_array, 3, i, mode="partial", return_position=True
)
assert new_pos == (1,)
# Now check an array with an even number
for i, expected in zip([1.49, 1.51, 3], [0.49, 0.51, 1]):
extracted, new_pos = extract_array(
large_test_array, (2,), (i,), mode="strict", return_position=True
)
assert new_pos == (expected,)
# For mode='trim' the answer actually depends on the position
for i, expected in zip(np.arange(-1, 6), (-1, 0, 1, 1, 1, 1, 1)):
extracted, new_pos = extract_array(
large_test_array, (3,), (i,), mode="trim", return_position=True
)
assert new_pos == (expected,)
def test_extract_array_nan_fillvalue():
if Version(np.__version__) >= Version("1.20"):
msg = "fill_value cannot be set to np.nan if the input array has"
with pytest.raises(ValueError, match=msg):
extract_array(
np.ones((10, 10), dtype=int), (5, 5), (1, 1), fill_value=np.nan
)
def test_add_array_odd_shape():
"""
Test add_array utility function.
Test by adding an array of ones to an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[3:8, 3:8] += small_test_array
added_array = add_array(large_test_array, small_test_array, (5, 5))
assert np.all(added_array == large_test_array_ref)
def test_add_array_even_shape():
"""
Test add_array utility function with an even-shaped small array.
Test by adding an array of ones to an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((4, 4))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[0:2, 0:2] += small_test_array[2:4, 2:4]
added_array = add_array(large_test_array, small_test_array, (0, 0))
assert np.all(added_array == large_test_array_ref)
def test_add_array_equal_shape():
"""
Test add_array utility function with an equal-shaped array.
Test by adding an array of ones to an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((11, 11))
large_test_array_ref = large_test_array.copy()
large_test_array_ref += small_test_array
added_array = add_array(large_test_array, small_test_array, (5, 5))
assert np.all(added_array == large_test_array_ref)
@pytest.mark.parametrize(
("position", "subpixel_index"), zip(test_positions, test_position_indices)
)
def test_subpixel_indices(position, subpixel_index):
"""
Test subpixel_indices utility function.
Test by asserting that the function returns correct results for
given test values.
"""
assert np.all(subpixel_indices(position, subsampling) == subpixel_index)
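# Illustrative sketch (comment only): with subsampling=5 each pixel is
# split into a 5x5 grid of subpixels, and subpixel_indices returns the
# index of the subpixel containing each coordinate, e.g. (taken from the
# test data above):
#
#     subpixel_indices((10.52, 3.12), 5)  # -> (0, 3) as an array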
class TestCutout2D:
def setup_class(self):
self.data = np.arange(20.0).reshape(5, 4)
self.position = SkyCoord("13h11m29.96s -01d19m18.7s", frame="icrs")
wcs = WCS(naxis=2)
rho = np.pi / 3.0
scale = 0.05 / 3600.0
wcs.wcs.cd = [
[scale * np.cos(rho), -scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)],
]
wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
wcs.wcs.crval = [
self.position.ra.to_value(u.deg),
self.position.dec.to_value(u.deg),
]
wcs.wcs.crpix = [3, 3]
self.wcs = wcs
# add SIP
sipwcs = wcs.deepcopy()
sipwcs.wcs.ctype = ["RA---TAN-SIP", "DEC--TAN-SIP"]
a = np.array(
[
[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0],
]
)
b = np.array(
[
[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0],
]
)
sipwcs.sip = Sip(a, b, None, None, wcs.wcs.crpix)
sipwcs.wcs.set()
self.sipwcs = sipwcs
def test_cutout(self):
sizes = [
3,
3 * u.pixel,
(3, 3),
(3 * u.pixel, 3 * u.pix),
(3.0, 3 * u.pixel),
(2.9, 3.3),
]
for size in sizes:
position = (2.1, 1.9)
c = Cutout2D(self.data, position, size)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 10
assert c.origin_original == (1, 1)
assert c.origin_cutout == (0, 0)
assert c.input_position_original == position
assert_allclose(c.input_position_cutout, (1.1, 0.9))
assert c.position_original == (2.0, 2.0)
assert c.position_cutout == (1.0, 1.0)
assert c.center_original == (2.0, 2.0)
assert c.center_cutout == (1.0, 1.0)
assert c.bbox_original == ((1, 3), (1, 3))
assert c.bbox_cutout == ((0, 2), (0, 2))
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_length(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (1, 1, 1))
def test_size_units(self):
for size in [3 * u.cm, (3, 3 * u.K)]:
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), size)
def test_size_pixel(self):
"""
Check size in derived pixel units.
"""
size = 0.3 * u.arcsec / (0.1 * u.arcsec / u.pixel)
c = Cutout2D(self.data, (2, 2), size)
assert c.data.shape == (3, 3)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 4), slice(1, 4))
assert c.slices_cutout == (slice(0, 3), slice(0, 3))
def test_size_angle(self):
c = Cutout2D(self.data, (2, 2), (0.1 * u.arcsec), wcs=self.wcs)
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 5
assert c.slices_original == (slice(1, 3), slice(1, 3))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_size_angle_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, (2, 2), (3, 3 * u.arcsec))
def test_cutout_trim_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode="trim")
assert c.data.shape == (2, 2)
assert c.data[0, 0] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(0, 2), slice(0, 2))
def test_cutout_partial_overlap(self):
c = Cutout2D(self.data, (0, 0), (3, 3), mode="partial")
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.slices_original == (slice(0, 2), slice(0, 2))
assert c.slices_cutout == (slice(1, 3), slice(1, 3))
def test_cutout_partial_overlap_fill_value(self):
fill_value = -99
c = Cutout2D(self.data, (0, 0), (3, 3), mode="partial", fill_value=fill_value)
assert c.data.shape == (3, 3)
assert c.data[1, 1] == 0
assert c.data[0, 0] == fill_value
def test_copy(self):
data = np.copy(self.data)
c = Cutout2D(data, (2, 3), (3, 3))
xy = (0, 0)
value = 100.0
c.data[xy] = value
xy_orig = c.to_original_position(xy)
yx = xy_orig[::-1]
assert data[yx] == value
data = np.copy(self.data)
        c2 = Cutout2D(data, (2, 3), (3, 3), copy=True)
c2.data[xy] = value
assert data[yx] != value
def test_to_from_large(self):
position = (2, 2)
c = Cutout2D(self.data, position, (3, 3))
xy = (0, 0)
result = c.to_cutout_position(c.to_original_position(xy))
assert_allclose(result, xy)
def test_skycoord_without_wcs(self):
with pytest.raises(ValueError):
Cutout2D(self.data, self.position, (3, 3))
def test_skycoord(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs)
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_skycoord_partial(self):
c = Cutout2D(self.data, self.position, (3, 3), wcs=self.wcs, mode="partial")
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
def test_naxis_update(self):
xsize = 2
ysize = 3
c = Cutout2D(self.data, self.position, (ysize, xsize), wcs=self.wcs)
assert c.wcs.array_shape == (ysize, xsize)
def test_crpix_maps_to_crval(self):
w = Cutout2D(self.data, (0, 0), (3, 3), wcs=self.sipwcs, mode="partial").wcs
pscale = np.sqrt(proj_plane_pixel_area(w))
assert_allclose(
w.wcs_pix2world(*w.wcs.crpix, 1), w.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
assert_allclose(
w.all_pix2world(*w.wcs.crpix, 1), w.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
def test_cutout_with_nddata_as_input(self):
        # This is essentially a copy/paste of test_skycoord, with the input
        # being a CCDData that carries a wcs attribute instead of passing
        # the wcs separately.
ccd = CCDData(data=self.data, wcs=self.wcs, unit="adu")
c = Cutout2D(ccd, self.position, (3, 3))
skycoord_original = self.position.from_pixel(
c.center_original[1], c.center_original[0], self.wcs
)
skycoord_cutout = self.position.from_pixel(
c.center_cutout[1], c.center_cutout[0], c.wcs
)
assert_quantity_allclose(skycoord_original.ra, skycoord_cutout.ra)
assert_quantity_allclose(skycoord_original.dec, skycoord_cutout.dec)
|
762cfaa17755c43507e5e2b3cc15a53813c2a54b5fcd5a1f4c2689dbae59bcdc | """
A module containing unit tests for the `bitmask` module.
Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
import warnings
import numpy as np
import pytest
from astropy.nddata import bitmask
MAX_INT_TYPE = np.maximum_sctype(np.int_)
MAX_UINT_TYPE = np.maximum_sctype(np.uint)
MAX_UINT_FLAG = np.left_shift(
MAX_UINT_TYPE(1), MAX_UINT_TYPE(np.iinfo(MAX_UINT_TYPE).bits - 1)
)
MAX_INT_FLAG = np.left_shift(
MAX_INT_TYPE(1), MAX_INT_TYPE(np.iinfo(MAX_INT_TYPE).bits - 2)
)
SUPER_LARGE_FLAG = 1 << np.iinfo(MAX_UINT_TYPE).bits
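# MAX_UINT_FLAG is the most significant bit of the widest unsigned integer
# type, MAX_INT_FLAG the highest positive bit of the widest signed type, and
# SUPER_LARGE_FLAG is a Python int too wide for any native integer dtype.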
EXTREME_TEST_DATA = np.array(
[
0,
1,
1 + 1 << 2,
MAX_INT_FLAG,
~0,
MAX_INT_TYPE(MAX_UINT_FLAG),
1 + MAX_INT_TYPE(MAX_UINT_FLAG),
],
dtype=MAX_INT_TYPE,
)
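# Note: by operator precedence, ``1 + 1 << 2`` above evaluates as
# ``(1 + 1) << 2 == 8``, a single valid bit flag, not ``1 + (1 << 2)``.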
@pytest.mark.parametrize("flag", [0, -1])
def test_nonpositive_not_a_bit_flag(flag):
assert not bitmask._is_bit_flag(n=flag)
@pytest.mark.parametrize(
"flag", [1, MAX_UINT_FLAG, int(MAX_UINT_FLAG), SUPER_LARGE_FLAG]
)
def test_is_bit_flag(flag):
assert bitmask._is_bit_flag(n=flag)
@pytest.mark.parametrize("number", [0, 1, MAX_UINT_FLAG, SUPER_LARGE_FLAG])
def test_is_int(number):
assert bitmask._is_int(number)
@pytest.mark.parametrize("number", ["1", True, 1.0])
def test_nonint_is_not_an_int(number):
assert not bitmask._is_int(number)
@pytest.mark.parametrize(
"flag,flip,expected",
[
(3, None, 3),
(3, True, -4),
(3, False, 3),
([1, 2], False, 3),
([1, 2], True, -4),
],
)
def test_interpret_valid_int_bit_flags(flag, flip, expected):
assert bitmask.interpret_bit_flags(bit_flags=flag, flip_bits=flip) == expected
@pytest.mark.parametrize("flag", [None, " ", "None", "Indef"])
def test_interpret_none_bit_flags_as_None(flag):
assert bitmask.interpret_bit_flags(bit_flags=flag) is None
@pytest.mark.parametrize(
"flag,expected",
[
("1", 1),
("~-1", ~(-1)),
("~1", ~1),
("1,2", 3),
("1|2", 3),
("1+2", 3),
("(1,2)", 3),
("(1+2)", 3),
("~1,2", ~3),
("~1+2", ~3),
("~(1,2)", ~3),
("~(1+2)", ~3),
],
)
def test_interpret_valid_str_bit_flags(flag, expected):
assert bitmask.interpret_bit_flags(bit_flags=flag) == expected
@pytest.mark.parametrize(
"flag,expected",
[
("CR", 1),
("~CR", ~1),
("CR|HOT", 3),
("CR,HOT", 3),
("CR+HOT", 3),
(["CR", "HOT"], 3),
("(CR,HOT)", 3),
("(HOT+CR)", 3),
("~HOT,CR", ~3),
("~CR+HOT", ~3),
("~(HOT,CR)", ~3),
("~(HOT|CR)", ~3),
("~(CR+HOT)", ~3),
],
)
def test_interpret_valid_mnemonic_bit_flags(flag, expected):
flagmap = bitmask.extend_bit_flag_map("DetectorMap", CR=1, HOT=2)
assert (
bitmask.interpret_bit_flags(bit_flags=flag, flag_name_map=flagmap) == expected
)
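# flip_bits is only meaningful for integer-style bit flag specifications;
# with None or string flags (where negation is written inside the string,
# e.g. "~1,2") passing an explicit flip_bits raises a TypeError.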
@pytest.mark.parametrize(
"flag,flip",
[
(None, True),
(" ", True),
("None", True),
("Indef", True),
(None, False),
(" ", False),
("None", False),
("Indef", False),
("1", True),
("1", False),
],
)
def test_interpret_None_or_str_and_flip_incompatibility(flag, flip):
with pytest.raises(TypeError):
bitmask.interpret_bit_flags(bit_flags=flag, flip_bits=flip)
@pytest.mark.parametrize("flag", [True, 1.0, [1.0], object])
def test_interpret_wrong_flag_type(flag):
with pytest.raises(TypeError):
bitmask.interpret_bit_flags(bit_flags=flag)
@pytest.mark.parametrize("flag", ["SOMETHING", "1.0,2,3"])
def test_interpret_wrong_string_int_format(flag):
with pytest.raises(ValueError):
bitmask.interpret_bit_flags(bit_flags=flag)
def test_interpret_duplicate_flag_warning():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert bitmask.interpret_bit_flags([2, 4, 4]) == 6
assert len(w)
assert issubclass(w[-1].category, UserWarning)
assert "Duplicate" in str(w[-1].message)
@pytest.mark.parametrize("flag", [[1, 2, 3], "1, 2, 3"])
def test_interpret_non_flag(flag):
with pytest.raises(ValueError):
bitmask.interpret_bit_flags(bit_flags=flag)
def test_interpret_allow_single_value_str_nonflags():
assert bitmask.interpret_bit_flags(bit_flags=str(3)) == 3
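# Note the asymmetry: a single string value such as "3" is accepted even
# though 3 is not a power of two, while the equivalent list [1, 2, 3] or
# comma-separated string above is rejected.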
@pytest.mark.parametrize(
"flag",
["~", "( )", "(~1,2)", "~(1,2", "1,~2", "1,(2,4)", "1,2+4", "1+4,2", "1|4+2"],
)
def test_interpret_bad_str_syntax(flag):
with pytest.raises(ValueError):
bitmask.interpret_bit_flags(bit_flags=flag)
def test_bitfield_must_be_integer_check():
with pytest.raises(TypeError):
bitmask.bitfield_to_boolean_mask(1.0, 1)
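# In the cases below, ignore_flags=None ignores every bit (all values are
# "good"), good_mask_value selects the value used to mark good pixels, and
# flip_bits=True inverts which flags are ignored.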
@pytest.mark.parametrize(
"data,flags,flip,goodval,dtype,ref",
[
(EXTREME_TEST_DATA, None, None, True, np.bool_, EXTREME_TEST_DATA.size * [1]),
(EXTREME_TEST_DATA, None, None, False, np.bool_, EXTREME_TEST_DATA.size * [0]),
(
EXTREME_TEST_DATA,
[1, MAX_UINT_FLAG],
False,
True,
np.bool_,
[1, 1, 0, 0, 0, 1, 1],
),
(EXTREME_TEST_DATA, None, None, True, np.bool_, EXTREME_TEST_DATA.size * [1]),
(
EXTREME_TEST_DATA,
[1, MAX_UINT_FLAG],
False,
False,
np.bool_,
[0, 0, 1, 1, 1, 0, 0],
),
(
EXTREME_TEST_DATA,
[1, MAX_UINT_FLAG],
True,
True,
np.int8,
[1, 0, 1, 1, 0, 0, 0],
),
],
)
def test_bitfield_to_boolean_mask(data, flags, flip, goodval, dtype, ref):
mask = bitmask.bitfield_to_boolean_mask(
bitfield=data,
ignore_flags=flags,
flip_bits=flip,
good_mask_value=goodval,
dtype=dtype,
)
assert mask.dtype == dtype
assert np.all(mask == ref)
@pytest.mark.parametrize("flag", [(4, "flag1"), 8])
def test_bitflag(flag):
f = bitmask.BitFlag(flag)
if isinstance(flag, tuple):
assert f == flag[0]
assert f.__doc__ == flag[1]
f = bitmask.BitFlag(*flag)
assert f == flag[0]
assert f.__doc__ == flag[1]
else:
assert f == flag
def test_bitflag_docs2():
with pytest.raises(ValueError):
bitmask.BitFlag((1, "docs1"), "docs2")
@pytest.mark.parametrize("flag", [0, 3])
def test_bitflag_not_pow2(flag):
with pytest.raises(bitmask.InvalidBitFlag):
bitmask.BitFlag(flag, "custom flag")
@pytest.mark.parametrize("flag", [0.0, True, "1"])
def test_bitflag_not_int_flag(flag):
with pytest.raises(bitmask.InvalidBitFlag):
bitmask.BitFlag((flag, "custom flag"))
@pytest.mark.parametrize("caching", [True, False])
def test_basic_map(monkeypatch, caching):
monkeypatch.setattr(bitmask, "_ENABLE_BITFLAG_CACHING", False)
class ObservatoryDQMap(bitmask.BitFlagNameMap):
_not_a_flag = 1
CR = 1, "cosmic ray"
HOT = 2
DEAD = 4
class DetectorMap(ObservatoryDQMap):
__version__ = "1.0"
_not_a_flag = 181
READOUT_ERR = 16
assert ObservatoryDQMap.cr == 1
assert ObservatoryDQMap.cr.__doc__ == "cosmic ray"
assert DetectorMap.READOUT_ERR == 16
@pytest.mark.parametrize("caching", [True, False])
def test_extend_map(monkeypatch, caching):
monkeypatch.setattr(bitmask, "_ENABLE_BITFLAG_CACHING", caching)
class ObservatoryDQMap(bitmask.BitFlagNameMap):
CR = 1
HOT = 2
DEAD = 4
DetectorMap = bitmask.extend_bit_flag_map(
"DetectorMap", ObservatoryDQMap, __version__="1.0", DEAD=4, READOUT_ERR=16
)
assert DetectorMap.CR == 1
assert DetectorMap.readout_err == 16
@pytest.mark.parametrize("caching", [True, False])
def test_extend_map_redefine_flag(monkeypatch, caching):
monkeypatch.setattr(bitmask, "_ENABLE_BITFLAG_CACHING", caching)
class ObservatoryDQMap(bitmask.BitFlagNameMap):
CR = 1
HOT = 2
DEAD = 4
with pytest.raises(AttributeError):
bitmask.extend_bit_flag_map(
"DetectorMap", ObservatoryDQMap, __version__="1.0", DEAD=32
)
with pytest.raises(AttributeError):
bitmask.extend_bit_flag_map(
"DetectorMap", ObservatoryDQMap, __version__="1.0", DEAD=32, dead=64
)
@pytest.mark.parametrize("caching", [True, False])
def test_map_redefine_flag(monkeypatch, caching):
monkeypatch.setattr(bitmask, "_ENABLE_BITFLAG_CACHING", caching)
class ObservatoryDQMap(bitmask.BitFlagNameMap):
_not_a_flag = 8
CR = 1
HOT = 2
DEAD = 4
with pytest.raises(AttributeError):
class DetectorMap1(ObservatoryDQMap):
__version__ = "1.0"
CR = 16
with pytest.raises(AttributeError):
class DetectorMap2(ObservatoryDQMap):
SHADE = 8
_FROZEN = 16
DetectorMap2.novel = 32
with pytest.raises(AttributeError):
bitmask.extend_bit_flag_map(
"DetectorMap", ObservatoryDQMap, READOUT_ERR=16, SHADE=32, readout_err=128
)
def test_map_cant_modify_version():
class ObservatoryDQMap(bitmask.BitFlagNameMap):
__version__ = "1.2.3"
CR = 1
assert ObservatoryDQMap.__version__ == "1.2.3"
assert ObservatoryDQMap.CR == 1
with pytest.raises(AttributeError):
ObservatoryDQMap.__version__ = "3.2.1"
@pytest.mark.parametrize("flag", [0, 3])
def test_map_not_bit_flag(flag):
with pytest.raises(ValueError):
bitmask.extend_bit_flag_map("DetectorMap", DEAD=flag)
with pytest.raises(ValueError):
class DetectorMap(bitmask.BitFlagNameMap):
DEAD = flag
@pytest.mark.parametrize("flag", [0.0, True, "1"])
def test_map_not_int_flag(flag):
with pytest.raises(bitmask.InvalidBitFlag):
bitmask.extend_bit_flag_map("DetectorMap", DEAD=flag)
with pytest.raises(bitmask.InvalidBitFlag):
class ObservatoryDQMap(bitmask.BitFlagNameMap):
CR = flag
def test_map_access_undefined_flag():
DetectorMap = bitmask.extend_bit_flag_map("DetectorMap", DEAD=1)
with pytest.raises(AttributeError):
DetectorMap.DEAD1
with pytest.raises(AttributeError):
DetectorMap["DEAD1"]
def test_map_delete_flag():
DetectorMap = bitmask.extend_bit_flag_map("DetectorMap", DEAD=1)
with pytest.raises(AttributeError):
del DetectorMap.DEAD1
with pytest.raises(AttributeError):
del DetectorMap["DEAD1"]
def test_map_repr():
DetectorMap = bitmask.extend_bit_flag_map("DetectorMap", DEAD=1)
assert repr(DetectorMap) == "<BitFlagNameMap 'DetectorMap'>"
def test_map_add_flags():
map1 = bitmask.extend_bit_flag_map("DetectorMap", CR=1)
map2 = map1 + {"HOT": 2, "DEAD": (4, "a really dead pixel")}
assert map2.CR == 1
assert map2.HOT == 2
assert map2.DEAD.__doc__ == "a really dead pixel"
assert map2.DEAD == 4
map2 = map1 + [("HOT", 2), ("DEAD", 4)]
assert map2.CR == 1
assert map2.HOT == 2
map2 = map1 + ("HOT", 2)
assert map2.CR == 1
assert map2.HOT == 2
|
cc61698a18fee16af2abd21211a8abf7add9769ea701040f6a4335b19c77d7b0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.nddata import block_reduce, block_replicate, reshape_as_blocks
class TestReshapeAsBlocks:
def test_1d(self):
data = np.arange(16)
reshaped = reshape_as_blocks(data, 2)
assert reshaped.shape == (8, 2)
reshaped = reshape_as_blocks(data, 4)
assert reshaped.shape == (4, 4)
reshaped = reshape_as_blocks(data, 8)
assert reshaped.shape == (2, 8)
def test_2d(self):
data = np.arange(16).reshape(4, 4)
reshaped = reshape_as_blocks(data, (2, 2))
assert reshaped.shape == (2, 2, 2, 2)
data = np.arange(64).reshape(8, 8)
reshaped = reshape_as_blocks(data, (2, 2))
assert reshaped.shape == (4, 4, 2, 2)
reshaped = reshape_as_blocks(data, (4, 4))
assert reshaped.shape == (2, 2, 4, 4)
def test_3d(self):
data = np.arange(64).reshape(4, 4, 4)
reshaped = reshape_as_blocks(data, (2, 2, 2))
assert reshaped.shape == (2, 2, 2, 2, 2, 2)
data = np.arange(2 * 3 * 4).reshape(2, 3, 4)
reshaped = reshape_as_blocks(data, (2, 1, 2))
assert reshaped.shape == (1, 3, 2, 2, 1, 2)
def test_view(self):
data = np.arange(16).reshape(4, 4)
reshaped = reshape_as_blocks(data, (2, 2))
data[0, 0] = 100
assert reshaped[0, 0, 0, 0] == 100
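    # For reference, with data = np.arange(16).reshape(4, 4) the block view
    # satisfies reshape_as_blocks(data, (2, 2))[0, 0] == [[0, 1], [4, 5]]:
    # the result axes are (block_row, block_col, row, col).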
def test_invalid_block_dim(self):
data = np.arange(64).reshape(4, 4, 4)
match = (
"block_size must be a scalar or have the same "
"length as the number of data dimensions"
)
with pytest.raises(ValueError, match=match):
reshape_as_blocks(data, (2, 2))
def test_invalid_block_size(self):
data = np.arange(16).reshape(4, 4)
match = (
"Each dimension of block_size must divide evenly "
"into the corresponding dimension of data"
)
with pytest.raises(ValueError, match=match):
reshape_as_blocks(data, (2, 3))
def test_invalid_block_value(self):
data = np.arange(16).reshape(4, 4)
match = "block_size elements must be integers"
with pytest.raises(ValueError, match=match):
reshape_as_blocks(data, (2.1, 2))
match = "block_size elements must be strictly positive"
with pytest.raises(ValueError, match=match):
reshape_as_blocks(data, (-1, 0))
class TestBlockReduce:
def test_1d(self):
"""Test 1D array."""
data = np.arange(4)
expected = np.array([1, 5])
result = block_reduce(data, 2)
assert np.all(result == expected)
def test_1d_mean(self):
"""Test 1D array with func=np.mean."""
data = np.arange(4)
block_size = 2.0
expected = block_reduce(data, block_size, func=np.sum) / block_size
result_mean = block_reduce(data, block_size, func=np.mean)
assert np.all(result_mean == expected)
def test_2d(self):
"""Test 2D array."""
data = np.arange(4).reshape(2, 2)
expected = np.array([[6]])
result = block_reduce(data, 2)
assert np.all(result == expected)
def test_2d_mean(self):
"""Test 2D array with func=np.mean."""
data = np.arange(4).reshape(2, 2)
block_size = 2.0
expected = block_reduce(data, block_size, func=np.sum) / block_size**2
result = block_reduce(data, block_size, func=np.mean)
assert np.all(result == expected)
def test_2d_trim(self):
"""
Test trimming of 2D array when size is not perfectly divisible
by block_size.
"""
data1 = np.arange(15).reshape(5, 3)
result1 = block_reduce(data1, 2)
data2 = data1[0:4, 0:2]
result2 = block_reduce(data2, 2)
assert np.all(result1 == result2)
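        # Worked example: data1 is trimmed to its leading 4x2 corner, giving
        # 2x2 block sums [[0 + 1 + 3 + 4], [6 + 7 + 9 + 10]] == [[8], [32]].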
def test_block_size_broadcasting(self):
"""Test scalar block_size broadcasting."""
data = np.arange(16).reshape(4, 4)
result1 = block_reduce(data, 2)
result2 = block_reduce(data, (2, 2))
assert np.all(result1 == result2)
def test_block_size_len(self):
"""Test block_size length."""
data = np.ones((2, 2))
with pytest.raises(ValueError):
block_reduce(data, (2, 2, 2))
class TestBlockReplicate:
def test_1d(self):
"""Test 1D array."""
data = np.arange(2)
expected = np.array([0, 0, 0.5, 0.5])
result = block_replicate(data, 2)
assert np.all(result == expected)
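    # By default each output value is the input divided by the number of
    # pixels per block, so the array total is conserved; conserve_sum=False
    # instead repeats the values unchanged.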
def test_1d_conserve_sum(self):
"""Test 1D array with conserve_sum=False."""
data = np.arange(2)
block_size = 2.0
expected = block_replicate(data, block_size) * block_size
result = block_replicate(data, block_size, conserve_sum=False)
assert np.all(result == expected)
def test_2d(self):
"""Test 2D array."""
data = np.arange(2).reshape(2, 1)
expected = np.array([[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]])
result = block_replicate(data, 2)
assert np.all(result == expected)
def test_2d_conserve_sum(self):
"""Test 2D array with conserve_sum=False."""
data = np.arange(6).reshape(2, 3)
block_size = 2.0
expected = block_replicate(data, block_size) * block_size**2
result = block_replicate(data, block_size, conserve_sum=False)
assert np.all(result == expected)
def test_block_size_broadcasting(self):
"""Test scalar block_size broadcasting."""
data = np.arange(4).reshape(2, 2)
result1 = block_replicate(data, 2)
result2 = block_replicate(data, (2, 2))
assert np.all(result1 == result2)
def test_block_size_len(self):
"""Test block_size length."""
data = np.arange(5)
with pytest.raises(ValueError):
block_replicate(data, (2, 2))
|
40b8862d769434488841bad80c8071ed7691ac01845bb06999b006a96de77fa2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.nddata import FlagCollection
def test_init():
FlagCollection(shape=(1, 2, 3))
def test_init_noshape():
with pytest.raises(Exception) as exc:
FlagCollection()
assert (
exc.value.args[0]
== "FlagCollection should be initialized with the shape of the data"
)
def test_init_notiterable():
with pytest.raises(Exception) as exc:
FlagCollection(shape=1.0)
assert exc.value.args[0] == "FlagCollection shape should be an iterable object"
def test_setitem():
f = FlagCollection(shape=(1, 2, 3))
f["a"] = np.ones((1, 2, 3)).astype(float)
f["b"] = np.ones((1, 2, 3)).astype(int)
f["c"] = np.ones((1, 2, 3)).astype(bool)
f["d"] = np.ones((1, 2, 3)).astype(str)
@pytest.mark.parametrize("value", [1, 1.0, "spam", [1, 2, 3], (1.0, 2.0, 3.0)])
def test_setitem_invalid_type(value):
f = FlagCollection(shape=(1, 2, 3))
with pytest.raises(Exception) as exc:
f["a"] = value
assert exc.value.args[0] == "flags should be given as a Numpy array"
def test_setitem_invalid_shape():
f = FlagCollection(shape=(1, 2, 3))
with pytest.raises(ValueError) as exc:
f["a"] = np.ones((3, 2, 1))
assert exc.value.args[0].startswith("flags array shape")
assert exc.value.args[0].endswith("does not match data shape (1, 2, 3)")
|
498c17d1035becbb1addd9ce7dbc4155da4e9aaa3c321a00b6f4c2525f8d3682 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pickle
import textwrap
from collections import OrderedDict
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from astropy import units as u
from astropy.nddata import _testing as nd_testing
from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import StdDevUncertainty
from astropy.utils import NumpyRNGContext
from astropy.wcs import WCS
from astropy.wcs.wcsapi import BaseHighLevelWCS, HighLevelWCSWrapper, SlicedLowLevelWCS
from .test_nduncertainty import FakeUncertainty
class FakeNumpyArray:
"""
Class that has a few of the attributes of a numpy array.
These attributes are checked for by NDData.
"""
def __init__(self):
super().__init__()
def shape(self):
pass
def __getitem__(self):
pass
def __array__(self):
pass
@property
def dtype(self):
return "fake"
class MinimalUncertainty:
"""
Define the minimum attributes acceptable as an uncertainty object.
"""
def __init__(self, value):
self._uncertainty = value
@property
def uncertainty_type(self):
return "totally and completely fake"
class BadNDDataSubclass(NDData):
def __init__(
self,
data,
uncertainty=None,
mask=None,
wcs=None,
meta=None,
unit=None,
psf=None,
):
self._data = data
self._uncertainty = uncertainty
self._mask = mask
self._wcs = wcs
self._psf = psf
self._unit = unit
self._meta = meta
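# BadNDDataSubclass is "bad" because it assigns the private attributes
# directly, bypassing the validation in NDData.__init__, so its attributes
# can end up holding arbitrary, mutually inconsistent objects.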
# Setter tests
def test_uncertainty_setter():
nd = NDData([1, 2, 3])
good_uncertainty = MinimalUncertainty(5)
nd.uncertainty = good_uncertainty
assert nd.uncertainty is good_uncertainty
    # Check the fake uncertainty (MinimalUncertainty does not work here since
    # it has no parent_nddata attribute from NDUncertainty)
nd.uncertainty = FakeUncertainty(5)
assert nd.uncertainty.parent_nddata is nd
# Check that it works if the uncertainty was set during init
nd = NDData(nd)
assert isinstance(nd.uncertainty, FakeUncertainty)
nd.uncertainty = 10
assert not isinstance(nd.uncertainty, FakeUncertainty)
assert nd.uncertainty.array == 10
def test_mask_setter():
# Since it just changes the _mask attribute everything should work
nd = NDData([1, 2, 3])
nd.mask = True
assert nd.mask
nd.mask = False
assert not nd.mask
# Check that it replaces a mask from init
nd = NDData(nd, mask=True)
assert nd.mask
nd.mask = False
assert not nd.mask
# Init tests
def test_nddata_empty():
with pytest.raises(TypeError):
NDData() # empty initializer should fail
def test_nddata_init_data_nonarray():
inp = [1, 2, 3]
nd = NDData(inp)
assert (np.array(inp) == nd.data).all()
def test_nddata_init_data_ndarray():
# random floats
with NumpyRNGContext(123):
nd = NDData(np.random.random((10, 10)))
assert nd.data.shape == (10, 10)
assert nd.data.size == 100
assert nd.data.dtype == np.dtype(float)
# specific integers
nd = NDData(np.array([[1, 2, 3], [4, 5, 6]]))
assert nd.data.size == 6
assert nd.data.dtype == np.dtype(int)
# Tests to ensure that creating a new NDData object copies by *reference*.
a = np.ones((10, 10))
nd_ref = NDData(a)
a[0, 0] = 0
assert nd_ref.data[0, 0] == 0
# Except we choose copy=True
a = np.ones((10, 10))
nd_ref = NDData(a, copy=True)
a[0, 0] = 0
assert nd_ref.data[0, 0] != 0
def test_nddata_init_data_maskedarray():
with NumpyRNGContext(456):
NDData(np.random.random((10, 10)), mask=np.random.random((10, 10)) > 0.5)
# Another test (just copied here)
with NumpyRNGContext(12345):
a = np.random.randn(100)
marr = np.ma.masked_where(a > 0, a)
nd = NDData(marr)
# check that masks and data match
assert_array_equal(nd.mask, marr.mask)
assert_array_equal(nd.data, marr.data)
    # check that both mask and data are taken by reference
marr.mask[10] = ~marr.mask[10]
marr.data[11] = 123456789
assert_array_equal(nd.mask, marr.mask)
assert_array_equal(nd.data, marr.data)
# or not if we choose copy=True
nd = NDData(marr, copy=True)
marr.mask[10] = ~marr.mask[10]
marr.data[11] = 0
assert nd.mask[10] != marr.mask[10]
assert nd.data[11] != marr.data[11]
@pytest.mark.parametrize("data", [np.array([1, 2, 3]), 5])
def test_nddata_init_data_quantity(data):
    # Test an array and a scalar because a scalar Quantity does not always
    # behave the same way as an array.
quantity = data * u.adu
ndd = NDData(quantity)
assert ndd.unit == quantity.unit
assert_array_equal(ndd.data, np.array(quantity.value))
if ndd.data.size > 1:
# check that if it is an array it is not copied
quantity.value[1] = 100
assert ndd.data[1] == quantity.value[1]
# or is copied if we choose copy=True
ndd = NDData(quantity, copy=True)
quantity.value[1] = 5
assert ndd.data[1] != quantity.value[1]
def test_nddata_init_data_masked_quantity():
a = np.array([2, 3])
q = a * u.m
m = False
mq = np.ma.array(q, mask=m)
nd = NDData(mq)
assert_array_equal(nd.data, a)
    # This test failed before the change in the NDData init because the
    # masked array's data (which was in fact a quantity) was saved directly.
assert nd.unit == u.m
assert not isinstance(nd.data, u.Quantity)
np.testing.assert_array_equal(nd.mask, np.array(m))
def test_nddata_init_data_nddata():
nd1 = NDData(np.array([1]))
nd2 = NDData(nd1)
assert nd2.wcs == nd1.wcs
assert nd2.uncertainty == nd1.uncertainty
assert nd2.mask == nd1.mask
assert nd2.unit == nd1.unit
assert nd2.meta == nd1.meta
assert nd2.psf == nd1.psf
# Check that it is copied by reference
nd1 = NDData(np.ones((5, 5)))
nd2 = NDData(nd1)
assert nd1.data is nd2.data
# Check that it is really copied if copy=True
nd2 = NDData(nd1, copy=True)
nd1.data[2, 3] = 10
assert nd1.data[2, 3] != nd2.data[2, 3]
# Now let's see what happens if we have all explicitly set
nd1 = NDData(
np.array([1]),
mask=False,
uncertainty=StdDevUncertainty(10),
unit=u.s,
meta={"dest": "mordor"},
wcs=WCS(naxis=1),
psf=np.array([10]),
)
nd2 = NDData(nd1)
assert nd2.data is nd1.data
assert nd2.wcs is nd1.wcs
assert nd2.uncertainty.array == nd1.uncertainty.array
assert nd2.mask == nd1.mask
assert nd2.unit == nd1.unit
assert nd2.meta == nd1.meta
assert nd2.psf == nd1.psf
# now what happens if we overwrite them all too
nd3 = NDData(
nd1,
mask=True,
uncertainty=StdDevUncertainty(200),
unit=u.km,
meta={"observer": "ME"},
wcs=WCS(naxis=1),
psf=np.array([20]),
)
assert nd3.data is nd1.data
assert nd3.wcs is not nd1.wcs
assert nd3.uncertainty.array != nd1.uncertainty.array
assert nd3.mask != nd1.mask
assert nd3.unit != nd1.unit
assert nd3.meta != nd1.meta
assert nd3.psf != nd1.psf
def test_nddata_init_data_nddata_subclass():
uncert = StdDevUncertainty(3)
# There might be some incompatible subclasses of NDData around.
bnd = BadNDDataSubclass(False, True, 3, 2, "gollum", 100, 12)
    # Before changing the NDData init this would not have raised an error but
    # would have led to a compromised nddata instance.
with pytest.raises(TypeError):
NDData(bnd)
# but if it has no actual incompatible attributes it passes
bnd_good = BadNDDataSubclass(
np.array([1, 2]),
uncert,
3,
HighLevelWCSWrapper(WCS(naxis=1)),
{"enemy": "black knight"},
u.km,
)
nd = NDData(bnd_good)
assert nd.unit == bnd_good.unit
assert nd.meta == bnd_good.meta
assert nd.uncertainty == bnd_good.uncertainty
assert nd.mask == bnd_good.mask
assert nd.wcs is bnd_good.wcs
assert nd.data is bnd_good.data
def test_nddata_init_data_fail():
# First one is sliceable but has no shape, so should fail.
with pytest.raises(TypeError):
NDData({"a": "dict"})
# This has a shape but is not sliceable
class Shape:
def __init__(self):
self.shape = 5
def __repr__(self):
return "7"
with pytest.raises(TypeError):
NDData(Shape())
def test_nddata_init_data_fakes():
ndd1 = NDData(FakeNumpyArray())
# First make sure that NDData isn't converting its data to a numpy array.
assert isinstance(ndd1.data, FakeNumpyArray)
# Make a new NDData initialized from an NDData
ndd2 = NDData(ndd1)
# Check that the data wasn't converted to numpy
assert isinstance(ndd2.data, FakeNumpyArray)
# Specific parameters
def test_param_uncertainty():
u = StdDevUncertainty(array=np.ones((5, 5)))
d = NDData(np.ones((5, 5)), uncertainty=u)
# Test that the parent_nddata is set.
assert d.uncertainty.parent_nddata is d
# Test conflicting uncertainties (other NDData)
u2 = StdDevUncertainty(array=np.ones((5, 5)) * 2)
d2 = NDData(d, uncertainty=u2)
assert d2.uncertainty is u2
assert d2.uncertainty.parent_nddata is d2
def test_param_wcs():
# Since everything is allowed we only need to test something
nd = NDData([1], wcs=WCS(naxis=1))
assert nd.wcs is not None
# Test conflicting wcs (other NDData)
nd2 = NDData(nd, wcs=WCS(naxis=1))
assert nd2.wcs is not None and nd2.wcs is not nd.wcs
def test_param_meta():
# everything dict-like is allowed
with pytest.raises(TypeError):
NDData([1], meta=3)
nd = NDData([1, 2, 3], meta={})
assert len(nd.meta) == 0
nd = NDData([1, 2, 3])
assert isinstance(nd.meta, OrderedDict)
assert len(nd.meta) == 0
# Test conflicting meta (other NDData)
nd2 = NDData(nd, meta={"image": "sun"})
assert len(nd2.meta) == 1
nd3 = NDData(nd2, meta={"image": "moon"})
assert len(nd3.meta) == 1
assert nd3.meta["image"] == "moon"
def test_param_mask():
# Since everything is allowed we only need to test something
nd = NDData([1], mask=False)
assert not nd.mask
# Test conflicting mask (other NDData)
nd2 = NDData(nd, mask=True)
assert nd2.mask
# (masked array)
nd3 = NDData(np.ma.array([1], mask=False), mask=True)
assert nd3.mask
# (masked quantity)
mq = np.ma.array(np.array([2, 3]) * u.m, mask=False)
nd4 = NDData(mq, mask=True)
assert nd4.mask
def test_param_unit():
with pytest.raises(ValueError):
NDData(np.ones((5, 5)), unit="NotAValidUnit")
NDData([1, 2, 3], unit="meter")
# Test conflicting units (quantity as data)
q = np.array([1, 2, 3]) * u.m
nd = NDData(q, unit="cm")
assert nd.unit != q.unit
assert nd.unit == u.cm
# (masked quantity)
mq = np.ma.array(np.array([2, 3]) * u.m, mask=False)
nd2 = NDData(mq, unit=u.s)
assert nd2.unit == u.s
# (another NDData as data)
nd3 = NDData(nd, unit="km")
assert nd3.unit == u.km
def test_pickle_nddata_with_uncertainty():
ndd = NDData(
np.ones(3), uncertainty=StdDevUncertainty(np.ones(5), unit=u.m), unit=u.m
)
ndd_dumped = pickle.dumps(ndd)
ndd_restored = pickle.loads(ndd_dumped)
assert type(ndd_restored.uncertainty) is StdDevUncertainty
assert ndd_restored.uncertainty.parent_nddata is ndd_restored
assert ndd_restored.uncertainty.unit == u.m
def test_pickle_uncertainty_only():
ndd = NDData(
np.ones(3), uncertainty=StdDevUncertainty(np.ones(5), unit=u.m), unit=u.m
)
uncertainty_dumped = pickle.dumps(ndd.uncertainty)
uncertainty_restored = pickle.loads(uncertainty_dumped)
np.testing.assert_array_equal(ndd.uncertainty.array, uncertainty_restored.array)
assert ndd.uncertainty.unit == uncertainty_restored.unit
    # Even though it has a parent, nothing references that parent after
    # unpickling, so the weakref "dies" immediately once unpickling finishes.
assert uncertainty_restored.parent_nddata is None
def test_pickle_nddata_without_uncertainty():
ndd = NDData(np.ones(3), unit=u.m)
dumped = pickle.dumps(ndd)
ndd_restored = pickle.loads(dumped)
np.testing.assert_array_equal(ndd.data, ndd_restored.data)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaNDData(MetaBaseTest):
test_class = NDData
args = np.array([[1.0]])
# Representation tests
def test_nddata_str():
arr1d = NDData(np.array([1, 2, 3]))
assert str(arr1d) == "[1 2 3]"
arr2d = NDData(np.array([[1, 2], [3, 4]]))
    assert str(arr2d) == textwrap.dedent(
        """
        [[1 2]
         [3 4]]"""[
            1:
        ]
    )
arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
    assert str(arr3d) == textwrap.dedent(
        """
        [[[1 2]
          [3 4]]

         [[5 6]
          [7 8]]]"""[
            1:
        ]
    )
# let's add units!
arr = NDData(np.array([1, 2, 3]), unit="km")
assert str(arr) == "[1 2 3] km"
# what if it had these units?
arr = NDData(np.array([1, 2, 3]), unit="erg cm^-2 s^-1 A^-1")
assert str(arr) == "[1 2 3] erg / (A cm2 s)"
def test_nddata_repr():
# The big test is eval(repr()) should be equal to the original!
arr1d = NDData(np.array([1, 2, 3]))
s = repr(arr1d)
assert s == "NDData([1, 2, 3])"
got = eval(s)
assert np.all(got.data == arr1d.data)
assert got.unit == arr1d.unit
arr2d = NDData(np.array([[1, 2], [3, 4]]))
s = repr(arr2d)
    assert s == textwrap.dedent(
        """
        NDData([[1, 2],
                [3, 4]])"""[
            1:
        ]
    )
got = eval(s)
assert np.all(got.data == arr2d.data)
assert got.unit == arr2d.unit
arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]))
s = repr(arr3d)
    assert s == textwrap.dedent(
        """
        NDData([[[1, 2],
                 [3, 4]],

                [[5, 6],
                 [7, 8]]])"""[
            1:
        ]
    )
got = eval(s)
assert np.all(got.data == arr3d.data)
assert got.unit == arr3d.unit
# let's add units!
arr = NDData(np.array([1, 2, 3]), unit="km")
s = repr(arr)
assert s == "NDData([1, 2, 3], unit='km')"
got = eval(s)
assert np.all(got.data == arr.data)
assert got.unit == arr.unit
# Not supported features
def test_slicing_not_supported():
ndd = NDData(np.ones((5, 5)))
with pytest.raises(TypeError):
ndd[0]
def test_arithmetic_not_supported():
ndd = NDData(np.ones((5, 5)))
with pytest.raises(TypeError):
ndd + ndd
def test_nddata_wcs_setter_error_cases():
ndd = NDData(np.ones((5, 5)))
# Setting with a non-WCS should raise an error
with pytest.raises(TypeError):
ndd.wcs = "I am not a WCS"
naxis = 2
# This should succeed since the WCS is currently None
ndd.wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[0] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
with pytest.raises(ValueError):
# This should fail since the WCS is not None
ndd.wcs = nd_testing._create_wcs_simple(
naxis=naxis,
ctype=["deg"] * naxis,
crpix=[0] * naxis,
crval=[10] * naxis,
cdelt=[1] * naxis,
)
def test_nddata_wcs_setter_with_low_level_wcs():
ndd = NDData(np.ones((5, 5)))
wcs = WCS()
# If the wcs property is set with a low level WCS it should get
# wrapped to high level.
low_level = SlicedLowLevelWCS(wcs, 5)
assert not isinstance(low_level, BaseHighLevelWCS)
ndd.wcs = low_level
assert isinstance(ndd.wcs, BaseHighLevelWCS)
def test_nddata_init_with_low_level_wcs():
wcs = WCS()
low_level = SlicedLowLevelWCS(wcs, 5)
ndd = NDData(np.ones((5, 5)), wcs=low_level)
assert isinstance(ndd.wcs, BaseHighLevelWCS)
class NDDataCustomWCS(NDData):
@property
def wcs(self):
return WCS()
def test_overriden_wcs():
# Check that a sub-class that overrides `.wcs` without providing a setter
# works
NDDataCustomWCS(np.ones((5, 5)))
|
1c3b8f28d178313694e1c690e9d8ad679f9355ca15fae34d4efbc8bcb604652d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module contains tests of a class equivalent to pre-1.0 NDData.
import numpy as np
import pytest
from astropy import units as u
from astropy.nddata.compat import NDDataArray
from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import StdDevUncertainty
from astropy.wcs import WCS
NDDATA_ATTRIBUTES = [
"mask",
"flags",
"uncertainty",
"unit",
"shape",
"size",
"dtype",
"ndim",
"wcs",
"convert_unit_to",
]
def test_nddataarray_has_attributes_of_old_nddata():
ndd = NDDataArray([1, 2, 3])
for attr in NDDATA_ATTRIBUTES:
assert hasattr(ndd, attr)
def test_nddata_simple():
nd = NDDataArray(np.zeros((10, 10)))
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
def test_nddata_parameters():
# Test for issue 4620
nd = NDDataArray(data=np.zeros((10, 10)))
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
# Change order; `data` has to be given explicitly here
nd = NDDataArray(meta={}, data=np.zeros((10, 10)))
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
# Pass uncertainty as second implicit argument
data = np.zeros((10, 10))
uncertainty = StdDevUncertainty(0.1 + np.zeros_like(data))
nd = NDDataArray(data, uncertainty)
assert nd.shape == (10, 10)
assert nd.size == 100
assert nd.dtype == np.dtype(float)
assert nd.uncertainty == uncertainty
def test_nddata_conversion():
nd = NDDataArray(np.array([[1, 2, 3], [4, 5, 6]]))
assert nd.size == 6
assert nd.dtype == np.dtype(int)
@pytest.mark.parametrize(
"flags_in",
[
np.array([True, False]),
np.array([1, 0]),
[True, False],
[1, 0],
np.array(["a", "b"]),
["a", "b"],
],
)
def test_nddata_flags_init_without_np_array(flags_in):
ndd = NDDataArray([1, 1], flags=flags_in)
assert (ndd.flags == flags_in).all()
@pytest.mark.parametrize("shape", [(10,), (5, 5), (3, 10, 10)])
def test_nddata_flags_invalid_shape(shape):
with pytest.raises(ValueError) as exc:
NDDataArray(np.zeros((10, 10)), flags=np.ones(shape))
assert exc.value.args[0] == "dimensions of flags do not match data"
def test_convert_unit_to():
# convert_unit_to should return a copy of its input
d = NDDataArray(np.ones((5, 5)))
d.unit = "km"
d.uncertainty = StdDevUncertainty(0.1 + np.zeros_like(d))
    # Workaround: np.zeros_like did not support the dtype argument until
    # NumPy v1.6, and NDData accepts only a boolean ndarray as mask.
tmp = np.zeros_like(d.data)
d.mask = np.array(tmp, dtype=bool)
d1 = d.convert_unit_to("m")
assert np.all(d1.data == np.array(1000.0))
assert np.all(d1.uncertainty.array == 1000.0 * d.uncertainty.array)
assert d1.unit == u.m
# changing the output mask should not change the original
d1.mask[0, 0] = True
assert d.mask[0, 0] != d1.mask[0, 0]
d.flags = np.zeros_like(d.data)
d1 = d.convert_unit_to("m")
# check that subclasses can require wcs and/or unit to be present and use
# _arithmetic and convert_unit_to
class SubNDData(NDDataArray):
"""
Subclass for test initialization of subclasses in NDData._arithmetic and
NDData.convert_unit_to
"""
def __init__(self, *arg, **kwd):
super().__init__(*arg, **kwd)
if self.unit is None:
raise ValueError("Unit for subclass must be specified")
if self.wcs is None:
raise ValueError("WCS for subclass must be specified")
def test_init_of_subclass_in_convert_unit_to():
data = np.ones([10, 10])
arr1 = SubNDData(data, unit="m", wcs=WCS(naxis=2))
result = arr1.convert_unit_to("km")
np.testing.assert_array_equal(arr1.data, 1000 * result.data)
# Test for issue #4129:
def test_nddataarray_from_nddataarray():
ndd1 = NDDataArray(
[1.0, 4.0, 9.0], uncertainty=StdDevUncertainty([1.0, 2.0, 3.0]), flags=[0, 1, 0]
)
ndd2 = NDDataArray(ndd1)
# Test that the 2 instances point to the same objects and aren't just
# equal; this is explicitly documented for the main data array and we
# probably want to catch any future change in behavior for the other
# attributes too and ensure they are intentional.
assert ndd2.data is ndd1.data
assert ndd2.uncertainty is ndd1.uncertainty
assert ndd2.flags is ndd1.flags
assert ndd2.meta == ndd1.meta
# Test for issue #4137:
def test_nddataarray_from_nddata():
ndd1 = NDData([1.0, 4.0, 9.0], uncertainty=StdDevUncertainty([1.0, 2.0, 3.0]))
ndd2 = NDDataArray(ndd1)
assert ndd2.data is ndd1.data
assert ndd2.uncertainty is ndd1.uncertainty
assert ndd2.meta == ndd1.meta
|