hash: 13e09c02b387f36511200971dd04323086e2d3eaf4740a23eb5c733f19e138ee
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import pickle
from io import StringIO
import pytest
import numpy as np
from astropy.table.serialize import represent_mixins_as_columns
from astropy.utils.data_info import ParentDtypeInfo
from astropy.table.table_helpers import ArrayWrapper
from astropy.coordinates import EarthLocation, SkyCoord
from astropy.table import Table, QTable, join, hstack, vstack, Column, NdarrayMixin
from astropy.table import serialize
from astropy import time
from astropy import coordinates
from astropy import units as u
from astropy.table.column import BaseColumn
from astropy.table import table_helpers
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.metadata import MergeConflictWarning
from astropy.coordinates.tests.test_representation import representation_equal
from astropy.coordinates.tests.helper import skycoord_equal
from .conftest import MIXIN_COLS
def test_attributes(mixin_cols):
"""
Required attributes for a column can be set.
"""
m = mixin_cols['m']
m.info.name = 'a'
assert m.info.name == 'a'
m.info.description = 'a'
assert m.info.description == 'a'
# Cannot set unit for these classes
if isinstance(m, (u.Quantity, coordinates.SkyCoord, time.Time, time.TimeDelta,
coordinates.BaseRepresentationOrDifferential)):
with pytest.raises(AttributeError):
m.info.unit = u.m
else:
m.info.unit = u.m
assert m.info.unit is u.m
m.info.format = 'a'
assert m.info.format == 'a'
m.info.meta = {'a': 1}
assert m.info.meta == {'a': 1}
with pytest.raises(AttributeError):
m.info.bad_attr = 1
with pytest.raises(AttributeError):
m.info.bad_attr
def check_mixin_type(table, table_col, in_col):
# We check for QuantityInfo rather than just isinstance(col, u.Quantity)
# since we want to treat EarthLocation as a mixin, even though it is
# a Quantity subclass.
if ((isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable)
or isinstance(in_col, Column)):
assert type(table_col) is table.ColumnClass
else:
assert type(table_col) is type(in_col)
# Make sure in_col got copied and creating table did not touch it
assert in_col.info.name is None
def test_make_table(table_types, mixin_cols):
"""
Make a table with the columns in mixin_cols, which is an ordered dict of
three cols: 'a' and 'b' are table_types.Column type, and 'm' is a mixin.
"""
t = table_types.Table(mixin_cols)
check_mixin_type(t, t['m'], mixin_cols['m'])
cols = list(mixin_cols.values())
t = table_types.Table(cols, names=('i', 'a', 'b', 'm'))
check_mixin_type(t, t['m'], mixin_cols['m'])
t = table_types.Table(cols)
check_mixin_type(t, t['col3'], mixin_cols['m'])
def test_io_ascii_write():
"""
Test that table with mixin column can be written by io.ascii for
every pure Python writer. No validation of the output is done,
this just confirms no exceptions.
"""
from astropy.io.ascii.connect import _get_connectors_table
t = QTable(MIXIN_COLS)
for fmt in _get_connectors_table():
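# Skip the fast C-based writers (formats named 'ascii.fast_*'); only pure Python writers are exercised.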
if fmt['Write'] and '.fast_' not in fmt['Format']:
out = StringIO()
t.write(out, format=fmt['Format'])
def test_votable_quantity_write(tmpdir):
"""
Test that table with Quantity mixin column can be round-tripped by
io.votable. Note that FITS and HDF5 mixin support are tested (much more
thoroughly) in their respective subpackage tests
(io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py).
"""
t = QTable()
t['a'] = u.Quantity([1, 2, 4], unit='nm')
filename = str(tmpdir.join('table-tmp'))
t.write(filename, format='votable', overwrite=True)
qt = QTable.read(filename, format='votable')
assert isinstance(qt['a'], u.Quantity)
assert qt['a'].unit == 'nm'
@pytest.mark.remote_data
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_write_fits_standard(tmpdir, table_types):
"""
Test that table with Time mixin columns can be written by io.fits.
Validation of the output is done. Test that io.fits writes a table
containing Time mixin columns that can be partially round-tripped
(metadata scale, location).
Note that we postpone checking the "local" scale, since that cannot
be done with format 'cxcsec', as it requires an epoch.
"""
t = table_types([[1, 2], ['string', 'column']])
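# Build 2-D Time columns in every standard scale, all sharing an arbitrary geocentric (x, y, z) site in metres.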
for scale in time.STANDARD_TIME_SCALES:
t['a' + scale] = time.Time([[1, 2], [3, 4]], format='cxcsec',
scale=scale, location=EarthLocation(
-2446354, 4237210, 4077985, unit='m'))
t['b' + scale] = time.Time(['1999-01-01T00:00:00.123456789',
'2010-01-01T00:00:00'], scale=scale)
t['c'] = [3., 4.]
filename = str(tmpdir.join('table-tmp'))
# Show that FITS format succeeds
with pytest.warns(
AstropyUserWarning,
match='Time Column "btai" has no specified location, '
'but global Time Position is present'):
t.write(filename, format='fits', overwrite=True)
with pytest.warns(
AstropyUserWarning,
match='Time column reference position "TRPOSn" is not specified'):
tm = table_types.read(filename, format='fits', astropy_native=True)
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == 'jd'
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ('col0', 'col1', 'c'):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
t[name].info.serialize_method['fits'] = 'formatted_value'
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits')
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_write_fits_local(tmpdir, table_types):
"""
Test that table with a Time mixin with scale local can also be written
by io.fits. Like ``test_io_time_write_fits_standard`` above, but avoiding
``cxcsec`` format, which requires an epoch and thus cannot be used for a
local time scale.
"""
t = table_types([[1, 2], ['string', 'column']])
t['a_local'] = time.Time([[50001, 50002], [50003, 50004]],
format='mjd', scale='local',
location=EarthLocation(-2446354, 4237210, 4077985,
unit='m'))
t['b_local'] = time.Time(['1999-01-01T00:00:00.123456789',
'2010-01-01T00:00:00'], scale='local')
t['c'] = [3., 4.]
filename = str(tmpdir.join('table-tmp'))
# Show that FITS format succeeds
with pytest.warns(AstropyUserWarning,
match='Time Column "b_local" has no specified location'):
t.write(filename, format='fits', overwrite=True)
with pytest.warns(AstropyUserWarning,
match='Time column reference position "TRPOSn" is not specified.'):
tm = table_types.read(filename, format='fits', astropy_native=True)
for ab in ('a', 'b'):
name = ab + '_local'
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == 'jd'
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ('col0', 'col1', 'c'):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format.
for ab in ('a', 'b'):
name = ab + '_local'
t[name].info.serialize_method['fits'] = 'formatted_value'
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits')
for ab in ('a', 'b'):
name = ab + '_local'
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
def test_votable_mixin_write_fail(mixin_cols):
"""
Test that table with mixin columns (excluding Quantity) cannot be written by
io.votable.
"""
t = QTable(mixin_cols)
# Only do this test if there are unsupported column types (i.e. anything besides
# BaseColumn and Quantity class instances).
unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity))
if not unsupported_cols:
pytest.skip("no unsupported column types")
out = StringIO()
with pytest.raises(ValueError) as err:
t.write(out, format='votable')
assert 'cannot write table with mixin column(s)' in str(err.value)
def test_join(table_types):
"""
Join tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1['a'] = table_types.Column(['a', 'b', 'b', 'c'])
t1['i'] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t2 = table_types.Table(t1)
t2['a'] = ['b', 'c', 'a', 'd']
for name, col in MIXIN_COLS.items():
t1[name].info.description = name
t2[name].info.description = name + '2'
for join_type in ('inner', 'left'):
t12 = join(t1, t2, keys='a', join_type=join_type)
idx1 = t12['i_1']
idx2 = t12['i_2']
for name, col in MIXIN_COLS.items():
name1 = name + '_1'
name2 = name + '_2'
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
assert t12[name1].info.description == name
assert t12[name2].info.description == name + '2'
for join_type in ('outer', 'right'):
with pytest.raises(NotImplementedError) as exc:
t12 = join(t1, t2, keys='a', join_type=join_type)
assert 'join requires masking column' in str(exc.value)
with pytest.raises(TypeError) as exc:
t12 = join(t1, t2, keys=['a', 'skycoord'])
assert 'one or more key columns are not sortable' in str(exc.value)
# Join does work for a mixin which is a subclass of np.ndarray
with pytest.warns(MergeConflictWarning,
match="In merged column 'quantity' the 'description' "
"attribute does not match"):
t12 = join(t1, t2, keys=['quantity'])
assert np.all(t12['a_1'] == t1['a'])
def test_hstack(table_types):
"""
Hstack tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1['i'] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t1[name].info.description = name
t1[name].info.meta = {'a': 1}
for join_type in ('inner', 'outer'):
for chop in (True, False):
t2 = table_types.Table(t1)
if chop:
t2 = t2[:-1]
if join_type == 'outer':
with pytest.raises(NotImplementedError) as exc:
t12 = hstack([t1, t2], join_type=join_type)
assert 'hstack requires masking column' in str(exc.value)
continue
t12 = hstack([t1, t2], join_type=join_type)
idx1 = t12['i_1']
idx2 = t12['i_2']
for name, col in MIXIN_COLS.items():
name1 = name + '_1'
name2 = name + '_2'
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
for attr in ('description', 'meta'):
assert getattr(t1[name].info, attr) == getattr(t12[name1].info, attr)
assert getattr(t2[name].info, attr) == getattr(t12[name2].info, attr)
def assert_table_name_col_equal(t, name, col):
"""
Assert all(t[name] == col), with special handling for known mixin cols.
"""
if isinstance(col, coordinates.SkyCoord):
assert np.all(t[name].ra == col.ra)
assert np.all(t[name].dec == col.dec)
elif isinstance(col, coordinates.BaseRepresentationOrDifferential):
assert np.all(representation_equal(t[name], col))
elif isinstance(col, u.Quantity):
if type(t) is QTable:
assert np.all(t[name] == col)
elif isinstance(col, table_helpers.ArrayWrapper):
assert np.all(t[name].data == col.data)
else:
assert np.all(t[name] == col)
def test_get_items(mixin_cols):
"""
Test that slicing / indexing table gives right values and col attrs inherit
"""
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
m = mixin_cols['m']
m.info.name = 'm'
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
t = QTable([m])
for item in ([1, 3], np.array([0, 2]), slice(1, 3)):
t2 = t[item]
m2 = m[item]
assert_table_name_col_equal(t2, 'm', m[item])
for attr in attrs:
assert getattr(t2['m'].info, attr) == getattr(m.info, attr)
assert getattr(m2.info, attr) == getattr(m.info, attr)
def test_info_preserved_pickle_copy_init(mixin_cols):
"""
Test copy, pickle, and init from class roundtrip preserve info. This
tests not only the mixin classes but a regular column as well.
"""
def pickle_roundtrip(c):
return pickle.loads(pickle.dumps(c))
def init_from_class(c):
return c.__class__(c)
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
for colname in ('i', 'm'):
m = mixin_cols[colname]
m.info.name = colname
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class):
m2 = func(m)
for attr in attrs:
# non-native byteorder is not preserved by the last two funcs, _except_ for structured dtypes
if (attr != 'dtype'
or getattr(m.info.dtype, 'isnative', True)
or m.info.dtype.name.startswith('void')
or func in (copy.copy, copy.deepcopy)):
original = getattr(m.info, attr)
else:
# func does not preserve byteorder, check against (native) type.
original = m.info.dtype.newbyteorder('=')
assert getattr(m2.info, attr) == original
def test_add_column(mixin_cols):
"""
Test that adding a column preserves values and attributes
"""
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
m = mixin_cols['m']
assert m.info.name is None
# Make sure adding column in various ways doesn't touch
t = QTable([m], names=['a'])
assert m.info.name is None
t['new'] = m
assert m.info.name is None
m.info.name = 'm'
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
t = QTable([m])
# Add columns m2, m3, m4 by two different methods and test expected equality
t['m2'] = m
m.info.name = 'm3'
t.add_columns([m], copy=True)
m.info.name = 'm4'
t.add_columns([m], copy=False)
for name in ('m2', 'm3', 'm4'):
assert_table_name_col_equal(t, name, m)
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t[name].info, attr)
# Also check that one can set using a scalar.
s = m[0]
if type(s) is type(m) and 'info' in s.__dict__:
# We're not going to worry about testing classes for which scalars
# are a different class than the real array, or where info is not copied.
t['s'] = m[0]
assert_table_name_col_equal(t, 's', m[0])
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)
# While we're at it, also check a length-1 table.
t = QTable([m[1:2]], names=['m'])
if type(s) is type(m) and 'info' in s.__dict__:
t['s'] = m[0]
assert_table_name_col_equal(t, 's', m[0])
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)
def test_vstack():
"""
Vstack tables with mixin cols.
"""
t1 = QTable(MIXIN_COLS)
t2 = QTable(MIXIN_COLS)
with pytest.raises(NotImplementedError):
vstack([t1, t2])
def test_insert_row(mixin_cols):
"""
Test inserting a row, which works for Column, Quantity, Time and SkyCoord.
"""
t = QTable(mixin_cols)
t0 = t.copy()
t['m'].info.description = 'd'
idxs = [0, -1, 1, 2, 3]
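# Inserting a copy of the last row at index 1 of the 4-row table reorders the original rows as [0, 3, 1, 2, 3].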
if isinstance(t['m'], (u.Quantity, Column, time.Time, time.TimeDelta, coordinates.SkyCoord)):
t.insert_row(1, t[-1])
for name in t.colnames:
col = t[name]
if isinstance(col, coordinates.SkyCoord):
assert skycoord_equal(col, t0[name][idxs])
else:
assert np.all(col == t0[name][idxs])
assert t['m'].info.description == 'd'
else:
with pytest.raises(ValueError) as exc:
t.insert_row(1, t[-1])
assert "Unable to insert row" in str(exc.value)
def test_insert_row_bad_unit():
"""
Insert a row into a QTable with the wrong unit
"""
t = QTable([[1] * u.m])
with pytest.raises(ValueError) as exc:
t.insert_row(0, (2 * u.m / u.s,))
assert "'m / s' (speed/velocity) and 'm' (length) are not convertible" in str(exc.value)
def test_convert_np_array(mixin_cols):
"""
Test that converting to numpy array creates an object dtype and that
each instance in the array has the expected type.
"""
t = QTable(mixin_cols)
ta = t.as_array()
m = mixin_cols['m']
dtype_kind = m.dtype.kind if hasattr(m, 'dtype') else 'O'
assert ta['m'].dtype.kind == dtype_kind
def test_assignment_and_copy():
"""
Test that assignment of an int, slice, and fancy index works.
Along the way test that copying table works.
"""
for name in ('quantity', 'arraywrap'):
m = MIXIN_COLS[name]
t0 = QTable([m], names=['m'])
for i0, i1 in ((1, 2),
(slice(0, 2), slice(1, 3)),
(np.array([1, 2]), np.array([2, 3]))):
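# Assign into the copy only; t0 must keep its original values, which proves copy() made a deep copy.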
t = t0.copy()
t['m'][i0] = m[i1]
if name == 'arraywrap':
assert np.all(t['m'].data[i0] == m.data[i1])
assert np.all(t0['m'].data[i0] == m.data[i0])
assert np.all(t0['m'].data[i0] != t['m'].data[i0])
else:
assert np.all(t['m'][i0] == m[i1])
assert np.all(t0['m'][i0] == m[i0])
assert np.all(t0['m'][i0] != t['m'][i0])
def test_conversion_qtable_table():
"""
Test that a table round trips from QTable => Table => QTable
"""
qt = QTable(MIXIN_COLS)
names = qt.colnames
for name in names:
qt[name].info.description = name
t = Table(qt)
for name in names:
assert t[name].info.description == name
if name == 'quantity':
assert np.all(t['quantity'] == qt['quantity'].value)
assert np.all(t['quantity'].unit is qt['quantity'].unit)
assert isinstance(t['quantity'], t.ColumnClass)
else:
assert_table_name_col_equal(t, name, qt[name])
qt2 = QTable(qt)
for name in names:
assert qt2[name].info.description == name
assert_table_name_col_equal(qt2, name, qt[name])
def test_setitem_as_column_name():
"""
Test for mixin-related regression described in #3321.
"""
t = Table()
t['a'] = ['x', 'y']
t['b'] = 'b' # Previously was failing with KeyError
assert np.all(t['a'] == ['x', 'y'])
assert np.all(t['b'] == ['b', 'b'])
def test_quantity_representation():
"""
Test that table representation of quantities does not have unit
"""
t = QTable([[1, 2] * u.m])
assert t.pformat() == ['col0',
' m ',
'----',
' 1.0',
' 2.0']
def test_representation_representation():
"""
Test that Representations are represented correctly.
"""
# With no unit we get "None" in the unit row
c = coordinates.CartesianRepresentation([0], [1], [0], unit=u.one)
t = Table([c])
assert t.pformat() == [' col0 ',
'------------',
'(0., 1., 0.)']
c = coordinates.CartesianRepresentation([0], [1], [0], unit='m')
t = Table([c])
assert t.pformat() == [' col0 ',
' m ',
'------------',
'(0., 1., 0.)']
c = coordinates.SphericalRepresentation([10]*u.deg, [20]*u.deg, [1]*u.pc)
t = Table([c])
assert t.pformat() == [' col0 ',
' deg, deg, pc ',
'--------------',
'(10., 20., 1.)']
c = coordinates.UnitSphericalRepresentation([10]*u.deg, [20]*u.deg)
t = Table([c])
assert t.pformat() == [' col0 ',
' deg ',
'----------',
'(10., 20.)']
c = coordinates.SphericalCosLatDifferential(
[10]*u.mas/u.yr, [2]*u.mas/u.yr, [10]*u.km/u.s)
t = Table([c])
assert t.pformat() == [' col0 ',
'mas / yr, mas / yr, km / s',
'--------------------------',
' (10., 2., 10.)']
def test_skycoord_representation():
"""
Test that skycoord representation works, both in the way that the
values are output and in changing the frame representation.
"""
# With no unit we get "None" in the unit row
c = coordinates.SkyCoord([0], [1], [0], representation_type='cartesian')
t = Table([c])
assert t.pformat() == [' col0 ',
'None,None,None',
'--------------',
' 0.0,1.0,0.0']
# Test that info works with a dynamically changed representation
c = coordinates.SkyCoord([0], [1], [0], unit='m', representation_type='cartesian')
t = Table([c])
assert t.pformat() == [' col0 ',
' m,m,m ',
'-----------',
'0.0,1.0,0.0']
t['col0'].representation_type = 'unitspherical'
assert t.pformat() == [' col0 ',
'deg,deg ',
'--------',
'90.0,0.0']
t['col0'].representation_type = 'cylindrical'
assert t.pformat() == [' col0 ',
' m,deg,m ',
'------------',
'1.0,90.0,0.0']
def test_ndarray_mixin():
"""
Test directly adding a plain structured array into a table instead of the
view as an NdarrayMixin. Once added as an NdarrayMixin then all the previous
tests apply.
"""
a = np.array([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')],
dtype='<i4,|U1')
b = np.array([(10, 'aa'), (20, 'bb'), (30, 'cc'), (40, 'dd')],
dtype=[('x', 'i4'), ('y', 'U2')])
c = np.rec.fromrecords([(100, 'raa'), (200, 'rbb'), (300, 'rcc'), (400, 'rdd')],
names=['rx', 'ry'])
d = np.arange(8).reshape(4, 2).view(NdarrayMixin)
# Add one during initialization and the next as a new column.
t = Table([a], names=['a'])
t['b'] = b
t['c'] = c
t['d'] = d
assert isinstance(t['a'], NdarrayMixin)
assert t['a'][1][1] == a[1][1]
assert t['a'][2][0] == a[2][0]
assert t[1]['a'][1] == a[1][1]
assert t[2]['a'][0] == a[2][0]
assert isinstance(t['b'], NdarrayMixin)
assert t['b'][1]['x'] == b[1]['x']
assert t['b'][1]['y'] == b[1]['y']
assert t[1]['b']['x'] == b[1]['x']
assert t[1]['b']['y'] == b[1]['y']
assert isinstance(t['c'], NdarrayMixin)
assert t['c'][1]['rx'] == c[1]['rx']
assert t['c'][1]['ry'] == c[1]['ry']
assert t[1]['c']['rx'] == c[1]['rx']
assert t[1]['c']['ry'] == c[1]['ry']
assert isinstance(t['d'], NdarrayMixin)
assert t['d'][1][0] == d[1][0]
assert t['d'][1][1] == d[1][1]
assert t[1]['d'][0] == d[1][0]
assert t[1]['d'][1] == d[1][1]
assert t.pformat() == [' a b c d [2] ',
'-------- ---------- ------------ ------',
"(1, 'a') (10, 'aa') (100, 'raa') 0 .. 1",
"(2, 'b') (20, 'bb') (200, 'rbb') 2 .. 3",
"(3, 'c') (30, 'cc') (300, 'rcc') 4 .. 5",
"(4, 'd') (40, 'dd') (400, 'rdd') 6 .. 7"]
def test_possible_string_format_functions():
"""
The QuantityInfo info class for Quantity implements a
possible_string_format_functions() method that overrides the
standard pprint._possible_string_format_functions() function.
Test this.
"""
t = QTable([[1, 2] * u.m])
t['col0'].info.format = '%.3f'
assert t.pformat() == [' col0',
' m ',
'-----',
'1.000',
'2.000']
t['col0'].info.format = 'hi {:.3f}'
assert t.pformat() == [' col0 ',
' m ',
'--------',
'hi 1.000',
'hi 2.000']
t['col0'].info.format = '.4f'
assert t.pformat() == [' col0 ',
' m ',
'------',
'1.0000',
'2.0000']
def test_rename_mixin_columns(mixin_cols):
"""
Rename a mixin column.
"""
t = QTable(mixin_cols)
tc = t.copy()
t.rename_column('m', 'mm')
assert t.colnames == ['i', 'a', 'b', 'mm']
if isinstance(t['mm'], table_helpers.ArrayWrapper):
assert np.all(t['mm'].data == tc['m'].data)
elif isinstance(t['mm'], coordinates.SkyCoord):
assert np.all(t['mm'].ra == tc['m'].ra)
assert np.all(t['mm'].dec == tc['m'].dec)
elif isinstance(t['mm'], coordinates.BaseRepresentationOrDifferential):
assert np.all(representation_equal(t['mm'], tc['m']))
else:
assert np.all(t['mm'] == tc['m'])
def test_represent_mixins_as_columns_unit_fix():
"""
If the unit is invalid for a column that gets serialized this would
cause an exception. Fixed in #7481.
"""
t = Table({'a': [1, 2]}, masked=True)
t['a'].unit = 'not a valid unit'
t['a'].mask[1] = True
serialize.represent_mixins_as_columns(t)
def test_primary_data_column_gets_description():
"""
If the mixin defines a primary data column, that should get the
description, format, etc., so no __info__ should be needed.
"""
t = QTable({'a': [1, 2] * u.m})
t['a'].info.description = 'parrot'
t['a'].info.format = '7.2f'
tser = serialize.represent_mixins_as_columns(t)
assert '__info__' not in tser.meta['__serialized_columns__']['a']
assert tser['a'].format == '7.2f'
assert tser['a'].description == 'parrot'
def test_skycoord_with_velocity():
# Regression test for gh-6447
sc = SkyCoord([1], [2], unit='deg', galcen_v_sun=None)
t = Table([sc])
s = StringIO()
t.write(s, format='ascii.ecsv', overwrite=True)
s.seek(0)
t2 = Table.read(s.read(), format='ascii.ecsv')
assert skycoord_equal(t2['col0'], sc)
@pytest.mark.parametrize('table_cls', [Table, QTable])
def test_ensure_input_info_is_unchanged(table_cls):
"""If a mixin input to a table has no info, it should stay that way.
This matters since having 'info' slows down slicing, etc.
See gh-11066.
"""
q = [1, 2] * u.m
assert 'info' not in q.__dict__
t = table_cls([q], names=['q'])
assert 'info' not in q.__dict__
t = table_cls([q])
assert 'info' not in q.__dict__
t = table_cls({'q': q})
assert 'info' not in q.__dict__
t['q2'] = q
assert 'info' not in q.__dict__
sc = SkyCoord([1, 2], [2, 3], unit='deg')
t['sc'] = sc
assert 'info' not in sc.__dict__
def test_bad_info_class():
"""Make a mixin column class that does not trigger the machinery to generate
a pure column representation"""
class MyArrayWrapper(ArrayWrapper):
info = ParentDtypeInfo()
t = Table()
t['tm'] = MyArrayWrapper([0, 1, 2])
out = StringIO()
match = r"failed to represent column 'tm' \(MyArrayWrapper\) as one or more Column subclasses"
with pytest.raises(TypeError, match=match):
represent_mixins_as_columns(t)
hash: f3f00efdf0201bbfcefc025ec88f9b58cbbb4b26f2063c208745063c6ab7c521
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.convolution.utils import discretize_model
from astropy.modeling.functional_models import (Box1D, Box2D, Gaussian1D, Gaussian2D,
RickerWavelet1D, RickerWavelet2D)
from astropy.modeling.tests.example_models import models_1D, models_2D
from astropy.modeling.tests.test_models import create_model
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
modes = ['center', 'linear_interp', 'oversample']
test_models_1D = [Gaussian1D, Box1D, RickerWavelet1D]
test_models_2D = [Gaussian2D, Box2D, RickerWavelet2D]
@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_1D, modes)))
def test_pixel_sum_1D(model_class, mode):
"""
Test if the sum of all pixels corresponds nearly to the integral.
"""
if model_class == Box1D and mode == "center":
pytest.skip("Non integrating mode. Skip integral test.")
parameters = models_1D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(model, models_1D[model_class]['x_lim'], mode=mode)
assert_allclose(values.sum(), models_1D[model_class]['integral'], atol=0.0001)
@pytest.mark.parametrize('mode', modes)
def test_gaussian_eval_1D(mode):
"""
Discretize Gaussian with different modes and check
if result is at least similar to Gaussian1D.eval().
"""
model = Gaussian1D(1, 0, 20)
x = np.arange(-100, 101)
values = model(x)
disc_values = discretize_model(model, (-100, 101), mode=mode)
assert_allclose(values, disc_values, atol=0.001)
@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_2D, modes)))
def test_pixel_sum_2D(model_class, mode):
"""
Test if the sum of all pixels corresponds nearly to the integral.
"""
if model_class == Box2D and mode == "center":
pytest.skip("Non integrating mode. Skip integral test.")
parameters = models_2D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(model, models_2D[model_class]['x_lim'],
models_2D[model_class]['y_lim'], mode=mode)
assert_allclose(values.sum(), models_2D[model_class]['integral'], atol=0.0001)
@pytest.mark.parametrize(('model_class', 'mode'), list(itertools.product(test_models_2D, modes)))
def test_pixel_sum_compound_2D(model_class, mode):
"""
Test if the sum of all pixels of a compound model corresponds nearly to the integral.
"""
if model_class == Box2D and mode == "center":
pytest.skip("Non integrating mode. Skip integral test.")
parameters = models_2D[model_class]
model = create_model(model_class, parameters)
values = discretize_model(model + model, models_2D[model_class]['x_lim'],
models_2D[model_class]['y_lim'], mode=mode)
model_integral = 2 * models_2D[model_class]['integral']
assert_allclose(values.sum(), model_integral, atol=0.0001)
@pytest.mark.parametrize('mode', modes)
def test_gaussian_eval_2D(mode):
"""
Discretize Gaussian with different modes and check
if result is at least similar to Gaussian2D.eval()
"""
model = Gaussian2D(0.01, 0, 0, 1, 1)
x = np.arange(-2, 3)
y = np.arange(-2, 3)
x, y = np.meshgrid(x, y)
values = model(x, y)
disc_values = discretize_model(model, (-2, 3), (-2, 3), mode=mode)
assert_allclose(values, disc_values, atol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_gaussian_eval_2D_integrate_mode():
"""
Discretize Gaussian with integrate mode
"""
model_list = [Gaussian2D(.01, 0, 0, 2, 2),
Gaussian2D(.01, 0, 0, 1, 2),
Gaussian2D(.01, 0, 0, 2, 1)]
x = np.arange(-2, 3)
y = np.arange(-2, 3)
x, y = np.meshgrid(x, y)
for model in model_list:
values = model(x, y)
disc_values = discretize_model(model, (-2, 3), (-2, 3), mode='integrate')
assert_allclose(values, disc_values, atol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_subpixel_gauss_1D():
"""
Test subpixel accuracy of the integrate mode with gaussian 1D model.
"""
gauss_1D = Gaussian1D(1, 0, 0.1)
values = discretize_model(gauss_1D, (-1, 2), mode='integrate', factor=100)
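# Analytic integral of a 1-D Gaussian: amplitude * stddev * sqrt(2*pi) = 1 * 0.1 * sqrt(2*pi).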
assert_allclose(values.sum(), np.sqrt(2 * np.pi) * 0.1, atol=0.00001)
@pytest.mark.skipif('not HAS_SCIPY')
def test_subpixel_gauss_2D():
"""
Test subpixel accuracy of the integrate mode with gaussian 2D model.
"""
gauss_2D = Gaussian2D(1, 0, 0, 0.1, 0.1)
values = discretize_model(gauss_2D, (-1, 2), (-1, 2), mode='integrate', factor=100)
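# Analytic volume of a 2-D Gaussian: 2*pi * amplitude * x_stddev * y_stddev = 2*pi * 0.01.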
assert_allclose(values.sum(), 2 * np.pi * 0.01, atol=0.00001)
def test_discretize_callable_1d():
"""
Test discretize when a 1d function is passed.
"""
def f(x):
return x ** 2
y = discretize_model(f, (-5, 6))
assert_allclose(y, np.arange(-5, 6) ** 2)
def test_discretize_callable_2d():
"""
Test discretize when a 2d function is passed.
"""
def f(x, y):
return x ** 2 + y ** 2
actual = discretize_model(f, (-5, 6), (-5, 6))
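# np.indices yields (row, col) grids, i.e. (y, x); shifting by 5 gives coordinates on -5..5.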
y, x = (np.indices((11, 11)) - 5)
desired = x ** 2 + y ** 2
assert_allclose(actual, desired)
def test_type_exception():
"""
Test type exception.
"""
with pytest.raises(TypeError) as exc:
discretize_model(float(0), (-10, 11))
assert exc.value.args[0] == 'Model must be callable.'
def test_dim_exception_1d():
"""
Test dimension exception 1d.
"""
def f(x):
return x ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10, 11), (-10, 11))
assert exc.value.args[0] == "y range specified, but model is only 1-d."
def test_dim_exception_2d():
"""
Test dimension exception 2d.
"""
def f(x, y):
return x ** 2 + y ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10, 11))
assert exc.value.args[0] == "y range not specified, but model is 2-d"
def test_float_x_range_exception():
def f(x, y):
return x ** 2 + y ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10.002, 11.23))
assert exc.value.args[0] == ("The difference between the upper and lower"
" limit of 'x_range' must be a whole number.")
def test_float_y_range_exception():
def f(x, y):
return x ** 2 + y ** 2
with pytest.raises(ValueError) as exc:
discretize_model(f, (-10, 11), (-10.002, 11.23))
assert exc.value.args[0] == ("The difference between the upper and lower"
" limit of 'y_range' must be a whole number.")
def test_discretize_oversample():
gauss_2D = Gaussian2D(amplitude=1.0, x_mean=5.,
y_mean=125., x_stddev=0.75, y_stddev=3)
values = discretize_model(gauss_2D,
x_range=[0, 10],
y_range=[100, 135],
mode='oversample', factor=10)
vmax = np.max(values)
vmax_yx = np.unravel_index(values.argmax(), values.shape)
values_osf1 = discretize_model(gauss_2D,
x_range=[0, 10],
y_range=[100, 135],
mode='oversample', factor=1)
values_center = discretize_model(gauss_2D,
x_range=[0, 10],
y_range=[100, 135],
mode='center')
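# Expected grid: 35 rows spanning y = 100..135 by 10 columns spanning x = 0..10,
# peaking at the Gaussian mean (y=125, x=5) -> index (25, 5).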
assert values.shape == (35, 10)
assert_allclose(vmax, 0.927, atol=1e-3)
assert vmax_yx == (25, 5)
assert_allclose(values_center, values_osf1)
hash: e15b390ccd6bc7b673e933bf264168888ca0a68bcf8e76d77a4df082ef25ec79
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test statistic functions
"""
import numpy as np
# pylint: disable=invalid-name
import pytest
from numpy.testing import assert_almost_equal
from astropy.modeling.fitting import LinearLSQFitter
from astropy.modeling.models import Identity, Mapping
from astropy.modeling.statistic import leastsquare, leastsquare_1d, leastsquare_2d, leastsquare_3d
class TestLeastSquare_XD:
"""Tests for leastsquare with pre-specified number of dimensions."""
@classmethod
def setup_class(cls):
cls.model1D = Identity(n_inputs=1)
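# Identity passes its inputs straight through, and piping into Mapping((0,), ...) keeps
# only the first input, so each model below returns x and the residual against data == x
# is exactly zero (hence lsq_exp = 0).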
cls.model2D = Identity(n_inputs=2) | Mapping((0,), n_inputs=2)
cls.model3D = Identity(n_inputs=3) | Mapping((0,), n_inputs=3)
cls.data = cls.x = cls.y = cls.z = np.linspace(0, 10, num=100)
cls.lsq_exp = 0
def test_1d_no_weights(self):
lsq = leastsquare_1d(self.data, self.model1D, None, self.x)
assert_almost_equal(lsq, self.lsq_exp)
def test_1d_with_weights(self):
lsq = leastsquare_1d(self.data, self.model1D, np.ones(100), self.x)
assert_almost_equal(lsq, self.lsq_exp)
def test_2d_no_weights(self):
lsq = leastsquare_2d(self.data, self.model2D, None, self.x, self.y)
assert_almost_equal(lsq, self.lsq_exp)
def test_2d_with_weights(self):
lsq = leastsquare_2d(
self.data, self.model2D, np.ones(100), self.x, self.y
)
assert_almost_equal(lsq, self.lsq_exp)
def test_3d_no_weights(self):
lsq = leastsquare_3d(
self.data, self.model3D, None, self.x, self.y, self.z
)
assert_almost_equal(lsq, self.lsq_exp)
def test_3d_with_weights(self):
lsq = leastsquare_3d(
self.data, self.model3D, np.ones(100), self.x, self.y, self.z
)
assert_almost_equal(lsq, self.lsq_exp)
class TestLeastSquare_ND:
"""Tests for leastsquare."""
@classmethod
def setup_class(cls):
cls.model1D = Identity(n_inputs=1)
cls.model3D = Identity(n_inputs=3) | Mapping((0,), n_inputs=3)
cls.data = cls.x = cls.y = cls.z = np.linspace(0, 10, num=100)
cls.lsq_exp = 0
def test_1d_no_weights(self):
lsq = leastsquare(self.data, self.model1D, None, self.x)
assert_almost_equal(lsq, self.lsq_exp)
def test_1d_with_weights(self):
lsq = leastsquare(self.data, self.model1D, np.ones(100), self.x)
assert_almost_equal(lsq, self.lsq_exp)
def test_3d_no_weights(self):
lsq = leastsquare(
self.data, self.model3D, None, self.x, self.y, self.z
)
assert_almost_equal(lsq, self.lsq_exp)
def test_3d_with_weights(self):
lsq = leastsquare(
self.data, self.model3D, np.ones(100), self.x, self.y, self.z
)
assert_almost_equal(lsq, self.lsq_exp)
def test_shape_mismatch(self):
with pytest.raises(ValueError):
leastsquare(0, self.model1D, None, self.x)
hash: a85c3de06c3b638a198f773b5cbafe07109f9d58886ce36f6277b7ba9b09338e
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test sky projections defined in WCS Paper II"""
# pylint: disable=invalid-name, no-member
import os
import unittest.mock as mk
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal
from astropy import units as u
from astropy import wcs
from astropy.io import fits
from astropy.modeling import projections
from astropy.modeling.parameters import InputParameterError
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import get_pkg_data_filename
def test_new_wcslib_projections():
# Test that we are aware of all WCSLIB projections.
# Detect if a new WCSLIB release introduced new projections.
assert not set(wcs.PRJ_CODES).symmetric_difference(
projections.projcodes + projections._NOT_SUPPORTED_PROJ_CODES
)
def test_Projection_properties():
projection = projections.Sky2Pix_PlateCarree()
assert projection.n_inputs == 2
assert projection.n_outputs == 2
PIX_COORDINATES = [-10, 30]
MAPS_DIR = os.path.join(os.pardir, os.pardir, "wcs", "tests", "data", "maps")
pars = [(x,) for x in projections.projcodes]
# There is no groundtruth file for the XPH projection available here:
# https://www.atnf.csiro.au/people/mcalabre/WCS/example_data.html
pars.remove(('XPH',))
@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
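# Collect whichever projection parameters PV2_1..PV2_3 the header defines.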
for i in range(3):
key = f'PV2_{i + 1}'
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd']
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model(*params)
x, y = tinv(wcslibout['phi'], wcslibout['theta'])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
assert isinstance(tinv.prjprm, wcs.Prjprm)
@pytest.mark.parametrize(('code',), pars)
def test_Pix2Sky(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = f'PV2_{i + 1}'
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
model = getattr(projections, 'Pix2Sky_' + code)
tanprj = model(*params)
phi, theta = tanprj(*PIX_COORDINATES)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
@pytest.mark.parametrize(('code',), pars)
def test_Sky2Pix_unit(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = f'PV2_{i + 1}'
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_pix = w.wcs.s2p(wcslibout['world'], 1)['pixcrd']
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model(*params)
x, y = tinv(wcslibout['phi'] * u.deg, wcslibout['theta'] * u.deg)
assert_quantity_allclose(x, wcs_pix[:, 0] * u.deg)
assert_quantity_allclose(y, wcs_pix[:, 1] * u.deg)
@pytest.mark.parametrize(('code',), pars)
def test_Pix2Sky_unit(code):
"""Check astropy model eval against wcslib eval"""
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
params = []
for i in range(3):
key = f'PV2_{i + 1}'
if key in header:
params.append(header[key])
w = wcs.WCS(header)
w.wcs.crval = [0., 0.]
w.wcs.crpix = [0, 0]
w.wcs.cdelt = [1, 1]
wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
model = getattr(projections, 'Pix2Sky_' + code)
tanprj = model(*params)
phi, theta = tanprj(*PIX_COORDINATES * u.deg)
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.rad))
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.arcmin))
assert_quantity_allclose(phi, wcs_phi * u.deg)
assert_quantity_allclose(theta, wcs_theta * u.deg)
@pytest.mark.parametrize(('code',), pars)
def test_projection_default(code):
"""Check astropy model eval with default parameters"""
# Just makes sure that the default parameter values are reasonable
# and accepted by wcslib.
model = getattr(projections, 'Sky2Pix_' + code)
tinv = model()
x, y = tinv(45, 45)
model = getattr(projections, 'Pix2Sky_' + code)
tinv = model()
x, y = tinv(0, 0)
class TestZenithalPerspective:
"""Test Zenithal Perspective projection"""
def setup_class(self):
ID = 'AZP'
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{ID}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
self.wazp = wcs.WCS(header)
self.wazp.wcs.crpix = np.array([0., 0.])
self.wazp.wcs.crval = np.array([0., 0.])
self.wazp.wcs.cdelt = np.array([1., 1.])
self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
self.azp = projections.Pix2Sky_ZenithalPerspective(*self.pv_kw)
def test_AZP_p2s(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
phi, theta = self.azp(-10, 30)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
def test_AZP_s2p(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd']
x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta'])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
def test_validate(self):
message = "Zenithal perspective projection is not defined for mu = -1"
with pytest.raises(InputParameterError) as err:
projections.Pix2Sky_ZenithalPerspective(-1)
assert str(err.value) == message
with pytest.raises(InputParameterError) as err:
projections.Sky2Pix_ZenithalPerspective(-1)
assert str(err.value) == message
with pytest.raises(InputParameterError) as err:
projections.Pix2Sky_SlantZenithalPerspective(-1)
assert str(err.value) == message
with pytest.raises(InputParameterError) as err:
projections.Sky2Pix_SlantZenithalPerspective(-1)
assert str(err.value) == message
class TestCylindricalPerspective:
"""Test cylindrical perspective projection"""
def setup_class(self):
ID = "CYP"
wcs_map = os.path.join(MAPS_DIR, f"1904-66_{ID}.hdr")
test_file = get_pkg_data_filename(wcs_map)
header = fits.Header.fromfile(test_file, endcard=False, padding=False)
self.wazp = wcs.WCS(header)
self.wazp.wcs.crpix = np.array([0., 0.])
self.wazp.wcs.crval = np.array([0., 0.])
self.wazp.wcs.cdelt = np.array([1., 1.])
self.pv_kw = [kw[2] for kw in self.wazp.wcs.get_pv()]
self.azp = projections.Pix2Sky_CylindricalPerspective(*self.pv_kw)
def test_CYP_p2s(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_phi = wcslibout['phi']
wcs_theta = wcslibout['theta']
phi, theta = self.azp(-10, 30)
assert_almost_equal(np.asarray(phi), wcs_phi)
assert_almost_equal(np.asarray(theta), wcs_theta)
def test_CYP_s2p(self):
wcslibout = self.wazp.wcs.p2s([[-10, 30]], 1)
wcs_pix = self.wazp.wcs.s2p(wcslibout['world'], 1)['pixcrd']
x, y = self.azp.inverse(wcslibout['phi'], wcslibout['theta'])
assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
def test_validate(self):
message0 = "CYP projection is not defined for mu = -lambda"
message1 = "CYP projection is not defined for lambda = -mu"
# Pix2Sky_CylindricalPerspective
with pytest.raises(InputParameterError) as err:
projections.Pix2Sky_CylindricalPerspective(1, -1)
assert str(err.value) == message0 or str(err.value) == message1
with pytest.raises(InputParameterError) as err:
projections.Pix2Sky_CylindricalPerspective(-1, 1)
assert str(err.value) == message0 or str(err.value) == message1
model = projections.Pix2Sky_CylindricalPerspective()
with pytest.raises(InputParameterError) as err:
model.mu = -1
assert str(err.value) == message0
with pytest.raises(InputParameterError) as err:
model.lam = -1
assert str(err.value) == message1
# Sky2Pix_CylindricalPerspective
with pytest.raises(InputParameterError) as err:
projections.Sky2Pix_CylindricalPerspective(1, -1)
assert str(err.value) == message0 or str(err.value) == message1
with pytest.raises(InputParameterError) as err:
projections.Sky2Pix_CylindricalPerspective(-1, 1)
assert str(err.value) == message0 or str(err.value) == message1
model = projections.Sky2Pix_CylindricalPerspective()
with pytest.raises(InputParameterError) as err:
model.mu = -1
assert str(err.value) == message0
with pytest.raises(InputParameterError) as err:
model.lam = -1
assert str(err.value) == message1
def test_AffineTransformation2D():
# Simple test with a scale and translation
model = projections.AffineTransformation2D(
matrix=[[2, 0], [0, 2]], translation=[1, 1])
# Coordinates for vertices of a rectangle
rect = [[0, 0], [1, 0], [0, 3], [1, 3]]
x, y = zip(*rect)
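# Each vertex maps to 2*(x, y) + (1, 1).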
new_rect = np.vstack(model(x, y)).T
assert np.all(new_rect == [[1, 1], [3, 1], [1, 7], [3, 7]])
# Matrix validation error
with pytest.raises(InputParameterError) as err:
model.matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert str(err.value) ==\
"Expected transformation matrix to be a 2x2 array"
# Translation validation error
with pytest.raises(InputParameterError) as err:
model.translation = [1, 2, 3]
assert str(err.value) ==\
"Expected translation vector to be a 2 element row or column vector array"
with pytest.raises(InputParameterError) as err:
model.translation = [[1], [2]]
assert str(err.value) ==\
"Expected translation vector to be a 2 element row or column vector array"
with pytest.raises(InputParameterError) as err:
model.translation = [[1, 2, 3]]
assert str(err.value) ==\
"Expected translation vector to be a 2 element row or column vector array"
# Incompatible shape error
a = np.array([[1], [2], [3], [4]])
b = a.ravel()
with mk.patch.object(np, 'vstack', autospec=True,
side_effect=[a, b]) as mk_vstack:
message = "Incompatible input shapes"
with pytest.raises(ValueError) as err:
model(x, y)
assert str(err.value) == message
with pytest.raises(ValueError) as err:
model(x, y)
assert str(err.value) == message
assert mk_vstack.call_count == 2
# Input shape evaluation error
x = np.array([1, 2])
y = np.array([1, 2, 3])
with pytest.raises(ValueError) as err:
model.evaluate(x, y, model.matrix, model.translation)
assert str(err.value) ==\
"Expected input arrays to have the same shape"
def test_AffineTransformation2D_inverse():
# Test non-invertible model
model1 = projections.AffineTransformation2D(
matrix=[[1, 1], [1, 1]])
with pytest.raises(InputParameterError):
model1.inverse
model2 = projections.AffineTransformation2D(
matrix=[[1.2, 3.4], [5.6, 7.8]], translation=[9.1, 10.11])
# Coordinates for vertices of a rectangle
rect = [[0, 0], [1, 0], [0, 3], [1, 3]]
x, y = zip(*rect)
x_new, y_new = model2.inverse(*model2(x, y))
assert_allclose([x, y], [x_new, y_new], atol=1e-10)
model3 = projections.AffineTransformation2D(
matrix=[[1.2, 3.4], [5.6, 7.8]] * u.m, translation=[9.1, 10.11] * u.m)
x_new, y_new = model3.inverse(*model3(x * u.m, y * u.m))
assert_allclose([x, y], [x_new, y_new], atol=1e-10)
model4 = projections.AffineTransformation2D(
matrix=[[1.2, 3.4], [5.6, 7.8]] * u.m, translation=[9.1, 10.11] * u.km)
with pytest.raises(ValueError) as err:
model4.inverse(*model4(x * u.m, y * u.m))
assert str(err.value) ==\
"matrix and translation must have the same units."
def test_c_projection_striding():
# This is just a simple test to make sure that the striding is
# handled correctly in the projection C extension
coords = np.arange(10).reshape((5, 2))
model = projections.Sky2Pix_ZenithalPerspective(2, 30)
phi, theta = model(coords[:, 0], coords[:, 1])
assert_almost_equal(
phi,
[0., 2.2790416, 4.4889294, 6.6250643, 8.68301])
assert_almost_equal(
theta,
[-76.4816918, -75.3594654, -74.1256332, -72.784558, -71.3406629])
def test_c_projections_shaped():
nx, ny = (5, 2)
x = np.linspace(0, 1, nx)
y = np.linspace(0, 1, ny)
xv, yv = np.meshgrid(x, y)
model = projections.Pix2Sky_TAN()
phi, theta = model(xv, yv)
assert_allclose(
phi,
[[0., 90., 90., 90., 90.],
[180., 165.96375653, 153.43494882, 143.13010235, 135.]])
assert_allclose(
theta,
[[90., 89.75000159, 89.50001269, 89.25004283, 89.00010152],
[89.00010152, 88.96933478, 88.88210788, 88.75019826, 88.58607353]])
def test_affine_with_quantities():
x = 1
y = 2
xdeg = (x * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
ydeg = (y * u.pix).to(u.deg, equivalencies=u.pixel_scale(2.5 * u.deg / u.pix))
xpix = x * u.pix
ypix = y * u.pix
# test affine with matrix only
qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg)
with pytest.raises(ValueError):
qx1, qy1 = qaff(xpix, ypix, equivalencies={
'x': u.pixel_scale(2.5 * u.deg / u.pix),
'y': u.pixel_scale(2.5 * u.deg / u.pix)})
# test affine with matrix and translation
qaff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]] * u.deg,
translation=[1, 2] * u.deg)
qx1, qy1 = qaff(xpix, ypix, equivalencies={
'x': u.pixel_scale(2.5 * u.deg / u.pix),
'y': u.pixel_scale(2.5 * u.deg / u.pix)})
aff = projections.AffineTransformation2D(matrix=[[1, 2], [2, 1]], translation=[1, 2])
x1, y1 = aff(xdeg.value, ydeg.value)
assert_quantity_allclose(qx1, x1 * u.deg)
assert_quantity_allclose(qy1, y1 * u.deg)
# test the case of WCS PC and CDELT transformations
pc = np.array([[0.86585778922708, 0.50029020461607],
[-0.50029020461607, 0.86585778922708]])
cdelt = np.array([[1, 3.0683055555556E-05], [3.0966944444444E-05, 1]])
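# The elementwise product of the cdelt and pc arrays serves as the affine matrix here
# (the standard WCS CD matrix is CD_ij = CDELT_i * PC_ij).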
matrix = cdelt * pc
qaff = projections.AffineTransformation2D(matrix=matrix * u.deg,
translation=[0, 0] * u.deg)
inv_matrix = np.linalg.inv(matrix)
inv_qaff = projections.AffineTransformation2D(matrix=inv_matrix * u.pix,
translation=[0, 0] * u.pix)
qaff.inverse = inv_qaff
qx1, qy1 = qaff(xpix, ypix, equivalencies={
'x': u.pixel_scale(1 * u.deg / u.pix),
'y': u.pixel_scale(1 * u.deg / u.pix)})
x1, y1 = qaff.inverse(qx1, qy1, equivalencies={
'x': u.pixel_scale(1 * u.deg / u.pix),
'y': u.pixel_scale(1 * u.deg / u.pix)})
assert_quantity_allclose(x1, xpix)
assert_quantity_allclose(y1, ypix)
def test_Pix2Sky_ZenithalPerspective_inverse():
model = projections.Pix2Sky_ZenithalPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ZenithalPerspective)
assert inverse.mu == model.mu == 2
assert_allclose(inverse.gamma, model.gamma)
assert_allclose(inverse.gamma, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ZenithalPerspective_inverse():
model = projections.Sky2Pix_ZenithalPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_AZP)
assert inverse.mu == model.mu == 2
assert_allclose(inverse.gamma, model.gamma)
assert_allclose(inverse.gamma, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_SlantZenithalPerspective_inverse():
model = projections.Pix2Sky_SlantZenithalPerspective(2, 30, 40)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_SlantZenithalPerspective)
assert inverse.mu == model.mu == 2
assert_allclose(inverse.phi0, model.phi0)
assert_allclose(inverse.theta0, model.theta0)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_SlantZenithalPerspective_inverse():
model = projections.Sky2Pix_SlantZenithalPerspective(2, 30, 40)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_SlantZenithalPerspective)
assert inverse.mu == model.mu == 2
assert_allclose(inverse.phi0, model.phi0)
assert_allclose(inverse.theta0, model.theta0)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Gnomonic_inverse():
model = projections.Pix2Sky_Gnomonic()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Gnomonic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Gnomonic_inverse():
model = projections.Sky2Pix_Gnomonic()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Gnomonic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Stereographic_inverse():
model = projections.Pix2Sky_Stereographic()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Stereographic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Stereographic_inverse():
model = projections.Sky2Pix_Stereographic()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Stereographic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_SlantOrthographic_inverse():
model = projections.Pix2Sky_SlantOrthographic(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_SlantOrthographic)
assert inverse.xi == model.xi == 2
assert inverse.eta == model.eta == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-8)
assert_allclose(b, y, atol=1e-8)
def test_Sky2Pix_SlantOrthographic_inverse():
model = projections.Sky2Pix_SlantOrthographic(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_SlantOrthographic)
assert inverse.xi == model.xi == 2
assert inverse.eta == model.eta == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-8)
assert_allclose(b, y, atol=1e-8)
def test_Pix2Sky_ZenithalEquidistant_inverse():
model = projections.Pix2Sky_ZenithalEquidistant()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ZenithalEquidistant)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ZenithalEquidistant_inverse():
model = projections.Sky2Pix_ZenithalEquidistant()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ZenithalEquidistant)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_ZenithalEqualArea_inverse():
model = projections.Pix2Sky_ZenithalEqualArea()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ZenithalEqualArea)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ZenithalEqualArea_inverse():
model = projections.Sky2Pix_ZenithalEqualArea()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ZenithalEqualArea)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Airy_inverse():
model = projections.Pix2Sky_Airy(30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Airy)
assert inverse.theta_b == model.theta_b == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Airy_inverse():
model = projections.Sky2Pix_Airy(30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Airy)
assert inverse.theta_b == model.theta_b == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_CylindricalPerspective_inverse():
model = projections.Pix2Sky_CylindricalPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_CylindricalPerspective)
assert inverse.mu == model.mu == 2
assert inverse.lam == model.lam == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_CylindricalPerspective_inverse():
model = projections.Sky2Pix_CylindricalPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_CylindricalPerspective)
assert inverse.mu == model.mu == 2
assert inverse.lam == model.lam == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_CylindricalEqualArea_inverse():
model = projections.Pix2Sky_CylindricalEqualArea(0.567)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_CylindricalEqualArea)
assert inverse.lam == model.lam == 0.567
def test_Sky2Pix_CylindricalEqualArea_inverse():
model = projections.Sky2Pix_CylindricalEqualArea(0.765)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_CylindricalEqualArea)
assert inverse.lam == model.lam == 0.765
def test_Pix2Sky_PlateCarree_inverse():
model = projections.Pix2Sky_PlateCarree()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_PlateCarree)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_PlateCarree_inverse():
model = projections.Sky2Pix_PlateCarree()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_PlateCarree)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Mercator_inverse():
model = projections.Pix2Sky_Mercator()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Mercator)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Mercator_inverse():
model = projections.Sky2Pix_Mercator()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Mercator)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_SansonFlamsteed_inverse():
model = projections.Pix2Sky_SansonFlamsteed()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_SansonFlamsteed)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_SansonFlamsteed_inverse():
model = projections.Sky2Pix_SansonFlamsteed()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_SansonFlamsteed)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Parabolic_inverse():
model = projections.Pix2Sky_Parabolic()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Parabolic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Parabolic_inverse():
model = projections.Sky2Pix_Parabolic()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Parabolic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Molleweide_inverse():
model = projections.Pix2Sky_Molleweide()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Molleweide)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Molleweide_inverse():
model = projections.Sky2Pix_Molleweide()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Molleweide)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_HammerAitoff_inverse():
model = projections.Pix2Sky_HammerAitoff()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_HammerAitoff)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_HammerAitoff_inverse():
model = projections.Sky2Pix_HammerAitoff()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_HammerAitoff)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_ConicPerspective_inverse():
model = projections.Pix2Sky_ConicPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ConicPerspective)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ConicPerspective_inverse():
model = projections.Sky2Pix_ConicPerspective(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ConicPerspective)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_ConicEqualArea_inverse():
model = projections.Pix2Sky_ConicEqualArea(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ConicEqualArea)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ConicEqualArea_inverse():
model = projections.Sky2Pix_ConicEqualArea(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ConicEqualArea)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_ConicEquidistant_inverse():
model = projections.Pix2Sky_ConicEquidistant(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ConicEquidistant)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ConicEquidistant_inverse():
model = projections.Sky2Pix_ConicEquidistant(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ConicEquidistant)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_ConicOrthomorphic_inverse():
model = projections.Pix2Sky_ConicOrthomorphic(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_ConicOrthomorphic)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_ConicOrthomorphic_inverse():
model = projections.Sky2Pix_ConicOrthomorphic(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_ConicOrthomorphic)
assert inverse.sigma == model.sigma == 2
assert_allclose(inverse.delta, model.delta)
assert_allclose(inverse.delta, 30)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_BonneEqualArea_inverse():
model = projections.Pix2Sky_BonneEqualArea(2)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_BonneEqualArea)
assert inverse.theta1 == model.theta1 == 2
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_BonneEqualArea_inverse():
model = projections.Sky2Pix_BonneEqualArea(2)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_BonneEqualArea)
assert inverse.theta1 == model.theta1 == 2
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_Polyconic_inverse():
model = projections.Pix2Sky_Polyconic()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_Polyconic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_Polyconic_inverse():
model = projections.Sky2Pix_Polyconic()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_Polyconic)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_TangentialSphericalCube_inverse():
model = projections.Pix2Sky_TangentialSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_TangentialSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_TangentialSphericalCube_inverse():
model = projections.Sky2Pix_TangentialSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_TangentialSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_COBEQuadSphericalCube_inverse():
model = projections.Pix2Sky_COBEQuadSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_COBEQuadSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-3)
assert_allclose(b, y, atol=1e-3)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-3)
assert_allclose(b, y, atol=1e-3)
def test_Sky2Pix_COBEQuadSphericalCube_inverse():
model = projections.Sky2Pix_COBEQuadSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_COBEQuadSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-3)
assert_allclose(b, y, atol=1e-3)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-3)
assert_allclose(b, y, atol=1e-3)
def test_Pix2Sky_QuadSphericalCube_inverse():
model = projections.Pix2Sky_QuadSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_QuadSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_QuadSphericalCube_inverse():
model = projections.Sky2Pix_QuadSphericalCube()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_QuadSphericalCube)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_HEALPix_inverse():
model = projections.Pix2Sky_HEALPix(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_HEALPix)
assert inverse.H == model.H == 2
assert inverse.X == model.X == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_HEALPix_inverse():
model = projections.Sky2Pix_HEALPix(2, 30)
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_HEALPix)
assert inverse.H == model.H == 2
assert inverse.X == model.X == 30
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Pix2Sky_HEALPixPolar_inverse():
model = projections.Pix2Sky_HEALPixPolar()
inverse = model.inverse
assert isinstance(inverse, projections.Sky2Pix_HEALPixPolar)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
def test_Sky2Pix_HEALPixPolar_inverse():
model = projections.Sky2Pix_HEALPixPolar()
inverse = model.inverse
assert isinstance(inverse, projections.Pix2Sky_HEALPixPolar)
x = np.linspace(0, 1, 100)
y = np.linspace(0, 1, 100)
a, b = model(*inverse(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
a, b = inverse(*model(x, y))
assert_allclose(a, x, atol=1e-12)
assert_allclose(b, y, atol=1e-12)
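# The round-trip tests above all share one pattern; below is a minimal
# consolidation sketch using pytest parametrization (a hypothetical helper,
# assuming pytest, numpy as np, assert_allclose and projections are imported
# at the top of this module; not part of the original suite):
@pytest.mark.parametrize('model_class', [projections.Pix2Sky_PlateCarree,
                                         projections.Sky2Pix_Mercator])
def test_round_trip_sketch(model_class):
    model = model_class()
    inverse = model.inverse
    x = y = np.linspace(0, 1, 100)
    a, b = model(*inverse(x, y))
    assert_allclose(a, x, atol=1e-12)
    assert_allclose(b, y, atol=1e-12)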
|
3c6f92782b580f299994482b24cd4c9f40ef14694fbc4c12e7e78ec548d7b045 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides functions to help with testing against iraf tasks
"""
import numpy as np
from astropy.logger import log
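# IRAF stores the fitted function type as a numeric code (the first entry
# of the flattened coefficient array); this maps those codes to model names.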
iraf_models_map = {1.: 'Chebyshev',
2.: 'Legendre',
3.: 'Spline3',
4.: 'Spline1'}
def get_records(fname):
"""
Read the records of an IRAF database file into a python list
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
A list of records
"""
with open(fname) as f:
    dtb = f.read()
recs = dtb.split('begin')[1:]
records = [Record(r) for r in recs]
return records
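# Usage sketch (hypothetical database file name):
#     records = get_records('database/idtest')
#     print(records[0].taskname, sorted(records[0].fields))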
def get_database_string(fname):
"""
Read an IRAF database file
Parameters
----------
fname : str
name of an IRAF database file
Returns
-------
the database file as a string
"""
with open(fname) as f:
    dtb = f.read()
return dtb
class Record:
"""
A base class for all records - represents an IRAF database record
Attributes
----------
recstr: string
the record as a string
fields: dict
the fields in the record
taskname: string
the name of the task which created the database file
"""
def __init__(self, recstr):
self.recstr = recstr
self.fields = self.get_fields()
self.taskname = self.get_task_name()
def aslist(self):
reclist = self.recstr.split('\n')
reclist = [l.strip() for l in reclist]
reclist = [l for l in reclist if len(l) > 0]
return reclist
def get_fields(self):
# read record fields as an array
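# A field line starts with an alphabetic name. If the following line does
# not start with a letter, the field is an array spanning int(field[1])
# continuation lines; otherwise the remainder of the line is its value.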
fields = {}
flist = self.aslist()
numfields = len(flist)
for i in range(numfields):
line = flist[i]
if line and line[0].isalpha():
field = line.split()
if i + 1 < numfields:
if not flist[i + 1][0].isalpha():
fields[field[0]] = self.read_array_field(
flist[i:i + int(field[1]) + 1])
else:
fields[field[0]] = " ".join(s for s in field[1:])
else:
fields[field[0]] = " ".join(s for s in field[1:])
else:
continue
return fields
def get_task_name(self):
try:
return self.fields['task']
except KeyError:
return None
def read_array_field(self, fieldlist):
# Turn an iraf record array field into a numpy array
fieldline = [l.split() for l in fieldlist[1:]]
# take only the first 3 columns
# identify also writes strings at the end of some field lines
xyz = [l[:3] for l in fieldline]
try:
    farr = np.array(xyz)
except Exception:
    log.debug(f"Could not read array field {fieldlist[0].split()[0]}")
    return None
return farr.astype(np.float64)
class IdentifyRecord(Record):
"""
Represents a database record for the onedspec.identify task
Attributes
----------
x: array
the X values of the identified features
this represents values on axis1 (image rows)
y: int
the Y values of the identified features
(image columns)
z: array
the values which X maps into
modelname: string
the function used to fit the data
nterms: int
    number of coefficients of the polynomial fit to the data
    (in IRAF this is the number of terms, not the order)
mrange: list
the range of the data
coeff: array
function (modelname) coefficients
"""
def __init__(self, recstr):
super().__init__(recstr)
self._flatcoeff = self.fields['coefficients'].flatten()
self.x = self.fields['features'][:, 0]
self.y = self.get_ydata()
self.z = self.fields['features'][:, 1]
self.modelname = self.get_model_name()
self.nterms = self.get_nterms()
self.mrange = self.get_range()
self.coeff = self.get_coeff()
def get_model_name(self):
return iraf_models_map[self._flatcoeff[0]]
def get_nterms(self):
return self._flatcoeff[1]
def get_range(self):
low = self._flatcoeff[2]
high = self._flatcoeff[3]
return [low, high]
def get_coeff(self):
return self._flatcoeff[4:]
def get_ydata(self):
image = self.fields['image']
left = image.find('[') + 1
right = image.find(']')
section = image[left:right]
if ',' in section:
yind = image.find(',') + 1
return int(image[yind:-1])
else:
return int(section)
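# Usage sketch, mirroring TestLinearLSQFitter.test_chebyshev1D in
# test_fitting.py, which feeds one 'begin'-delimited record to this class:
#     record = IdentifyRecord(database_string.split('begin')[1])
#     coeffs, order = record.coeff, int(record.fields['order'])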
class FitcoordsRecord(Record):
"""
Represents a database record for the longslit.fitcoords task
Attributes
----------
modelname: string
the function used to fit the data
xorder: int
number of terms in x
yorder: int
number of terms in y
xbounds: list
data range in x
ybounds: list
data range in y
coeff: array
function coefficients
"""
def __init__(self, recstr):
super().__init__(recstr)
self._surface = self.fields['surface'].flatten()
self.modelname = iraf_models_map[self._surface[0]]
self.xorder = self._surface[1]
self.yorder = self._surface[2]
self.xbounds = [self._surface[4], self._surface[5]]
self.ybounds = [self._surface[6], self._surface[7]]
self.coeff = self.get_coeff()
def get_coeff(self):
return self._surface[8:]
class IDB:
"""
Base class for an IRAF identify database
Attributes
----------
records: list
a list of all `IdentifyRecord` in the database
numrecords: int
number of records
"""
def __init__(self, dtbstr):
self.records = [IdentifyRecord(rstr) for rstr in self.aslist(dtbstr)]
self.numrecords = len(self.records)
def aslist(self, dtb):
# return a list of records
# if the first one is a comment remove it from the list
rl = dtb.split('begin')
try:
rl0 = rl[0].split('\n')
except Exception:
return rl
if len(rl0) == 2 and rl0[0].startswith('#') and not rl0[1].strip():
return rl[1:]
else:
return rl
class ReidentifyRecord(IDB):
"""
Represents a database record for the onedspec.reidentify task
"""
def __init__(self, databasestr):
super().__init__(databasestr)
self.x = np.array([r.x for r in self.records])
self.y = self.get_ydata()
self.z = np.array([r.z for r in self.records])
def get_ydata(self):
y = np.ones(self.x.shape)
y = y * np.array([r.y for r in self.records])[:, np.newaxis]
return y
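# End-to-end sketch (hypothetical file name):
#     idb = IDB(get_database_string('database/idtest'))
#     print(idb.numrecords, idb.records[0].modelname)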
|
836735efa2e5c73b93c9dc0874652f6ed457b7f003116d28e812ed1a47a4a7d1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Here are all the test parameters and values for each
`~astropy.modeling.FittableModel` defined. There is a dictionary for 1D and a
dictionary for 2D models.
Explanation of keywords of the dictionaries:
"parameters" : list or dict
    Model parameters with which the model is tested. Make sure you keep the
    right order. For polynomials you can also use a dict to specify the
    coefficients. See examples below.
"x_values" : list
    x values where the model is evaluated.
"y_values" : list
    Reference y values at the positions given in x_values.
"z_values" : list
    Reference z values at the positions given in x_values and y_values.
    (2D model option)
"x_lim" : list
    x test range for the model fitter. Depending on the model this can
    differ, e.g. the PowerLaw model should be tested over a few magnitudes.
"y_lim" : list
    y test range for the model fitter. Depending on the model this can
    differ, e.g. the PowerLaw model should be tested over a few magnitudes.
    (2D model option)
"log_fit" : bool
    PowerLaw models should be tested over a few magnitudes, so log_fit
    should be True.
"requires_scipy" : bool
    If a model requires scipy (Bessel functions etc.), set this flag.
"integral" : float
    Approximate value of the integral in the range x_lim (and y_lim).
"deriv_parameters" : list
    If given, the derivative test will use these parameters to create a
    model (optional).
"deriv_initial" : list
    If given, the derivative test will use these parameters as initial
    values for the fit (optional).
"""
import numpy as np
from astropy.modeling.functional_models import (AiryDisk2D, ArcCosine1D, ArcSine1D, ArcTangent1D,
Box1D, Box2D, Const1D, Const2D, Cosine1D, Disk2D,
Exponential1D, Gaussian1D, Gaussian2D,
KingProjectedAnalytic1D, Linear1D, Logarithmic1D,
Lorentz1D, Moffat1D, Moffat2D, Planar2D,
RickerWavelet1D, RickerWavelet2D, Ring2D, Sersic1D,
Sersic2D, Sine1D, Tangent1D, Trapezoid1D,
TrapezoidDisk2D, Voigt1D)
from astropy.modeling.physical_models import Drude1D, Plummer1D
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.powerlaws import (BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D,
LogParabola1D, PowerLaw1D, SmoothlyBrokenPowerLaw1D)
# 1D Models
models_1D = {
Gaussian1D: {
'parameters': [1, 0, 1],
'x_values': [0, np.sqrt(2), -np.sqrt(2)],
'y_values': [1.0, 0.367879, 0.367879],
'x_lim': [-10, 10],
'integral': np.sqrt(2 * np.pi),
'bbox_peak': True
},
Sine1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 2.5],
'y_values': [0, 1],
'x_lim': [-10, 10],
'integral': 0
},
Cosine1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 2.5],
'y_values': [1, 0],
'x_lim': [-10, 10],
'integral': 0
},
Tangent1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 1.25],
'y_values': [0, 1],
'x_lim': [-10, 10],
'integral': 0
},
ArcSine1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 1],
'y_values': [0, 2.5],
'x_lim': [-0.5, 0.5],
'integral': 0
},
ArcCosine1D: {
'parameters': [1, 0.1, 0],
'x_values': [1, 0],
'y_values': [0, 2.5],
'x_lim': [-0.5, 0.5],
'integral': 0
},
ArcTangent1D: {
'parameters': [1, 0.1, 0],
'x_values': [0, 1],
'y_values': [0, 1.25],
'x_lim': [-10, 10],
'integral': 0
},
Box1D: {
'parameters': [1, 0, 10],
'x_values': [-5, 5, 0, -10, 10],
'y_values': [1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'integral': 10,
'bbox_peak': True
},
Linear1D: {
'parameters': [1, 0],
'x_values': [0, np.pi, 42, -1],
'y_values': [0, np.pi, 42, -1],
'x_lim': [-10, 10],
'integral': 0
},
Lorentz1D: {
'parameters': [1, 0, 1],
'x_values': [0, -1, 1, 0.5, -0.5],
'y_values': [1., 0.2, 0.2, 0.5, 0.5],
'x_lim': [-10, 10],
'integral': 1,
'bbox_peak': True
},
RickerWavelet1D: {
'parameters': [1, 0, 1],
'x_values': [0, 1, -1, 3, -3],
'y_values': [1.0, 0.0, 0.0, -0.088872, -0.088872],
'x_lim': [-20, 20],
'integral': 0,
'bbox_peak': True
},
Trapezoid1D: {
'parameters': [1, 0, 2, 1],
'x_values': [0, 1, -1, 1.5, -1.5, 2, 2],
'y_values': [1, 1, 1, 0.5, 0.5, 0, 0],
'x_lim': [-10, 10],
'integral': 3,
'bbox_peak': True
},
Const1D: {
'parameters': [1],
'x_values': [-1, 1, np.pi, -42., 0],
'y_values': [1, 1, 1, 1, 1],
'x_lim': [-10, 10],
'integral': 20
},
Moffat1D: {
'parameters': [1, 0, 1, 2],
'x_values': [0, 1, -1, 3, -3],
'y_values': [1.0, 0.25, 0.25, 0.01, 0.01],
'x_lim': [-10, 10],
'integral': 1,
'deriv_parameters': [23.4, 1.2, 2.1, 2.3],
'deriv_initial': [10, 1, 1, 1]
},
PowerLaw1D: {
'parameters': [1, 1, 2],
'constraints': {'fixed': {'x_0': True}},
'x_values': [1, 10, 100],
'y_values': [1.0, 0.01, 0.0001],
'x_lim': [1, 10],
'log_fit': True,
'integral': 0.99
},
BrokenPowerLaw1D: {
'parameters': [1, 1, 2, 3],
'constraints': {'fixed': {'x_break': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [1e2, 1.0, 1e-3, 1e-6],
'x_lim': [0.1, 100],
'log_fit': True
},
SmoothlyBrokenPowerLaw1D: {
'parameters': [1, 1, -2, 2, 0.5],
'constraints': {'fixed': {'x_break': True, 'delta': True}},
'x_values': [0.01, 1, 100],
'y_values': [3.99920012e-04, 1.0, 3.99920012e-04],
'x_lim': [0.01, 100],
'log_fit': True
},
ExponentialCutoffPowerLaw1D: {
'parameters': [1, 1, 2, 3],
'constraints': {'fixed': {'x_0': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [9.67216100e+01, 7.16531311e-01, 3.56739933e-04,
3.33823780e-19],
'x_lim': [0.01, 100],
'log_fit': True
},
LogParabola1D: {
'parameters': [1, 2, 3, 0.1],
'constraints': {'fixed': {'x_0': True}},
'x_values': [0.1, 1, 10, 100],
'y_values': [3.26089063e+03, 7.62472488e+00, 6.17440488e-03,
1.73160572e-06],
'x_lim': [0.1, 100],
'log_fit': True
},
Polynomial1D: {
'parameters': {'degree': 2, 'c0': 1., 'c1': 1., 'c2': 1.},
'x_values': [1, 10, 100],
'y_values': [3, 111, 10101],
'x_lim': [-3, 3]
},
Sersic1D: {
'parameters': [1, 20, 4],
'x_values': [0.1, 1, 10, 100],
'y_values': [2.78629391e+02, 5.69791430e+01, 3.38788244e+00,
2.23941982e-02],
'requires_scipy': True,
'x_lim': [0, 10],
'log_fit': True
},
Voigt1D: {
'parameters': [0, 1, 0.5, 0.9],
'x_values': [0, 0.2, 0.5, 1, 2, 4, 8, 20],
'y_values': [0.52092360, 0.479697445, 0.317550374, 0.0988079347,
1.73876624e-2, 4.00173216e-3, 9.82351731e-4, 1.56396993e-4],
'x_lim': [-3, 3]
},
KingProjectedAnalytic1D: {
'parameters': [1, 1, 2],
'x_values': [0, 0.1, 0.5, 0.8],
'y_values': [0.30557281, 0.30011069, 0.2, 0.1113258],
'x_lim': [0, 10],
'y_lim': [0, 10],
'bbox_peak': True
},
Drude1D: {
'parameters': [1.0, 8.0, 1.0],
'x_values': [7.0, 8.0, 9.0, 10.0],
'y_values': [0.17883212, 1.0, 0.21891892, 0.07163324],
'x_lim': [1.0, 20.0],
'y_lim': [0.0, 10.0],
'bbox_peak': True
},
Plummer1D: {
'parameters': [10., 0.5],
'x_values': [1.0000e-03, 2.5005e+00, 5.0000e+00],
'y_values': [1.90984022e+01, 5.53541843e-03, 1.86293603e-04],
'x_lim': [0.001, 100]
},
Exponential1D: {
'parameters': [1, 1],
'x_values': [0, 0.5, 1],
'y_values': [1, np.sqrt(np.e), np.e],
'x_lim': [0, 2],
'integral': (np.e**2 - 1.),
},
Logarithmic1D: {
'parameters': [1, 1],
'x_values': [1, np.e, np.e**2],
'y_values': [0, 1, 2],
'x_lim': [1, np.e**2],
'integral': (np.e**2 + 1),
}
}
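# Sketch of how an entry above is consumed by a test harness (assumption:
# a non-dict 'parameters' entry; the tolerance is illustrative):
#
#     entry = models_1D[Gaussian1D]
#     model = Gaussian1D(*entry['parameters'])
#     np.testing.assert_allclose(model(np.array(entry['x_values'])),
#                                entry['y_values'], atol=1e-5)
#
# Worked example for the 'integral' keyword: with parameters [1, 0, 1] the
# Gaussian1D integral is amplitude * stddev * sqrt(2*pi) ~= 2.5066, which is
# the np.sqrt(2 * np.pi) value recorded above.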
# 2D Models
models_2D = {
Gaussian2D: {
'parameters': [1, 0, 0, 1, 1],
'constraints': {'fixed': {'theta': True}},
'x_values': [0, np.sqrt(2), -np.sqrt(2)],
'y_values': [0, np.sqrt(2), -np.sqrt(2)],
'z_values': [1, 1. / np.exp(1) ** 2, 1. / np.exp(1) ** 2],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 2 * np.pi,
'deriv_parameters': [137., 5.1, 5.4, 1.5, 2., np.pi/4],
'deriv_initial': [10, 5, 5, 4, 4, .5],
'bbox_peak': True
},
Const2D: {
'parameters': [1],
'x_values': [-1, 1, np.pi, -42., 0],
'y_values': [0, 1, 42, np.pi, -1],
'z_values': [1, 1, 1, 1, 1],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 400
},
Box2D: {
'parameters': [1, 0, 0, 10, 10],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [1, 1, 1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 100,
'bbox_peak': True
},
RickerWavelet2D: {
'parameters': [1, 0, 0, 1],
'x_values': [0, 0, 0, 0, 0, 1, -1, 3, -3],
'y_values': [0, 1, -1, 3, -3, 0, 0, 0, 0],
'z_values': [1.0, 0.303265, 0.303265, -0.038881, -0.038881,
0.303265, 0.303265, -0.038881, -0.038881],
'x_lim': [-10, 11],
'y_lim': [-10, 11],
'integral': 0
},
TrapezoidDisk2D: {
'parameters': [1, 0, 0, 1, 1],
'x_values': [0, 0.5, 0, 1.5],
'y_values': [0, 0.5, 1.5, 0],
'z_values': [1, 1, 0.5, 0.5],
'x_lim': [-3, 3],
'y_lim': [-3, 3],
'bbox_peak': True
},
AiryDisk2D: {
'parameters': [7, 0, 0, 10],
'x_values': [0, 1, -1, -0.5, -0.5],
'y_values': [0, -1, 0.5, 0.5, -0.5],
'z_values': [7., 6.50158267, 6.68490643, 6.87251093, 6.87251093],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'requires_scipy': True
},
Moffat2D: {
'parameters': [1, 0, 0, 1, 2],
'x_values': [0, 1, -1, 3, -3],
'y_values': [0, -1, 3, 1, -3],
'z_values': [1.0, 0.111111, 0.008264, 0.008264, 0.00277],
'x_lim': [-3, 3],
'y_lim': [-3, 3]
},
Polynomial2D: {
'parameters': {'degree': 1, 'c0_0': 1., 'c1_0': 1., 'c0_1': 1.},
'x_values': [1, 2, 3],
'y_values': [1, 3, 2],
'z_values': [3, 6, 6],
'x_lim': [1, 100],
'y_lim': [1, 100]
},
Disk2D: {
'parameters': [1, 0, 0, 5],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [0, 0, 1, 1, 1, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': np.pi * 5 ** 2,
'bbox_peak': True
},
Ring2D: {
'parameters': [1, 0, 0, 5, 5],
'x_values': [-5, 5, -5, 5, 0, -10, 10],
'y_values': [-5, 5, 0, 0, 0, -10, 10],
'z_values': [1, 1, 1, 1, 0, 0, 0],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': np.pi * (10 ** 2 - 5 ** 2),
'bbox_peak': True
},
Sersic2D: {
'parameters': [1, 25, 4, 50, 50, 0.5, -1],
'x_values': [0.0, 1, 10, 100],
'y_values': [1, 100, 0.0, 10],
'z_values': [1.686398e-02, 9.095221e-02, 2.341879e-02, 9.419231e-02],
'requires_scipy': True,
'x_lim': [1, 1e10],
'y_lim': [1, 1e10]
},
Planar2D: {
'parameters': [1, 1, 0],
'x_values': [0, np.pi, 42, -1],
'y_values': [np.pi, 0, -1, 42],
'z_values': [np.pi, np.pi, 41, 41],
'x_lim': [-10, 10],
'y_lim': [-10, 10],
'integral': 0
}
}
|
c960a22be1ea9c197d6925b3634aa1233e578806dbac08c654caf7cefcf48ce0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
# pylint: disable=invalid-name
import os.path
import unittest.mock as mk
import warnings
from importlib.metadata import EntryPoint
from unittest import mock
import numpy as np
import pytest
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from astropy.modeling import models
from astropy.modeling.core import Fittable2DModel, Parameter
from astropy.modeling.fitting import (Fitter, FittingWithOutlierRemoval, JointFitter,
LevMarLSQFitter, LinearLSQFitter, NonFiniteValueError,
SimplexLSQFitter, SLSQPLSQFitter, populate_entry_points)
from astropy.modeling.optimizers import Optimization
from astropy.stats import sigma_clip
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from . import irafutil
if HAS_SCIPY:
from scipy import optimize
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
"""Tests for 2D polynomail fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y
self.z = poly2(self.x, self.y)
def test_poly2D_fitting(self):
fitter = LinearLSQFitter()
v = self.model.fit_deriv(x=self.x, y=self.y)
p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
fitter = LinearLSQFitter()
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif('not HAS_SCIPY')
def test_polynomial2D_nonlinear_fitting(self):
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
nlfitter = LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
new_model = nlfitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
class TestICheb2D:
"""
Tests 2D Chebyshev polynomial fitting
Create a 2D polynomial (z) using Polynomial2D and default coefficients.
Fit z using a Chebyshev2D model.
Evaluate the Chebyshev2D polynomial and compare with the initial z.
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
128.])
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif('not HAS_SCIPY')
def test_chebyshev2D_nonlinear_fitting(self):
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
nlfitter = LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = nlfitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
def test_chebyshev2D_nonlinear_fitting_with_weights(self):
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
nlfitter = LevMarLSQFitter()
weights = np.ones_like(self.y)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = nlfitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter:
"""
Tests the joint fitting routine using two Gaussian models
"""
def setup_class(self):
"""
Create two Gaussian models and some data with noise.
Create a fitter for the two models, keeping the amplitude parameter
common to both models.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
self.jf = JointFitter([self.g1, self.g2],
{self.g1: ['amplitude'],
self.g2: ['amplitude']}, [9.8])
self.x = np.arange(10, 20, .1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
Tests the fitting routine against an equivalent scipy.optimize.leastsq
procedure and compares the fitted parameters.
"""
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
model(p[0], p[3:], x2) - y2])
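# The shared amplitude p[0] is fit jointly against both datasets, while
# each Gaussian keeps its own (mean, stddev) in p[1:3] and p[3:5].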
coeff, _ = optimize.leastsq(errfunc, p,
args=(self.x, self.ny1, self.x, self.ny2))
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
with pytest.raises(ValueError) as excinfo:
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
_ = fitter(init_model_comp, x, y)
assert "Model must be simple, not compound" in str(excinfo.value)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join('data',
'idcompspec.fits'))
with open(test_file) as f:
lines = f.read()
reclist = lines.split('begin')
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields['order'])
initial_model = models.Chebyshev1D(order - 1,
domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs),
rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5*x*x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5*x*x, -2*x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)
assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))
y[0, 7] = 100. # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],
mask=np.zeros_like([x, x]))
z[0, 3, 1] = -1000. # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4. * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
def test_estimated_vs_analytic_deriv(self):
"""
Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
`Gaussian1D`.
"""
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
efitter = LevMarLSQFitter()
emodel = efitter(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
def test_estimated_vs_analytic_deriv_with_weights(self):
"""
Runs `LevMarLSQFitter` with estimated and analytic derivatives of a
`Gaussian1D`, using weights.
"""
weights = 1.0 / (self.ydata / 10.)
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
efitter = LevMarLSQFitter()
emodel = efitter(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
def test_with_optimize(self):
"""
Tests results from `LevMarLSQFitter` against `scipy.optimize.leastsq`.
"""
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(errfunc, self.initial_values,
args=(self.xdata, self.ydata))
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
def test_with_weights(self):
"""
Tests results from `LevMarLSQFitter` with weights.
"""
# part 1: weights are equal to 1
fitter = LevMarLSQFitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=np.ones_like(self.xdata))
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.
mask = weights >= 1.
model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=weights)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.filterwarnings(r'ignore:.* Maximum number of iterations reached')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, clipping to bounds')
@pytest.mark.parametrize('fitter_class', fitters)
def test_fitter_against_LevMar(self, fitter_class):
"""Tests results from non-linear fitters against `LevMarLSQFitter`."""
levmar = LevMarLSQFitter()
fitter = fitter_class()
# This emits a warning from fitter that we need to ignore with
# pytest.mark.filterwarnings above.
new_model = fitter(self.gauss, self.xdata, self.ydata)
model = levmar(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters,
rtol=10 ** (-4))
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, clipping to bounds')
def test_LSQ_SLSQP_with_constraints(self):
"""
Runs `LevMarLSQFitter` and `SLSQPLSQFitter` on a model with
constraints.
"""
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fitter = LevMarLSQFitter()
fslsqp = SLSQPLSQFitter()
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters,
rtol=10 ** (-4))
def test_LevMar_with_weights(self):
"""
Tests that issue #11581 has been solved.
"""
np.random.seed(42)
norder = 2
fitter1 = LevMarLSQFitter()
fitter2 = LinearLSQFitter()
model = models.Polynomial1D(norder)
npts = 10000
c = [2.0, -10.0, 7.0]
tw = np.random.uniform(0.0, 10.0, npts)
tx = np.random.uniform(0.0, 10.0, npts)
ty = c[0] + c[1] * tx + c[2] * (tx ** 2)
ty += np.random.normal(0.0, 1.5, npts)
with pytest.warns(AstropyUserWarning, match=r'Model is linear in parameters'):
tf1 = fitter1(model, tx, ty, weights=tw)
tf2 = fitter2(model, tx, ty, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters,
atol=10 ** (-16))
assert_allclose(tf1.parameters, c,
rtol=10 ** (-2), atol=10 ** (-2))
model = models.Gaussian1D()
fitter1(model, tx, ty, weights=tw)
model = models.Polynomial2D(norder)
nxpts = 100
nypts = 150
npts = nxpts * nypts
c = [1.0, 4.0, 7.0, -8.0, -9.0, -3.0]
tw = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tx = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
ty = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tz = c[0] + c[1] * tx + c[2] * (tx ** 2) + c[3] * ty + c[4] * (ty ** 2) + c[5] * tx * ty
tz += np.random.normal(0.0, 1.5, npts).reshape(nxpts, nypts)
with pytest.warns(AstropyUserWarning, match=r'Model is linear in parameters'):
tf1 = fitter1(model, tx, ty, tz, weights=tw)
tf2 = fitter2(model, tx, ty, tz, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters,
atol=10 ** (-16))
assert_allclose(tf1.parameters, c,
rtol=10 ** (-2), atol=10 ** (-2))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x ** 2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0., 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
def test_param_cov(self):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
# y scatter is amplitude ~1 to make sure covariance is
# non-negligible
y = x*a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
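# Sketch of the algebra below: beta = (X^T X)^{-1} X^T y, residual variance
# s2 = ||y - X @ beta||^2 / (n - p), and Cov(beta) = s2 * (X^T X)^{-1}.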
X = np.vstack([x, np.ones(len(x))]).T
beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)
s2 = (np.sum((y - np.matmul(X, beta).ravel())**2) /
(len(y) - len(beta)))
olscov = np.linalg.inv(np.matmul(X.T, X)) * s2
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
fitter = LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fmod = fitter(mod, x, y)
assert_allclose(fmod.parameters, beta.ravel())
assert_allclose(olscov, fitter.fit_info['param_cov'])
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def setup_class(self):
self.exception_not_thrown = Exception("The test should not have gotten here. There was no exception thrown")
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
# This should fail as it raises an ImportError
raise ImportError
def returnbadfunc(self):
def badfunc():
# This should import but it should fail type check
pass
return badfunc
def returnbadclass(self):
# This should import, but it should fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
populate_entry_points([mock_entry_importerror])
except AstropyUserWarning as w:
if "ImportError" in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
def test_bad_func(self):
"""This returns a function which fails the type check"""
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
populate_entry_points([mock_entry_badfunc])
except AstropyUserWarning as w:
if "Class" in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter """
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
populate_entry_points([mock_entry_badclass])
except AstropyUserWarning as w:
if 'modeling.Fitter' in w.args[0]: # any error for this case should have this in it.
pass
else:
raise w
else:
raise self.exception_not_thrown
@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5., 5., 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)
self.y = func(self.model_params, self.x)
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, clipping to bounds')
def test_with_fitters_and_sigma_clip(self):
import scipy.stats as stats
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
self.y += (np.random.normal(0., 0.2, self.x.shape) +
c*np.random.normal(3.0, 5.0, self.x.shape))
g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
# test with Levenberg-Marquardt Least Squares fitter
fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
# test with Sequential Least Squares Programming fitter
fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
# test with Simplex LSQ fitter
fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, self.y)
assert_allclose(fitted_model.parameters, self.model_params, atol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
0.5*(pos[1] - p[1])**2 / p[3]**2)
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
def initial_guess(self, data, pos):
    """Computes the centroid of the data as the initial guess for the
    center position."""
    y = pos[0]
    x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, clipping to bounds')
def test_with_fitters_and_sigma_clip(self):
import scipy.stats as stats
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
self.z += (np.random.normal(0., 0.2, self.z.shape) +
c*np.random.normal(self.z, 2.0, self.z.shape))
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
y_mean=guess[2], x_stddev=0.75,
y_stddev=1.25)
# test with Levenberg-Marquardt Least Squares fitter
fit = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
# test with Sequential Least Squares Programming fitter
fit = FittingWithOutlierRemoval(SLSQPLSQFitter(), sigma_clip, niter=3,
sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
# test with Simplex LSQ fitter
fit = FittingWithOutlierRemoval(SimplexLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, self.z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
x = np.arange(10)
y = np.array([2.5*x - 4, 2*x*x + x + 10])
y[1, 5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4., 10.], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.], atol=1e-14)
assert_allclose(poly_set.c2, [0., 2.], atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x+y, 1-0.1*x+0.2*y]), 0, 3)
z[3, 3:5, 0] = 100. # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0., 1.]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1., -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1., 0.2]]], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020 """
def setup_class(self):
        # values of x, y not important as we fit a constant model z(x, y) = p0 here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
self.z[0, 0] = 1000.0 # outlier
self.z[0, 1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
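    # Note on the expected values asserted below: with z = +/-1 and weights
    # w = 1 (where z = -1) or w = 3 (where z = +1), and assuming the linear
    # fitter multiplies the residuals by the weights, a weighted constant fit
    # minimizes sum((w * (z - c))**2), giving
    # c = sum(w**2 * z) / sum(w**2) = (9 - 1) / (9 + 1) = 0.8
    # once the two outliers have been clipped.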
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10**(-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x1d, self.z1d)
assert((~mask).sum() == self.z1d.size - 2)
assert(mask[0] and mask[1])
        assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))  # with the outliers removed, the mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""smoke test for #7020 - fails without fitting.py patch because weights does not propagate"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_1d_set_with_weights_with_sigma_clip(self):
"""1D model set with separate weights"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
weights = np.array([self.weights1d, self.weights1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=weights)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10**(-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x, self.y, self.z)
assert((~mask).sum() == self.z.size - 2)
assert(mask[0, 0] and mask[0, 1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))
def test_2d_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LevMarLSQFitter() # LinearLSQFitter doesn't handle weights properly in 2D
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_2d_linear_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
        fitter = LinearLSQFitter()  # checks that LinearLSQFitter handles weights in 2D
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_2d_with_weights_with_sigma_clip(self):
"""smoke test for #7020 - fails without fitting.py patch because
weights does not propagate"""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LevMarLSQFitter(), sigma_clip,
niter=3, sigma=3.)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit, filtered = fitter(model, self.x, self.y, self.z,
weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
def test_2d_linear_with_weights_with_sigma_clip(self):
"""same as test above with a linear fitter."""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x, self.y, self.z,
weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
assert(fit.parameters[0] < 1.0) # outliers didn't pull it out of [-1:1] because they had been removed
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitters_with_weights():
"""Issue #5737 """
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LevMarLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights():
"""Regression test for #7035"""
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig**(-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights_flat():
"""Same as the above #7035 test but with flattened inputs"""
Xin, Yin = np.mgrid[0:21, 0:21]
Xin, Yin = Xin.flatten(), Yin.flatten()
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig**(-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
def test_fitters_interface():
"""
Test that ``**kwargs`` work with all optimizers.
This is a basic smoke test.
"""
levmar = LevMarLSQFitter()
slsqp = SLSQPLSQFitter()
simplex = SimplexLSQFitter()
kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2, 'acc': 1e-6}
simplex_kwargs = {'maxiter': 77, 'verblevel': 1, 'acc': 1e-6}
model = models.Gaussian1D(10, 4, .3)
x = np.arange(21)
y = model(x)
_ = slsqp(model, x, y, **kwargs)
_ = simplex(model, x, y, **simplex_kwargs)
kwargs.pop('verblevel')
_ = levmar(model, x, y, **kwargs)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter_class', [SLSQPLSQFitter, SimplexLSQFitter])
def test_optimizers(fitter_class):
fitter = fitter_class()
# Test maxiter
assert fitter._opt_method.maxiter == 100
fitter._opt_method.maxiter = 1000
assert fitter._opt_method.maxiter == 1000
# Test eps
assert fitter._opt_method.eps == np.sqrt(np.finfo(float).eps)
fitter._opt_method.eps = 1e-16
assert fitter._opt_method.eps == 1e-16
# Test acc
assert fitter._opt_method.acc == 1e-7
fitter._opt_method.acc = 1e-16
assert fitter._opt_method.acc == 1e-16
# Test repr
assert repr(fitter._opt_method) ==\
f"{fitter._opt_method.__class__.__name__}()"
fitparams = mk.MagicMock()
final_func_val = mk.MagicMock()
numiter = mk.MagicMock()
funcalls = mk.MagicMock()
exit_mode = 1
mess = mk.MagicMock()
xtol = mk.MagicMock()
if fitter_class == SLSQPLSQFitter:
return_value = (fitparams, final_func_val, numiter, exit_mode, mess)
fit_info = {
'final_func_val': final_func_val,
'numiter': numiter,
'exit_mode': exit_mode,
'message': mess
}
else:
return_value = (fitparams, final_func_val, numiter, funcalls, exit_mode)
fit_info = {
'final_func_val': final_func_val,
'numiter': numiter,
'exit_mode': exit_mode,
'num_function_calls': funcalls
}
with mk.patch.object(fitter._opt_method.__class__, 'opt_method',
return_value=return_value):
with pytest.warns(AstropyUserWarning, match=r"The fit may be unsuccessful; .*"):
assert (fitparams, fit_info) == fitter._opt_method(mk.MagicMock(), mk.MagicMock(),
mk.MagicMock(), xtol=xtol)
assert fit_info == fitter._opt_method.fit_info
    if isinstance(fitter, SLSQPLSQFitter):
        assert fitter._opt_method.acc == 1e-16
    else:
        assert fitter._opt_method.acc == xtol
@mk.patch.multiple(Optimization, __abstractmethods__=set())
def test_Optimization_abstract_call():
optimization = Optimization(mk.MagicMock())
with pytest.raises(NotImplementedError) as err:
optimization()
assert str(err.value) ==\
"Subclasses should implement this method"
def test_fitting_with_outlier_removal_niter():
"""
Test that FittingWithOutlierRemoval stops prior to reaching niter if the
set of masked points has converged and correctly reports the actual number
of iterations performed.
"""
# 2 rows with some noise around a constant level and 1 deviant point:
x = np.arange(25)
with NumpyRNGContext(_RANDOM_SEED):
y = np.random.normal(loc=10., scale=1., size=(2, 25))
y[0, 14] = 100.
# Fit 2 models with up to 5 iterations (should only take 2):
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=5,
sigma_lower=3., sigma_upper=3., maxiters=1
)
model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)
# Confirm that only the deviant point was rejected, in 2 iterations:
assert_equal(np.where(mask), [[0], [14]])
assert fitter.fit_info['niter'] == 2
# Refit just the first row without any rejection iterations, to ensure
# there are no regressions for that special case:
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=0,
sigma_lower=3., sigma_upper=3., maxiters=1
)
model, mask = fitter(models.Chebyshev1D(2), x, y[0])
# Confirm that there were no iterations or rejected points:
assert mask.sum() == 0
assert fitter.fit_info['niter'] == 0
@pytest.mark.skipif('not HAS_SCIPY')
class TestFittingUncertainties:
"""
Test that parameter covariance is calculated correctly for the fitters
that do so (currently LevMarLSQFitter, LinearLSQFitter).
"""
example_1D_models = [models.Polynomial1D(2), models.Linear1D()]
example_1D_sets = [models.Polynomial1D(2, n_models=2, model_set_axis=False),
models.Linear1D(n_models=2, slope=[1., 1.], intercept=[0, 0])]
def setup_class(self):
self.x = np.arange(10)
self.x_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.y_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.rand_grid = np.random.random(100).reshape(10, 10)
self.rand = self.rand_grid[0]
@pytest.mark.parametrize(('single_model', 'model_set'),
list(zip(example_1D_models, example_1D_sets)))
def test_1d_models(self, single_model, model_set):
""" Test that fitting uncertainties are computed correctly for 1D models
        and 1D model sets. Use the covariance/stds given by LevMarLSQFitter as
        a benchmark, since they come directly from the underlying SciPy
        least-squares routine.
"""
levmar_fitter = LevMarLSQFitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
# test 1D single models
# fit single model w/ nonlinear fitter
y = single_model(self.x) + self.rand
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_model_levmar = levmar_fitter(single_model, self.x, y)
cov_model_levmar = fit_model_levmar.cov_matrix.cov_matrix
# fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x, y)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
        # check that covariance and stds are computed correctly
assert_allclose(cov_model_linlsq, cov_model_levmar)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),
fit_model_linlsq.stds.stds)
# now test 1D model sets
# fit set of models w/ linear fitter
y = model_set(self.x, model_set_axis=False) +\
np.array([self.rand, self.rand])
fit_1d_set_linlsq = linlsq_fitter(model_set, self.x, y)
cov_1d_set_linlsq = [j.cov_matrix for j in
fit_1d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_1d_set_linlsq[0], cov_model_levmar)
assert_allclose(np.sqrt(np.diag(cov_1d_set_linlsq[0])),
fit_1d_set_linlsq.stds[0].stds)
def test_2d_models(self):
"""
Test that fitting uncertainties are computed correctly for 2D models
        and 2D model sets. Use the covariance/stds given by LevMarLSQFitter as
        a benchmark, since they come directly from the underlying SciPy
        least-squares routine.
"""
levmar_fitter = LevMarLSQFitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
single_model = models.Polynomial2D(2, c0_0=2)
model_set = models.Polynomial2D(degree=2, n_models=2, c0_0=[2, 3],
model_set_axis=False)
# fit single model w/ nonlinear fitter
z_grid = single_model(self.x_grid, self.y_grid) + self.rand_grid
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_model_levmar = levmar_fitter(single_model, self.x_grid,
self.y_grid, z_grid)
cov_model_levmar = fit_model_levmar.cov_matrix.cov_matrix
        # fit single model w/ linear fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x_grid,
self.y_grid, z_grid)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
assert_allclose(cov_model_levmar, cov_model_linlsq)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),
fit_model_linlsq.stds.stds)
# fit 2d model set
z_grid = model_set(self.x_grid, self.y_grid) + np.array((self.rand_grid,
self.rand_grid))
fit_2d_set_linlsq = linlsq_fitter(model_set, self.x_grid, self.y_grid,
z_grid)
cov_2d_set_linlsq = [j.cov_matrix for j in fit_2d_set_linlsq.cov_matrix]
# make sure cov matrix from single model fit w/ levmar fitter matches
# the cov matrix of first model in the set
assert_allclose(cov_2d_set_linlsq[0], cov_model_levmar)
assert_allclose(np.sqrt(np.diag(cov_2d_set_linlsq[0])),
fit_2d_set_linlsq.stds[0].stds)
def test_covariance_std_printing_indexing(self, capsys):
"""
Test printing methods and indexing.
"""
# test str representation for Covariance/stds
fitter = LinearLSQFitter(calc_uncertainties=True)
mod = models.Linear1D()
fit_mod = fitter(mod, self.x, mod(self.x)+self.rand)
print(fit_mod.cov_matrix)
captured = capsys.readouterr()
assert "slope | 0.001" in captured.out
assert "intercept| -0.006, 0.041" in captured.out
print(fit_mod.stds)
captured = capsys.readouterr()
assert "slope | 0.038" in captured.out
assert "intercept| 0.203" in captured.out
# test 'pprint' for Covariance/stds
print(fit_mod.cov_matrix.pprint(round_val=5, max_lines=1))
captured = capsys.readouterr()
assert "slope | 0.00144" in captured.out
assert "intercept" not in captured.out
print(fit_mod.stds.pprint(max_lines=1, round_val=5))
captured = capsys.readouterr()
assert "slope | 0.03799" in captured.out
assert "intercept" not in captured.out
# test indexing for Covariance class.
assert fit_mod.cov_matrix[0, 0] == fit_mod.cov_matrix['slope', 'slope']
# test indexing for stds class.
assert fit_mod.stds[1] == fit_mod.stds['intercept']
@pytest.mark.skipif('not HAS_SCIPY')
def test_non_finite_filter():
"""Regression test filter introduced to solve issues #3575 and #12809"""
x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])
m_init = models.Gaussian1D()
fit = LevMarLSQFitter()
    # The fit raises NonFiniteValueError because the inputs contain NaN/inf
with pytest.raises(NonFiniteValueError, match=r"Objective function has encountered.*"):
fit(m_init, x, y)
|
75ab4bab3ed4cd5ccdb1e93de53c5581824891dd26dac55b1f05f1e5e20d642b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal, assert_array_less
from astropy import units as u
from astropy.coordinates import Angle
from astropy.modeling import InputParameterError, fitting, models
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.exceptions import AstropyUserWarning
def test_sigma_constant():
"""
Test that the GAUSSIAN_SIGMA_TO_FWHM constant matches the
gaussian_sigma_to_fwhm constant in astropy.stats. We define
it manually in astropy.modeling to avoid importing from
astropy.stats.
"""
from astropy.modeling.functional_models import GAUSSIAN_SIGMA_TO_FWHM
from astropy.stats.funcs import gaussian_sigma_to_fwhm
assert gaussian_sigma_to_fwhm == GAUSSIAN_SIGMA_TO_FWHM
def test_Trapezoid1D():
"""Regression test for https://github.com/astropy/astropy/issues/1721"""
model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3)
xx = np.linspace(0, 4, 8)
yy = model(xx)
yy_ref = [0., 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.]
assert_allclose(yy, yy_ref, rtol=0, atol=1e-6)
def test_Gaussian1D():
model = models.Gaussian1D(4.2, 1.7, stddev=5.1)
x = np.mgrid[0:5]
g = model(x)
g_ref = [3.97302977, 4.16062403, 4.19273985, 4.06574509, 3.79389376]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
assert_allclose(model.fwhm, 12.009582229657841)
def test_Gaussian2D():
"""
Test rotated elliptical Gaussian2D model.
https://github.com/astropy/astropy/pull/2038
"""
model = models.Gaussian2D(4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3,
theta=np.pi/6.)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709],
[3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838],
[3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187],
[3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544],
[3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
assert_allclose([model.x_fwhm, model.y_fwhm],
[12.009582229657841, 7.7709061486021325])
def test_Gaussian2DCovariance():
"""
Test rotated elliptical Gaussian2D model when cov_matrix is input.
https://github.com/astropy/astropy/pull/2199
"""
cov_matrix = [[49., -16.], [-16., 9.]]
model = models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269],
[8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227],
[13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638],
[16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889],
[14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
# Test bad cov_matrix shape
cov_matrix = [[49., 3.14, -16.],
[3.14, -16., 9.],
[-16, 27, 3.14]]
with pytest.raises(ValueError) as err:
models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix)
assert str(err.value) == \
"Covariance matrix must be 2x2"
def test_Gaussian2DRotation():
amplitude = 42
x_mean, y_mean = 0, 0
x_stddev, y_stddev = 2, 3
theta = Angle(10, 'deg')
pars = dict(amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev)
rotation = models.Rotation2D(angle=theta.degree)
point1 = (x_mean + 2 * x_stddev, y_mean + 2 * y_stddev)
point2 = rotation(*point1)
g1 = models.Gaussian2D(theta=0, **pars)
g2 = models.Gaussian2D(theta=theta.radian, **pars)
value1 = g1(*point1)
value2 = g2(*point2)
assert_allclose(value1, value2)
def test_Gaussian2D_invalid_inputs():
x_stddev = 5.1
y_stddev = 3.3
theta = 10
cov_matrix = [[49., -16.], [-16., 9.]]
# first make sure the valid ones are OK
models.Gaussian2D()
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=None, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=None, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=None)
models.Gaussian2D(cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(x_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(y_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(theta=0, cov_matrix=cov_matrix)
def test_Gaussian2D_theta_bbox():
y, x = np.mgrid[0:51, 0:51]
theta = Angle(90, 'deg')
model1 = models.Gaussian2D(1, 25, 25, 15, 5, theta=theta)
theta = theta.to('radian').value
model2 = models.Gaussian2D(1, 25, 25, 15, 5, theta=theta)
assert model1.bounding_box == model2.bounding_box
@pytest.mark.parametrize('gamma', (10, -10))
def test_moffat_fwhm(gamma):
ans = 34.641016151377542
kwargs = {'gamma': gamma, 'alpha': 0.5}
m1 = models.Moffat1D(**kwargs)
m2 = models.Moffat2D(**kwargs)
assert_allclose([m1.fwhm, m2.fwhm], ans)
assert_array_less(0, [m1.fwhm, m2.fwhm])
def test_RedshiftScaleFactor():
"""Like ``test_ScaleModel()``."""
# Scale by a scalar
m = models.RedshiftScaleFactor(0.4)
assert m(0) == 0
assert_array_equal(m([1, 2]), [1.4, 2.8])
assert_allclose(m.inverse(m([1, 2])), [1, 2])
# Scale by a list
m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3)
assert_array_equal(m(0), 0)
assert_array_equal(m([1, 2], model_set_axis=False),
[[0.5, 1], [1, 2], [1.5, 3]])
assert_allclose(m.inverse(m([1, 2], model_set_axis=False)),
[[1, 2], [1, 2], [1, 2]])
def test_RedshiftScaleFactor_inverse():
m = models.RedshiftScaleFactor(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_RedshiftScaleFactor_inverse_bounding_box():
model = models.RedshiftScaleFactor(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (3, 15)
assert_allclose(inverse_model(model(4, with_bounding_box=True), with_bounding_box=True), 4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_RedshiftScaleFactor_model_levmar_fit():
"""Test fitting RedshiftScaleFactor model with LevMarLSQFitter."""
init_model = models.RedshiftScaleFactor()
x = np.arange(10)
y = 2.7174 * x
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [1.7174])
def test_Ellipse2D():
"""Test Ellipse2D model."""
amplitude = 7.5
x0, y0 = 15, 15
theta = Angle(45, 'deg')
em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian)
y, x = np.mgrid[0:30, 0:30]
e = em(x, y)
assert np.all(e[e > 0] == amplitude)
assert e[y0, x0] == amplitude
rotation = models.Rotation2D(angle=theta.degree)
point1 = [2, 0] # Rotation2D center is (0, 0)
point2 = rotation(*point1)
point1 = np.array(point1) + [x0, y0]
point2 = np.array(point2) + [x0, y0]
e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.)
e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian)
assert e1(*point1) == e2(*point2)
def test_Ellipse2D_circular():
"""Test that circular Ellipse2D agrees with Disk2D [3736]."""
amplitude = 7.5
radius = 10
size = (radius * 2) + 1
y, x = np.mgrid[0:size, 0:size]
ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius,
theta=0)(x, y)
disk = models.Disk2D(amplitude, radius, radius, radius)(x, y)
assert np.all(ellipse == disk)
def test_Scale_inverse():
m = models.Scale(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Scale_inverse_bounding_box():
model = models.Scale(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (2, 10)
assert inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
def test_Multiply_inverse():
m = models.Multiply(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Multiply_inverse_bounding_box():
model = models.Multiply(2)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (2, 10)
assert inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
def test_Shift_inverse():
m = models.Shift(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Shift_inverse_bounding_box():
model = models.Shift(10)
model.bounding_box = (1, 5)
assert model.bounding_box == (1, 5)
inverse_model = model.inverse
assert inverse_model.bounding_box == (11, 15)
assert inverse_model(model(4, with_bounding_box=True), with_bounding_box=True) == 4.0
@pytest.mark.skipif('not HAS_SCIPY')
def test_Shift_model_levmar_fit():
"""Test fitting Shift model with LevMarLSQFitter (issue #6103)."""
init_model = models.Shift()
x = np.arange(10)
y = x + 0.1
fitter = fitting.LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [0.1], atol=1e-15)
def test_Shift_model_set_linear_fit():
"""Test linear fitting of Shift model (issue #6103)."""
init_model = models.Shift(offset=[0, 0], n_models=2)
x = np.arange(10)
yy = np.array([x+0.1, x-0.2])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15)
@pytest.mark.parametrize('Model', (models.Scale, models.Multiply))
def test_Scale_model_set_linear_fit(Model):
"""Test linear fitting of Scale model (#6103)."""
init_model = Model(factor=[0, 0], n_models=2)
x = np.arange(-3, 7)
yy = np.array([1.15*x, 0.96*x])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15)
@pytest.mark.parametrize('Model', (models.Scale, models.Multiply))
def test_Scale_model_evaluate_without_units(Model):
m = Model(factor=4*u.m)
kwargs = {'x': 3*u.m, 'y': 7*u.m}
mnu = m.without_units_for_data(**kwargs)
x = np.linspace(-1, 1, 100)
assert_allclose(mnu(x), 4*x)
# https://github.com/astropy/astropy/issues/6178
def test_Ring2D_rout():
# Test with none of r_in, r_out, width specified
m = models.Ring2D(amplitude=1, x_0=1, y_0=1)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 1
# Test with r_in specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=4)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 4
assert m.width.value == 1
# Test with r_out specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=7)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 6
# Error when r_out is too small for default r_in
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=0.5)
assert str(err.value) == "r_in=1 and width=-0.5 must both be >=0"
# Test with width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, width=11)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 1
assert m.width.value == 11
# Test with r_in and r_out specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, r_out=5)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 2
assert m.width.value == 3
# Error when r_out is smaller than r_in
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=1, r_in=4)
assert str(err.value) == "r_in=4 and width=-3 must both be >=0"
# Test with r_in and width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, width=4)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 2
assert m.width.value == 4
# Test with r_out and width specified only
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=12, width=7)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 5
assert m.width.value == 7
# Error when width is larger than r_out
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_out=1, width=4)
assert str(err.value) == "r_in=-3 and width=4 must both be >=0"
# Test with r_in, r_out, and width all specified
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=3, r_out=11, width=8)
assert m.amplitude.value == 1
assert m.x_0.value == 1
assert m.y_0.value == 1
assert m.r_in.value == 3
assert m.width.value == 8
    # Error when all three are specified inconsistently
with pytest.raises(InputParameterError) as err:
models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=3, r_out=11, width=7)
assert str(err.value) == "Width must be r_out - r_in"
@pytest.mark.skipif("not HAS_SCIPY")
def test_Voigt1D():
voi = models.Voigt1D(amplitude_L=-0.5, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
xarr = np.linspace(-5.0, 5.0, num=40)
yarr = voi(xarr)
voi_init = models.Voigt1D(amplitude_L=-1.0, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
fitter = fitting.LevMarLSQFitter()
voi_fit = fitter(voi_init, xarr, yarr)
assert_allclose(voi_fit.param_sets, voi.param_sets)
# Invalid method
with pytest.raises(ValueError) as err:
models.Voigt1D(method='test')
assert str(err.value) ==\
"Not a valid method for Voigt1D Faddeeva function: test."
@pytest.mark.skipif("not HAS_SCIPY")
@pytest.mark.parametrize('algorithm', ('humlicek2', 'wofz'))
def test_Voigt1D_norm(algorithm):
"""Test integral of normalized Voigt profile."""
from scipy.integrate import quad
voi = models.Voigt1D(amplitude_L=1.0/np.pi, x_0=0.0, fwhm_L=2.0, fwhm_G=1.5, method=algorithm)
if algorithm == 'wofz':
atol = 1e-14
else:
atol = 1e-8
assert_allclose(quad(voi, -np.inf, np.inf)[0], 1.0, atol=atol)
@pytest.mark.skipif("not HAS_SCIPY")
@pytest.mark.parametrize('doppler', (1.e-3, 1.e-2, 0.1, 0.5, 1.0, 2.5, 5.0, 10))
def test_Voigt1D_hum2(doppler):
"""Verify accuracy of Voigt profile in Humlicek approximation to Faddeeva.cc (SciPy)."""
x = np.linspace(-20, 20, 400001)
voi_w = models.Voigt1D(amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler, method='wofz')
vf_w = voi_w(x)
dvda_w = voi_w.fit_deriv(x, x_0=0, amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler)
voi_h = models.Voigt1D(amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler, method='humlicek2')
vf_h = voi_h(x)
dvda_h = voi_h.fit_deriv(x, x_0=0, amplitude_L=2.0/np.pi, fwhm_L=1.0, fwhm_G=doppler)
assert_allclose(vf_h, vf_w, rtol=1e-7 * (2 + 1 / np.sqrt(doppler)))
assert_allclose(dvda_h, dvda_w, rtol=1e-9, atol=1e-7 * (1 + 30 / doppler))
@pytest.mark.skipif("not HAS_SCIPY")
def test_KingProjectedAnalytic1D_fit():
km = models.KingProjectedAnalytic1D(amplitude=1, r_core=1, r_tide=2)
xarr = np.linspace(0.1, 2, 10)
yarr = km(xarr)
km_init = models.KingProjectedAnalytic1D(amplitude=1, r_core=1, r_tide=1)
fitter = fitting.LevMarLSQFitter()
km_fit = fitter(km_init, xarr, yarr)
assert_allclose(km_fit.param_sets, km.param_sets)
assert_allclose(km_fit.concentration, 0.30102999566398136)
@pytest.mark.parametrize('model', [models.Exponential1D(), models.Logarithmic1D()])
def test_ExponentialAndLogarithmic1D_fit(model):
xarr = np.linspace(0.1, 10., 200)
assert_allclose(xarr, model.inverse(model(xarr)))
@pytest.mark.parametrize('model', [models.Exponential1D(), models.Logarithmic1D()])
def test_ExponentialAndLogarithmic_set_tau(model):
message = "0 is not an allowed value for tau"
with pytest.raises(ValueError) as err:
model.tau = 0
assert str(err.value) == message
def test_Linear1D_inverse():
model = models.Linear1D(slope=4, intercept=-12)
inverse = model.inverse
assert inverse.slope == 1/4
assert inverse.intercept == 3
@pytest.mark.parametrize('trig', [(models.Sine1D, [-0.25, 0.25]),
(models.ArcSine1D, [-0.25, 0.25]),
(models.Cosine1D, [0, 0.5]),
(models.ArcCosine1D, [0, 0.5]),
(models.Tangent1D, [-0.25, 0.25]),
(models.ArcTangent1D, [-0.25, 0.25])])
def test_trig_inverse(trig):
mdl = trig[0]()
lower, upper = trig[1]
x = np.arange(lower, upper, 0.01)
assert_allclose(mdl.inverse(mdl(x)), x, atol=1e-10)
assert_allclose(mdl(mdl.inverse(x)), x, atol=1e-10)
|
f68c3a3120d1dc623de969315450b70ef70d8cc2ebcb0e10554ba0286c2339e4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
from inspect import Parameter
import numpy as np
import pytest
from astropy.modeling.utils import (_SpecialOperatorsDict, _validate_domain_window,
get_inputs_and_params, poly_map_domain)
def test_poly_map_domain():
oldx = np.array([1, 2, 3, 4])
# test shift/scale
assert (poly_map_domain(oldx, (-4, 4), (-3, 3)) == [0.75, 1.5, 2.25, 3]).all()
# errors
MESSAGE = 'Expected "domain" and "window" to be a tuple of size 2.'
with pytest.raises(ValueError) as err:
poly_map_domain(oldx, (-4,), (-3, 3))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
poly_map_domain(oldx, (-4, 4, -4), (-3, 3))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
poly_map_domain(oldx, (-4, 4), (-3,))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
poly_map_domain(oldx, (-4, 4), (-3, 3, -3))
assert str(err.value) == MESSAGE
def test__validate_domain_window():
# Test if None
assert _validate_domain_window(None) is None
# Test normal
assert _validate_domain_window((-2, 2)) == (-2, 2)
assert _validate_domain_window([-2, 2]) == (-2, 2)
assert _validate_domain_window(np.array([-2, 2])) == (-2, 2)
# Test error
MESSAGE = 'domain and window should be tuples of size 2.'
with pytest.raises(ValueError) as err:
_validate_domain_window((-2, 2, -2))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
_validate_domain_window((-2,))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
_validate_domain_window([-2])
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
_validate_domain_window(np.array([-2]))
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
_validate_domain_window(-2)
assert str(err.value) == MESSAGE
def test_get_inputs_and_params():
# test normal
def func1(input0, input1, param0=5, param1=7):
pass
inputs, params = get_inputs_and_params(func1)
for index, _input in enumerate(inputs):
assert isinstance(_input, Parameter)
assert _input.name == f"input{index}"
assert _input.kind == _input.POSITIONAL_OR_KEYWORD
assert _input.default == Parameter.empty
default = [5, 7]
for index, param in enumerate(params):
assert isinstance(param, Parameter)
assert param.name == f"param{index}"
assert param.kind == param.POSITIONAL_OR_KEYWORD
assert param.default == default[index]
# Error
MESSAGE = "Signature must not have *args or **kwargs"
def func2(input0, input1, *args, param0=5, param1=7):
pass
def func3(input0, input1, param0=5, param1=7, **kwargs):
pass
with pytest.raises(ValueError) as err:
get_inputs_and_params(func2)
assert str(err.value) == MESSAGE
with pytest.raises(ValueError) as err:
get_inputs_and_params(func3)
assert str(err.value) == MESSAGE
class Test_SpecialOperatorsDict:
def setup(self):
self.key = 'test'
self.val = 'value'
def test__set_value(self):
special_operators = _SpecialOperatorsDict()
assert self.key not in special_operators
special_operators._set_value(self.key, self.val)
assert self.key in special_operators
assert special_operators[self.key] == self.val
with pytest.raises(ValueError, match='Special operator "test" already exists'):
special_operators._set_value(self.key, self.val)
def test___setitem__(self):
special_operators = _SpecialOperatorsDict()
assert self.key not in special_operators
with pytest.deprecated_call():
special_operators[self.key] = self.val
assert self.key in special_operators
assert special_operators[self.key] == self.val
def test__SpecialOperatorsDict__get_unique_id(self):
special_operators = _SpecialOperatorsDict()
assert special_operators._unique_id == 0
assert special_operators._get_unique_id() == 1
assert special_operators._unique_id == 1
assert special_operators._get_unique_id() == 2
assert special_operators._unique_id == 2
assert special_operators._get_unique_id() == 3
assert special_operators._unique_id == 3
def test__SpecialOperatorsDict_add(self):
special_operators = _SpecialOperatorsDict()
operator_name = 'test'
operator = 'operator'
key0 = special_operators.add(operator_name, operator)
assert key0 == (operator_name, special_operators._unique_id)
assert key0 in special_operators
assert special_operators[key0] == operator
key1 = special_operators.add(operator_name, operator)
assert key1 == (operator_name, special_operators._unique_id)
assert key1 in special_operators
assert special_operators[key1] == operator
assert key0 != key1
|
aeed1c74245409a578dddec218e692db93a09a65701af41c87e3d0d7710821e0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for model evaluation.
Compare the results of some models with other programs.
"""
import unittest.mock as mk
import numpy as np
# pylint: disable=invalid-name, no-member
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy.modeling.tabular as tabular_models
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import FittableModel, Model, _ModelMeta
from astropy.modeling.models import Gaussian2D
from astropy.modeling.parameters import InputParameterError, Parameter
from astropy.modeling.polynomial import PolynomialBase
from astropy.modeling.powerlaws import (BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D,
LogParabola1D, PowerLaw1D, SmoothlyBrokenPowerLaw1D)
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from .example_models import models_1D, models_2D
@pytest.mark.skipif('not HAS_SCIPY')
def test_custom_model(amplitude=4, frequency=1):
def sine_model(x, amplitude=4, frequency=1):
"""
Model function
"""
return amplitude * np.sin(2 * np.pi * frequency * x)
def sine_deriv(x, amplitude=4, frequency=1):
"""
        Jacobian of the model function, i.e. the derivative of the function
        with respect to the *parameters*
"""
da = np.sin(2 * np.pi * frequency * x)
df = 2 * np.pi * x * amplitude * np.cos(2 * np.pi * frequency * x)
return np.vstack((da, df))
SineModel = models.custom_model(sine_model, fit_deriv=sine_deriv)
x = np.linspace(0, 4, 50)
sin_model = SineModel()
sin_model.evaluate(x, 5., 2.)
sin_model.fit_deriv(x, 5., 2.)
np.random.seed(0)
data = sin_model(x) + np.random.rand(len(x)) - 0.5
fitter = fitting.LevMarLSQFitter()
model = fitter(sin_model, x, data)
assert np.all((np.array([model.amplitude.value, model.frequency.value]) -
np.array([amplitude, frequency])) < 0.001)
def test_custom_model_init():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel(amplitude=2., frequency=0.5)
assert sin_model.amplitude == 2.
assert sin_model.frequency == 0.5
def test_custom_model_defaults():
@models.custom_model
def SineModel(x, amplitude=4, frequency=1):
"""Model function"""
return amplitude * np.sin(2 * np.pi * frequency * x)
sin_model = SineModel()
assert SineModel.amplitude.default == 4
assert SineModel.frequency.default == 1
assert sin_model.amplitude == 4
assert sin_model.frequency == 1
def test_inconsistent_input_shapes():
g = Gaussian2D()
x = np.arange(-1., 1, .2)
y = x.copy()
# check scalar input broadcasting works
assert np.abs(g(x, 0) - g(x, 0 * x)).sum() == 0
    # and that arrays with compatible shapes broadcast as in numpy
x.shape = (10, 1)
y.shape = (1, 10)
result = g(x, y)
assert result.shape == (10, 10)
def test_custom_model_bounding_box():
"""Test bounding box evaluation for a 3D model"""
def ellipsoid(x, y, z, x0=13, y0=10, z0=8, a=4, b=3, c=2, amp=1):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(models.custom_model(ellipsoid)):
@property
def bounding_box(self):
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
bbox = model.bounding_box
zlim, ylim, xlim = bbox.bounding_box()
dz, dy, dx = np.diff(bbox) / 2
z1, y1, x1 = np.mgrid[slice(zlim[0], zlim[1] + 1),
slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
z2, y2, x2 = np.mgrid[slice(zlim[0] - dz, zlim[1] + dz + 1),
slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2, z2, with_bounding_box=True)
sub_arr = model(x1, y1, z1, with_bounding_box=True)
# check for flux agreement
assert abs(np.nansum(arr) - np.nansum(sub_arr)) < np.nansum(arr) * 1e-7
class Fittable2DModelTester:
"""
Test class for all two dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests the
    model with different input types, evaluates the model at different
    positions to check that it returns the correct values, and tests whether
    the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.1
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
def test_input2D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x, self.y)
model(self.x1, self.y1)
model(self.x2, self.y2)
def test_eval2D(self, model_class, test_parameters):
"""Test model values add certain given points"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
z = test_parameters['z_values']
assert np.all(np.abs(model(x, y) - z) < self.eval_error)
def test_bounding_box2D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = ((-5, 5), (-5, 5))
assert model.bounding_box == ((-5, 5), (-5, 5))
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
        # test the exception raised when dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = (-5, 5)
del model.bounding_box
try:
bbox = model.bounding_box
except NotImplementedError:
return
ddx = 0.01
ylim, xlim = bbox
x1 = np.arange(xlim[0], xlim[1], ddx)
y1 = np.arange(ylim[0], ylim[1], ddx)
x2 = np.concatenate(([xlim[0] - idx * ddx for idx in range(10, 0, -1)],
x1,
[xlim[1] + idx * ddx for idx in range(1, 10)]))
y2 = np.concatenate(([ylim[0] - idx * ddx for idx in range(10, 0, -1)],
y1,
[ylim[1] + idx * ddx for idx in range(1, 10)]))
inside_bbox = model(x1, y1)
outside_bbox = model(x2, y2, with_bounding_box=True)
outside_bbox = outside_bbox[~np.isnan(outside_bbox)]
assert np.all(inside_bbox == outside_bbox)
def test_bounding_box2D_peak(self, model_class, test_parameters):
if not test_parameters.pop('bbox_peak', False):
return
model = create_model(model_class, test_parameters)
bbox = model.bounding_box
ylim, xlim = bbox
dy, dx = np.diff(bbox)/2
y1, x1 = np.mgrid[slice(ylim[0], ylim[1] + 1),
slice(xlim[0], xlim[1] + 1)]
y2, x2 = np.mgrid[slice(ylim[0] - dy, ylim[1] + dy + 1),
slice(xlim[0] - dx, xlim[1] + dx + 1)]
arr = model(x2, y2)
sub_arr = model(x1, y1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * 1e-7
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitter2D(self, model_class, test_parameters):
"""Test if the parametric model works with the fitter."""
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.N)
xv, yv = np.meshgrid(x, y)
np.random.seed(0)
# add 10% noise to the amplitude
noise = np.random.rand(self.N, self.N) - 0.5
data = model(xv, yv) + 0.1 * parameters[0] * noise
fitter = fitting.LevMarLSQFitter()
new_model = fitter(model, xv, yv, data)
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed)
if not fixed])
fitted = np.array([param.value for param in params
if not param.fixed])
assert_allclose(fitted, expected,
atol=self.fit_error)
@pytest.mark.skipif('not HAS_SCIPY')
def test_deriv_2D(self, model_class, test_parameters):
"""
        Test the derivative of a model by fitting with both an estimated and
        an analytical derivative.
"""
x_lim = test_parameters['x_lim']
y_lim = test_parameters['y_lim']
if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
return
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
y = np.logspace(y_lim[0], y_lim[1], self.M)
x_test = np.logspace(x_lim[0], x_lim[1], self.N*10)
y_test = np.logspace(y_lim[0], y_lim[1], self.M*10)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
y = np.linspace(y_lim[0], y_lim[1], self.M)
x_test = np.linspace(x_lim[0], x_lim[1], self.N*10)
y_test = np.linspace(y_lim[0], y_lim[1], self.M*10)
xv, yv = np.meshgrid(x, y)
xv_test, yv_test = np.meshgrid(x_test, y_test)
try:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
model = create_model(model_class, test_parameters,
use_constraints=False,
parameter_key='deriv_initial')
except KeyError:
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model = create_model(model_class, test_parameters,
use_constraints=False)
# add 10% noise to the amplitude
rsn = np.random.default_rng(0)
amplitude = test_parameters['parameters'][0]
n = 0.1 * amplitude * (rsn.random((self.M, self.N)) - 0.5)
data = model(xv, yv) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, xv, yv,
data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(model_no_deriv, xv, yv, data,
estimate_jacobian=True)
assert_allclose(new_model_with_deriv(xv_test, yv_test),
new_model_no_deriv(xv_test, yv_test),
rtol=1e-2)
if model_class != Gaussian2D:
assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters,
rtol=0.1)
class Fittable1DModelTester:
"""
Test class for all one dimensional parametric models.
    Test values have to be defined in example_models.py. It currently tests the
    model with different input types, evaluates the model at different
    positions to check that it returns the correct values, and tests whether
    the model works with non-linear fitters.
This can be used as a base class for user defined model testing.
"""
    # These models will fail the fitting test, because the built-in fitting
    # data produce non-finite values
_non_finite_models = [
BrokenPowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D,
PowerLaw1D,
SmoothlyBrokenPowerLaw1D
]
def setup_class(self):
self.N = 100
self.M = 100
self.eval_error = 0.0001
self.fit_error = 0.11
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
def test_input1D(self, model_class, test_parameters):
"""Test model with different input types."""
model = create_model(model_class, test_parameters)
model(self.x)
model(self.x1)
model(self.x2)
def test_eval1D(self, model_class, test_parameters):
"""
Test model values at certain given points
"""
model = create_model(model_class, test_parameters)
x = test_parameters['x_values']
y = test_parameters['y_values']
assert_allclose(model(x), y, atol=self.eval_error)
def test_bounding_box1D(self, model_class, test_parameters):
"""Test bounding box evaluation"""
model = create_model(model_class, test_parameters)
# testing setter
model.bounding_box = (-5, 5)
model.bounding_box = None
with pytest.raises(NotImplementedError):
model.bounding_box
del model.bounding_box
# test exception if dimensions don't match
with pytest.raises(ValueError):
model.bounding_box = 5
try:
bbox = model.bounding_box.bounding_box()
except NotImplementedError:
return
ddx = 0.01
x1 = np.arange(bbox[0], bbox[1], ddx)
x2 = np.concatenate(([bbox[0] - idx * ddx for idx in range(10, 0, -1)],
x1,
[bbox[1] + idx * ddx for idx in range(1, 10)]))
inside_bbox = model(x1)
outside_bbox = model(x2, with_bounding_box=True)
outside_bbox = outside_bbox[~np.isnan(outside_bbox)]
assert np.all(inside_bbox == outside_bbox)
def test_bounding_box1D_peak(self, model_class, test_parameters):
if not test_parameters.pop('bbox_peak', False):
return
model = create_model(model_class, test_parameters)
bbox = model.bounding_box
if isinstance(model, models.Lorentz1D) or isinstance(model, models.Drude1D):
rtol = 0.01 # 1% agreement is enough due to very extended wings
ddx = 0.1 # Finer sampling to "integrate" flux for narrow peak
else:
rtol = 1e-7
ddx = 1
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
dx = np.diff(bbox) / 2
x1 = np.mgrid[slice(bbox[0], bbox[1] + 1, ddx)]
x2 = np.mgrid[slice(bbox[0] - dx, bbox[1] + dx + 1, ddx)]
arr = model(x2)
sub_arr = model(x1)
# check for flux agreement
assert abs(arr.sum() - sub_arr.sum()) < arr.sum() * rtol
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitter1D(self, model_class, test_parameters):
"""
Test if the parametric model works with the fitter.
"""
x_lim = test_parameters['x_lim']
parameters = test_parameters['parameters']
model = create_model(model_class, test_parameters)
if isinstance(parameters, dict):
parameters = [parameters[name] for name in model.param_names]
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
np.random.seed(0)
        # add 1% relative noise to the data
relative_noise_amplitude = 0.01
data = ((1 + relative_noise_amplitude * np.random.randn(len(x))) *
model(x))
fitter = fitting.LevMarLSQFitter()
new_model = fitter(model, x, data)
# Only check parameters that were free in the fit
params = [getattr(new_model, name) for name in new_model.param_names]
fixed = [param.fixed for param in params]
expected = np.array([val for val, fixed in zip(parameters, fixed)
if not fixed])
fitted = np.array([param.value for param in params
if not param.fixed])
assert_allclose(fitted, expected, atol=self.fit_error)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
def test_deriv_1D(self, model_class, test_parameters):
"""
Test the derivative of a model by comparing results with an estimated
derivative.
"""
if model_class in self._non_finite_models:
return
x_lim = test_parameters['x_lim']
if model_class.fit_deriv is None or issubclass(model_class, PolynomialBase):
return
if "log_fit" in test_parameters:
if test_parameters['log_fit']:
x = np.logspace(x_lim[0], x_lim[1], self.N)
else:
x = np.linspace(x_lim[0], x_lim[1], self.N)
parameters = test_parameters['parameters']
model_with_deriv = create_model(model_class, test_parameters,
use_constraints=False)
model_no_deriv = create_model(model_class, test_parameters,
use_constraints=False)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
# add 10% noise to the amplitude
rsn_rand_1234567890 = np.array([
0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890])
n = 0.1 * parameters[0] * (rsn_rand_1234567890 - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,
estimate_jacobian=True)
assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters, atol=0.15)
def create_model(model_class, test_parameters, use_constraints=True,
parameter_key='parameters'):
"""Create instance of model class."""
constraints = {}
if issubclass(model_class, PolynomialBase):
return model_class(**test_parameters[parameter_key])
elif issubclass(model_class, FittableModel):
if "requires_scipy" in test_parameters and not HAS_SCIPY:
pytest.skip("SciPy not found")
if use_constraints:
if 'constraints' in test_parameters:
constraints = test_parameters['constraints']
return model_class(*test_parameters[parameter_key], **constraints)
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.parametrize(('model_class', 'test_parameters'),
sorted(models_1D.items(), key=lambda x: str(x[0])))
class TestFittable1DModels(Fittable1DModelTester):
pass
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.parametrize(('model_class', 'test_parameters'),
sorted(models_2D.items(), key=lambda x: str(x[0])))
class TestFittable2DModels(Fittable2DModelTester):
pass
def test_ShiftModel():
# Shift by a scalar
m = models.Shift(42)
assert m(0) == 42
assert_equal(m([1, 2]), [43, 44])
# Shift by a list
m = models.Shift([42, 43], n_models=2)
assert_equal(m(0), [42, 43])
assert_equal(m([1, 2], model_set_axis=False),
[[43, 44], [44, 45]])
def test_ScaleModel():
# Scale by a scalar
m = models.Scale(42)
assert m(0) == 0
assert_equal(m([1, 2]), [42, 84])
# Scale by a list
m = models.Scale([42, 43], n_models=2)
assert_equal(m(0), [0, 0])
assert_equal(m([1, 2], model_set_axis=False),
[[42, 84], [43, 86]])
def test_voigt_model():
"""
    Currently just tests that the model peaks at its center (x_0).
Regression test for https://github.com/astropy/astropy/issues/3942
"""
m = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
x = np.arange(0, 10, 0.01)
y = m(x)
assert y[500] == y.max() # y[500] is right at the center
def test_model_instance_repr():
m = models.Gaussian1D(1.5, 2.5, 3.5)
assert repr(m) == '<Gaussian1D(amplitude=1.5, mean=2.5, stddev=3.5)>'
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_interp_1d():
"""
Test Tabular1D model.
"""
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
LookupTable = models.tabular_model(1)
model = LookupTable(points=points, lookup_table=values)
xnew = [0., .7, 1.4, 2.1, 3.9]
ans1 = [1., 7.3, 6.8, 6.3, 1.8]
assert_allclose(model(xnew), ans1)
# Test evaluate without passing `points`.
model = LookupTable(lookup_table=values)
assert_allclose(model(xnew), ans1)
# Test bounds error.
xextrap = [0., .7, 1.4, 2.1, 3.9, 4.1]
with pytest.raises(ValueError):
model(xextrap)
# test extrapolation and fill value
model = LookupTable(lookup_table=values, bounds_error=False,
fill_value=None)
assert_allclose(model(xextrap),
[1., 7.3, 6.8, 6.3, 1.8, -7.8])
# Test unit support
xnew = xnew * u.nm
ans1 = ans1 * u.nJy
model = LookupTable(points=points*u.nm, lookup_table=values*u.nJy)
assert_quantity_allclose(model(xnew), ans1)
assert_quantity_allclose(model(xnew.to(u.nm)), ans1)
assert model.bounding_box == (0 * u.nm, 4 * u.nm)
# Test fill value unit conversion and unitless input on table with unit
model = LookupTable([1, 2, 3], [10, 20, 30] * u.nJy, bounds_error=False,
fill_value=1e-33*(u.W / (u.m * u.m * u.Hz)))
assert_quantity_allclose(model(np.arange(5)),
[100, 10, 20, 30, 100] * u.nJy)
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_interp_2d():
table = np.array([
[-0.04614432, -0.02512547, -0.00619557, 0.0144165, 0.0297525],
[-0.04510594, -0.03183369, -0.01118008, 0.01201388, 0.02496205],
[-0.05464094, -0.02804499, -0.00960086, 0.01134333, 0.02284104],
[-0.04879338, -0.02539565, -0.00440462, 0.01795145, 0.02122417],
[-0.03637372, -0.01630025, -0.00157902, 0.01649774, 0.01952131]])
points = np.arange(0, 5)
points = (points, points)
xnew = np.array([0., .7, 1.4, 2.1, 3.9])
LookupTable = models.tabular_model(2)
model = LookupTable(points, table)
znew = model(xnew, xnew)
result = np.array(
[-0.04614432, -0.03450009, -0.02241028, -0.0069727, 0.01938675])
assert_allclose(znew, result, atol=1e-7)
# test 2D arrays as input
a = np.arange(12).reshape((3, 4))
y, x = np.mgrid[:3, :4]
t = models.Tabular2D(lookup_table=a)
r = t(y, x)
assert_allclose(a, r)
with pytest.raises(ValueError):
model = LookupTable(points=([1.2, 2.3], [1.2, 6.7], [3, 4]))
with pytest.raises(ValueError):
model = LookupTable(lookup_table=[1, 2, 3])
with pytest.raises(NotImplementedError):
model = LookupTable(n_models=2)
with pytest.raises(ValueError):
model = LookupTable(([1, 2], [3, 4]), [5, 6])
with pytest.raises(ValueError):
model = LookupTable(([1, 2] * u.m, [3, 4]), [[5, 6], [7, 8]])
with pytest.raises(ValueError):
model = LookupTable(points, table, bounds_error=False,
fill_value=1*u.Jy)
# Test unit support
points = points[0] * u.nm
points = (points, points)
xnew = xnew * u.nm
model = LookupTable(points, table * u.nJy)
result = result * u.nJy
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
xnew = xnew.to(u.m)
assert_quantity_allclose(model(xnew, xnew), result, atol=1e-7*u.nJy)
bbox = (0 * u.nm, 4 * u.nm)
bbox = (bbox, bbox)
assert model.bounding_box == bbox
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_nd():
a = np.arange(24).reshape((2, 3, 4))
x, y, z = np.mgrid[:2, :3, :4]
tab = models.tabular_model(3)
t = tab(lookup_table=a)
result = t(x, y, z)
assert_allclose(a, result)
with pytest.raises(ValueError):
models.tabular_model(0)
def test_with_bounding_box():
"""
Test the option to evaluate a model respecting
its bounding_box.
"""
p = models.Polynomial2D(2) & models.Polynomial2D(2)
m = models.Mapping((0, 1, 0, 1)) | p
with NumpyRNGContext(1234567):
m.parameters = np.random.rand(12)
m.bounding_box = ((3, 9), (1, 8))
x, y = np.mgrid[:10, :10]
a, b = m(x, y)
aw, bw = m(x, y, with_bounding_box=True)
ind = (~np.isnan(aw)).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
aw, bw = m(x, y, with_bounding_box=True, fill_value=1000)
ind = (aw != 1000).nonzero()
assert_allclose(a[ind], aw[ind])
assert_allclose(b[ind], bw[ind])
# test the order of bbox is not reversed for 1D models
p = models.Polynomial1D(1, c0=12, c1=2.3)
p.bounding_box = (0, 5)
assert p(1) == p(1, with_bounding_box=True)
t3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
t3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(t3([1, 1], [7, 7], [3, 5], with_bounding_box=True),
[[np.nan, 11], [np.nan, 14], [np.nan, 4]])
trans3 = models.Shift(10) & models.Scale(2) & models.Shift(-1)
trans3.bounding_box = ((4.3, 6.9), (6, 15), (-1, 10))
assert_allclose(trans3(1, 7, 5, with_bounding_box=True), [11, 14, 4])
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_with_bounding_box():
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t(1, with_bounding_box=True)
assert result == 3.4
assert t.inverse(result, with_bounding_box=True) == 1.
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_bounding_box_with_units():
points = np.arange(5)*u.pix
lt = np.arange(5)*u.AA
t = models.Tabular1D(points, lt)
result = t(1*u.pix, with_bounding_box=True)
assert result == 1.*u.AA
assert t.inverse(result, with_bounding_box=True) == 1*u.pix
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular1d_inverse():
"""Test that the Tabular1D inverse is defined"""
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
result = t.inverse((3.4, 6.7))
assert_allclose(result, np.array((1., 2.)))
# Check that it works for descending values in lookup_table
t2 = models.Tabular1D(points, values[::-1])
assert_allclose(t2.inverse.points[0], t2.lookup_table[::-1])
result2 = t2.inverse((7, 6.7))
assert_allclose(result2, np.array((1., 2.)))
# Check that it errors on double-valued lookup_table
points = np.arange(5)
values = np.array([1.5, 3.4, 3.4, 32, 25])
t = models.Tabular1D(points, values)
with pytest.raises(NotImplementedError):
t.inverse((3.4, 7.))
# Check that Tabular2D.inverse raises an error
table = np.arange(5*5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t3 = models.Tabular2D(points=points, lookup_table=table)
with pytest.raises(NotImplementedError):
t3.inverse((3, 3))
# Check that it uses the same kwargs as the original model
points = np.arange(5)
values = np.array([1.5, 3.4, 6.7, 7, 32])
t = models.Tabular1D(points, values)
with pytest.raises(ValueError):
t.inverse(100)
t = models.Tabular1D(points, values, bounds_error=False, fill_value=None)
result = t.inverse(100)
assert_allclose(t(result), 100)
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_grid_shape_mismatch_error():
points = np.arange(5)
lt = np.mgrid[0:5, 0:5][0]
with pytest.raises(ValueError) as err:
models.Tabular2D(points, lt)
assert str(err.value) ==\
"Expected grid points in 2 directions, got 5."
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_repr():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert repr(t) ==\
"<Tabular1D(points=(array([0, 1, 2, 3, 4]),), lookup_table=[0 1 2 3 4])>"
table = np.arange(5*5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert repr(t) ==\
"<Tabular2D(points=(array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4])), " +\
"lookup_table=[[ 0 1 2 3 4]\n" +\
" [ 5 6 7 8 9]\n" +\
" [10 11 12 13 14]\n" +\
" [15 16 17 18 19]\n" +\
" [20 21 22 23 24]])>"
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_str():
points = np.arange(5)
lt = np.arange(5)
t = models.Tabular1D(points, lt)
assert str(t) ==\
"Model: Tabular1D\n" +\
"N_inputs: 1\n" +\
"N_outputs: 1\n" +\
"Parameters: \n" +\
" points: (array([0, 1, 2, 3, 4]),)\n" +\
" lookup_table: [0 1 2 3 4]\n" +\
" method: linear\n" +\
" fill_value: nan\n" +\
" bounds_error: True"
table = np.arange(5*5).reshape(5, 5)
points = np.arange(0, 5)
points = (points, points)
t = models.Tabular2D(points=points, lookup_table=table)
assert str(t) ==\
"Model: Tabular2D\n" +\
"N_inputs: 2\n" +\
"N_outputs: 1\n" +\
"Parameters: \n" +\
" points: (array([0, 1, 2, 3, 4]), array([0, 1, 2, 3, 4]))\n" +\
" lookup_table: [[ 0 1 2 3 4]\n" +\
" [ 5 6 7 8 9]\n" +\
" [10 11 12 13 14]\n" +\
" [15 16 17 18 19]\n" +\
" [20 21 22 23 24]]\n" +\
" method: linear\n" +\
" fill_value: nan\n" +\
" bounds_error: True"
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_evaluate():
points = np.arange(5)
lt = np.arange(5)[::-1]
t = models.Tabular1D(points, lt)
assert (t.evaluate([1, 2, 3]) == [3, 2, 1]).all()
assert (t.evaluate(np.array([1, 2, 3]) * u.m) == [3, 2, 1]).all()
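# Force a second output and mock scipy's interpn so that the multi-output
# unpacking branch of Tabular.evaluate is exercised without building a
# real two-output table.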
t.n_outputs = 2
value = [np.array([3, 2, 1]), np.array([1, 2, 3])]
with mk.patch.object(tabular_models, 'interpn', autospec=True, return_value=value) as mkInterpn:
outputs = t.evaluate([1, 2, 3])
for index, output in enumerate(outputs):
assert np.all(value[index] == output)
assert mkInterpn.call_count == 1
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_module_name():
"""
The module name must be set manually because
these classes are created dynamically.
"""
for model in [models.Tabular1D, models.Tabular2D]:
assert model.__module__ == "astropy.modeling.tabular"
class classmodel(FittableModel):
f = Parameter(default=1)
x = Parameter(default=0)
y = Parameter(default=2)
def __init__(self, f=f.default, x=x.default, y=y.default):
super().__init__(f, x, y)
def evaluate(self):
pass
class subclassmodel(classmodel):
f = Parameter(default=3, fixed=True)
x = Parameter(default=10)
y = Parameter(default=12)
h = Parameter(default=5)
def __init__(self, f=f.default, x=x.default, y=y.default, h=h.default):
super().__init__(f, x, y)
def evaluate(self):
pass
def test_parameter_inheritance():
b = subclassmodel()
assert b.param_names == ('f', 'x', 'y', 'h')
assert b.h == 5
assert b.f == 3
assert b.f.fixed == True # noqa: E712
def test_parameter_description():
model = models.Gaussian1D(1.5, 2.5, 3.5)
assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
assert model.mean._description == "Position of peak (Gaussian)"
model = models.Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
assert model.amplitude_L._description == "The Lorentzian amplitude"
assert model.fwhm_L._description == "The Lorentzian full width at half maximum"
assert model.fwhm_G._description == "The Gaussian full width at half maximum"
def test_SmoothlyBrokenPowerLaw1D_validators():
with pytest.raises(InputParameterError) as err:
SmoothlyBrokenPowerLaw1D(amplitude=-1)
assert str(err.value) ==\
"amplitude parameter must be > 0"
with pytest.raises(InputParameterError) as err:
SmoothlyBrokenPowerLaw1D(delta=0)
assert str(err.value) ==\
"delta parameter must be >= 0.001"
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
def test_SmoothlyBrokenPowerLaw1D_fit_deriv():
x_lim = [0.01, 100]
x = np.logspace(x_lim[0], x_lim[1], 100)
parameters = {'parameters': [1, 10, -2, 2, 0.5],
'constraints': {'fixed': {'x_break': True, 'delta': True}}}
model_with_deriv = create_model(SmoothlyBrokenPowerLaw1D, parameters,
use_constraints=False)
model_no_deriv = create_model(SmoothlyBrokenPowerLaw1D, parameters,
use_constraints=False)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
# add 10% noise to the amplitude
rsn_rand_1234567890 = np.array([
0.61879477, 0.59162363, 0.88868359, 0.89165480, 0.45756748,
0.77818808, 0.26706377, 0.99610621, 0.54009489, 0.53752161,
0.40099938, 0.70540579, 0.40518559, 0.94999075, 0.03075388,
0.13602495, 0.08297726, 0.42352224, 0.23449723, 0.74743526,
0.65177865, 0.68998682, 0.16413419, 0.87642114, 0.44733314,
0.57871104, 0.52377835, 0.62689056, 0.34869427, 0.26209748,
0.07498055, 0.17940570, 0.82999425, 0.98759822, 0.11326099,
0.63846415, 0.73056694, 0.88321124, 0.52721004, 0.66487673,
0.74209309, 0.94083846, 0.70123128, 0.29534353, 0.76134369,
0.77593881, 0.36985514, 0.89519067, 0.33082813, 0.86108824,
0.76897859, 0.61343376, 0.43870907, 0.91913538, 0.76958966,
0.51063556, 0.04443249, 0.57463611, 0.31382006, 0.41221713,
0.21531811, 0.03237521, 0.04166386, 0.73109303, 0.74556052,
0.64716325, 0.77575353, 0.64599254, 0.16885816, 0.48485480,
0.53844248, 0.99690349, 0.23657074, 0.04119088, 0.46501519,
0.35739006, 0.23002665, 0.53420791, 0.71639475, 0.81857486,
0.73994342, 0.07948837, 0.75688276, 0.13240193, 0.48465576,
0.20624753, 0.02298276, 0.54257873, 0.68123230, 0.35887468,
0.36296147, 0.67368397, 0.29505730, 0.66558885, 0.93652252,
0.36755130, 0.91787687, 0.75922703, 0.48668067, 0.45967890])
n = 0.1 * parameters['parameters'][0] * (rsn_rand_1234567890 - 0.5)
data = model_with_deriv(x) + n
fitter_with_deriv = fitting.LevMarLSQFitter()
new_model_with_deriv = fitter_with_deriv(model_with_deriv, x, data)
fitter_no_deriv = fitting.LevMarLSQFitter()
new_model_no_deriv = fitter_no_deriv(model_no_deriv, x, data,
estimate_jacobian=True)
assert_allclose(new_model_with_deriv.parameters,
new_model_no_deriv.parameters, atol=0.5)
class _ExtendedModelMeta(_ModelMeta):
@classmethod
def __prepare__(mcls, name, bases, **kwds):
# this shows the parent class machinery still applies
namespace = super().__prepare__(name, bases, **kwds)
# the custom bit
namespace.update(kwds)
return namespace
model = models.Gaussian1D(1.5, 2.5, 3.5)
assert model.amplitude._description == "Amplitude (peak value) of the Gaussian"
assert model.mean._description == "Position of peak (Gaussian)"
def test_metaclass_kwargs():
"""Test can pass kwargs to Models"""
class ClassModel(FittableModel, flag="flag"):
def evaluate(self):
pass
# Nothing further to test; just creating the class is enough.
def test_submetaclass_kwargs():
"""Test can pass kwargs to Model subclasses."""
class ClassModel(FittableModel, metaclass=_ExtendedModelMeta, flag="flag"):
def evaluate(self):
pass
assert ClassModel.flag == "flag"
class ModelDefault(Model):
slope = Parameter()
intercept = Parameter()
_separable = False
@staticmethod
def evaluate(x, slope, intercept):
return slope * x + intercept
class ModelCustom(ModelDefault):
def _calculate_separability_matrix(self):
return np.array([[0, ]])
def test_custom_separability_matrix():
original = separability_matrix(ModelDefault(slope=1, intercept=2))
assert original.all()
custom = separability_matrix(ModelCustom(slope=1, intercept=2))
assert not custom.any()
|
1416b4978fcbbcd7eec8d4c40f3946568177945cbe2770c50ab73a66f83f1457 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests model set evaluation and fitting for some common use cases.
"""
import numpy as np
# pylint: disable=invalid-name
import pytest
from numpy.testing import assert_allclose
from astropy.modeling.core import Model
from astropy.modeling.fitting import FittingWithOutlierRemoval, LinearLSQFitter
from astropy.modeling.models import (Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D, Legendre1D,
Legendre2D, Linear1D, Planar2D, Polynomial1D, Polynomial2D)
from astropy.modeling.parameters import Parameter
from astropy.stats import sigma_clip
from astropy.utils import NumpyRNGContext
x = np.arange(4)
xx = np.array([x, x + 10])
xxx = np.arange(24).reshape((3, 4, 2))
_RANDOM_SEED = 0x1337
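# A model set evaluates N models of the same class in a single call; the
# model_set_axis of the parameter arrays (and, by default, of the inputs)
# selects which axis indexes the individual models.  A minimal sketch,
# using the module-level `xx` defined above:
#
#     >>> p = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2)
#     >>> p(xx)   # row i of xx is evaluated by model i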
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
# standard_broadcasting = False
n_inputs = 1
outputs = ('x',)
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(x, coeff, e):
return x*coeff + e
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_model1d_axis_1(model_class):
"""
Test that a model initialized with model_set_axis=1
can be evaluated with model_set_axis=False.
"""
n_models = 2
model_axis = 1
c0 = [[2, 3]]
c1 = [[1, 2]]
t1 = model_class(1, c0=2, c1=1)
t2 = model_class(1, c0=3, c1=2)
p1 = model_class(1, c0=c0, c1=c1, n_models=n_models, model_set_axis=model_axis)
with pytest.raises(ValueError):
p1(x)
y = p1(x, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0], t1(x))
assert_allclose(y[:, 1], t2(x))
y = p1(xx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :], t1(xx))
assert_allclose(y[:, 1, :], t2(xx))
y = p1(xxx, model_set_axis=False)
assert y.shape[model_axis] == n_models
assert_allclose(y[:, 0, :, :], t1(xxx))
assert_allclose(y[:, 1, :, :], t2(xxx))
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_model1d_axis_2(model_class):
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
p1 = model_class(1, c0=[[[1, 2, 3]]], c1=[[[10, 20, 30]]],
n_models=3, model_set_axis=2)
t1 = model_class(1, c0=1, c1=10)
t2 = model_class(1, c0=2, c1=20)
t3 = model_class(1, c0=3, c1=30)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
y = p1(x, model_set_axis=False)
assert y.shape == (1, 4, 3)
assert_allclose(y[:, :, 0].flatten(), t1(x))
assert_allclose(y[:, :, 1].flatten(), t2(x))
assert_allclose(y[:, :, 2].flatten(), t3(x))
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_model1d_axis_0(model_class):
"""
Test that a model initialized with model_set_axis=0
can be evaluated with model_set_axis=False.
"""
p1 = model_class(1, n_models=2, model_set_axis=0)
p1.c0 = [2, 3]
p1.c1 = [1, 2]
t1 = model_class(1, c0=2, c1=1)
t2 = model_class(1, c0=3, c1=2)
with pytest.raises(ValueError):
p1(x)
y = p1(xx)
assert len(y) == 2
assert_allclose(y[0], t1(xx[0]))
assert_allclose(y[1], t2(xx[1]))
y = p1(x, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(x))
assert_allclose(y[1], t2(x))
y = p1(xx, model_set_axis=False)
assert len(y) == 2
assert_allclose(y[0], t1(xx))
assert_allclose(y[1], t2(xx))
y = p1(xxx, model_set_axis=False)
assert_allclose(y[0], t1(xxx))
assert_allclose(y[1], t2(xxx))
assert len(y) == 2
@pytest.mark.parametrize('model_class', [Chebyshev2D, Legendre2D, Hermite2D])
def test_model2d_axis_2(model_class):
"""
Test that a model initialized with model_set_axis=2
can be evaluated with model_set_axis=False.
"""
p2 = model_class(1, 1, c0_0=[[[0, 1, 2]]], c0_1=[[[3, 4, 5]]],
c1_0=[[[5, 6, 7]]], c1_1=[[[1, 1, 1]]], n_models=3, model_set_axis=2)
t1 = model_class(1, 1, c0_0=0, c0_1=3, c1_0=5, c1_1=1)
t2 = model_class(1, 1, c0_0=1, c0_1=4, c1_0=6, c1_1=1)
t3 = model_class(1, 1, c0_0=2, c0_1=5, c1_0=7, c1_1=1)
assert p2.c0_0.shape == (1, 1, 3)
y = p2(x, x, model_set_axis=False)
assert y.shape == (1, 4, 3)
# These are columns along the 2nd axis.
assert_allclose(y[:, :, 0].flatten(), t1(x, x))
assert_allclose(y[:, :, 1].flatten(), t2(x, x))
assert_allclose(y[:, :, 2].flatten(), t3(x, x))
def test_negative_axis():
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
t1 = Polynomial1D(1, c0=1, c1=3)
t2 = Polynomial1D(1, c0=2, c1=4)
with pytest.raises(ValueError):
p1(x)
with pytest.raises(ValueError):
p1(xx)
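# model_set_axis=-1 means the last axis indexes the models, so the two
# data sets must be stacked along the last axis, hence the transpose.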
xxt = xx.T
y = p1(xxt)
assert_allclose(y[:, 0], t1(xxt[:, 0]))
assert_allclose(y[:, 1], t2(xxt[:, 1]))
def test_shapes():
p2 = Polynomial1D(1, n_models=3, model_set_axis=2)
assert p2.c0.shape == (1, 1, 3)
assert p2.c1.shape == (1, 1, 3)
p1 = Polynomial1D(1, n_models=2, model_set_axis=1)
assert p1.c0.shape == (1, 2)
assert p1.c1.shape == (1, 2)
p1 = Polynomial1D(1, c0=[1, 2], c1=[3, 4], n_models=2, model_set_axis=-1)
assert p1.c0.shape == (2,)
assert p1.c1.shape == (2,)
e1 = [1, 2]
e2 = [3, 4]
a1 = np.array([[10, 20], [30, 40]])
a2 = np.array([[50, 60], [70, 80]])
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=-1)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
t = TParModel([[a1, a2]], [[e1, e2]], n_models=2, model_set_axis=1)
assert t.coeff.shape == (1, 2, 2, 2)
assert t.e.shape == (1, 2, 2)
t = TParModel([a1, a2], [e1, e2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
t = TParModel([a1, a2], e=[1, 2], n_models=2, model_set_axis=0)
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2,)
def test_eval():
""" Tests evaluation of Linear1D and Planar2D with different model_set_axis."""
model = Linear1D(slope=[1, 2], intercept=[3, 4], n_models=2)
p = Polynomial1D(1, c0=[3, 4], c1=[1, 2], n_models=2)
assert_allclose(model(xx), p(xx))
assert_allclose(model(x, model_set_axis=False), p(x, model_set_axis=False))
with pytest.raises(ValueError):
model(x)
model = Linear1D(slope=[[1, 2]], intercept=[[3, 4]], n_models=2, model_set_axis=1)
p = Polynomial1D(1, c0=[[3, 4]], c1=[[1, 2]], n_models=2, model_set_axis=1)
assert_allclose(model(xx.T), p(xx.T))
assert_allclose(model(x, model_set_axis=False), p(x, model_set_axis=False))
with pytest.raises(ValueError):
model(xx)
model = Planar2D(slope_x=[1, 2], slope_y=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx, xx)
assert y.shape == (2, 4)
with pytest.raises(ValueError):
model(x)
# Test fitting
@pytest.mark.parametrize('model_class', [Polynomial1D, Chebyshev1D, Legendre1D, Hermite1D])
def test_linearlsqfitter(model_class):
"""
Issue #7159
"""
p = model_class(1, n_models=2, model_set_axis=1)
# Generate data for fitting 2 models and re-stack them along the last axis:
y = np.array([2*x+1, x+4])
y = np.rollaxis(y, 0, -1).T
f = LinearLSQFitter()
# This seems to fit the model_set correctly:
fit = f(p, x, y)
model_y = fit(x, model_set_axis=False)
m1 = model_class(1, c0=fit.c0[0][0], c1=fit.c1[0][0], domain=fit.domain)
m2 = model_class(1, c0=fit.c0[0][1], c1=fit.c1[0][1], domain=fit.domain)
assert_allclose(model_y[:, 0], m1(x))
assert_allclose(model_y[:, 1], m2(x))
p = model_class(1, n_models=2, model_set_axis=0)
fit = f(p, x, y.T)
def test_model_set_axis_outputs():
fitter = LinearLSQFitter()
model_set = Polynomial2D(1, n_models=2, model_set_axis=2)
y2, x2 = np.mgrid[:5, :5]
# z = np.moveaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 2)
z = np.rollaxis(np.array([x2 + y2, 1 - 0.1 * x2 + 0.2 * y2]), 0, 3)
model = fitter(model_set, x2, y2, z)
res = model(x2, y2, model_set_axis=False)
assert z.shape == res.shape
# Test initializing with integer model_set_axis
# and evaluating with a different model_set_axis
model_set = Polynomial1D(1, c0=[1, 2], c1=[2, 3],
n_models=2, model_set_axis=0)
y0 = model_set(xx)
y1 = model_set(xx.T, model_set_axis=1)
assert_allclose(y0[0], y1[:, 0])
assert_allclose(y0[1], y1[:, 1])
model_set = Polynomial1D(1, c0=[[1, 2]], c1=[[2, 3]],
n_models=2, model_set_axis=1)
y0 = model_set(xx.T)
y1 = model_set(xx, model_set_axis=0)
assert_allclose(y0[:, 0], y1[0])
assert_allclose(y0[:, 1], y1[1])
with pytest.raises(ValueError):
model_set(x)
def test_fitting_shapes():
""" Test fitting model sets of Linear1D and Planar2D."""
fitter = LinearLSQFitter()
model = Linear1D(slope=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx)
fit_model = fitter(model, x, y)
model = Linear1D(slope=[[1, 2]], intercept=[[3, 4]], n_models=2, model_set_axis=1)
fit_model = fitter(model, x, y.T)
model = Planar2D(slope_x=[1, 2], slope_y=[1, 2], intercept=[3, 4], n_models=2)
y = model(xx, xx)
fit_model = fitter(model, x, x, y)
def test_compound_model_sets():
with pytest.raises(ValueError):
Polynomial1D(1, n_models=2, model_set_axis=1) | Polynomial1D(1, n_models=2, model_set_axis=0)
def test_linear_fit_model_set_errors():
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y = init_model(x, model_set_axis=False)
fitter = LinearLSQFitter()
with pytest.raises(ValueError):
fitter(init_model, x[:5], y)
with pytest.raises(ValueError):
fitter(init_model, x, y[:, :5])
def test_linear_fit_model_set_common_weight():
"""Tests fitting multiple models simultaneously."""
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
weights = np.ones(10)
weights[[0, -1]] = 0
fitted_model = fitter(init_model, x, y, weights=weights)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
# Check that using null weights raises an error
# ValueError: On entry to DLASCL parameter number 4 had an illegal value
with pytest.raises(ValueError,
match='Found NaNs in the coefficient matrix'):
with pytest.warns(RuntimeWarning,
match=r'invalid value encountered in.*divide'):
fitted_model = fitter(init_model, x, y, weights=np.zeros(10))
def test_linear_fit_model_set_weights():
"""Tests fitting multiple models simultaneously."""
init_model = Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
weights = np.ones_like(y)
# Put a null weight for the min and max values
weights[[0, 1], y.argmin(axis=1)] = 0
weights[[0, 1], y.argmax(axis=1)] = 0
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, weights=weights)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
# Check that using null weights raises an error
weights[0] = 0
with pytest.raises(ValueError,
match='Found NaNs in the coefficient matrix'):
with pytest.warns(RuntimeWarning,
match=r'invalid value encountered in.*divide'):
fitted_model = fitter(init_model, x, y, weights=weights)
# Now we mask the values where weight is 0
with pytest.warns(RuntimeWarning,
match=r'invalid value encountered in.*divide'):
fitted_model = fitter(init_model, x,
np.ma.array(y, mask=np.isclose(weights, 0)),
weights=weights)
# Parameters for the first model are all NaNs
assert np.all(np.isnan(fitted_model.param_sets[:, 0]))
assert np.all(np.isnan(fitted_model(x, model_set_axis=False)[0]))
# Second model is fitted correctly
assert_allclose(fitted_model(x, model_set_axis=False)[1], y_expected[1],
rtol=1e-1)
def test_linear_fit_2d_model_set_errors():
init_model = Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z = init_model(x, y, model_set_axis=False)
fitter = LinearLSQFitter()
with pytest.raises(ValueError):
fitter(init_model, x[:5], y, z)
with pytest.raises(ValueError):
fitter(init_model, x, y, z[:, :5])
def test_linear_fit_2d_model_set_common_weight():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz, weights=np.ones((5, 5)))
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_flat_2d_model_set_common_weight():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
x, y = x.flatten(), y.flatten()
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
weights = np.ones(25)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz, weights=weights)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_2d_model_set_weights():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
weights = [np.ones((5, 5)), np.ones((5, 5))]
fitted_model = fitter(init_model, x, y, zz, weights=weights)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_flat_2d_model_set_weights():
init_model = Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
x, y = x.flatten(), y.flatten()
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
weights = np.ones((2, 25))
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz, weights=weights)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
class Test1ModelSet:
"""
Check that fitting a single model works with a length-1 model set axis.
It's not clear that this was originally intended usage, but it can be
convenient, e.g. when fitting a range of image rows that may be a single
row, and some existing scripts might rely on it working.
Currently this does not work with FittingWithOutlierRemoval.
"""
def setup_class(self):
self.x1 = np.arange(0, 10)
self.y1 = np.array([0.5 + 2.5*self.x1])
self.w1 = np.ones((10,))
self.y1[0, 8] = 100.
self.w1[8] = 0.
self.y2, self.x2 = np.mgrid[0:10, 0:10]
self.z2 = np.array([1 - 0.1*self.x2 + 0.2*self.y2])
self.w2 = np.ones((10, 10))
self.z2[0, 1, 2] = 100.
self.w2[1, 2] = 0.
def test_linear_1d_common_weights(self):
model = Polynomial1D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x1, self.y1, weights=self.w1)
assert_allclose(model.c0, 0.5, atol=1e-12)
assert_allclose(model.c1, 2.5, atol=1e-12)
def test_linear_1d_separate_weights(self):
model = Polynomial1D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x1, self.y1,
weights=self.w1[np.newaxis, ...])
assert_allclose(model.c0, 0.5, atol=1e-12)
assert_allclose(model.c1, 2.5, atol=1e-12)
def test_linear_1d_separate_weights_axis_1(self):
model = Polynomial1D(1, model_set_axis=1)
fitter = LinearLSQFitter()
model = fitter(model, self.x1, self.y1.T,
weights=self.w1[..., np.newaxis])
assert_allclose(model.c0, 0.5, atol=1e-12)
assert_allclose(model.c1, 2.5, atol=1e-12)
def test_linear_2d_common_weights(self):
model = Polynomial2D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x2, self.y2, self.z2, weights=self.w2)
assert_allclose(model.c0_0, 1., atol=1e-12)
assert_allclose(model.c1_0, -0.1, atol=1e-12)
assert_allclose(model.c0_1, 0.2, atol=1e-12)
def test_linear_2d_separate_weights(self):
model = Polynomial2D(1)
fitter = LinearLSQFitter()
model = fitter(model, self.x2, self.y2, self.z2,
weights=self.w2[np.newaxis, ...])
assert_allclose(model.c0_0, 1., atol=1e-12)
assert_allclose(model.c1_0, -0.1, atol=1e-12)
assert_allclose(model.c0_1, 0.2, atol=1e-12)
def test_linear_2d_separate_weights_axis_2(self):
model = Polynomial2D(1, model_set_axis=2)
fitter = LinearLSQFitter()
model = fitter(model, self.x2, self.y2, np.rollaxis(self.z2, 0, 3),
weights=self.w2[..., np.newaxis])
assert_allclose(model.c0_0, 1., atol=1e-12)
assert_allclose(model.c1_0, -0.1, atol=1e-12)
assert_allclose(model.c0_1, 0.2, atol=1e-12)
|
1323459bf540a398f03287c26fda2b743fec365763d0e14465f7c096783b3e89 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.modeling import models
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs import wcs
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg)
c2n = models.RotateCelestial2Native(crval[0] * u.deg, crval[1] * u.deg, lonpole * u.deg)
m = tan | n2c
minv = c2n | tan.inverse
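# The chained model m reproduces the core pixel-to-sky step that wcslib
# applies: TAN deprojection followed by the native-to-celestial spherical
# rotation; minv is the reverse chain used for the sky-to-pixel direction.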
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(40 * u.deg, -0.057 * u.rad), (21.5 * u.arcsec, 45.9 * u.deg)])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42 * u.deg, (43 * u.deg).to(u.arcsec), (44 * u.deg).to(u.rad)
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_quantity_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13 * u.deg)
assert_quantity_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13 * u.deg)
def test_Rotation2D():
model = models.Rotation2D(angle=90 * u.deg)
a, b = 1 * u.deg, 0 * u.deg
x, y = model(a, b)
assert_quantity_allclose([x, y], [0 * u.deg, 1 * u.deg], atol=1e-10 * u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494 * u.deg)
x, y = model.inverse(*model(1 * u.deg, 0 * u.deg))
assert_quantity_allclose([x, y], [1 * u.deg, 0 * u.deg], atol=1e-10 * u.deg)
def test_euler_angle_rotations():
ydeg = (90 * u.deg, 0 * u.deg)
y = (90, 0)
z = (0, 90)
# rotate y into minus z
model = models.EulerAngleRotation(0 * u.rad, np.pi / 2 * u.rad, 0 * u.rad, 'zxz')
assert_allclose(model(*z), y, atol=10**-12)
model = models.EulerAngleRotation(0 * u.deg, 90 * u.deg, 0 * u.deg, 'zxz')
assert_quantity_allclose(model(*(z * u.deg)), ydeg, atol=10**-12 * u.deg)
@pytest.mark.parametrize(('params'), [(60, 10, 25),
(60 * u.deg, 10 * u.deg, 25 * u.deg),
((60 * u.deg).to(u.rad),
(10 * u.deg).to(u.rad),
(25 * u.deg).to(u.rad))])
def test_euler_rotations_with_units(params):
x = 1 * u.deg
y = 1 * u.deg
phi, theta, psi = params
urot = models.EulerAngleRotation(phi, theta, psi, axes_order='xyz')
a, b = urot(x.value, y.value)
assert_allclose((a, b), (-23.614457631192547, 9.631254579686113))
a, b = urot(x, y)
assert_quantity_allclose((a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg))
a, b = urot(x.to(u.rad), y.to(u.rad))
assert_quantity_allclose((a, b), (-23.614457631192547 * u.deg, 9.631254579686113 * u.deg))
def test_attributes():
n2c = models.RotateNative2Celestial(20016 * u.arcsec, -72.3 * u.deg, np.pi * u.rad)
assert_allclose(n2c.lat.value, -72.3)
assert_allclose(n2c.lat._raw_value, -1.2618730491919001)
assert_allclose(n2c.lon.value, 20016)
assert_allclose(n2c.lon._raw_value, 0.09704030641088472)
assert_allclose(n2c.lon_pole.value, np.pi)
assert_allclose(n2c.lon_pole._raw_value, np.pi)
assert n2c.lon.unit is u.Unit("arcsec")
assert n2c.lon.internal_unit is u.Unit("rad")
assert n2c.lat.unit is u.Unit("deg")
assert n2c.lat.internal_unit is u.Unit("rad")
assert n2c.lon_pole.unit is u.Unit("rad")
assert n2c.lon_pole.internal_unit is u.Unit("rad")
|
513d74d24805fcd7341b7b23db7a14e4849dfe90a6c4ae5be6d4a50970fd224a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to evaluating models with quantity parameters
"""
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.modeling.core import Model
from astropy.modeling.models import Gaussian1D, Pix2Sky_TAN, Scale, Shift
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
# We start off by taking some simple cases where the units are defined by
# whatever the model is initialized with, and we check that the model evaluation
# returns quantities.
def test_evaluate_with_quantities():
"""
Test evaluation of a single model with Quantity parameters that do
not explicitly require units.
"""
# We create two models here - one with quantities, and one without. The one
# without is used to create the reference values for comparison.
g = Gaussian1D(1, 1, 0.1)
gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# We first check that calling the Gaussian with quantities returns the
# expected result
assert_quantity_allclose(gq(1 * u.m), g(1) * u.J)
# Units have to be specified for the Gaussian with quantities - if not, an
# error is raised
with pytest.raises(UnitsError) as exc:
gq(1)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', (dimensionless), could not be "
"converted to required input units of m (length)")
# However, zero is a special case: a bare 0 is accepted because it maps to
# the same value in any linear unit
assert_quantity_allclose(gq(0), g(0) * u.J)
# We can also evaluate models with equivalent units
assert_allclose(gq(0.0005 * u.km).value, g(0.5))
# But not with incompatible units
with pytest.raises(UnitsError) as exc:
gq(3 * u.s)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', s (time), could not be "
"converted to required input units of m (length)")
# We also can't evaluate the model without quantities with a quantity
with pytest.raises(UnitsError) as exc:
g(3 * u.m)
# TODO: determine what error message should be here
# assert exc.value.args[0] == ("Units of input 'x', m (length), could not be "
# "converted to required dimensionless input")
def test_evaluate_with_quantities_and_equivalencies():
"""
We now make sure that equivalencies are correctly taken into account
"""
g = Gaussian1D(1 * u.Jy, 10 * u.nm, 2 * u.nm)
# We aren't setting the equivalencies, so this won't work
with pytest.raises(UnitsError) as exc:
g(30 * u.PHz)
assert exc.value.args[0] == ("Gaussian1D: Units of input 'x', PHz (frequency), could "
"not be converted to required input units of "
"nm (length)")
# But it should now work if we pass equivalencies when evaluating
assert_quantity_allclose(g(30 * u.PHz, equivalencies={'x': u.spectral()}),
g(9.993081933333332 * u.nm))
class MyTestModel(Model):
n_inputs = 2
n_outputs = 1
def evaluate(self, a, b):
print('a', a)
print('b', b)
return a * b
class TestInputUnits():
def setup_method(self, method):
self.model = MyTestModel()
def test_evaluate(self):
# We should be able to evaluate with anything
assert_quantity_allclose(self.model(3, 5), 15)
assert_quantity_allclose(self.model(4 * u.m, 5), 20 * u.m)
assert_quantity_allclose(self.model(3 * u.deg, 5), 15 * u.deg)
def test_input_units(self):
self.model._input_units = {'x': u.deg}
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
assert_quantity_allclose(self.model(4 * u.rad, 2 * u.s), 8 * u.rad * u.s)
with pytest.raises(UnitsError) as exc:
self.model(4 * u.s, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'x', s (time), could not be "
"converted to required input units of deg (angle)")
with pytest.raises(UnitsError) as exc:
self.model(3, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'x', (dimensionless), could "
"not be converted to required input units of deg (angle)")
def test_input_units_allow_dimensionless(self):
self.model._input_units = {'x': u.deg}
self.model._input_units_allow_dimensionless = True
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
assert_quantity_allclose(self.model(4 * u.rad, 2), 8 * u.rad)
with pytest.raises(UnitsError) as exc:
self.model(4 * u.s, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'x', s (time), could not be "
"converted to required input units of deg (angle)")
assert_quantity_allclose(self.model(3, 3), 9)
def test_input_units_strict(self):
self.model._input_units = {'x': u.deg}
self.model._input_units_strict = True
assert_quantity_allclose(self.model(3 * u.deg, 4), 12 * u.deg)
result = self.model(np.pi * u.rad, 2)
assert_quantity_allclose(result, 360 * u.deg)
assert result.unit is u.deg
def test_input_units_equivalencies(self):
self.model._input_units = {'x': u.micron}
with pytest.raises(UnitsError) as exc:
self.model(3 * u.PHz, 3)
assert exc.value.args[0] == ("MyTestModel: Units of input 'x', PHz (frequency), could "
"not be converted to required input units of "
"micron (length)")
self.model.input_units_equivalencies = {'x': u.spectral()}
assert_quantity_allclose(self.model(3 * u.PHz, 3),
3 * (3 * u.PHz).to(u.micron, equivalencies=u.spectral()))
def test_return_units(self):
self.model._input_units = {'z': u.deg}
self.model._return_units = {'z': u.rad}
result = self.model(3 * u.deg, 4)
assert_quantity_allclose(result, 12 * u.deg)
assert result.unit is u.rad
def test_return_units_scalar(self):
# Check that return_units also works when giving a single unit since
# there is only one output, so is unambiguous.
self.model._input_units = {'x': u.deg}
self.model._return_units = u.rad
result = self.model(3 * u.deg, 4)
assert_quantity_allclose(result, 12 * u.deg)
assert result.unit is u.rad
def test_and_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 & s2
out = cs(10 * u.arcsecond, 20 * u.arcsecond)
assert_quantity_allclose(out[0], 10 * u.deg + 10 * u.arcsec)
assert_quantity_allclose(out[1], 10 * u.deg + 20 * u.arcsec)
def test_plus_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 + s2
out = cs(10 * u.arcsecond)
assert_quantity_allclose(out, 20 * u.deg + 20 * u.arcsec)
def test_compound_input_units():
"""
Test units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
out = cs(10 * u.arcsecond)
assert_quantity_allclose(out, 20 * u.deg + 10 * u.arcsec)
def test_compound_input_units_fail():
"""
Test incompatible units to first model in chain.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
with pytest.raises(UnitsError):
cs(10 * u.pix)
def test_compound_incompatible_units_fail():
"""
Test incompatible model units in chain.
"""
s1 = Shift(10 * u.pix)
s2 = Shift(10 * u.deg)
cs = s1 | s2
with pytest.raises(UnitsError):
cs(10 * u.pix)
def test_compound_pipe_equiv_call():
"""
Check that equivalencies work when passed to evaluate, for a chained model
(which has one input).
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 | s2
out = cs(10 * u.pix, equivalencies={'x': u.pixel_scale(0.5 * u.deg / u.pix)})
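# 10 pix -> 5 deg through the 0.5 deg/pix scale, then two +10 deg shifts
# give 25 deg.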
assert_quantity_allclose(out, 25 * u.deg)
def test_compound_and_equiv_call():
"""
Check that equivalencies work when passed to evaluate, for a composite model
with two inputs.
"""
s1 = Shift(10 * u.deg)
s2 = Shift(10 * u.deg)
cs = s1 & s2
out = cs(10 * u.pix, 10 * u.pix, equivalencies={'x0': u.pixel_scale(0.5 * u.deg / u.pix),
'x1': u.pixel_scale(0.5 * u.deg / u.pix)})
assert_quantity_allclose(out[0], 15 * u.deg)
assert_quantity_allclose(out[1], 15 * u.deg)
def test_compound_input_units_equivalencies():
"""
Test setting input_units_equivalencies on one of the models.
"""
s1 = Shift(10 * u.deg)
s1.input_units_equivalencies = {'x': u.pixel_scale(0.5 * u.deg / u.pix)}
s2 = Shift(10 * u.deg)
sp = Shift(10 * u.pix)
cs = s1 | s2
assert cs.input_units_equivalencies == {'x': u.pixel_scale(0.5 * u.deg / u.pix)}
out = cs(10 * u.pix)
assert_quantity_allclose(out, 25 * u.deg)
cs = sp | s1
assert cs.input_units_equivalencies is None
out = cs(10 * u.pix)
assert_quantity_allclose(out, 20 * u.deg)
cs = s1 & s2
assert cs.input_units_equivalencies == {'x0': u.pixel_scale(0.5 * u.deg / u.pix)}
cs = cs.rename('TestModel')
out = cs(20 * u.pix, 10 * u.deg)
assert_quantity_allclose(out, 20 * u.deg)
with pytest.raises(UnitsError) as exc:
out = cs(20 * u.pix, 10 * u.pix)
assert exc.value.args[0] == "Shift: Units of input 'x', pix (unknown), could not be converted to required input units of deg (angle)"
def test_compound_input_units_strict():
"""
Test setting input_units_strict on one of the models.
"""
class ScaleDegrees(Scale):
input_units = {'x': u.deg}
s1 = ScaleDegrees(2)
s2 = Scale(2)
cs = s1 | s2
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
assert out.unit is u.deg # important since this tests input_units_strict
cs = s2 | s1
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
assert out.unit is u.deg # important since this tests input_units_strict
cs = s1 & s2
out = cs(10 * u.arcsec, 10 * u.arcsec)
assert_quantity_allclose(out, 20 * u.arcsec)
assert out[0].unit is u.deg
assert out[1].unit is u.arcsec
def test_compound_input_units_allow_dimensionless():
"""
Test setting input_units_allow_dimensionless on one of the models.
"""
class ScaleDegrees(Scale):
input_units = {'x': u.deg}
s1 = ScaleDegrees(2)
s1._input_units_allow_dimensionless = True
s2 = Scale(2)
cs = s1 | s2
cs = cs.rename('TestModel')
out = cs(10)
assert_quantity_allclose(out, 40 * u.one)
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10 * u.m)
assert exc.value.args[0] == ("ScaleDegrees: Units of input 'x', m (length), "
"could not be converted to required input units of deg (angle)")
s1._input_units_allow_dimensionless = False
cs = s1 | s2
cs = cs.rename('TestModel')
with pytest.raises(UnitsError) as exc:
out = cs(10)
assert exc.value.args[0] == ("ScaleDegrees: Units of input 'x', (dimensionless), "
"could not be converted to required input units of deg (angle)")
s1._input_units_allow_dimensionless = True
cs = s2 | s1
cs = cs.rename('TestModel')
out = cs(10)
assert_quantity_allclose(out, 40 * u.one)
out = cs(10 * u.arcsec)
assert_quantity_allclose(out, 40 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10 * u.m)
assert exc.value.args[0] == ("ScaleDegrees: Units of input 'x', m (length), "
"could not be converted to required input units of deg (angle)")
s1._input_units_allow_dimensionless = False
cs = s2 | s1
with pytest.raises(UnitsError) as exc:
out = cs(10)
assert exc.value.args[0] == ("ScaleDegrees: Units of input 'x', (dimensionless), "
"could not be converted to required input units of deg (angle)")
s1._input_units_allow_dimensionless = True
s1 = ScaleDegrees(2)
s1._input_units_allow_dimensionless = True
s2 = ScaleDegrees(2)
s2._input_units_allow_dimensionless = False
cs = s1 & s2
cs = cs.rename('TestModel')
out = cs(10, 10 * u.arcsec)
assert_quantity_allclose(out[0], 20 * u.one)
assert_quantity_allclose(out[1], 20 * u.arcsec)
with pytest.raises(UnitsError) as exc:
out = cs(10, 10)
assert exc.value.args[0] == ("ScaleDegrees: Units of input 'x', (dimensionless), "
"could not be converted to required input units of deg (angle)")
def test_compound_return_units():
"""
Test that return_units on the first model in the chain is respected for the
input to the second.
"""
class PassModel(Model):
n_inputs = 2
n_outputs = 2
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def input_units(self):
""" Input units. """
return {'x0': u.deg, 'x1': u.deg}
@property
def return_units(self):
""" Output units. """
return {'x0': u.deg, 'x1': u.deg}
def evaluate(self, x, y):
return x.value, y.value
cs = Pix2Sky_TAN() | PassModel()
assert_quantity_allclose(cs(0*u.deg, 0*u.deg), (0, 90)*u.deg)
|
7e607aa678a47fee419618e92126f222c8a315949360765e0cb7833a7426ffaa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to fitting models with quantity parameters
"""
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
# Fitting should be as intuitive as possible to the user. Essentially, models
# and fitting should work without units, but if one has units, the other should
# have units too, and the resulting fitted parameters will also have units.
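# A sketch of the intended round trip (the values are illustrative only):
#
#     >>> fit = fitting.LevMarLSQFitter()
#     >>> g = fit(models.Gaussian1D(), x, y)   # x in m, y in Jy
#     >>> g.mean.quantity                      # -> a Quantity in m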
def _fake_gaussian_data():
# Generate fake data
with NumpyRNGContext(12345):
x = np.linspace(-5., 5., 2000)
y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
y += np.random.normal(0., 0.2, x.shape)
# Attach units to data
x = x * u.m
y = y * u.Jy
return x, y
compound_models_no_units = [
models.Linear1D() + models.Gaussian1D() + models.Gaussian1D(),
models.Linear1D() + models.Gaussian1D() | models.Scale(),
models.Linear1D() + models.Gaussian1D() | models.Shift(),
]
class CustomInputNamesModel(Fittable1DModel):
n_inputs = 1
n_outputs = 1
a = Parameter(default=1.0)
b = Parameter(default=1.0)
def __init__(self, a=a, b=b):
super().__init__(a=a, b=b)
self.inputs = ('inn',)
self.outputs = ('out',)
@staticmethod
def evaluate(inn, a, b):
return a * inn + b
@property
def input_units(self):
if self.a.unit is None and self.b.unit is None:
return None
else:
return {'inn': self.b.unit / self.a.unit}
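# _parameter_units_for_data_units maps the units of the data being fit to
# the unit each parameter should acquire; the fitter strips units for the
# numerical optimization and restores them from this mapping afterwards.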
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'a': outputs_unit['out'] / inputs_unit['inn'],
'b': outputs_unit['out']
}
def models_with_custom_names():
line = models.Linear1D(1 * u.m / u.s, 2 * u.m)
line.inputs = ('inn',)
line.outputs = ('out',)
custom_names_model = CustomInputNamesModel(1 * u.m / u.s, 2 * u.m)
return [line, custom_names_model]
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_simple():
x, y = _fake_gaussian_data()
# Fit the data using a Gaussian with units
g_init = models.Gaussian1D()
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
# TODO: update actual numerical results once implemented, but these should
# be close to the values below.
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_with_initial_values():
x, y = _fake_gaussian_data()
# Fit the data using a Gaussian with units
g_init = models.Gaussian1D(amplitude=1. * u.mJy,
mean=3 * u.cm,
stddev=2 * u.mm)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
# TODO: update actual numerical results once implemented, but these should
# be close to the values below.
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_missing_data_units():
"""
Raise an error if the model has units but the data doesn't
"""
class UnorderedGaussian1D(models.Gaussian1D):
# Parameters are ordered differently here from Gaussian1D
# to ensure the order does not break functionality.
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'amplitude': outputs_unit['y'],
'mean': inputs_unit['x'],
'stddev': inputs_unit['x']}
g_init = UnorderedGaussian1D(amplitude=1. * u.mJy,
mean=3 * u.cm,
stddev=2 * u.mm)
fit_g = fitting.LevMarLSQFitter()
# We define the flux unit so that conversion fails at the wavelength unit.
# This works because parameter unit conversion appears to follow the order
# defined in the _parameter_units_for_data_units method.
with pytest.raises(UnitsError) as exc:
fit_g(g_init, [1, 2, 3],
[4, 5, 6] * (u.erg / (u.s * u.cm * u.cm * u.Hz)))
assert exc.value.args[0] == ("'cm' (length) and '' (dimensionless) are "
"not convertible")
with pytest.raises(UnitsError) as exc:
fit_g(g_init, [1, 2, 3] * u.m, [4, 5, 6])
assert exc.value.args[0] == ("'mJy' (spectral flux density) and '' "
"(dimensionless) are not convertible")
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_missing_model_units():
"""
Proceed if the data has units but the model doesn't
"""
x, y = _fake_gaussian_data()
g_init = models.Gaussian1D(amplitude=1., mean=3, stddev=2)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
g_init = models.Gaussian1D(amplitude=1., mean=3 * u.m, stddev=2 * u.m)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fitting_incompatible_units():
"""
Raise an error if the data and model have incompatible units
"""
g_init = models.Gaussian1D(amplitude=1. * u.Jy,
mean=3 * u.m,
stddev=2 * u.cm)
fit_g = fitting.LevMarLSQFitter()
with pytest.raises(UnitsError) as exc:
fit_g(g_init, [1, 2, 3] * u.Hz, [4, 5, 6] * u.Jy)
assert exc.value.args[0] == ("'Hz' (frequency) and 'm' (length) are not convertible")
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.parametrize('model', compound_models_no_units)
def test_compound_without_units(model):
x = np.linspace(-5, 5, 10) * u.Angstrom
with NumpyRNGContext(12345):
y = np.random.sample(10)
fitter = fitting.LevMarLSQFitter()
res_fit = fitter(model, x, y * u.Hz)
for param_name in res_fit.param_names:
print(getattr(res_fit, param_name))
assert all([res_fit[i]._has_units for i in range(3)])
z = res_fit(x)
assert isinstance(z, u.Quantity)
res_fit = fitter(model, np.arange(10) * u.Unit('Angstrom'), y)
assert all([res_fit[i]._has_units for i in range(3)])
z = res_fit(x)
assert isinstance(z, np.ndarray)
# FIXME: See https://github.com/astropy/astropy/issues/10675
# @pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.skip(reason='Flaky and ill-conditioned')
def test_compound_fitting_with_units():
x = np.linspace(-5, 5, 15) * u.Angstrom
y = np.linspace(-5, 5, 15) * u.Angstrom
fitter = fitting.LevMarLSQFitter()
m = models.Gaussian2D(10*u.Hz,
3*u.Angstrom, 4*u.Angstrom,
1*u.Angstrom, 2*u.Angstrom)
p = models.Planar2D(3*u.Hz/u.Angstrom, 4*u.Hz/u.Angstrom, 1*u.Hz)
model = m + p
z = model(x, y)
res = fitter(model, x, y, z)
assert isinstance(res(x, y), np.ndarray)
assert all([res[i]._has_units for i in range(2)])
model = models.Gaussian2D() + models.Planar2D()
res = fitter(model, x, y, z)
assert isinstance(res(x, y), np.ndarray)
assert all([res[i]._has_units for i in range(2)])
# A case of a mixture of models with and without units
model = models.BlackBody(temperature=3000 * u.K) * models.Const1D(amplitude=1.0)
x = np.linspace(1, 3, 10000) * u.micron
with NumpyRNGContext(12345):
n = np.random.normal(3)
y = model(x)
res = fitter(model, x, y * (1 + n))
# The large rtol here is due to different results on linux and macosx, likely
# the model is ill-conditioned.
np.testing.assert_allclose(res.parameters, [3000, 2.1433621e+00, 2.647347e+00], rtol=0.4)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters*')
@pytest.mark.parametrize('model', models_with_custom_names())
def test_fitting_custom_names(model):
""" Tests fitting of models with custom inputs and outsputs names."""
x = np.linspace(0, 10, 100) * u.s
y = model(x)
fitter = fitting.LevMarLSQFitter()
new_model = fitter(model, x, y)
for param_name in model.param_names:
assert_quantity_allclose(getattr(new_model, param_name).quantity,
getattr(model, param_name).quantity)
|
c006063ca90d8d32205373e50256e6cb981fd70a0b76b43ef758cdece10c9817 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests fitting and model evaluation with various inputs
"""
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.modeling import fitting, models
from astropy.modeling.core import Fittable1DModel, FittableModel, Model
from astropy.modeling.parameters import Parameter
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
model1d_params = [
(models.Polynomial1D, [2]),
(models.Legendre1D, [2]),
(models.Chebyshev1D, [2]),
(models.Shift, [2]),
(models.Scale, [2])
]
model2d_params = [
(models.Polynomial2D, [2]),
(models.Legendre2D, [1, 2]),
(models.Chebyshev2D, [1, 2])
]
class TestInputType:
"""
This class tests that models accept numbers, lists and arrays.
Add new models to one of the lists above to test for this.
"""
def setup_class(self):
self.x = 5.3
self.y = 6.7
self.x1 = np.arange(1, 10, .1)
self.y1 = np.arange(1, 10, .1)
self.y2, self.x2 = np.mgrid[:10, :8]
@pytest.mark.parametrize(('model', 'params'), model1d_params)
def test_input1D(self, model, params):
m = model(*params)
m(self.x)
m(self.x1)
m(self.x2)
@pytest.mark.parametrize(('model', 'params'), model2d_params)
def test_input2D(self, model, params):
m = model(*params)
m(self.x, self.y)
m(self.x1, self.y1)
m(self.x2, self.y2)
class TestFitting:
"""Test various input options to fitting routines."""
def setup_class(self):
self.x1 = np.arange(10)
self.y, self.x = np.mgrid[:10, :10]
def test_linear_fitter_1set(self):
"""1 set 1D x, 1pset"""
expected = np.array([0, 1, 1, 1])
p1 = models.Polynomial1D(3)
p1.parameters = [0, 1, 1, 1]
y1 = p1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.parameters, expected, atol=10 ** (-7))
def test_linear_fitter_Nset(self):
"""1 set 1D x, 2 sets 1D y, 2 param_sets"""
expected = np.array([[0, 0], [1, 1], [2, 2], [3, 3]])
p1 = models.Polynomial1D(3, n_models=2)
p1.parameters = [0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0]
params = {}
for i in range(4):
params[p1.param_names[i]] = [i, i]
p1 = models.Polynomial1D(3, model_set_axis=0, **params)
y1 = p1(self.x1, model_set_axis=False)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-7))
def test_linear_fitter_1dcheb(self):
"""1 pset, 1 set 1D x, 1 set 1D y, Chebyshev 1D polynomial"""
expected = np.array(
[[2817.2499999999995,
4226.6249999999991,
1680.7500000000009,
273.37499999999926]]).T
ch1 = models.Chebyshev1D(3)
ch1.parameters = [0, 1, 2, 3]
y1 = ch1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(ch1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-2))
def test_linear_fitter_1dlegend(self):
"""
1 pset, 1 set 1D x, 1 set 1D y, Legendre 1D polynomial
"""
expected = np.array(
[[1925.5000000000011,
3444.7500000000005,
1883.2500000000014,
364.4999999999996]]).T
leg1 = models.Legendre1D(3)
leg1.parameters = [1, 2, 3, 4]
y1 = leg1(self.x1)
pfit = fitting.LinearLSQFitter()
model = pfit(leg1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-12))
def test_linear_fitter_1set2d(self):
p2 = models.Polynomial2D(2)
p2.parameters = [0, 1, 2, 3, 4, 5]
expected = [0, 1, 2, 3, 4, 5]
z = p2(self.x, self.y)
pfit = fitting.LinearLSQFitter()
model = pfit(p2, self.x, self.y, z)
assert_allclose(model.parameters, expected, atol=10 ** (-12))
assert_allclose(model(self.x, self.y), z, atol=10 ** (-12))
def test_wrong_numpset(self):
"""
A ValueError is raised if a single data set (1D x, 1D y) is fit
with a model that has multiple parameter sets.
"""
with pytest.raises(ValueError):
p1 = models.Polynomial1D(5)
y1 = p1(self.x1)
p1 = models.Polynomial1D(5, n_models=2)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
def test_wrong_pset(self):
"""A case of 1 set of x and multiple sets of y and parameters."""
expected = np.array([[1., 0],
[1, 1],
[1, 2],
[1, 3],
[1, 4],
[1, 5]])
p1 = models.Polynomial1D(5, n_models=2)
params = {}
for i in range(6):
params[p1.param_names[i]] = [1, i]
p1 = models.Polynomial1D(5, model_set_axis=0, **params)
y1 = p1(self.x1, model_set_axis=False)
pfit = fitting.LinearLSQFitter()
model = pfit(p1, self.x1, y1)
assert_allclose(model.param_sets, expected, atol=10 ** (-7))
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_1set_1d(self):
"""1 set 1D x, 1 set 1D y, 1 pset NonLinearFitter"""
g1 = models.Gaussian1D(10, mean=3, stddev=.2)
y1 = g1(self.x1)
gfit = fitting.LevMarLSQFitter()
model = gfit(g1, self.x1, y1)
assert_allclose(model.parameters, [10, 3, .2])
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_Nset_1d(self):
"""1 set 1D x, 1 set 1D y, 2 param_sets, NonLinearFitter"""
with pytest.raises(ValueError):
g1 = models.Gaussian1D([10.2, 10], mean=[3, 3.2], stddev=[.23, .2],
n_models=2)
y1 = g1(self.x1, model_set_axis=False)
gfit = fitting.LevMarLSQFitter()
model = gfit(g1, self.x1, y1)
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_1set_2d(self):
"""1 set 2d x, 1set 2D y, 1 pset, NonLinearFitter"""
g2 = models.Gaussian2D(10, x_mean=3, y_mean=4, x_stddev=.3,
y_stddev=.2, theta=0)
z = g2(self.x, self.y)
gfit = fitting.LevMarLSQFitter()
model = gfit(g2, self.x, self.y, z)
assert_allclose(model.parameters, [10, 3, 4, .3, .2, 0])
@pytest.mark.skipif('not HAS_SCIPY')
def test_nonlinear_lsqt_Nset_2d(self):
"""1 set 2d x, 1set 2D y, 2 param_sets, NonLinearFitter"""
with pytest.raises(ValueError):
g2 = models.Gaussian2D([10, 10], [3, 3], [4, 4], x_stddev=[.3, .3],
y_stddev=[.2, .2], theta=[0, 0], n_models=2)
z = g2(self.x.flatten(), self.y.flatten())
gfit = fitting.LevMarLSQFitter()
model = gfit(g2, self.x, self.y, z)
class TestEvaluation:
"""
    Test various input options to model evaluation.
    TestFitting actually covers evaluation of polynomials.
"""
def setup_class(self):
self.x1 = np.arange(20)
self.y, self.x = np.mgrid[:10, :10]
def test_non_linear_NYset(self):
"""
This case covers:
        N param sets, 1 set of 1D x --> N sets of 1D y data
"""
g1 = models.Gaussian1D([10, 10], [3, 3], [.2, .2], n_models=2)
y1 = g1(self.x1, model_set_axis=False)
        assert np.all(y1[0, :] == y1[1, :])
def test_non_linear_NXYset(self):
"""
        This case covers: N param sets, N sets of 1D x --> N sets of 1D y data
"""
g1 = models.Gaussian1D([10, 10], [3, 3], [.2, .2], n_models=2)
xx = np.array([self.x1, self.x1])
y1 = g1(xx)
assert_allclose(y1[:, 0], y1[:, 1], atol=10 ** (-12))
def test_p1_1set_1pset(self):
"""1 data set, 1 pset, Polynomial1D"""
p1 = models.Polynomial1D(4)
y1 = p1(self.x1)
assert y1.shape == (20,)
def test_p1_nset_npset(self):
"""N data sets, N param_sets, Polynomial1D"""
p1 = models.Polynomial1D(4, n_models=2)
y1 = p1(np.array([self.x1, self.x1]).T, model_set_axis=-1)
assert y1.shape == (20, 2)
assert_allclose(y1[0, :], y1[1, :], atol=10 ** (-12))
def test_p2_1set_1pset(self):
"""1 pset, 1 2D data set, Polynomial2D"""
p2 = models.Polynomial2D(5)
z = p2(self.x, self.y)
assert z.shape == (10, 10)
def test_p2_nset_npset(self):
"""N param_sets, N 2D data sets, Poly2d"""
p2 = models.Polynomial2D(5, n_models=2)
xx = np.array([self.x, self.x])
yy = np.array([self.y, self.y])
z = p2(xx, yy)
assert z.shape == (2, 10, 10)
def test_nset_domain(self):
"""
Test model set with negative model_set_axis.
In this case model_set_axis=-1 is identical to model_set_axis=1.
"""
xx = np.array([self.x1, self.x1]).T
xx[0, 0] = 100
xx[1, 0] = 100
xx[2, 0] = 99
p1 = models.Polynomial1D(5, c0=[1, 2], c1=[3, 4], n_models=2)
yy = p1(xx, model_set_axis=-1)
assert_allclose(xx.shape, yy.shape)
yy1 = p1(xx, model_set_axis=1)
assert_allclose(yy, yy1)
        # Check each model in the set against the equivalent single model
        # with the same coefficients.
        p1_first = models.Polynomial1D(5, c0=1, c1=3)
        p1_second = models.Polynomial1D(5, c0=2, c1=4)
        assert_allclose(p1_first(xx[:, 0]), yy[:, 0], atol=10 ** (-12))
        assert_allclose(p1_second(xx[:, 1]), yy[:, 1], atol=10 ** (-12))
def test_evaluate_gauss2d(self):
cov = np.array([[1., 0.8], [0.8, 3]])
g = models.Gaussian2D(1., 5., 4., cov_matrix=cov)
y, x = np.mgrid[:10, :10]
g(x, y)
class TModel_1_1(Fittable1DModel):
p1 = Parameter()
p2 = Parameter()
@staticmethod
def evaluate(x, p1, p2):
return x + p1 + p2
class TestSingleInputSingleOutputSingleModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = n_output = 1 on a toy model with n_models=1.
Many of these tests mirror test cases in
``astropy.modeling.tests.test_parameters.TestParameterInitialization``,
except that this tests how different parameter arrangements interact with
different types of model inputs.
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a scalar.
"""
t = TModel_1_1(1, 10)
y = t(100)
assert isinstance(y, float)
assert np.ndim(y) == 0
assert y == 111
def test_scalar_parameters_1d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(5) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (5,)
assert np.all(y == [11, 111, 211, 311, 411])
def test_scalar_parameters_2d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(6).reshape(2, 3) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2, 3)
assert np.all(y == [[11, 111, 211],
[311, 411, 511]])
def test_scalar_parameters_3d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(12).reshape(2, 3, 2) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2, 3, 2)
assert np.all(y == [[[11, 111], [211, 311], [411, 511]],
[[611, 711], [811, 911], [1011, 1111]]])
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_1([1, 2], [10, 20])
y = t(100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2,)
assert np.all(y == [111, 122])
def test_1d_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([1, 2], [10, 20])
y1 = t([100, 200])
assert np.shape(y1) == (2,)
assert np.all(y1 == [111, 222])
y2 = t([[100], [200]])
assert np.shape(y2) == (2, 2)
assert np.all(y2 == [[111, 122], [211, 222]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3 = t([100, 200, 300])
def test_2d_array_parameters_2d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([[1, 2], [3, 4]], [[10, 20], [30, 40]])
y1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == (2, 2)
assert np.all(y1 == [[111, 222], [333, 444]])
y2 = t([[[[100]], [[200]]], [[[300]], [[400]]]])
assert np.shape(y2) == (2, 2, 2, 2)
assert np.all(y2 == [[[[111, 122], [133, 144]],
[[211, 222], [233, 244]]],
[[[311, 322], [333, 344]],
[[411, 422], [433, 444]]]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3 = t([[100, 200, 300], [400, 500, 600]])
def test_mixed_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[1, 2, 3])
y1 = t([10, 20, 30])
assert np.shape(y1) == (2, 2, 3)
assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]])
y2 = t([[[[10]]], [[[20]]], [[[30]]]])
assert np.shape(y2) == (3, 2, 2, 3)
assert_allclose(y2, [[[[11.01, 12.02, 13.03],
[11.04, 12.05, 13.06]],
[[11.07, 12.08, 13.09],
[11.10, 12.11, 13.12]]],
[[[21.01, 22.02, 23.03],
[21.04, 22.05, 23.06]],
[[21.07, 22.08, 23.09],
[21.10, 22.11, 23.12]]],
[[[31.01, 32.02, 33.03],
[31.04, 32.05, 33.06]],
[[31.07, 32.08, 33.09],
[31.10, 32.11, 33.12]]]])
class TestSingleInputSingleOutputTwoModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = n_output = 1 on a toy model with n_models=2.
Many of these tests mirror test cases in
``astropy.modeling.tests.test_parameters.TestParameterInitialization``,
except that this tests how different parameter arrangements interact with
different types of model inputs.
With n_models=2 all outputs should have a first dimension of size 2 (unless
defined with model_set_axis != 0).
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a 1-D array with
size equal to the number of models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
y = t(100)
assert np.shape(y) == (2,)
assert np.all(y == [111, 122])
def test_scalar_parameters_1d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
with pytest.raises(ValueError):
y = t(np.arange(5) * 100)
y1 = t([100, 200])
assert np.shape(y1) == (2,)
assert np.all(y1 == [111, 222])
y2 = t([100, 200], model_set_axis=False)
        # In this case the value [100, 200] should be evaluated by each
        # model rather than evaluating the first model with 100 and the
        # second model with 200
assert np.shape(y2) == (2, 2)
assert np.all(y2 == [[111, 211], [122, 222]])
y3 = t([100, 200, 300], model_set_axis=False)
assert np.shape(y3) == (2, 3)
assert np.all(y3 == [[111, 211, 311], [122, 222, 322]])
def test_scalar_parameters_2d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
y1 = t(np.arange(6).reshape(2, 3) * 100)
assert np.shape(y1) == (2, 3)
assert np.all(y1 == [[11, 111, 211],
[322, 422, 522]])
y2 = t(np.arange(6).reshape(2, 3) * 100, model_set_axis=False)
assert np.shape(y2) == (2, 2, 3)
assert np.all(y2 == [[[11, 111, 211], [311, 411, 511]],
[[22, 122, 222], [322, 422, 522]]])
def test_scalar_parameters_3d_array_input(self):
"""
The dimension of the input should match the number of models unless
model_set_axis=False is given, in which case the input is copied across
all models.
"""
t = TModel_1_1([1, 2], [10, 20], n_models=2)
data = np.arange(12).reshape(2, 3, 2) * 100
y1 = t(data)
assert np.shape(y1) == (2, 3, 2)
assert np.all(y1 == [[[11, 111], [211, 311], [411, 511]],
[[622, 722], [822, 922], [1022, 1122]]])
y2 = t(data, model_set_axis=False)
assert np.shape(y2) == (2, 2, 3, 2)
assert np.all(y2 == np.array([data + 11, data + 22]))
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_1([[1, 2, 3], [4, 5, 6]],
[[10, 20, 30], [40, 50, 60]], n_models=2)
y = t(100)
assert np.shape(y) == (2, 3)
assert np.all(y == [[111, 122, 133], [144, 155, 166]])
def test_1d_array_parameters_1d_array_input(self):
"""
When the input is an array, if model_set_axis=False then it must
broadcast with the shapes of the parameters (excluding the
model_set_axis).
Otherwise all dimensions must be broadcastable.
"""
t = TModel_1_1([[1, 2, 3], [4, 5, 6]],
[[10, 20, 30], [40, 50, 60]], n_models=2)
with pytest.raises(ValueError):
y1 = t([100, 200, 300])
y1 = t([100, 200])
assert np.shape(y1) == (2, 3)
assert np.all(y1 == [[111, 122, 133], [244, 255, 266]])
with pytest.raises(ValueError):
# Doesn't broadcast with the shape of the parameters, (3,)
y2 = t([100, 200], model_set_axis=False)
y2 = t([100, 200, 300], model_set_axis=False)
assert np.shape(y2) == (2, 3)
assert np.all(y2 == [[111, 222, 333],
[144, 255, 366]])
def test_2d_array_parameters_2d_array_input(self):
t = TModel_1_1([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
n_models=2)
y1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == (2, 2, 2)
assert np.all(y1 == [[[111, 222], [133, 244]],
[[355, 466], [377, 488]]])
with pytest.raises(ValueError):
y2 = t([[100, 200, 300], [400, 500, 600]])
y2 = t([[[100, 200], [300, 400]], [[500, 600], [700, 800]]])
assert np.shape(y2) == (2, 2, 2)
assert np.all(y2 == [[[111, 222], [333, 444]],
[[555, 666], [777, 888]]])
def test_mixed_array_parameters_1d_array_input(self):
t = TModel_1_1([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
with pytest.raises(ValueError):
y = t([10, 20, 30])
y = t([10, 20, 30], model_set_axis=False)
assert np.shape(y) == (2, 2, 3)
assert_allclose(y, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[14.07, 25.08, 36.09], [14.10, 25.11, 36.12]]])
class TModel_1_2(FittableModel):
    n_inputs = 1
n_outputs = 2
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
@staticmethod
def evaluate(x, p1, p2, p3):
return (x + p1 + p2, x + p1 + p2 + p3)
class TestSingleInputDoubleOutputSingleModel:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = 1 but n_output = 2 on a toy model with n_models=1.
As of writing there are not enough controls to adjust how outputs from such
a model should be formatted (currently the shapes of outputs are assumed to
be directly associated with the shapes of corresponding inputs when
n_inputs == n_outputs). For now, the approach taken for cases like this is
to assume all outputs should have the same format.
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a scalar.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(100)
assert isinstance(y, float)
assert isinstance(z, float)
assert np.ndim(y) == np.ndim(z) == 0
assert y == 111
assert z == 1111
def test_scalar_parameters_1d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(5) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (5,)
assert np.all(y == [11, 111, 211, 311, 411])
assert np.all(z == (y + 1000))
def test_scalar_parameters_2d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(6).reshape(2, 3) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2, 3)
assert np.all(y == [[11, 111, 211],
[311, 411, 511]])
assert np.all(z == (y + 1000))
def test_scalar_parameters_3d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_2(1, 10, 1000)
y, z = t(np.arange(12).reshape(2, 3, 2) * 100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2, 3, 2)
assert np.all(y == [[[11, 111], [211, 311], [411, 511]],
[[611, 711], [811, 911], [1011, 1111]]])
assert np.all(z == (y + 1000))
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_2([1, 2], [10, 20], [1000, 2000])
y, z = t(100)
assert isinstance(y, np.ndarray)
assert isinstance(z, np.ndarray)
assert np.shape(y) == np.shape(z) == (2,)
assert np.all(y == [111, 122])
assert np.all(z == [1111, 2122])
def test_1d_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([1, 2], [10, 20], [1000, 2000])
y1, z1 = t([100, 200])
assert np.shape(y1) == np.shape(z1) == (2,)
assert np.all(y1 == [111, 222])
assert np.all(z1 == [1111, 2222])
y2, z2 = t([[100], [200]])
assert np.shape(y2) == np.shape(z2) == (2, 2)
assert np.all(y2 == [[111, 122], [211, 222]])
assert np.all(z2 == [[1111, 2122], [1211, 2222]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3, z3 = t([100, 200, 300])
def test_2d_array_parameters_2d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([[1, 2], [3, 4]], [[10, 20], [30, 40]],
[[1000, 2000], [3000, 4000]])
y1, z1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == np.shape(z1) == (2, 2)
assert np.all(y1 == [[111, 222], [333, 444]])
assert np.all(z1 == [[1111, 2222], [3333, 4444]])
y2, z2 = t([[[[100]], [[200]]], [[[300]], [[400]]]])
assert np.shape(y2) == np.shape(z2) == (2, 2, 2, 2)
assert np.all(y2 == [[[[111, 122], [133, 144]],
[[211, 222], [233, 244]]],
[[[311, 322], [333, 344]],
[[411, 422], [433, 444]]]])
assert np.all(z2 == [[[[1111, 2122], [3133, 4144]],
[[1211, 2222], [3233, 4244]]],
[[[1311, 2322], [3333, 4344]],
[[1411, 2422], [3433, 4444]]]])
with pytest.raises(ValueError):
# Doesn't broadcast
y3, z3 = t([[100, 200, 300], [400, 500, 600]])
def test_mixed_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_2([[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]]],
[1, 2, 3], [100, 200, 300])
y1, z1 = t([10, 20, 30])
assert np.shape(y1) == np.shape(z1) == (2, 2, 3)
assert_allclose(y1, [[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]]])
assert_allclose(z1, [[[111.01, 222.02, 333.03],
[111.04, 222.05, 333.06]],
[[111.07, 222.08, 333.09],
[111.10, 222.11, 333.12]]])
y2, z2 = t([[[[10]]], [[[20]]], [[[30]]]])
assert np.shape(y2) == np.shape(z2) == (3, 2, 2, 3)
assert_allclose(y2, [[[[11.01, 12.02, 13.03],
[11.04, 12.05, 13.06]],
[[11.07, 12.08, 13.09],
[11.10, 12.11, 13.12]]],
[[[21.01, 22.02, 23.03],
[21.04, 22.05, 23.06]],
[[21.07, 22.08, 23.09],
[21.10, 22.11, 23.12]]],
[[[31.01, 32.02, 33.03],
[31.04, 32.05, 33.06]],
[[31.07, 32.08, 33.09],
[31.10, 32.11, 33.12]]]])
assert_allclose(z2, [[[[111.01, 212.02, 313.03],
[111.04, 212.05, 313.06]],
[[111.07, 212.08, 313.09],
[111.10, 212.11, 313.12]]],
[[[121.01, 222.02, 323.03],
[121.04, 222.05, 323.06]],
[[121.07, 222.08, 323.09],
[121.10, 222.11, 323.12]]],
[[[131.01, 232.02, 333.03],
[131.04, 232.05, 333.06]],
[[131.07, 232.08, 333.09],
[131.10, 232.11, 333.12]]]])
# test broadcasting rules
broadcast_models = [
{
'model': models.Identity(2),
'inputs': [0, [1, 1]],
'outputs': [0, [1, 1]]
},
{
'model': models.Identity(2),
'inputs': [[1, 1], 0],
'outputs': [[1, 1], 0]
},
{
'model': models.Mapping((0, 1)),
'inputs': [0, [1, 1]],
'outputs': [0, [1, 1]]
},
{
'model': models.Mapping((1, 0)),
'inputs': [0, [1, 1]],
'outputs': [[1, 1], 0]
},
{
'model': models.Mapping((1, 0), n_inputs=3),
'inputs': [0, [1, 1], 2],
'outputs': [[1, 1], 0]
},
{
'model': models.Mapping((0, 1, 0)),
'inputs': [0, [1, 1]],
'outputs': [0, [1, 1], 0]
},
{
'model': models.Mapping((0, 1, 1)),
'inputs': [0, [1, 1]],
'outputs': [0, [1, 1], [1, 1]]
},
{
'model': models.Polynomial2D(1, c0_0=1),
'inputs': [0, [1, 1]],
'outputs': [1, 1]
},
{
'model': models.Polynomial2D(1, c0_0=1),
'inputs': [0, 1],
'outputs': 1
},
{
'model': models.Gaussian2D(1, 1, 2, 1, 1.2),
'inputs': [0, [1, 1]],
'outputs': [0.42860385, 0.42860385]
},
{
'model': models.Gaussian2D(1, 1, 2, 1, 1.2),
'inputs': [0, 1],
'outputs': 0.428603846153
},
{
'model': models.Polynomial2D(1, c0_0=1) & models.Polynomial2D(1, c0_0=2),
'inputs': [1, 1, 1, 1],
'outputs': (1, 2)
},
{
'model': models.Polynomial2D(1, c0_0=1) & models.Polynomial2D(1, c0_0=2),
'inputs': [1, 1, [1, 1], [1, 1]],
'outputs': (1, [2, 2])
},
{
'model': models.math.MultiplyUfunc(),
'inputs': [np.array([np.linspace(0, 1, 5)]).T, np.arange(2)],
'outputs': np.array([[0., 0.],
[0., 0.25],
[0., 0.5],
[0., 0.75],
[0., 1.]])
}
]
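# Each case above pairs a model with example inputs and the outputs expected
# after broadcasting; ``test_mixed_input`` below evaluates the model on the
# inputs and compares against those outputs.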
@pytest.mark.parametrize('model', broadcast_models)
def test_mixed_input(model):
result = model['model'](*model['inputs'])
if np.isscalar(result):
assert_allclose(result, model['outputs'])
else:
for i in range(len(result)):
assert_allclose(result[i], model['outputs'][i])
def test_more_outputs():
class M(FittableModel):
standard_broadcasting = False
n_inputs = 2
n_outputs = 3
a = Parameter()
def evaluate(self, x, y, a):
return a*x, a-x, a+y
        def __call__(self, *args, **kwargs):
            # Bypass the standard broadcasting machinery (note
            # ``standard_broadcasting = False`` above): evaluate directly and
            # collapse length-1 output shapes back to scalars.
            inputs, _ = super().prepare_inputs(*args, **kwargs)
            outputs = self.evaluate(*inputs, *self.parameters)
            output_shapes = [out.shape for out in outputs]
            output_shapes = [() if shape == (1,) else shape for shape in output_shapes]
            return self.prepare_outputs((tuple(output_shapes),), *outputs, **kwargs)
c = M(1)
result = c([1, 1], 1)
expected = [[1., 1.], [0., 0.], 2.]
for r, e in zip(result, expected):
assert_allclose(r, e)
c = M(1)
result = c(1, [1, 1])
expected = [1., 0., [2., 2.]]
for r, e in zip(result, expected):
assert_allclose(r, e)
class TInputFormatter(Model):
"""
A toy model to test input/output formatting.
"""
n_inputs = 2
n_outputs = 2
outputs = ('x', 'y')
@staticmethod
def evaluate(x, y):
return x, y
def test_format_input_scalars():
model = TInputFormatter()
result = model(1, 2)
assert result == (1, 2)
def test_format_input_arrays():
model = TInputFormatter()
result = model([1, 1], [2, 2])
assert_allclose(result, (np.array([1, 1]), np.array([2, 2])))
def test_format_input_arrays_transposed():
model = TInputFormatter()
input = np.array([[1, 1]]).T, np.array([[2, 2]]).T
result = model(*input)
assert_allclose(result, input)
@pytest.mark.parametrize('model',
[models.Gaussian2D(), models.Polynomial2D(1,),
                          models.Pix2Sky_TAN(), models.Tabular2D(lookup_table=np.ones((4, 5)))])
@pytest.mark.skipif('not HAS_SCIPY')
def test_call_keyword_args_2(model):
"""
    Test calling a model with positional arguments, keyword arguments and a mixture of both.
"""
positional = model(1, 2)
assert_allclose(positional, model(x=1, y=2))
assert_allclose(positional, model(1, y=2))
model.inputs = ('r', 't')
assert_allclose(positional, model(r=1, t=2))
assert_allclose(positional, model(1, t=2))
assert_allclose(positional, model(1, 2))
with pytest.raises(ValueError):
model(1, 2, 3)
with pytest.raises(ValueError):
model(1)
with pytest.raises(ValueError):
model(1, 2, t=12, r=3)
@pytest.mark.parametrize('model',
[models.Gaussian1D(), models.Polynomial1D(1,),
models.Tabular1D(lookup_table=np.ones((5,))),
models.Rotation2D(), models.Pix2Sky_TAN()])
@pytest.mark.skipif('not HAS_SCIPY')
def test_call_keyword_args_1(model):
"""
    Test calling a model with positional arguments, keyword arguments and a mixture of both.
"""
positional = model(1)
assert_allclose(positional, model(x=1))
model.inputs = ('r',)
assert_allclose(positional, model(r=1))
with pytest.raises(ValueError):
model(1, 2, 3)
with pytest.raises(ValueError):
model()
with pytest.raises(ValueError):
model(1, 2, t=12, r=3)
@pytest.mark.parametrize('model',
[models.Gaussian2D() | models.Polynomial1D(1,),
models.Gaussian1D() & models.Polynomial1D(1,),
models.Gaussian2D() + models.Polynomial2D(1,),
models.Gaussian2D() - models.Polynomial2D(1,),
models.Gaussian2D() * models.Polynomial2D(1,),
models.Identity(2) | models.Polynomial2D(1),
models.Mapping((1,)) | models.Polynomial1D(1)])
def test_call_keyword_args_compound(model):
    """
    Test calling a compound model with positional arguments, keyword
    arguments and a mixture of both.
"""
positional = model(1, 2)
model.inputs = ('r', 't')
    assert_allclose(positional, model(r=1, t=2))
assert_allclose(positional, model(1, t=2))
with pytest.raises(ValueError):
model(1, 2, 3)
with pytest.raises(ValueError):
model()
with pytest.raises(ValueError):
model(1, 2, t=12, r=3)
@pytest.mark.parametrize('model',
[models.Identity(2), models.Mapping((0, 1)),
models.Mapping((1,))])
def test_call_keyword_mappings(model):
"""
    Test calling a model with positional arguments, keyword arguments and a mixture of both.
"""
positional = model(1, 2)
assert_allclose(positional, model(x0=1, x1=2))
assert_allclose(positional, model(1, x1=2))
# We take a copy before modifying the model since otherwise this changes
# the instance used in the parametrize call and affects future test runs.
model = model.copy()
model.inputs = ('r', 't')
assert_allclose(positional, model(r=1, t=2))
assert_allclose(positional, model(1, t=2))
assert_allclose(positional, model(1, 2))
with pytest.raises(ValueError):
model(1, 2, 3)
with pytest.raises(ValueError):
model(1)
with pytest.raises(ValueError):
model(1, 2, t=12, r=3)
|
69c6966ab4942ec38b1060723cc83df3c21c349c89453396d87a4aa3e7ff6519 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests that relate to using quantities/units on parameters of models.
"""
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from astropy import coordinates as coord
from astropy import units as u
from astropy.modeling.core import Fittable1DModel, InputParameterError
from astropy.modeling.models import Gaussian1D, Pix2Sky_TAN, RotateNative2Celestial, Rotation2D
from astropy.modeling.parameters import Parameter, ParameterDefinitionError
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import UnitsError
class BaseTestModel(Fittable1DModel):
@staticmethod
def evaluate(x, a):
return x
def test_parameter_quantity():
"""
Basic tests for initializing general models (that do not require units)
with parameters that have units attached.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
assert g.amplitude.value == 1.0
assert g.amplitude.unit is u.J
assert g.mean.value == 1.0
assert g.mean.unit is u.m
assert g.stddev.value == 0.1
assert g.stddev.unit is u.m
def test_parameter_set_quantity():
"""
Make sure that parameters that start off as quantities can be set to any
other quantity, regardless of whether the units of the new quantity are
compatible with the original ones.
We basically leave it up to the evaluate method to raise errors if there
are issues with incompatible units, and we don't check for consistency
at the parameter level.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Try equivalent units
g.amplitude = 4 * u.kJ
assert_quantity_allclose(g.amplitude, 4 * u.kJ)
g.mean = 3 * u.km
assert_quantity_allclose(g.mean, 3 * u.km)
g.stddev = 2 * u.mm
assert_quantity_allclose(g.stddev, 2 * u.mm)
# Try different units
g.amplitude = 2 * u.s
assert_quantity_allclose(g.amplitude, 2 * u.s)
g.mean = 2 * u.Jy
assert_quantity_allclose(g.mean, 2 * u.Jy)
def test_parameter_lose_units():
"""
    Check that parameters that have been set to a quantity raise an exception
    if they are then set to a value with no units. We do this because setting
    a parameter to a value with no units is ambiguous if units were set
    before: if a parameter is 1 * u.Jy and the parameter is then set to 2,
    does this mean 2 without units, or 2 * u.Jy?
"""
g = Gaussian1D(1 * u.Jy, 3, 0.1)
with pytest.raises(UnitsError) as exc:
g.amplitude = 2
assert exc.value.args[0] == ("The 'amplitude' parameter should be given as "
"a Quantity because it was originally "
"initialized as a Quantity")
def test_parameter_add_units():
"""
On the other hand, if starting from a parameter with no units, we should be
able to add units since this is unambiguous.
"""
g = Gaussian1D(1, 3, 0.1)
g.amplitude = 2 * u.Jy
assert_quantity_allclose(g.amplitude, 2 * u.Jy)
def test_parameter_change_unit():
"""
Test that changing the unit on a parameter does not work. This is an
ambiguous operation because it's not clear if it means that the value should
be converted or if the unit should be changed without conversion.
"""
g = Gaussian1D(1, 1 * u.m, 0.1 * u.m)
# Setting a unit on a unitless parameter should not work
with pytest.raises(ValueError) as exc:
g.amplitude.unit = u.Jy
assert exc.value.args[0] == ("Cannot attach units to parameters that were "
"not initially specified with units")
    # Changing to another unit should not work either, even if it is equivalent
with pytest.raises(ValueError) as exc:
g.mean.unit = u.cm
assert exc.value.args[0] == ("Cannot change the unit attribute directly, "
"instead change the parameter to a new quantity")
def test_parameter_set_value():
"""
Test that changing the value on a parameter works as expected.
"""
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
# To set a parameter to a quantity, we simply do
g.amplitude = 2 * u.Jy
# If we try setting the value, we need to pass a non-quantity value
# TODO: determine whether this is the desired behavior?
g.amplitude.value = 4
assert_quantity_allclose(g.amplitude, 4 * u.Jy)
assert g.amplitude.value == 4
assert g.amplitude.unit is u.Jy
# If we try setting it to a Quantity, we raise an error
with pytest.raises(TypeError) as exc:
g.amplitude.value = 3 * u.Jy
assert exc.value.args[0] == \
("The .value property on parameters should be set"
" to unitless values, not Quantity objects. To set"
"a parameter to a quantity simply set the "
"parameter directly without using .value")
def test_parameter_quantity_property():
"""
Test that the quantity property of Parameters behaves as expected
"""
    # Since parameters have .value and .unit attributes that return just the
    # value and unit respectively, there is also a .quantity property that
    # returns a Quantity instance.
g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
assert_quantity_allclose(g.amplitude.quantity, 1 * u.Jy)
# Setting a parameter to a quantity changes the value and the default unit
g.amplitude.quantity = 5 * u.mJy
assert g.amplitude.value == 5
assert g.amplitude.unit is u.mJy
# And we can also set the parameter to a value with different units
g.amplitude.quantity = 4 * u.s
assert g.amplitude.value == 4
assert g.amplitude.unit is u.s
# But not to a value without units
with pytest.raises(TypeError) as exc:
g.amplitude.quantity = 3
assert exc.value.args[0] == "The .quantity attribute should be set to a Quantity object"
def test_parameter_default_units_match():
# If the unit and default quantity units are different, raise an error
with pytest.raises(ParameterDefinitionError) as exc:
class TestC(Fittable1DModel):
a = Parameter(default=1.0 * u.m, unit=u.Jy)
assert exc.value.args[0] == ("parameter default 1.0 m does not have units "
"equivalent to the required unit Jy")
@pytest.mark.parametrize(('unit', 'default'), ((u.m, 1.0), (None, 1 * u.m)))
def test_parameter_defaults(unit, default):
"""
Test that default quantities are correctly taken into account
"""
class TestModel(BaseTestModel):
a = Parameter(default=default, unit=unit)
# TODO: decide whether the default property should return a value or
# a quantity?
# The default unit and value should be set on the class
assert TestModel.a.unit == u.m
assert TestModel.a.default == 1.0
# Check that the default unit and value are also set on a class instance
m = TestModel()
assert m.a.unit == u.m
assert m.a.default == m.a.value == 1.0
# If the parameter is set to a different value, the default is still the
# internal default
m = TestModel(2.0 * u.m)
assert m.a.unit == u.m
assert m.a.value == 2.0
assert m.a.default == 1.0
# Instantiate with a different, but compatible unit
m = TestModel(2.0 * u.pc)
assert m.a.unit == u.pc
assert m.a.value == 2.0
# The default is still in the original units
# TODO: but how do we know what those units are if we don't return a
# quantity?
assert m.a.default == 1.0
# Initialize with a completely different unit
m = TestModel(2.0 * u.Jy)
assert m.a.unit == u.Jy
assert m.a.value == 2.0
# TODO: this illustrates why the default doesn't make sense anymore
assert m.a.default == 1.0
    # Instantiating with no units at all is not allowed and raises an error
with pytest.raises(InputParameterError) as exc:
TestModel(1.0)
assert exc.value.args[0] == ("TestModel.__init__() requires a "
"Quantity for parameter 'a'")
def test_parameter_quantity_arithmetic():
"""
Test that arithmetic operations with properties that have units return the
appropriate Quantities.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Addition should work if units are compatible
assert g.mean + (1 * u.m) == 2 * u.m
assert (1 * u.m) + g.mean == 2 * u.m
# Multiplication by a scalar should also preserve the quantity-ness
assert g.mean * 2 == (2 * u.m)
assert 2 * g.mean == (2 * u.m)
# Multiplication by a quantity should result in units being multiplied
assert g.mean * (2 * u.m) == (2 * (u.m ** 2))
assert (2 * u.m) * g.mean == (2 * (u.m ** 2))
# Negation should work properly too
assert -g.mean == (-1 * u.m)
assert abs(-g.mean) == g.mean
# However, addition of a quantity + scalar should not work
with pytest.raises(UnitsError) as exc:
g.mean + 1
assert exc.value.args[0] == ("Can only apply 'add' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
with pytest.raises(UnitsError) as exc:
1 + g.mean
assert exc.value.args[0] == ("Can only apply 'add' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
def test_parameter_quantity_comparison():
"""
Basic test of comparison operations on properties with units.
"""
g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
# Essentially here we are checking that parameters behave like Quantity
assert g.mean == 1 * u.m
assert 1 * u.m == g.mean
assert g.mean != 1
assert 1 != g.mean
assert g.mean < 2 * u.m
assert 2 * u.m > g.mean
with pytest.raises(UnitsError) as exc:
g.mean < 2
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
with pytest.raises(UnitsError) as exc:
2 > g.mean
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
g = Gaussian1D([1, 2] * u.J, [1, 2] * u.m, [0.1, 0.2] * u.m)
assert np.all(g.mean == [1, 2] * u.m)
assert np.all([1, 2] * u.m == g.mean)
assert np.all(g.mean != [1, 2])
assert np.all([1, 2] != g.mean)
with pytest.raises(UnitsError) as exc:
g.mean < [3, 4]
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
with pytest.raises(UnitsError) as exc:
[3, 4] > g.mean
assert exc.value.args[0] == ("Can only apply 'less' function to "
"dimensionless quantities when other argument "
"is not a quantity (unless the latter is all "
"zero/infinity/nan)")
def test_parameters_compound_models():
tan = Pix2Sky_TAN()
sky_coords = coord.SkyCoord(ra=5.6, dec=-72, unit=u.deg)
lon_pole = 180 * u.deg
n2c = RotateNative2Celestial(sky_coords.ra, sky_coords.dec, lon_pole)
rot = Rotation2D(23)
m = rot | n2c
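    # Constructing the compound model is the check here: this is a smoke test
    # that unitful parameters survive model composition without raising.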
|
38f0ed8c874bb218b09c9a1dd5e29ff4e9e6d1c277ccdb4a27d17a610e29a931 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for polynomial models."""
# pylint: disable=invalid-name
import os
import unittest.mock as mk
import warnings
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy import conf, wcs
from astropy.io import fits
from astropy.modeling import fitting
from astropy.modeling.functional_models import Linear1D
from astropy.modeling.mappings import Identity
from astropy.modeling.polynomial import (SIP, Chebyshev1D, Chebyshev2D, Hermite1D, Hermite2D,
Legendre1D, Legendre2D, OrthoPolynomialBase, Polynomial1D,
Polynomial2D, PolynomialBase)
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
linear1d = {
Chebyshev1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Hermite1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Legendre1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Polynomial1D: {
'args': (3,),
'kwargs': {'domain': [1, 10]},
'parameters': {'c0': 1.2, 'c1': 2, 'c2': 2.3, 'c3': 0.2},
'constraints': {'fixed': {'c0': True}}
},
Linear1D: {
'args': (),
'kwargs': {},
'parameters': {'intercept': 1.2, 'slope': 23.1},
'constraints': {'fixed': {'intercept': True}}
}
}
linear2d = {
Chebyshev2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': True}}
},
Hermite2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': True}}
},
Legendre2D: {
'args': (1, 1),
'kwargs': {'x_domain': [0, 99], 'y_domain': [0, 82]},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3, 'c1_1': 0.2},
'constraints': {'fixed': {'c0_0': True}}
},
Polynomial2D: {
'args': (1,),
'kwargs': {},
'parameters': {'c0_0': 1.2, 'c1_0': 2, 'c0_1': 2.3},
'constraints': {'fixed': {'c0_0': True}}
}
}
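# ``linear1d`` and ``linear2d`` above map each model class to the constructor
# arguments, fiducial parameter values, and fixed-parameter constraints
# exercised by the parametrized fitting tests below.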
@pytest.mark.skipif('not HAS_SCIPY')
class TestFitting:
"""Test linear fitter with polynomial models."""
def setup_class(self):
self.N = 100
self.M = 100
self.x1 = np.linspace(1, 10, 100)
self.y2, self.x2 = np.mgrid[:100, :83]
rsn = np.random.default_rng(0)
self.n1 = rsn.standard_normal(self.x1.size) * .1
self.n2 = rsn.standard_normal(self.x2.size)
self.n2.shape = self.x2.shape
self.linear_fitter = fitting.LinearLSQFitter()
self.non_linear_fitter = fitting.LevMarLSQFitter()
# TODO: Most of these test cases have some pretty repetitive setup that we
# could probably factor out
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear1d, key=str), (False, True))))
def test_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
y1 = model(self.x1)
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', message=r'The fit may be poorly conditioned',
category=AstropyUserWarning)
model_lin = self.linear_fitter(model, self.x1, y1 + self.n1)
if constraints:
# For the constraints tests we're not checking the overall fit,
# just that the constraint was maintained
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear1d, key=str), (False, True))))
def test_non_linear_fitter_1D(self, model_class, constraints):
"""Test fitting with non-linear LevMarLSQFitter"""
model_args = linear1d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
y1 = model(self.x1)
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
model_nlin = self.non_linear_fitter(model, self.x1, y1 + self.n1)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear2d, key=str), (False, True))))
def test_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with LinearLSQFitter"""
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
z = model(self.x2, self.y2)
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', message=r'The fit may be poorly conditioned',
category=AstropyUserWarning)
model_lin = self.linear_fitter(model, self.x2, self.y2, z + self.n2)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_lin, param).value == expected
else:
assert_allclose(model_lin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize(('model_class', 'constraints'),
list(product(sorted(linear2d, key=str), (False, True))))
def test_non_linear_fitter_2D(self, model_class, constraints):
"""Test fitting with non-linear LevMarLSQFitter"""
model_args = linear2d[model_class]
kwargs = {}
kwargs.update(model_args['kwargs'])
kwargs.update(model_args['parameters'])
if constraints:
kwargs.update(model_args['constraints'])
model = model_class(*model_args['args'], **kwargs)
z = model(self.x2, self.y2)
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
model_nlin = self.non_linear_fitter(model, self.x2, self.y2,
z + self.n2)
if constraints:
fixed = model_args['constraints'].get('fixed', None)
if fixed:
for param, value in fixed.items():
expected = model_args['parameters'][param]
assert getattr(model_nlin, param).value == expected
else:
assert_allclose(model_nlin.parameters, model.parameters,
atol=0.2)
@pytest.mark.parametrize('model_class',
[cls for cls in list(linear1d) + list(linear2d)])
def test_polynomial_init_with_constraints(model_class):
"""
Test that polynomial models can be instantiated with constraints, but no
parameters specified.
Regression test for https://github.com/astropy/astropy/issues/3606
"""
# Just determine which parameter to place a constraint on; it doesn't
# matter which parameter it is to exhibit the problem so long as it's a
# valid parameter for the model
if '1D' in model_class.__name__:
param = 'c0'
else:
param = 'c0_0'
if issubclass(model_class, Linear1D):
param = 'intercept'
if issubclass(model_class, OrthoPolynomialBase):
degree = (2, 2)
else:
degree = (2,)
m = model_class(*degree, fixed={param: True})
assert m.fixed[param] is True
assert getattr(m, param).fixed is True
if issubclass(model_class, OrthoPolynomialBase):
assert repr(m) ==\
f"<{model_class.__name__}(2, 2, c0_0=0., c1_0=0., c2_0=0., c0_1=0., c1_1=0., c2_1=0., c0_2=0., c1_2=0., c2_2=0.)>"
assert str(m) ==\
f"Model: {model_class.__name__}\n" +\
"Inputs: ('x', 'y')\n" +\
"Outputs: ('z',)\n" +\
"Model set size: 1\n" +\
"X_Degree: 2\n" +\
"Y_Degree: 2\n" +\
"Parameters:\n" +\
" c0_0 c1_0 c2_0 c0_1 c1_1 c2_1 c0_2 c1_2 c2_2\n" +\
" ---- ---- ---- ---- ---- ---- ---- ---- ----\n" +\
" 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0"
else:
if model_class.__name__ == 'Polynomial2D':
assert repr(m) ==\
"<Polynomial2D(2, c0_0=0., c1_0=0., c2_0=0., c0_1=0., c0_2=0., c1_1=0.)>"
assert str(m) ==\
"Model: Polynomial2D\n" +\
"Inputs: ('x', 'y')\n" +\
"Outputs: ('z',)\n" +\
"Model set size: 1\n" +\
"Degree: 2\n" +\
"Parameters:\n" +\
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1\n" +\
" ---- ---- ---- ---- ---- ----\n" +\
" 0.0 0.0 0.0 0.0 0.0 0.0"
elif model_class.__name__ == 'Linear1D':
assert repr(m) ==\
"<Linear1D(slope=2., intercept=0.)>"
assert str(m) ==\
"Model: Linear1D\n" +\
"Inputs: ('x',)\n" +\
"Outputs: ('y',)\n" +\
"Model set size: 1\n" +\
"Parameters:\n" +\
" slope intercept\n" +\
" ----- ---------\n" +\
" 2.0 0.0"
else:
assert repr(m) ==\
f"<{model_class.__name__}(2, c0=0., c1=0., c2=0.)>"
assert str(m) ==\
f"Model: {model_class.__name__}\n" +\
"Inputs: ('x',)\n" +\
"Outputs: ('y',)\n" +\
"Model set size: 1\n" +\
"Degree: 2\n" +\
"Parameters:\n" +\
" c0 c1 c2\n" +\
" --- --- ---\n" +\
" 0.0 0.0 0.0"
def test_sip_hst():
"""Test SIP against astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join('data', 'hst_sip.hdr'))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr['A_*'])
b_pars = dict(**hdr['B_*'])
a_order = a_pars.pop('A_ORDER')
b_order = b_pars.pop('B_ORDER')
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars)
coords = [1, 1]
rel_coords = [1 - crpix1, 1 - crpix2]
astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords
assert_allclose(sip(1, 1), astwcs_result)
    # Test changing the inputs and calling the model with keyword arguments.
sip.inputs = ("r", "t")
assert_allclose(sip(r=1, t=1), astwcs_result)
assert_allclose(sip(1, t=1), astwcs_result)
# Test representations
assert repr(sip) ==\
"<SIP([<Shift(offset=-2048.)>, <Shift(offset=-1024.)>, " +\
"<_SIP1D(4, 'A', A_2_0=0.00000855, A_3_0=-0., A_4_0=0., A_0_2=0.00000217, " +\
"A_0_3=0., A_0_4=0., A_1_1=-0.0000052, A_1_2=-0., A_1_3=-0., " +\
"A_2_1=-0., A_2_2=0., A_3_1=0.)>, " +\
"<_SIP1D(4, 'B', B_2_0=-0.00000175, B_3_0=0., B_4_0=-0., B_0_2=-0.00000722, " +\
"B_0_3=-0., B_0_4=-0., B_1_1=0.00000618, B_1_2=-0., B_1_3=0., " +\
"B_2_1=-0., B_2_2=-0., B_3_1=-0.)>])>"
with conf.set_temp('max_width', 80):
assert str(sip) ==\
"Model: SIP\n" +\
" Model: Shift\n" +\
" Inputs: ('x',)\n" +\
" Outputs: ('y',)\n" +\
" Model set size: 1\n" +\
" Parameters:\n" +\
" offset\n" +\
" -------\n" +\
" -2048.0\n" +\
"\n" +\
" Model: Shift\n" +\
" Inputs: ('x',)\n" +\
" Outputs: ('y',)\n" +\
" Model set size: 1\n" +\
" Parameters:\n" +\
" offset\n" +\
" -------\n" +\
" -1024.0\n" +\
"\n" +\
" Model: _SIP1D\n" +\
" Inputs: ('x', 'y')\n" +\
" Outputs: ('z',)\n" +\
" Model set size: 1\n" +\
" Order: 4\n" +\
" Coeff. Prefix: A\n" +\
" Parameters:\n" +\
" A_2_0 A_3_0 ... A_3_1 \n" +\
" --------------------- ---------------------- ... ---------------------\n" +\
" 8.551277582556502e-06 -4.730444829222791e-10 ... 1.971022971660309e-15\n" +\
"\n" +\
" Model: _SIP1D\n" +\
" Inputs: ('x', 'y')\n" +\
" Outputs: ('z',)\n" +\
" Model set size: 1\n" +\
" Order: 4\n" +\
" Coeff. Prefix: B\n" +\
" Parameters:\n" +\
" B_2_0 B_3_0 ... B_3_1 \n" +\
" ---------------------- --------------------- ... ----------------------\n" +\
" -1.746491877058669e-06 8.567635427816317e-11 ... -3.779506805487476e-15\n"
# Test get num of coeffs
assert sip.sip1d_a.get_num_coeff(1) == 6
# Test error
message = "Degree of polynomial must be 2< deg < 9"
sip.sip1d_a.order = 1
with pytest.raises(ValueError) as err:
sip.sip1d_a.get_num_coeff(1)
assert str(err.value) == message
sip.sip1d_a.order = 10
with pytest.raises(ValueError) as err:
sip.sip1d_a.get_num_coeff(1)
assert str(err.value) == message
def test_sip_irac():
"""Test forward and inverse SIP against astropy.wcs"""
test_file = get_pkg_data_filename(os.path.join('data', 'irac_sip.hdr'))
hdr = fits.Header.fromtextfile(test_file)
crpix1 = hdr['CRPIX1']
crpix2 = hdr['CRPIX2']
wobj = wcs.WCS(hdr)
a_pars = dict(**hdr['A_*'])
b_pars = dict(**hdr['B_*'])
ap_pars = dict(**hdr['AP_*'])
bp_pars = dict(**hdr['BP_*'])
a_order = a_pars.pop('A_ORDER')
b_order = b_pars.pop('B_ORDER')
ap_order = ap_pars.pop('AP_ORDER')
bp_order = bp_pars.pop('BP_ORDER')
del a_pars['A_DMAX']
del b_pars['B_DMAX']
pix = [200, 200]
rel_pix = [200 - crpix1, 200 - crpix2]
sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars,
ap_order=ap_order, ap_coeff=ap_pars, bp_order=bp_order,
bp_coeff=bp_pars)
foc = wobj.sip_pix2foc([pix], 1)
newpix = wobj.sip_foc2pix(foc, 1)[0]
assert_allclose(sip(*pix), foc[0] - rel_pix)
assert_allclose(sip.inverse(*foc[0]) +
foc[0] - rel_pix, newpix - pix)
# Test inverse representations
assert repr(sip.inverse) ==\
"<InverseSIP([<Polynomial2D(2, c0_0=0., c1_0=0.0000114, c2_0=0.00002353, " +\
"c0_1=-0.00000546, c0_2=-0.00000667, c1_1=-0.00001801)>, " +\
"<Polynomial2D(2, c0_0=0., c1_0=-0.00001495, c2_0=0.00000122, c0_1=0.00001975, " +\
"c0_2=-0.00002601, c1_1=0.00002944)>])>"
assert str(sip.inverse) ==\
"Model: InverseSIP\n" +\
" Model: Polynomial2D\n" +\
" Inputs: ('x', 'y')\n" +\
" Outputs: ('z',)\n" +\
" Model set size: 1\n" +\
" Degree: 2\n" +\
" Parameters:\n" +\
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1 \n" +\
" ---- -------- --------- ---------- ---------- ----------\n" +\
" 0.0 1.14e-05 2.353e-05 -5.463e-06 -6.666e-06 -1.801e-05\n" +\
"\n" +\
" Model: Polynomial2D\n" +\
" Inputs: ('x', 'y')\n" +\
" Outputs: ('z',)\n" +\
" Model set size: 1\n" +\
" Degree: 2\n" +\
" Parameters:\n" +\
" c0_0 c1_0 c2_0 c0_1 c0_2 c1_1 \n" +\
" ---- ---------- --------- --------- ---------- ---------\n" +\
" 0.0 -1.495e-05 1.225e-06 1.975e-05 -2.601e-05 2.944e-05\n"
def test_sip_no_coeff():
sip = SIP([10, 12], 2, 2)
assert_allclose(sip.sip1d_a.parameters, [0., 0., 0])
assert_allclose(sip.sip1d_b.parameters, [0., 0., 0])
with pytest.raises(NotImplementedError):
sip.inverse
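    # The inverse is only available when the AP/BP (inverse) coefficients are
    # supplied, as in ``test_sip_irac`` above.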
# Test model set
sip = SIP([10, 12], 2, 2, n_models=2)
assert sip.sip1d_a.model_set_axis == 0
assert sip.sip1d_b.model_set_axis == 0
@pytest.mark.parametrize('cls', (Polynomial1D, Chebyshev1D, Legendre1D,
Polynomial2D, Chebyshev2D, Legendre2D))
def test_zero_degree_polynomial(cls):
"""
A few tests that degree=0 polynomials are correctly evaluated and
fitted.
Regression test for https://github.com/astropy/astropy/pull/3589
"""
message = "Degree of polynomial must be positive or null"
if cls.n_inputs == 1: # Test 1D polynomials
p1 = cls(degree=0, c0=1)
assert p1(0) == 1
assert np.all(p1(np.zeros(5)) == np.ones(5))
x = np.linspace(0, 1, 100)
# Add a little noise along a straight line
y = 1 + np.random.uniform(0, 0.1, len(x))
p1_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p1_fit = fitter(p1_init, x, y)
# The fit won't be exact of course, but it should get close to within
# 1%
assert_allclose(p1_fit.c0, 1, atol=0.10)
# Error from negative degree
with pytest.raises(ValueError) as err:
cls(degree=-1)
assert str(err.value) == message
elif cls.n_inputs == 2: # Test 2D polynomials
if issubclass(cls, OrthoPolynomialBase):
p2 = cls(x_degree=0, y_degree=0, c0_0=1)
# different shaped x and y inputs
a = np.array([1, 2, 3])
b = np.array([1, 2])
with mk.patch.object(PolynomialBase, 'prepare_inputs', autospec=True,
return_value=((a, b), mk.MagicMock())):
with pytest.raises(ValueError) as err:
p2.prepare_inputs(mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"Expected input arrays to have the same shape"
# Error from negative degree
with pytest.raises(ValueError) as err:
cls(x_degree=-1, y_degree=0)
assert str(err.value) == message
with pytest.raises(ValueError) as err:
cls(x_degree=0, y_degree=-1)
assert str(err.value) == message
else:
p2 = cls(degree=0, c0_0=1)
# Error from negative degree
with pytest.raises(ValueError) as err:
cls(degree=-1)
assert str(err.value) == message
assert p2(0, 0) == 1
assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5))
y, x = np.mgrid[0:1:100j, 0:1:100j]
z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100)
if issubclass(cls, OrthoPolynomialBase):
p2_init = cls(x_degree=0, y_degree=0)
else:
p2_init = cls(degree=0)
fitter = fitting.LinearLSQFitter()
p2_fit = fitter(p2_init, x, y, z)
assert_allclose(p2_fit.c0_0, 1, atol=0.10)
@pytest.mark.skipif('not HAS_SCIPY')
def test_2d_orthopolynomial_in_compound_model():
"""
    Ensure that OrthoPolynomialBase (i.e. Chebyshev2D & Legendre2D) models get
evaluated & fitted correctly when part of a compound model.
Regression test for https://github.com/astropy/astropy/pull/6085.
"""
y, x = np.mgrid[0:5, 0:5]
z = x + y
fitter = fitting.LevMarLSQFitter()
simple_model = Chebyshev2D(2, 2)
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
simple_fit = fitter(simple_model, x, y, z)
fitter = fitting.LevMarLSQFitter() # re-init to compare like with like
compound_model = Identity(2) | Chebyshev2D(2, 2)
compound_model.fittable = True
compound_model.linear = True
with pytest.warns(AstropyUserWarning,
match='Model is linear in parameters'):
compound_fit = fitter(compound_model, x, y, z)
assert_allclose(simple_fit(x, y), compound_fit(x, y), atol=1e-15)
def test_Hermite1D_clenshaw():
model = Hermite1D(degree=2)
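    # Physicists' Hermite polynomials at x=1: H0=1, H1=2, H2=2, H3=-4, so
    # e.g. clenshaw(1, [3, 4, 5, 6]) = 3*1 + 4*2 + 5*2 + 6*(-4) = -3.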
assert model.clenshaw(1, [3]) == 3
assert model.clenshaw(1, [3, 4]) == 11
assert model.clenshaw(1, [3, 4, 5]) == 21
assert model.clenshaw(1, [3, 4, 5, 6]) == -3
def test__fcache():
model = OrthoPolynomialBase(x_degree=2, y_degree=2)
with pytest.raises(NotImplementedError) as err:
model._fcache(np.asanyarray(1), np.asanyarray(1))
assert str(err.value) == "Subclasses should implement this"
model = Hermite2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) ==\
{
0: np.asanyarray(1),
1: 2,
3: np.asanyarray(1),
4: 2,
2: 2.0,
5: -4.0
}
model = Legendre2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) ==\
{
0: np.asanyarray(1),
1: np.asanyarray(1),
2: 1.0,
3: np.asanyarray(1),
4: np.asanyarray(1),
5: 1.0
}
model = Chebyshev2D(x_degree=2, y_degree=2)
assert model._fcache(np.asanyarray(1), np.asanyarray(1)) ==\
{
0: np.asanyarray(1),
1: np.asanyarray(1),
2: 1.0,
3: np.asanyarray(1),
4: np.asanyarray(1),
5: 1.0
}
def test_fit_deriv_shape_error():
model = Hermite2D(x_degree=2, y_degree=2)
with pytest.raises(ValueError) as err:
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
assert str(err.value) ==\
"x and y must have the same shape"
model = Chebyshev2D(x_degree=2, y_degree=2)
with pytest.raises(ValueError) as err:
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
assert str(err.value) ==\
"x and y must have the same shape"
model = Legendre2D(x_degree=2, y_degree=2)
with pytest.raises(ValueError) as err:
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
assert str(err.value) ==\
"x and y must have the same shape"
model = Polynomial2D(degree=2)
with pytest.raises(ValueError) as err:
model.fit_deriv(np.array([1, 2]), np.array([3, 4, 5]))
assert str(err.value) ==\
"Expected x and y to be of equal size"
|
978feec48f41401b4c743b5d45deedeb9d5972f0eb8ddcf66563bcdf772adf2d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import unittest.mock as mk
import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates import SpectralCoord
from astropy.modeling.bounding_box import (CompoundBoundingBox, ModelBoundingBox, _BaseInterval,
_BaseSelectorArgument, _BoundingDomain,
_ignored_interval, _Interval, _SelectorArgument,
_SelectorArguments)
from astropy.modeling.core import Model, fix_inputs
from astropy.modeling.models import Gaussian1D, Gaussian2D, Identity, Scale, Shift
class Test_Interval:
def test_create(self):
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
assert isinstance(interval, _BaseInterval)
assert interval.lower == lower
assert interval.upper == upper
assert interval == (lower, upper)
assert interval.__repr__() == \
f"Interval(lower={lower}, upper={upper})"
def test_copy(self):
interval = _Interval(0.5, 1.5)
copy = interval.copy()
assert interval == copy
assert id(interval) != id(copy)
        # Same float values will have the same id
assert interval.lower == copy.lower
assert id(interval.lower) == id(copy.lower)
        # Same float values will have the same id
assert interval.upper == copy.upper
assert id(interval.upper) == id(copy.upper)
def test__validate_shape(self):
message = "An interval must be some sort of sequence of length 2"
lower = mk.MagicMock()
upper = mk.MagicMock()
interval = _Interval(lower, upper)
# Passes (2,)
interval._validate_shape((1, 2))
interval._validate_shape([1, 2])
interval._validate_shape((1*u.m, 2*u.m))
interval._validate_shape([1*u.m, 2*u.m])
# Passes (1, 2)
interval._validate_shape(((1, 2),))
interval._validate_shape(([1, 2],))
interval._validate_shape([(1, 2)])
interval._validate_shape([[1, 2]])
interval._validate_shape(((1*u.m, 2*u.m),))
interval._validate_shape(([1*u.m, 2*u.m],))
interval._validate_shape([(1*u.m, 2*u.m)])
interval._validate_shape([[1*u.m, 2*u.m]])
# Passes (2, 0)
interval._validate_shape((mk.MagicMock(), mk.MagicMock()))
interval._validate_shape([mk.MagicMock(), mk.MagicMock()])
# Passes with array inputs:
interval._validate_shape((np.array([-2.5, -3.5]), np.array([2.5, 3.5])))
interval._validate_shape((np.array([-2.5, -3.5, -4.5]),
np.array([2.5, 3.5, 4.5])))
# Fails shape (no units)
with pytest.raises(ValueError) as err:
interval._validate_shape((1, 2, 3))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([1, 2, 3])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([[1, 2, 3], [4, 5, 6]])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape(1)
assert str(err.value) == message
# Fails shape (units)
message = "An interval must be some sort of sequence of length 2"
with pytest.raises(ValueError) as err:
interval._validate_shape((1*u.m, 2*u.m, 3*u.m))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([1*u.m, 2*u.m, 3*u.m])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape([[1*u.m, 2*u.m, 3*u.m], [4*u.m, 5*u.m, 6*u.m]])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape(1*u.m)
assert str(err.value) == message
# Fails shape (arrays):
with pytest.raises(ValueError) as err:
interval._validate_shape((np.array([-2.5, -3.5]),
np.array([2.5, 3.5]),
np.array([3, 4])))
assert str(err.value) == message
with pytest.raises(ValueError) as err:
interval._validate_shape((np.array([-2.5, -3.5]), [2.5, 3.5]))
assert str(err.value) == message
def test__validate_bounds(self):
# Passes
assert _Interval._validate_bounds(1, 2) == (1, 2)
assert _Interval._validate_bounds(1*u.m, 2*u.m) == (1*u.m, 2*u.m)
interval = _Interval._validate_bounds(np.array([-2.5, -3.5]), np.array([2.5, 3.5]))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
# Fails
with pytest.warns(RuntimeWarning,
match="Invalid interval: upper bound 1 is strictly "
r"less than lower bound 2\."):
_Interval._validate_bounds(2, 1)
with pytest.warns(RuntimeWarning,
match=r"Invalid interval: upper bound 1\.0 m is strictly "
r"less than lower bound 2\.0 m\."):
_Interval._validate_bounds(2*u.m, 1*u.m)
def test_validate(self):
# Passes
assert _Interval.validate((1, 2)) == (1, 2)
assert _Interval.validate([1, 2]) == (1, 2)
assert _Interval.validate((1*u.m, 2*u.m)) == (1*u.m, 2*u.m)
assert _Interval.validate([1*u.m, 2*u.m]) == (1*u.m, 2*u.m)
assert _Interval.validate(((1, 2),)) == (1, 2)
assert _Interval.validate(([1, 2],)) == (1, 2)
assert _Interval.validate([(1, 2)]) == (1, 2)
assert _Interval.validate([[1, 2]]) == (1, 2)
assert _Interval.validate(((1*u.m, 2*u.m),)) == (1*u.m, 2*u.m)
assert _Interval.validate(([1*u.m, 2*u.m],)) == (1*u.m, 2*u.m)
assert _Interval.validate([(1*u.m, 2*u.m)]) == (1*u.m, 2*u.m)
assert _Interval.validate([[1*u.m, 2*u.m]]) == (1*u.m, 2*u.m)
interval = _Interval.validate((np.array([-2.5, -3.5]),
np.array([2.5, 3.5])))
assert (interval.lower == np.array([-2.5, -3.5])).all()
assert (interval.upper == np.array([2.5, 3.5])).all()
interval = _Interval.validate((np.array([-2.5, -3.5, -4.5]),
np.array([2.5, 3.5, 4.5])))
assert (interval.lower == np.array([-2.5, -3.5, -4.5])).all()
assert (interval.upper == np.array([2.5, 3.5, 4.5])).all()
# Fail shape
with pytest.raises(ValueError):
_Interval.validate((1, 2, 3))
# Fail bounds
with pytest.warns(RuntimeWarning):
_Interval.validate((2, 1))
def test_outside(self):
interval = _Interval.validate((0, 1))
assert (interval.outside(np.linspace(-1, 2, 13)) ==
[True, True, True, True,
False, False, False, False, False,
True, True, True, True]).all()
def test_domain(self):
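# domain(resolution) samples the interval inclusively from lower to upper at the given step.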
interval = _Interval.validate((0, 1))
assert (interval.domain(0.25) == np.linspace(0, 1, 5)).all()
def test__ignored_interval(self):
assert _ignored_interval.lower == -np.inf
assert _ignored_interval.upper == np.inf
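# Any finite number lies strictly inside (-inf, inf), so it is never "outside".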
for num in [0, -1, -100, 3.14, 10**100, -10**100]:
assert not num < _ignored_interval[0]
assert num > _ignored_interval[0]
assert not num > _ignored_interval[1]
assert num < _ignored_interval[1]
assert not (_ignored_interval.outside(np.array([num]))).all()
def test_validate_with_SpectralCoord(self):
"""Regression test for issue #12439"""
lower = SpectralCoord(1, u.um)
upper = SpectralCoord(10, u.um)
interval = _Interval.validate((lower, upper))
assert interval.lower == lower
assert interval.upper == upper
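# _BoundingDomain is abstract; these tests drive it through a minimal concrete subclass
# defined in the test setup.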
class Test_BoundingDomain:
def setup_method(self):
class BoundingDomain(_BoundingDomain):
def fix_inputs(self, model, fix_inputs):
super().fix_inputs(model, fixed_inputs=fix_inputs)
def prepare_inputs(self, input_shape, inputs):
super().prepare_inputs(input_shape, inputs)
self.BoundingDomain = BoundingDomain
def test_create(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'C'
bounding_box = self.BoundingDomain(model, order='F')
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'F'
bounding_box = self.BoundingDomain(Gaussian2D(), ['x'])
assert bounding_box._ignored == [0]
assert bounding_box._order == 'C'
# Error
with pytest.raises(ValueError):
self.BoundingDomain(model, order=mk.MagicMock())
def test_model(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
assert bounding_box._model == model
assert bounding_box.model == model
def test_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock(), order='C')
assert bounding_box._order == 'C'
assert bounding_box.order == 'C'
bounding_box = self.BoundingDomain(mk.MagicMock(), order='F')
assert bounding_box._order == 'F'
assert bounding_box.order == 'F'
bounding_box._order = 'test'
assert bounding_box.order == 'test'
def test_ignored(self):
ignored = [0]
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = self.BoundingDomain(model, ignored=ignored)
assert bounding_box._ignored == ignored
assert bounding_box.ignored == ignored
def test__get_order(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Success (default 'C')
assert bounding_box._order == 'C'
assert bounding_box._get_order() == 'C'
assert bounding_box._get_order('C') == 'C'
assert bounding_box._get_order('F') == 'F'
# Success (default 'F')
bounding_box._order = 'F'
assert bounding_box._order == 'F'
assert bounding_box._get_order() == 'F'
assert bounding_box._get_order('C') == 'C'
assert bounding_box._get_order('F') == 'F'
# Error
order = mk.MagicMock()
with pytest.raises(ValueError) as err:
bounding_box._get_order(order)
assert str(err.value) ==\
"order must be either 'C' (C/python order) or " +\
f"'F' (Fortran/mathematical order), got: {order}."
def test__get_index(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass input name
assert bounding_box._get_index('x') == 0
assert bounding_box._get_index('y') == 1
# Pass invalid input name
with pytest.raises(ValueError) as err:
bounding_box._get_index('z')
assert str(err.value) ==\
"'z' is not one of the inputs: ('x', 'y')."
# Pass valid index
assert bounding_box._get_index(0) == 0
assert bounding_box._get_index(1) == 1
assert bounding_box._get_index(np.int32(0)) == 0
assert bounding_box._get_index(np.int32(1)) == 1
assert bounding_box._get_index(np.int64(0)) == 0
assert bounding_box._get_index(np.int64(1)) == 1
# Pass invalid index
MESSAGE = "Integer key: 2 must be non-negative and < 2."
with pytest.raises(IndexError) as err:
bounding_box._get_index(2)
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(np.int32(2))
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(np.int64(2))
assert str(err.value) == MESSAGE
with pytest.raises(IndexError) as err:
bounding_box._get_index(-1)
assert str(err.value) ==\
"Integer key: -1 must be non-negative and < 2."
# Pass invalid key
value = mk.MagicMock()
with pytest.raises(ValueError) as err:
bounding_box._get_index(value)
assert str(err.value) ==\
f"Key value: {value} must be string or integer."
def test__get_name(self):
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = self.BoundingDomain(model)
index = mk.MagicMock()
name = mk.MagicMock()
model.inputs = mk.MagicMock()
model.inputs.__getitem__.return_value = name
assert bounding_box._get_name(index) == name
assert model.inputs.__getitem__.call_args_list == [mk.call(index)]
def test_ignored_inputs(self):
model = mk.MagicMock()
ignored = list(range(4, 8))
model.n_inputs = 8
model.inputs = [mk.MagicMock() for _ in range(8)]
bounding_box = self.BoundingDomain(model, ignored=ignored)
inputs = bounding_box.ignored_inputs
assert isinstance(inputs, list)
for index, _input in enumerate(inputs):
assert _input in model.inputs
assert model.inputs[index + 4] == _input
for index, _input in enumerate(model.inputs):
if _input in inputs:
assert inputs[index - 4] == _input
else:
assert index < 4
def test__validate_ignored(self):
bounding_box = self.BoundingDomain(Gaussian2D())
# Pass
assert bounding_box._validate_ignored(None) == []
assert bounding_box._validate_ignored(['x', 'y']) == [0, 1]
assert bounding_box._validate_ignored([0, 1]) == [0, 1]
assert bounding_box._validate_ignored([np.int32(0), np.int64(1)]) == [0, 1]
# Fail
with pytest.raises(ValueError):
bounding_box._validate_ignored([mk.MagicMock()])
with pytest.raises(ValueError):
bounding_box._validate_ignored(['z'])
with pytest.raises(IndexError):
bounding_box._validate_ignored([3])
with pytest.raises(IndexError):
bounding_box._validate_ignored([np.int32(3)])
with pytest.raises(IndexError):
bounding_box._validate_ignored([np.int64(3)])
def test___call__(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
with pytest.raises(RuntimeError) as err:
bounding_box(*args, **kwargs)
assert str(err.value) ==\
"This bounding box is fixed by the model and does not have " +\
"adjustable parameters."
def test_fix_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
model = mk.MagicMock()
fixed_inputs = mk.MagicMock()
with pytest.raises(NotImplementedError) as err:
bounding_box.fix_inputs(model, fixed_inputs)
assert str(err.value) ==\
"This should be implemented by a child class."
def test__prepare_inputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
with pytest.raises(NotImplementedError) as err:
bounding_box.prepare_inputs(mk.MagicMock(), mk.MagicMock())
assert str(err.value) == \
"This has not been implemented for BoundingDomain."
def test__base_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Simple shape
input_shape = (13,)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
# Complex shape
input_shape = (13, 7)
output = bounding_box._base_output(input_shape, 0)
assert (output == 0).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, np.nan)
assert (np.isnan(output)).all()
assert output.shape == input_shape
output = bounding_box._base_output(input_shape, 14)
assert (output == 14).all()
assert output.shape == input_shape
def test__all_out_output(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
# Simple shape
model.n_outputs = 1
input_shape = (13,)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (1, 13)
assert output_unit is None
# Complex shape
model.n_outputs = 6
input_shape = (13, 7)
output, output_unit = bounding_box._all_out_output(input_shape, 0)
assert (np.array(output) == 0).all()
assert np.array(output).shape == (6, 13, 7)
assert output_unit is None
def test__modify_output(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
# Simple shape
with mk.patch.object(_BoundingDomain, '_base_output', autospec=True,
return_value=np.asanyarray(0)) as mkBase:
assert (np.array([1, 2, 3]) ==
bounding_box._modify_output([1, 2, 3], valid_index, input_shape, fill_value)).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
# Replacement
with mk.patch.object(_BoundingDomain, '_base_output', autospec=True,
return_value=np.array([1, 2, 3, 4, 5, 6])) as mkBase:
assert (np.array([7, 2, 8, 4, 9, 6]) ==
bounding_box._modify_output([7, 8, 9], np.array([[0, 2, 4]]), input_shape, fill_value)).all()
assert mkBase.call_args_list == [mk.call(input_shape, fill_value)]
def test__prepare_outputs(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
valid_outputs = [mk.MagicMock() for _ in range(3)]
effects = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(_BoundingDomain, '_modify_output', autospec=True,
side_effect=effects) as mkModify:
assert effects == bounding_box._prepare_outputs(valid_outputs, valid_index,
input_shape, fill_value)
assert mkModify.call_args_list == \
[mk.call(bounding_box, valid_outputs[idx], valid_index, input_shape, fill_value)
for idx in range(3)]
def test_prepare_outputs(self):
model = mk.MagicMock()
bounding_box = self.BoundingDomain(model)
valid_outputs = mk.MagicMock()
valid_index = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with mk.patch.object(_BoundingDomain, '_prepare_outputs', autospec=True) as mkPrepare:
# Reshape valid_outputs
model.n_outputs = 1
assert mkPrepare.return_value == \
bounding_box.prepare_outputs(valid_outputs, valid_index, input_shape, fill_value)
assert mkPrepare.call_args_list == \
[mk.call(bounding_box, [valid_outputs], valid_index, input_shape, fill_value)]
mkPrepare.reset_mock()
# No reshape valid_outputs
model.n_outputs = 2
assert mkPrepare.return_value == \
bounding_box.prepare_outputs(valid_outputs, valid_index, input_shape, fill_value)
assert mkPrepare.call_args_list == \
[mk.call(bounding_box, valid_outputs, valid_index, input_shape, fill_value)]
def test__get_valid_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# Don't get unit
assert bounding_box._get_valid_outputs_unit(mk.MagicMock(), False) is None
# Get unit from unitless
assert bounding_box._get_valid_outputs_unit(7, True) is None
# Get unit
assert bounding_box._get_valid_outputs_unit(25 * u.m, True) == u.m
def test__evaluate_model(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
valid_inputs = mk.MagicMock()
input_shape = mk.MagicMock()
valid_index = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
with mk.patch.object(_BoundingDomain, '_get_valid_outputs_unit',
autospec=True) as mkGet:
with mk.patch.object(_BoundingDomain, 'prepare_outputs',
autospec=True) as mkPrepare:
assert bounding_box._evaluate_model(evaluate, valid_inputs,
valid_index, input_shape,
fill_value, with_units) == \
(mkPrepare.return_value, mkGet.return_value)
assert mkPrepare.call_args_list == \
[mk.call(bounding_box, evaluate.return_value, valid_index,
input_shape, fill_value)]
assert mkGet.call_args_list == \
[mk.call(evaluate.return_value, with_units)]
assert evaluate.call_args_list == \
[mk.call(valid_inputs)]
def test__evaluate(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
input_shape = mk.MagicMock()
fill_value = mk.MagicMock()
with_units = mk.MagicMock()
valid_inputs = mk.MagicMock()
valid_index = mk.MagicMock()
effects = [(valid_inputs, valid_index, True), (valid_inputs, valid_index, False)]
with mk.patch.object(self.BoundingDomain, 'prepare_inputs', autospec=True,
side_effect=effects) as mkPrepare:
with mk.patch.object(_BoundingDomain, '_all_out_output',
autospec=True) as mkAll:
with mk.patch.object(_BoundingDomain, '_evaluate_model',
autospec=True) as mkEvaluate:
# all_out
assert bounding_box._evaluate(evaluate, inputs, input_shape,
fill_value, with_units) == \
mkAll.return_value
assert mkAll.call_args_list == \
[mk.call(bounding_box, input_shape, fill_value)]
assert mkEvaluate.call_args_list == []
assert mkPrepare.call_args_list == \
[mk.call(bounding_box, input_shape, inputs)]
mkAll.reset_mock()
mkPrepare.reset_mock()
# not all_out
assert bounding_box._evaluate(evaluate, inputs, input_shape,
fill_value, with_units) == \
mkEvaluate.return_value
assert mkAll.call_args_list == []
assert mkEvaluate.call_args_list == \
[mk.call(bounding_box, evaluate, valid_inputs, valid_index,
input_shape, fill_value, with_units)]
assert mkPrepare.call_args_list == \
[mk.call(bounding_box, input_shape, inputs)]
def test__set_outputs_unit(self):
bounding_box = self.BoundingDomain(mk.MagicMock())
# set no unit
assert 27 == bounding_box._set_outputs_unit(27, None)
# set unit
assert 27 * u.m == bounding_box._set_outputs_unit(27, u.m)
def test_evaluate(self):
bounding_box = self.BoundingDomain(Gaussian2D())
evaluate = mk.MagicMock()
inputs = mk.MagicMock()
fill_value = mk.MagicMock()
outputs = mk.MagicMock()
valid_outputs_unit = mk.MagicMock()
value = (outputs, valid_outputs_unit)
with mk.patch.object(_BoundingDomain, '_evaluate',
autospec=True, return_value=value) as mkEvaluate:
with mk.patch.object(_BoundingDomain, '_set_outputs_unit',
autospec=True) as mkSet:
with mk.patch.object(Model, 'input_shape', autospec=True) as mkShape:
with mk.patch.object(Model, 'bbox_with_units',
new_callable=mk.PropertyMock) as mkUnits:
assert tuple(mkSet.return_value) == \
bounding_box.evaluate(evaluate, inputs, fill_value)
assert mkSet.call_args_list == \
[mk.call(outputs, valid_outputs_unit)]
assert mkEvaluate.call_args_list == \
[mk.call(bounding_box, evaluate, inputs, mkShape.return_value,
fill_value, mkUnits.return_value)]
assert mkShape.call_args_list == \
[mk.call(bounding_box._model, inputs)]
assert mkUnits.call_args_list == [mk.call()]
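# ModelBoundingBox stores one _Interval per model input, keyed by input index,
# and honors 'C'/'F' ordering conventions.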
class TestModelBoundingBox:
def test_create(self):
intervals = ()
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'C'
# Set optional
intervals = {}
model = mk.MagicMock()
bounding_box = ModelBoundingBox(intervals, model, order='F')
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {}
assert bounding_box._model == model
assert bounding_box._ignored == []
assert bounding_box._order == 'F'
# Set interval
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = ModelBoundingBox(intervals, model)
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
# Set ignored
intervals = (1, 2)
model = mk.MagicMock()
model.n_inputs = 2
model.inputs = ['x', 'y']
bounding_box = ModelBoundingBox(intervals, model, ignored=[1])
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2)}
assert bounding_box._model == model
assert bounding_box._ignored == [1]
intervals = ((1, 2), (3, 4))
model = mk.MagicMock()
model.n_inputs = 3
model.inputs = ['x', 'y', 'z']
bounding_box = ModelBoundingBox(intervals, model, ignored=[2], order='F')
assert isinstance(bounding_box, _BoundingDomain)
assert bounding_box._intervals == {0: (1, 2), 1: (3, 4)}
assert bounding_box._model == model
assert bounding_box._ignored == [2]
assert bounding_box._order == 'F'
def test_copy(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4.5, 4.5), (-1.4, 1.4)))
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
assert bounding_box.ignored == copy.ignored
assert id(bounding_box.ignored) != id(copy.ignored)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
# Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
# Check interval objects
for index, interval in bounding_box.intervals.items():
assert interval == copy.intervals[index]
assert id(interval) != id(copy.intervals[index])
# Same float values will have the same id
assert interval.lower == copy.intervals[index].lower
assert id(interval.lower) == id(copy.intervals[index].lower)
# Same float values will have the same id
assert interval.upper == copy.intervals[index].upper
assert id(interval.upper) == id(copy.intervals[index].upper)
assert len(bounding_box.intervals) == len(copy.intervals)
assert bounding_box.intervals.keys() == copy.intervals.keys()
def test_intervals(self):
intervals = {0: _Interval(1, 2)}
model = mk.MagicMock()
model.n_inputs = 1
model.inputs = ['x']
bounding_box = ModelBoundingBox(intervals, model)
assert bounding_box._intervals == intervals
assert bounding_box.intervals == intervals
def test_named_intervals(self):
intervals = {idx: _Interval(idx, idx + 1) for idx in range(4)}
model = mk.MagicMock()
model.n_inputs = 4
model.inputs = [mk.MagicMock() for _ in range(4)]
bounding_box = ModelBoundingBox(intervals, model)
named = bounding_box.named_intervals
assert isinstance(named, dict)
for name, interval in named.items():
assert name in model.inputs
assert intervals[model.inputs.index(name)] == interval
for index, name in enumerate(model.inputs):
assert index in intervals
assert name in named
assert intervals[index] == named[name]
def test___repr__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.__repr__() ==\
"ModelBoundingBox(\n" +\
" intervals={\n" +\
" x: Interval(lower=-1, upper=1)\n" +\
" y: Interval(lower=-4, upper=4)\n" +\
" }\n" +\
" model=Gaussian2D(inputs=('x', 'y'))\n" +\
" order='C'\n" +\
")"
intervals = {0: _Interval(-1, 1)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'])
assert bounding_box.__repr__() ==\
"ModelBoundingBox(\n" +\
" intervals={\n" +\
" x: Interval(lower=-1, upper=1)\n" +\
" }\n" +\
" ignored=['y']\n" +\
" model=Gaussian2D(inputs=('x', 'y'))\n" +\
" order='C'\n" +\
")"
def test___len__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert len(bounding_box) == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert len(bounding_box) == 0 == len(bounding_box._intervals)
def test___contains__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Contains with keys
assert 'x' in bounding_box
assert 'y' in bounding_box
assert 'z' not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
# General not in
assert mk.MagicMock() not in bounding_box
# Contains with ignored
del bounding_box['y']
# Contains with keys
assert 'x' in bounding_box
assert 'y' in bounding_box
assert 'z' not in bounding_box
# Contains with index
assert 0 in bounding_box
assert 1 in bounding_box
assert 2 not in bounding_box
def test___getitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Get using input key
assert bounding_box['x'] == (-1, 1)
assert bounding_box['y'] == (-4, 4)
# Fail with input key
with pytest.raises(ValueError):
bounding_box['z']
# Get using index
assert bounding_box[0] == (-1, 1)
assert bounding_box[1] == (-4, 4)
assert bounding_box[np.int32(0)] == (-1, 1)
assert bounding_box[np.int32(1)] == (-4, 4)
assert bounding_box[np.int64(0)] == (-1, 1)
assert bounding_box[np.int64(1)] == (-4, 4)
# Fail with index
with pytest.raises(IndexError):
bounding_box[2]
with pytest.raises(IndexError):
bounding_box[np.int32(2)]
with pytest.raises(IndexError):
bounding_box[np.int64(2)]
# get ignored interval
del bounding_box[0]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == (-4, 4)
del bounding_box[1]
assert bounding_box[0] == _ignored_interval
assert bounding_box[1] == _ignored_interval
def test_bounding_box(self):
# 0D
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=['x'])
assert bounding_box.bounding_box() == (-np.inf, np.inf)
assert bounding_box.bounding_box('C') == (-np.inf, np.inf)
assert bounding_box.bounding_box('F') == (-np.inf, np.inf)
# 1D
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.bounding_box() == (-1, 1)
assert bounding_box.bounding_box(mk.MagicMock()) == (-1, 1)
# > 1D
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
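# 'C' order (the default) reverses the per-input intervals; 'F' keeps input order.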
assert bounding_box.bounding_box() == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box('C') == ((-4, 4), (-1, 1))
assert bounding_box.bounding_box('F') == ((-1, 1), (-4, 4))
def test___eq__(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == bounding_box
assert bounding_box == ModelBoundingBox.validate(model.copy(), intervals.copy())
assert bounding_box == (-1, 1)
assert not (bounding_box == mk.MagicMock())
assert not (bounding_box == (-2, 2))
assert not (bounding_box == ModelBoundingBox.validate(model, {0: _Interval(-2, 2)}))
# Respect ordering
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box_1 = ModelBoundingBox.validate(model, intervals)
bounding_box_2 = ModelBoundingBox.validate(model, intervals, order='F')
assert bounding_box_1._order == 'C'
assert bounding_box_1 == ((-4, 4), (-1, 1))
assert not (bounding_box_1 == ((-1, 1), (-4, 4)))
assert bounding_box_2._order == 'F'
assert not (bounding_box_2 == ((-4, 4), (-1, 1)))
assert bounding_box_2 == ((-1, 1), (-4, 4))
assert bounding_box_1 == bounding_box_2
# Respect ignored
model = Gaussian2D()
bounding_box_1._ignored = [mk.MagicMock()]
bounding_box_2._ignored = [mk.MagicMock()]
assert bounding_box_1._ignored != bounding_box_2._ignored
assert not (bounding_box_1 == bounding_box_2)
def test__setitem__(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, {}, ignored=[0, 1])
assert bounding_box._ignored == [0, 1]
# USING Intervals directly
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box['x'] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box['x'], _Interval)
assert bounding_box['x'] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box['y'] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box['y'], _Interval)
assert bounding_box['y'] == (-4, 4)
del bounding_box['x']
del bounding_box['y']
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = _Interval(-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = _Interval(-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
del bounding_box[0]
del bounding_box[1]
# USING tuples
# Set interval using key
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box['x'] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box['x'], _Interval)
assert bounding_box['x'] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box['y'] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box['y'], _Interval)
assert bounding_box['y'] == (-4, 4)
del bounding_box['x']
del bounding_box['y']
# Set interval using index
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
bounding_box[0] = (-1, 1)
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert isinstance(bounding_box[0], _Interval)
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
bounding_box[1] = (-4, 4)
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert isinstance(bounding_box[1], _Interval)
assert bounding_box[1] == (-4, 4)
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# USING Intervals directly
# Set interval using key
assert 'x' not in bounding_box
bounding_box['x'] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 'x' in bounding_box
assert isinstance(bounding_box['x'], _Interval)
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = _Interval(np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
# USING tuples
# Set interval using key
bounding_box._intervals = {}
assert 'x' not in bounding_box
bounding_box['x'] = (np.array([-1, -2]), np.array([1, 2]))
assert 'x' in bounding_box
assert isinstance(bounding_box['x'], _Interval)
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# Set interval using index
bounding_box._intervals = {}
assert 0 not in bounding_box
bounding_box[0] = (np.array([-1, -2]), np.array([1, 2]))
assert 0 in bounding_box
assert isinstance(bounding_box[0], _Interval)
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test___delitem__(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Using index
assert 0 in bounding_box.intervals
assert 0 not in bounding_box.ignored
assert 0 in bounding_box
assert 'x' in bounding_box
del bounding_box[0]
assert 0 not in bounding_box.intervals
assert 0 in bounding_box.ignored
assert 0 in bounding_box
assert 'x' in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError) as err:
del bounding_box[0]
assert str(err.value) ==\
"Cannot delete ignored input: 0!"
# Using key
assert 1 in bounding_box.intervals
assert 1 not in bounding_box.ignored
assert 0 in bounding_box
assert 'y' in bounding_box
del bounding_box['y']
assert 1 not in bounding_box.intervals
assert 1 in bounding_box.ignored
assert 0 in bounding_box
assert 'y' in bounding_box
# Delete an ignored item
with pytest.raises(RuntimeError) as err:
del bounding_box['y']
assert str(err.value) ==\
"Cannot delete ignored input: y!"
def test__validate_dict(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Input name keys
intervals = {'x': _Interval(-1, 1), 'y': _Interval(-4, 4)}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_dict(intervals)
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Input index
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
# name keys
intervals = {'x': _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 'x' not in bounding_box
bounding_box._validate_dict(intervals)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
# input index
bounding_box._intervals = {}
intervals = {0: _Interval(np.array([-1, -2]), np.array([1, 2]))}
assert 0 not in bounding_box
bounding_box._validate_dict(intervals)
assert 0 in bounding_box
assert (bounding_box[0].lower == np.array([-1, -2])).all()
assert (bounding_box[0].upper == np.array([1, 2])).all()
def test__validate_sequence(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# C order
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order='C')
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Fortran order
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Invalid order
bounding_box._intervals = {}
order = mk.MagicMock()
assert 'x' not in bounding_box
assert 'y' not in bounding_box
with pytest.raises(ValueError):
bounding_box._validate_sequence(((-4, 4), (-1, 1)), order=order)
assert 'x' not in bounding_box
assert 'y' not in bounding_box
assert len(bounding_box.intervals) == 0
def test__n_inputs(self):
model = Gaussian2D()
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box._n_inputs == 2
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'])
assert bounding_box._n_inputs == 1
bounding_box = ModelBoundingBox.validate(model, {}, ignored=['x', 'y'])
assert bounding_box._n_inputs == 0
bounding_box._ignored = ['x', 'y', 'z']
assert bounding_box._n_inputs == 0
def test__validate_iterable(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate_iterable(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 0 not in bounding_box
assert 1 not in bounding_box
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass with ignored
bounding_box._intervals = {}
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1)}
assert 0 not in bounding_box.intervals
bounding_box._validate_iterable(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
# Invalid iterable
bounding_box._intervals = {}
bounding_box._ignored = []
assert 'x' not in bounding_box
assert 'y' not in bounding_box
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(((-4, 4), (-1, 1), (-3, 3)))
assert str(err.value) ==\
"Found 3 intervals, but must have exactly 2."
assert len(bounding_box.intervals) == 0
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._ignored = [1]
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(intervals)
assert str(err.value) ==\
"Found 2 intervals, but must have exactly 1."
assert len(bounding_box.intervals) == 0
bounding_box._ignored = []
intervals = {0: _Interval(-1, 1)}
with pytest.raises(ValueError) as err:
bounding_box._validate_iterable(intervals)
assert str(err.value) ==\
"Found 1 intervals, but must have exactly 2."
assert 'x' not in bounding_box
assert 'y' not in bounding_box
assert len(bounding_box.intervals) == 0
def test__validate(self):
model = Gaussian2D()
bounding_box = ModelBoundingBox({}, model)
# Pass sequence Default order
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box._intervals = {}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(((-4, 4), (-1, 1)), order='F')
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
bounding_box._intervals = {}
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
assert 'x' not in bounding_box
assert 'y' not in bounding_box
bounding_box._validate(intervals)
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass single with ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox({}, model, ignored=[1])
assert 0 not in bounding_box.intervals
assert 1 not in bounding_box.intervals
bounding_box._validate(intervals)
assert 0 in bounding_box.intervals
assert bounding_box[0] == (-1, 1)
assert 1 not in bounding_box.intervals
assert len(bounding_box.intervals) == 1
# Pass single
model = Gaussian1D()
bounding_box = ModelBoundingBox({}, model)
assert 'x' not in bounding_box
bounding_box._validate((-1, 1))
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
bounding_box = ModelBoundingBox({}, model)
sequence = (np.array([-1, -2]), np.array([1, 2]))
assert 'x' not in bounding_box
bounding_box._validate(sequence)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
def test_validate(self):
model = Gaussian2D()
kwargs = {'test': mk.MagicMock()}
# Pass sequence Default order
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == (-4, 4)
assert len(bounding_box.intervals) == 2
# Pass sequence
bounding_box = ModelBoundingBox.validate(model, ((-4, 4), (-1, 1)), order='F', **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-4, 4)
assert 'y' in bounding_box
assert bounding_box['y'] == (-1, 1)
assert len(bounding_box.intervals) == 2
# Pass Dict
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
bounding_box = ModelBoundingBox.validate(model, intervals, order='F', **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == 'F'
# Pass ModelBoundingBox
bbox = bounding_box
bounding_box = ModelBoundingBox.validate(model, bbox, **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 0 in bounding_box
assert bounding_box[0] == (-1, 1)
assert 1 in bounding_box
assert bounding_box[1] == (-4, 4)
assert len(bounding_box.intervals) == 2
assert bounding_box.order == 'F'
# Pass single ignored
intervals = {0: _Interval(-1, 1)}
bounding_box = ModelBoundingBox.validate(model, intervals, ignored=['y'], **kwargs)
assert (bounding_box._model.parameters == model.parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert 'y' in bounding_box
assert bounding_box['y'] == _ignored_interval
assert len(bounding_box.intervals) == 1
# Pass single
bounding_box = ModelBoundingBox.validate(Gaussian1D(), (-1, 1), **kwargs)
assert (bounding_box._model.parameters == Gaussian1D().parameters).all()
assert 'x' in bounding_box
assert bounding_box['x'] == (-1, 1)
assert len(bounding_box.intervals) == 1
# Model set support
model = Gaussian1D([0.1, 0.2], [0, 0], [5, 7], n_models=2)
sequence = (np.array([-1, -2]), np.array([1, 2]))
bounding_box = ModelBoundingBox.validate(model, sequence, **kwargs)
assert 'x' in bounding_box
assert (bounding_box['x'].lower == np.array([-1, -2])).all()
assert (bounding_box['x'].upper == np.array([1, 2])).all()
def test_fix_inputs(self):
bounding_box = ModelBoundingBox.validate(Gaussian2D(), ((-4, 4), (-1, 1)))
# keep_ignored = False (default)
new_bounding_box = bounding_box.fix_inputs(Gaussian1D(), {1: mk.MagicMock()})
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian1D().parameters).all()
assert 'x' in new_bounding_box
assert new_bounding_box['x'] == (-1, 1)
assert 'y' not in new_bounding_box
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == []
# keep_ignored = True
new_bounding_box = bounding_box.fix_inputs(Gaussian2D(), {1: mk.MagicMock()},
_keep_ignored=True)
assert not (bounding_box == new_bounding_box)
assert (new_bounding_box._model.parameters == Gaussian2D().parameters).all()
assert 'x' in new_bounding_box
assert new_bounding_box['x'] == (-1, 1)
assert 'y' in new_bounding_box
assert 'y' in new_bounding_box.ignored_inputs
assert len(new_bounding_box.intervals) == 1
assert new_bounding_box.ignored == [1]
def test_dimension(self):
intervals = {0: _Interval(-1, 1)}
model = Gaussian1D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 1 == len(bounding_box._intervals)
intervals = {0: _Interval(-1, 1), 1: _Interval(-4, 4)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
assert bounding_box.dimension == 2 == len(bounding_box._intervals)
bounding_box._intervals = {}
assert bounding_box.dimension == 0 == len(bounding_box._intervals)
def test_domain(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# test defaults
assert (np.array(bounding_box.domain(0.25)) ==
np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])).all()
# test C order
assert (np.array(bounding_box.domain(0.25, 'C')) ==
np.array([np.linspace(0, 2, 9), np.linspace(-1, 1, 9)])).all()
# test Fortran order
assert (np.array(bounding_box.domain(0.25, 'F')) ==
np.array([np.linspace(-1, 1, 9), np.linspace(0, 2, 9)])).all()
# test error order
order = mk.MagicMock()
with pytest.raises(ValueError):
bounding_box.domain(0.25, order)
def test__outside(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False for _ in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index ==
[True, True, True, True,
False, False, False, False, False,
True, True, True, True]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True for _ in range(13)]).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [False]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
outside_index, all_out = bounding_box._outside(input_shape, inputs)
assert (outside_index == [True]).all()
assert all_out and isinstance(all_out, bool)
def test__valid_index(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [idx for idx in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
valid_index, all_out = bounding_box._valid_index(input_shape, inputs)
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
def test_prepare_inputs(self):
intervals = {0: _Interval(-1, 1), 1: _Interval(0, 2)}
model = Gaussian2D()
bounding_box = ModelBoundingBox.validate(model, intervals)
# Normal array input, all inside
x = np.linspace(-1, 1, 13)
y = np.linspace(0, 2, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) == np.array(inputs)).all()
assert len(valid_index) == 1
assert (valid_index[0] == [idx for idx in range(13)]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, some inside and some outside
x = np.linspace(-2, 1, 13)
y = np.linspace(0, 3, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) ==
np.array(
[
[x[4], x[5], x[6], x[7], x[8]],
[y[4], y[5], y[6], y[7], y[8]],
]
)).all()
assert len(valid_index) == 1
assert (valid_index[0] == [4, 5, 6, 7, 8]).all()
assert not all_out and isinstance(all_out, bool)
# Normal array input, all outside
x = np.linspace(2, 3, 13)
y = np.linspace(-2, -1, 13)
input_shape = x.shape
inputs = (x, y)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
# Scalar input inside bounding_box
inputs = (0.5, 0.5)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert (np.array(new_inputs) == np.array([[0.5], [0.5]])).all()
assert len(valid_index) == 1
assert (valid_index[0] == [0]).all()
assert not all_out and isinstance(all_out, bool)
# Scalar input outside bounding_box
inputs = (2, -1)
input_shape = (1,)
new_inputs, valid_index, all_out = bounding_box.prepare_inputs(input_shape, inputs)
assert new_inputs == ()
assert len(valid_index) == 1
assert (valid_index[0] == []).all()
assert all_out and isinstance(all_out, bool)
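# _SelectorArgument pairs an input index with an ignore flag for bounding-box selection.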
class Test_SelectorArgument:
def test_create(self):
index = mk.MagicMock()
ignore = mk.MagicMock()
argument = _SelectorArgument(index, ignore)
assert isinstance(argument, _BaseSelectorArgument)
assert argument.index == index
assert argument.ignore == ignore
assert argument == (index, ignore)
def test_validate(self):
model = Gaussian2D()
# default integer
assert _SelectorArgument.validate(model, 0) == (0, True)
assert _SelectorArgument.validate(model, 1) == (1, True)
# default string
assert _SelectorArgument.validate(model, 'x') == (0, True)
assert _SelectorArgument.validate(model, 'y') == (1, True)
ignore = mk.MagicMock()
# non-default integer
assert _SelectorArgument.validate(model, 0, ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 1, ignore) == (1, ignore)
# non-default string
assert _SelectorArgument.validate(model, 'x', ignore) == (0, ignore)
assert _SelectorArgument.validate(model, 'y', ignore) == (1, ignore)
# Fail
with pytest.raises(ValueError):
_SelectorArgument.validate(model, 'z')
with pytest.raises(ValueError):
_SelectorArgument.validate(model, mk.MagicMock())
with pytest.raises(IndexError):
_SelectorArgument.validate(model, 2)
def test_get_selector(self):
# single inputs
inputs = [idx + 17 for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
# numpy array of single inputs
inputs = [np.array([idx + 11]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
inputs = [np.asanyarray(idx + 13) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == inputs[index]
# multi entry numpy array
inputs = [np.array([idx + 27, idx - 31]) for idx in range(3)]
for index in range(3):
assert _SelectorArgument(index, mk.MagicMock()).get_selector(*inputs) == tuple(inputs[index])
def test_name(self):
model = Gaussian2D()
for index in range(model.n_inputs):
assert _SelectorArgument(index, mk.MagicMock()).name(model) == model.inputs[index]
def test_pretty_repr(self):
model = Gaussian2D()
assert _SelectorArgument(0, False).pretty_repr(model) ==\
"Argument(name='x', ignore=False)"
assert _SelectorArgument(0, True).pretty_repr(model) ==\
"Argument(name='x', ignore=True)"
assert _SelectorArgument(1, False).pretty_repr(model) ==\
"Argument(name='y', ignore=False)"
assert _SelectorArgument(1, True).pretty_repr(model) ==\
"Argument(name='y', ignore=True)"
def test_get_fixed_value(self):
model = Gaussian2D()
values = {0: 5, 'y': 7}
# Get index value
assert _SelectorArgument(0, mk.MagicMock()).get_fixed_value(model, values) == 5
# Get name value
assert _SelectorArgument(1, mk.MagicMock()).get_fixed_value(model, values) == 7
# Fail
values = {0: 5}
with pytest.raises(RuntimeError) as err:
_SelectorArgument(1, True).get_fixed_value(model, values)
assert str(err.value) == \
"Argument(name='y', ignore=True) was not found in {0: 5}"
def test_is_argument(self):
model = Gaussian2D()
argument = _SelectorArgument.validate(model, 0)
# Is true
assert argument.is_argument(model, 0) is True
assert argument.is_argument(model, 'x') is True
# Is false
assert argument.is_argument(model, 1) is False
assert argument.is_argument(model, 'y') is False
# Fail
with pytest.raises(ValueError):
argument.is_argument(model, 'z')
with pytest.raises(ValueError):
argument.is_argument(model, mk.MagicMock())
with pytest.raises(IndexError):
argument.is_argument(model, 2)
def test_named_tuple(self):
model = Gaussian2D()
for index in range(model.n_inputs):
ignore = mk.MagicMock()
assert _SelectorArgument(index, ignore).named_tuple(model) == \
(model.inputs[index], ignore)
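# _SelectorArguments is a tuple of _SelectorArgument entries plus a kept-ignore list.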
class Test_SelectorArguments:
def test_create(self):
arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == []
kept_ignore = mk.MagicMock()
arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)), kept_ignore)
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments._kept_ignore == kept_ignore
def test_pretty_repr(self):
model = Gaussian2D()
arguments = _SelectorArguments((_SelectorArgument(0, True), _SelectorArgument(1, False)))
assert arguments.pretty_repr(model) ==\
"SelectorArguments(\n" +\
" Argument(name='x', ignore=True)\n" +\
" Argument(name='y', ignore=False)\n" +\
")"
def test_ignore(self):
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, True))).ignore == [0, 1]
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, True)), [13, 4]).ignore == [0, 1, 13, 4]
assert _SelectorArguments((_SelectorArgument(0, True),
_SelectorArgument(1, False))).ignore == [0]
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, True))).ignore == [1]
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, False))).ignore == []
assert _SelectorArguments((_SelectorArgument(0, False),
_SelectorArgument(1, False)), [17, 14]).ignore == [17, 14]
def test_validate(self):
# Integer key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0, True), (1, False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Default ignore
arguments = _SelectorArguments.validate(Gaussian2D(), ((0,), (1,)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, True))
assert arguments.kept_ignore == []
# String key and passed ignore
arguments = _SelectorArguments.validate(Gaussian2D(), (('x', True), ('y', False)))
assert isinstance(arguments, _SelectorArguments)
assert arguments == ((0, True), (1, False))
assert arguments.kept_ignore == []
# Test kept_ignore option
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments, [11, 5, 8])
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [11, 5, 8]
arguments._kept_ignore = [13, 17, 14]
new_arguments = _SelectorArguments.validate(Gaussian2D(), arguments)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True), (1, False))
assert new_arguments.kept_ignore == [13, 17, 14]
# Invalid, bad argument
with pytest.raises(ValueError):
_SelectorArguments.validate(Gaussian2D(), ((0, True), ('z', False)))
with pytest.raises(ValueError):
_SelectorArguments.validate(Gaussian2D(), ((mk.MagicMock(), True), (1, False)))
with pytest.raises(IndexError):
_SelectorArguments.validate(Gaussian2D(), ((0, True), (2, False)))
# Invalid, repeated argument
with pytest.raises(ValueError) as err:
_SelectorArguments.validate(Gaussian2D(), ((0, True), (0, False)))
assert str(err.value) == \
"Input: 'x' has been repeated."
# Invalid, no arguments
with pytest.raises(ValueError) as err:
_SelectorArguments.validate(Gaussian2D(), ())
assert str(err.value) == \
"There must be at least one selector argument."
def test_get_selector(self):
inputs = [idx + 19 for idx in range(4)]
assert _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).get_selector(*inputs) ==\
tuple(inputs[:2])
assert _SelectorArguments.validate(Gaussian2D(),
((1, True), (0, False))).get_selector(*inputs) ==\
tuple(inputs[:2][::-1])
assert _SelectorArguments.validate(Gaussian2D(),
((1, False),)).get_selector(*inputs) ==\
(inputs[1],)
assert _SelectorArguments.validate(Gaussian2D(),
((0, True),)).get_selector(*inputs) ==\
(inputs[0],)
def test_is_selector(self):
# Is Selector
assert _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5, 2.5))
assert _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector((0.5,))
# Is not selector
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5, 2.5, 3.5))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector((0.5,))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True), (1, False))).is_selector(0.5)
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector((0.5, 2.5))
assert not _SelectorArguments.validate(Gaussian2D(),
((0, True),)).is_selector(2.5)
def test_get_fixed_values(self):
model = Gaussian2D()
assert _SelectorArguments.validate(model,
((0, True), (1, False))).get_fixed_values(model, {0: 11, 1: 7}) \
== (11, 7)
assert _SelectorArguments.validate(model,
((0, True), (1, False))).get_fixed_values(model, {0: 5, 'y': 47}) \
== (5, 47)
assert _SelectorArguments.validate(model,
((0, True), (1, False))).get_fixed_values(model, {'x': 2, 'y': 9}) \
== (2, 9)
assert _SelectorArguments.validate(model,
((0, True), (1, False))).get_fixed_values(model, {'x': 12, 1: 19}) \
== (12, 19)
def test_is_argument(self):
model = Gaussian2D()
# Is true
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
        assert arguments.is_argument(model, 0)
        assert arguments.is_argument(model, 'x')
        assert arguments.is_argument(model, 1)
        assert arguments.is_argument(model, 'y')
# Is true and false
arguments = _SelectorArguments.validate(model, ((0, True),))
        assert arguments.is_argument(model, 0)
        assert arguments.is_argument(model, 'x')
        assert not arguments.is_argument(model, 1)
        assert not arguments.is_argument(model, 'y')
arguments = _SelectorArguments.validate(model, ((1, False),))
        assert not arguments.is_argument(model, 0)
        assert not arguments.is_argument(model, 'x')
        assert arguments.is_argument(model, 1)
        assert arguments.is_argument(model, 'y')
def test_selector_index(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.selector_index(model, 0) == 0
assert arguments.selector_index(model, 'x') == 0
assert arguments.selector_index(model, 1) == 1
assert arguments.selector_index(model, 'y') == 1
arguments = _SelectorArguments.validate(model, ((1, True), (0, False)))
assert arguments.selector_index(model, 0) == 1
assert arguments.selector_index(model, 'x') == 1
assert arguments.selector_index(model, 1) == 0
assert arguments.selector_index(model, 'y') == 0
# Error
arguments = _SelectorArguments.validate(model, ((0, True),))
with pytest.raises(ValueError) as err:
arguments.selector_index(model, 'y')
assert str(err.value) ==\
"y does not correspond to any selector argument."
def test_add_ignore(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), ))
assert arguments == ((0, True),)
assert arguments._kept_ignore == []
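        # add_ignore returns a new object; equality compares only the argument tuples,
        # so the original's _kept_ignore list stays empty.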
new_arguments0 = arguments.add_ignore(model, 1)
assert new_arguments0 == arguments
assert new_arguments0._kept_ignore == [1]
assert arguments._kept_ignore == []
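        # Ignoring the same input a second time ('y' is index 1) appends its index
        # again, so kept_ignore accumulates duplicates.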
new_arguments1 = new_arguments0.add_ignore(model, 'y')
assert new_arguments1 == arguments == new_arguments0
assert new_arguments0._kept_ignore == [1]
assert new_arguments1._kept_ignore == [1, 1]
assert arguments._kept_ignore == []
# Error
with pytest.raises(ValueError) as err:
arguments.add_ignore(model, 0)
assert str(err.value) ==\
"0: is a selector argument and cannot be ignored."
def test_reduce(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
new_arguments = arguments.reduce(model, 0)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 'x')
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((1, False),)
assert new_arguments._kept_ignore == [0]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 1)
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
new_arguments = arguments.reduce(model, 'y')
assert isinstance(new_arguments, _SelectorArguments)
assert new_arguments == ((0, True),)
assert new_arguments._kept_ignore == [1]
assert arguments._kept_ignore == []
def test_named_tuple(self):
model = Gaussian2D()
arguments = _SelectorArguments.validate(model, ((0, True), (1, False)))
assert arguments.named_tuple(model) == (('x', True), ('y', False))
class TestCompoundBoundingBox:
def test_create(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args, create_selector, order='F')
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'F'
def test_copy(self):
bounding_box = CompoundBoundingBox.validate(Gaussian2D(), {(1,): (-1.5, 1.3), (2,): (-2.7, 2.4)},
((0, True),), mk.MagicMock())
copy = bounding_box.copy()
assert bounding_box == copy
assert id(bounding_box) != id(copy)
# model is not copied to prevent infinite recursion
assert bounding_box._model == copy._model
assert id(bounding_box._model) == id(copy._model)
        # Same string values will have the same id
assert bounding_box._order == copy._order
assert id(bounding_box._order) == id(copy._order)
assert bounding_box._create_selector == copy._create_selector
assert id(bounding_box._create_selector) != id(copy._create_selector)
# Check selector_args
for index, argument in enumerate(bounding_box.selector_args):
assert argument == copy.selector_args[index]
assert id(argument) != id(copy.selector_args[index])
            # Same integer values will have the same id
assert argument.index == copy.selector_args[index].index
assert id(argument.index) == id(copy.selector_args[index].index)
            # Same boolean values will have the same id
assert argument.ignore == copy.selector_args[index].ignore
assert id(argument.ignore) == id(copy.selector_args[index].ignore)
assert len(bounding_box.selector_args) == len(copy.selector_args)
# Check bounding_boxes
for selector, bbox in bounding_box.bounding_boxes.items():
assert bbox == copy.bounding_boxes[selector]
assert id(bbox) != id(copy.bounding_boxes[selector])
assert bbox.ignored == copy.bounding_boxes[selector].ignored
assert id(bbox.ignored) != id(copy.bounding_boxes[selector].ignored)
# model is not copied to prevent infinite recursion
assert bbox._model == copy.bounding_boxes[selector]._model
assert id(bbox._model) == id(copy.bounding_boxes[selector]._model)
            # Same string values will have the same id
assert bbox._order == copy.bounding_boxes[selector]._order
assert id(bbox._order) == id(copy.bounding_boxes[selector]._order)
# Check interval objects
for index, interval in bbox.intervals.items():
assert interval == copy.bounding_boxes[selector].intervals[index]
assert id(interval) != id(copy.bounding_boxes[selector].intervals[index])
                # Same float values will have the same id
assert interval.lower == copy.bounding_boxes[selector].intervals[index].lower
assert id(interval.lower) == id(copy.bounding_boxes[selector].intervals[index].lower)
                # Same float values will have the same id
assert interval.upper == copy.bounding_boxes[selector].intervals[index].upper
assert id(interval.upper) == id(copy.bounding_boxes[selector].intervals[index].upper)
assert len(bbox.intervals) == len(copy.bounding_boxes[selector].intervals)
assert bbox.intervals.keys() == copy.bounding_boxes[selector].intervals.keys()
assert len(bounding_box.bounding_boxes) == len(copy.bounding_boxes)
assert bounding_box.bounding_boxes.keys() == copy.bounding_boxes.keys()
def test___repr__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box.__repr__() ==\
"CompoundBoundingBox(\n" + \
" bounding_boxes={\n" + \
" (1,) = ModelBoundingBox(\n" + \
" intervals={\n" + \
" x: Interval(lower=-1, upper=1)\n" + \
" }\n" + \
" model=Gaussian2D(inputs=('x', 'y'))\n" + \
" order='C'\n" + \
" )\n" + \
" (2,) = ModelBoundingBox(\n" + \
" intervals={\n" + \
" x: Interval(lower=-2, upper=2)\n" + \
" }\n" + \
" model=Gaussian2D(inputs=('x', 'y'))\n" + \
" order='C'\n" + \
" )\n" + \
" }\n" + \
" selector_args = SelectorArguments(\n" + \
" Argument(name='x', ignore=True)\n" + \
" )\n" + \
")"
def test_bounding_boxes(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box.bounding_boxes == bounding_boxes
def test_selector_args(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_box = CompoundBoundingBox({}, model, selector_args)
# Get
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
# Set
selector_args = ((1, False),)
with pytest.warns(RuntimeWarning, match=r"Overriding selector_args.*"):
bounding_box.selector_args = selector_args
assert bounding_box._selector_args == selector_args
assert bounding_box.selector_args == selector_args
def test_create_selector(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1,),), create_selector)
assert bounding_box._create_selector == create_selector
assert bounding_box.create_selector == create_selector
def test__get_selector_key(self):
bounding_box = CompoundBoundingBox({}, Gaussian2D(), ((1, True),))
assert len(bounding_box.bounding_boxes) == 0
        # Singular
assert bounding_box._get_selector_key(5) == (5,)
assert bounding_box._get_selector_key((5,)) == (5,)
assert bounding_box._get_selector_key([5]) == (5,)
assert bounding_box._get_selector_key(np.asanyarray(5)) == (5,)
assert bounding_box._get_selector_key(np.array([5])) == (5,)
        # Multiple
assert bounding_box._get_selector_key((5, 19)) == (5, 19)
assert bounding_box._get_selector_key([5, 19]) == (5, 19)
assert bounding_box._get_selector_key(np.array([5, 19])) == (5, 19)
def test___setitem__(self):
model = Gaussian2D()
# Ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, True),), order='F')
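        # The selector input ('y', index 1) is ignored, so each box only needs an
        # interval for the remaining input.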
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15, )] = (-15, 15)
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == (-15, 15)
assert bounding_box._bounding_boxes[(15,)].order == 'F'
# Invalid key
assert (7, 13) not in bounding_box._bounding_boxes
with pytest.raises(ValueError) as err:
bounding_box[(7, 13)] = (-7, 7)
assert str(err.value) == \
"(7, 13) is not a selector!"
assert (7, 13) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
        assert (13,) not in bounding_box._bounding_boxes
with pytest.raises(ValueError):
bounding_box[(13,)] = ((-13, 13), (-3, 3))
        assert (13,) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# No ignored argument
bounding_box = CompoundBoundingBox({}, model, ((1, False),), order='F')
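        # With ignore=False the selector input keeps its interval, so a full 2-D
        # bounding box is required.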
assert len(bounding_box.bounding_boxes) == 0
# Valid
bounding_box[(15, )] = ((-15, 15), (-6, 6))
assert len(bounding_box.bounding_boxes) == 1
assert (15,) in bounding_box._bounding_boxes
assert isinstance(bounding_box._bounding_boxes[(15,)], ModelBoundingBox)
assert bounding_box._bounding_boxes[(15,)] == ((-15, 15), (-6, 6))
assert bounding_box._bounding_boxes[(15,)].order == 'F'
# Invalid key
assert (14, 11) not in bounding_box._bounding_boxes
with pytest.raises(ValueError) as err:
bounding_box[(14, 11)] = ((-7, 7), (-12, 12))
assert str(err.value) == \
"(14, 11) is not a selector!"
assert (14, 11) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
# Invalid bounding box
        assert (13,) not in bounding_box._bounding_boxes
with pytest.raises(ValueError):
bounding_box[(13,)] = (-13, 13)
        assert (13,) not in bounding_box._bounding_boxes
assert len(bounding_box.bounding_boxes) == 1
def test__validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
# Tuple selector_args
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox({}, model, selector_args)
bounding_box._validate(bounding_boxes)
for _selector, bbox in bounding_boxes.items():
assert _selector in bounding_box._bounding_boxes
assert bounding_box._bounding_boxes[_selector] == bbox
for _selector, bbox in bounding_box._bounding_boxes.items():
assert _selector in bounding_boxes
assert bounding_boxes[_selector] == bbox
assert isinstance(bbox, ModelBoundingBox)
assert bounding_box._bounding_boxes == bounding_boxes
def test___eq__(self):
bounding_box_1 = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),))
bounding_box_2 = CompoundBoundingBox({(1,): (-1, 1), (2,): (-2, 2)}, Gaussian2D(), ((0, True),))
# Equal
assert bounding_box_1 == bounding_box_2
# Not equal to non-compound bounding_box
assert not bounding_box_1 == mk.MagicMock()
assert not bounding_box_2 == mk.MagicMock()
# Not equal bounding_boxes
bounding_box_2[(15,)] = (-15, 15)
assert not bounding_box_1 == bounding_box_2
del bounding_box_2._bounding_boxes[(15,)]
assert bounding_box_1 == bounding_box_2
# Not equal selector_args
bounding_box_2._selector_args = _SelectorArguments.validate(Gaussian2D(), ((0, False),))
assert not bounding_box_1 == bounding_box_2
bounding_box_2._selector_args = _SelectorArguments.validate(Gaussian2D(), ((0, True),))
assert bounding_box_1 == bounding_box_2
# Not equal create_selector
bounding_box_2._create_selector = mk.MagicMock()
assert not bounding_box_1 == bounding_box_2
def test_validate(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
create_selector = mk.MagicMock()
# Fail selector_args
with pytest.raises(ValueError) as err:
CompoundBoundingBox.validate(model, bounding_boxes)
assert str(err.value) ==\
"Selector arguments must be provided (can be passed as part of bounding_box argument)!"
# Normal validate
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args,
create_selector, order='F')
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'F'
# Re-validate
new_bounding_box = CompoundBoundingBox.validate(model, bounding_box)
assert bounding_box == new_bounding_box
assert new_bounding_box._order == 'F'
# Default order
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args,
create_selector)
assert (bounding_box._model.parameters == model.parameters).all()
assert bounding_box._selector_args == selector_args
assert bounding_box._bounding_boxes == bounding_boxes
assert bounding_box._create_selector == create_selector
assert bounding_box._order == 'C'
def test___contains__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
assert (1,) in bounding_box
assert (2,) in bounding_box
assert (3,) not in bounding_box
assert 1 not in bounding_box
assert 2 not in bounding_box
def test__create_bounding_box(self):
model = Gaussian2D()
create_selector = mk.MagicMock()
bounding_box = CompoundBoundingBox({}, model, ((1, False),),
create_selector)
# Create is successful
create_selector.return_value = ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 0
bbox = bounding_box._create_bounding_box((7,))
assert isinstance(bbox, ModelBoundingBox)
assert bbox == ((-15, 15), (-23, 23))
assert len(bounding_box._bounding_boxes) == 1
assert (7,) in bounding_box
assert isinstance(bounding_box[(7,)], ModelBoundingBox)
assert bounding_box[(7,)] == bbox
# Create is unsuccessful
create_selector.return_value = (-42, 42)
with pytest.raises(ValueError):
bounding_box._create_bounding_box((27,))
def test___getitem__(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
# already exists
assert isinstance(bounding_box[1], ModelBoundingBox)
assert bounding_box[1] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[2] == (-2, 2)
assert isinstance(bounding_box[(1,)], ModelBoundingBox)
assert bounding_box[(1,)] == (-1, 1)
assert isinstance(bounding_box[(2,)], ModelBoundingBox)
assert bounding_box[(2,)] == (-2, 2)
# no selector
with pytest.raises(RuntimeError) as err:
bounding_box[(3,)]
assert str(err.value) == \
"No bounding box is defined for selector: (3,)."
# Create a selector
bounding_box._create_selector = mk.MagicMock()
with mk.patch.object(CompoundBoundingBox, '_create_bounding_box',
autospec=True) as mkCreate:
assert bounding_box[(3,)] == mkCreate.return_value
assert mkCreate.call_args_list == \
[mk.call(bounding_box, (3,))]
def test__select_bounding_box(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
inputs = [mk.MagicMock() for _ in range(3)]
with mk.patch.object(_SelectorArguments, 'get_selector',
autospec=True) as mkSelector:
with mk.patch.object(CompoundBoundingBox, '__getitem__',
autospec=True) as mkGet:
assert bounding_box._select_bounding_box(inputs) == mkGet.return_value
assert mkGet.call_args_list == \
[mk.call(bounding_box, mkSelector.return_value)]
assert mkSelector.call_args_list == \
[mk.call(bounding_box.selector_args, *inputs)]
def test_prepare_inputs(self):
model = Gaussian2D()
selector_args = ((0, True),)
bounding_boxes = {(1,): (-1, 1), (2,): (-2, 2)}
bounding_box = CompoundBoundingBox(bounding_boxes, model, selector_args)
input_shape = mk.MagicMock()
with mk.patch.object(ModelBoundingBox, 'prepare_inputs',
autospec=True) as mkPrepare:
assert bounding_box.prepare_inputs(input_shape, [1, 2, 3]) == mkPrepare.return_value
assert mkPrepare.call_args_list == \
[mk.call(bounding_box[(1,)], input_shape, [1, 2, 3])]
mkPrepare.reset_mock()
assert bounding_box.prepare_inputs(input_shape, [2, 2, 3]) == mkPrepare.return_value
assert mkPrepare.call_args_list == \
[mk.call(bounding_box[(2,)], input_shape, [2, 2, 3])]
mkPrepare.reset_mock()
def test__matching_bounding_boxes(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {(1,): ((-1, 1), (-2, 2)), (2,): ((-2, 2), (-3, 3)), (3,): ((-3, 3), (-4, 4))}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes('x', value)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {(1, 3): ((-1, 1), (-2, 2)), (2, 2): ((-2, 2), (-3, 3)), (3, 1): ((-3, 3), (-4, 4))}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
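        # Each key pairs x=value with y=(4 - value), so fixing 'x' leaves a box
        # keyed on the complementary value.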
for value in [1, 2, 3]:
matching = bounding_box._matching_bounding_boxes('x', value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
matching = bounding_box._matching_bounding_boxes('y', value)
assert isinstance(matching, dict)
assert (4 - value,) in matching
bbox = matching[(4 - value,)]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'y' in bbox
assert 'y' in bbox.ignored_inputs
assert 'x' in bbox
assert bbox['x'] == (-(5 - value), (5 - value))
assert len(bbox.intervals) == 1
assert bbox.ignored == [1]
        # Realistic fix_inputs case: selecting on a slit_id input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args=[('slit_id', True)], order='F')
matching = bounding_box._matching_bounding_boxes('slit_id', 0)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
matching = bounding_box._matching_bounding_boxes('slit_id', 1)
assert isinstance(matching, dict)
assert () in matching
bbox = matching[()]
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 3047.5),
'y': (-0.5, 4047.5)}
assert bbox.order == 'F'
# Errors
with pytest.raises(ValueError) as err:
bounding_box._matching_bounding_boxes('slit_id', 2)
assert str(err.value) ==\
"Attempting to fix input slit_id, but there are no bounding boxes for argument value 2."
def test__fix_input_selector_arg(self):
# Single selector index
selector_args = ((0, False),)
bounding_boxes = {(1,): ((-1, 1), (-2, 2)), (2,): ((-2, 2), (-3, 3)), (3,): ((-3, 3), (-4, 4))}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg('x', value)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox
assert 'x' in bbox.ignored_inputs
assert 'y' in bbox
assert bbox['y'] == (-value, value)
assert len(bbox.intervals) == 1
assert bbox.ignored == [0]
# Multiple selector index
selector_args = ((0, False), (1, False))
bounding_boxes = {(1, 3): ((-1, 1), (-2, 2)), (2, 2): ((-2, 2), (-3, 3)), (3, 1): ((-3, 3), (-4, 4))}
bounding_box = CompoundBoundingBox(bounding_boxes, Gaussian2D(), selector_args)
for value in [1, 2, 3]:
bbox = bounding_box._fix_input_selector_arg('x', value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((1, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert 'x' in bbox_selector
assert 'x' in bbox_selector.ignored_inputs
assert 'y' in bbox_selector
assert bbox_selector['y'] == (-value, value)
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [0]
bbox = bounding_box._fix_input_selector_arg('y', value)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == Gaussian2D().parameters).all()
assert bbox.selector_args == ((0, False),)
assert (4 - value,) in bbox
bbox_selector = bbox[(4 - value,)]
assert isinstance(bbox_selector, ModelBoundingBox)
assert (bbox_selector._model.parameters == Gaussian2D().parameters).all()
assert 'y' in bbox_selector
assert 'y' in bbox_selector.ignored_inputs
assert 'x' in bbox_selector
assert bbox_selector['x'] == (-(5 - value), (5 - value))
assert len(bbox_selector.intervals) == 1
assert bbox_selector.ignored == [1]
        # Realistic fix_inputs case: selecting on a slit_id input
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args=[('slit_id', True)], order='F')
bbox = bounding_box._fix_input_selector_arg('slit_id', 0)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
bbox = bounding_box._fix_input_selector_arg('slit_id', 1)
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.ignored_inputs == ['slit_id']
assert bbox.named_intervals == {'x': (-0.5, 3047.5),
'y': (-0.5, 4047.5)}
assert bbox.order == 'F'
def test__fix_input_bbox_arg(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args=[('slit_id', True)], order='F')
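        # Fixing a non-selector input drops its interval from every box and records
        # its index in kept_ignore.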
bbox = bounding_box._fix_input_bbox_arg('x', 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [0]
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert len(bbox._bounding_boxes) == 2
bbox = bounding_box._fix_input_bbox_arg('y', 5)
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((2, True),)
assert bbox.selector_args._kept_ignore == [1]
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert len(bbox._bounding_boxes) == 2
def test_fix_inputs(self):
model = Shift(1) & Scale(2) & Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bounding_boxes = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)), (1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
bounding_box = CompoundBoundingBox.validate(model, bounding_boxes, selector_args=[('slit_id', True)], order='F')
model.bounding_box = bounding_box
# Fix selector argument
new_model = fix_inputs(model, {'slit_id': 0})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'x': (-0.5, 1047.5),
'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
# Fix a bounding_box field
new_model = fix_inputs(model, {'x': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 2047.5)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-0.5, 4047.5)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
new_model = fix_inputs(model, {'y': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert (bbox._model.parameters == model.parameters).all()
assert bbox.selector_args == ((1, True),)
assert bbox.selector_args._kept_ignore == []
assert bbox._bounding_boxes[(0,)] == (-0.5, 1047.5)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-0.5, 3047.5)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
# Fix selector argument and a bounding_box field
new_model = fix_inputs(model, {'slit_id': 0, 'x': 5})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'y': (-0.5, 2047.5)}
assert bbox.order == 'F'
new_model = fix_inputs(model, {'y': 5, 'slit_id': 1})
bbox = new_model.bounding_box
assert isinstance(bbox, ModelBoundingBox)
assert (bbox._model.parameters == new_model.parameters).all()
assert bbox.ignored_inputs == []
assert bbox.named_intervals == {'x': (-0.5, 3047.5)}
assert bbox.order == 'F'
# Fix two bounding_box fields
new_model = fix_inputs(model, {'x': 5, 'y': 7})
bbox = new_model.bounding_box
assert isinstance(bbox, CompoundBoundingBox)
assert bbox.selector_args == ((0, True),)
assert bbox.selector_args._kept_ignore == []
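        # Both interval-bearing inputs are fixed, so the boxes that remain for
        # slit_id are unbounded.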
assert bbox._bounding_boxes[(0,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(0,)].order == 'F'
assert bbox._bounding_boxes[(1,)] == (-np.inf, np.inf)
assert bbox._bounding_boxes[(1,)].order == 'F'
assert len(bbox._bounding_boxes) == 2
|
7e5361701f39dc5fced0409e9cc899d91f88d74b0a7485017273011b098c69f7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, pointless-statement
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import astropy.units as u
from astropy.modeling.core import CompoundModel, Model, ModelDefinitionError
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import (Chebyshev1D, Chebyshev2D, Const1D, Gaussian1D, Gaussian2D,
Identity, Legendre1D, Legendre2D, Linear1D, Mapping,
Polynomial1D, Polynomial2D, Rotation2D, Scale, Shift,
Tabular1D, fix_inputs)
from astropy.modeling.parameters import Parameter
from astropy.utils import minversion
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set(expr, result):
s = expr(Const1D((2, 2), n_models=2), Const1D((3, 3), n_models=2))
out = s(0, model_set_axis=False)
assert_array_equal(out, result)
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, [5.0, 5.0]),
(lambda x, y: x - y, [-1.0, -1.0]),
(lambda x, y: x * y, [6.0, 6.0]),
(lambda x, y: x / y, [2.0 / 3.0, 2.0 / 3.0]),
(lambda x, y: x ** y, [8.0, 8.0])])
def test_model_set_raises_value_error(expr, result):
"""Check that creating model sets with components whose _n_models are
    different raises a ValueError.
"""
with pytest.raises(ValueError):
expr(Const1D((2, 2), n_models=2), Const1D(3, n_models=1))
@pytest.mark.parametrize(('expr', 'result'),
[(lambda x, y: x + y, 5.0),
(lambda x, y: x - y, -1.0),
(lambda x, y: x * y, 6.0),
(lambda x, y: x / y, 2.0 / 3.0),
(lambda x, y: x ** y, 8.0)])
def test_two_model_instance_arithmetic_1d(expr, result):
"""
Like test_two_model_class_arithmetic_1d, but creates a new model from two
model *instances* with fixed parameters.
"""
s = expr(Const1D(2), Const1D(3))
assert isinstance(s, CompoundModel)
assert s.n_inputs == 1
assert s.n_outputs == 1
out = s(0)
assert out == result
assert isinstance(out, float)
def test_simple_two_model_compose_1d():
"""
Shift and Scale are two of the simplest models to test model composition
with.
"""
S1 = Shift(2) | Scale(3) # First shift then scale
assert isinstance(S1, CompoundModel)
assert S1.n_inputs == 1
assert S1.n_outputs == 1
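    # Shift(2) then Scale(3): (1 + 2) * 3 == 9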
assert S1(1) == 9.0
S2 = Scale(2) | Shift(3) # First scale then shift
assert isinstance(S2, CompoundModel)
assert S2.n_inputs == 1
assert S2.n_outputs == 1
assert S2(1) == 5.0
# Test with array inputs
assert_array_equal(S2([1, 2, 3]), [5.0, 7.0, 9.0])
def test_simple_two_model_compose_2d():
"""
A simple example consisting of two rotations.
"""
r1 = Rotation2D(45) | Rotation2D(45)
assert isinstance(r1, CompoundModel)
assert r1.n_inputs == 2
assert r1.n_outputs == 2
assert_allclose(r1(0, 1), (-1, 0), atol=1e-10)
r2 = Rotation2D(90) | Rotation2D(90) # Rotate twice by 90 degrees
assert_allclose(r2(0, 1), (0, -1), atol=1e-10)
# Compose R with itself to produce 4 rotations
r3 = r1 | r1
assert_allclose(r3(0, 1), (0, -1), atol=1e-10)
def test_n_submodels():
"""
Test that CompoundModel.n_submodels properly returns the number
of components.
"""
g2 = Gaussian1D() + Gaussian1D()
assert g2.n_submodels == 2
g3 = g2 + Gaussian1D()
assert g3.n_submodels == 3
g5 = g3 | g2
assert g5.n_submodels == 5
g7 = g5 / g2
assert g7.n_submodels == 7
def test_expression_formatting():
"""
Test that the expression strings from compound models are formatted
correctly.
"""
# For the purposes of this test it doesn't matter a great deal what
    # model(s) are used in the expression
G = Gaussian1D(1, 1, 1)
G2 = Gaussian2D(1, 2, 3, 4, 5, 6)
M = G + G
assert M._format_expression() == '[0] + [1]'
M = G + G + G
assert M._format_expression() == '[0] + [1] + [2]'
M = G + G * G
assert M._format_expression() == '[0] + [1] * [2]'
M = G * G + G
assert M._format_expression() == '[0] * [1] + [2]'
M = G + G * G + G
assert M._format_expression() == '[0] + [1] * [2] + [3]'
M = (G + G) * (G + G)
assert M._format_expression() == '([0] + [1]) * ([2] + [3])'
# This example uses parentheses in the expression, but those won't be
# preserved in the expression formatting since they technically aren't
# necessary, and there's no way to know that they were originally
# parenthesized (short of some deep, and probably not worthwhile
# introspection)
M = (G * G) + (G * G)
assert M._format_expression() == '[0] * [1] + [2] * [3]'
M = G ** G
assert M._format_expression() == '[0] ** [1]'
M = G + G ** G
assert M._format_expression() == '[0] + [1] ** [2]'
M = (G + G) ** G
assert M._format_expression() == '([0] + [1]) ** [2]'
M = G + G | G
assert M._format_expression() == '[0] + [1] | [2]'
M = G + (G | G)
assert M._format_expression() == '[0] + ([1] | [2])'
M = G & G | G2
assert M._format_expression() == '[0] & [1] | [2]'
M = G & (G | G)
assert M._format_expression() == '[0] & ([1] | [2])'
def test_basic_compound_inverse():
"""
Test basic inversion of compound models in the limited sense supported for
models made from compositions and joins only.
"""
t = (Shift(2) & Shift(3)) | (Scale(2) & Scale(3)) | Rotation2D(90)
assert_allclose(t.inverse(*t(0, 1)), (0, 1))
@pytest.mark.parametrize('model', [
Shift(0) + Shift(0) | Shift(0),
Shift(0) - Shift(0) | Shift(0),
Shift(0) * Shift(0) | Shift(0),
Shift(0) / Shift(0) | Shift(0),
Shift(0) ** Shift(0) | Shift(0),
Gaussian1D(1, 2, 3) | Gaussian1D(4, 5, 6)])
def test_compound_unsupported_inverse(model):
"""
Ensure inverses aren't supported in cases where it shouldn't be.
"""
with pytest.raises(NotImplementedError):
model.inverse
def test_mapping_basic_permutations():
"""
Tests a couple basic examples of the Mapping model--specifically examples
that merely permute the outputs.
"""
x, y = Rotation2D(90)(1, 2)
rs = Rotation2D(90) | Mapping((1, 0))
x_prime, y_prime = rs(1, 2)
assert_allclose((x, y), (y_prime, x_prime))
# A more complicated permutation
m = Rotation2D(90) & Scale(2)
x, y, z = m(1, 2, 3)
ms = m | Mapping((2, 0, 1))
x_prime, y_prime, z_prime = ms(1, 2, 3)
assert_allclose((x, y, z), (y_prime, z_prime, x_prime))
def test_mapping_inverse():
"""Tests inverting a compound model that includes a `Mapping`."""
rs1 = Rotation2D(12.1) & Scale(13.2)
rs2 = Rotation2D(14.3) & Scale(15.4)
# Rotates 2 of the coordinates and scales the third--then rotates on a
# different axis and scales on the axis of rotation. No physical meaning
    # here, just a simple test
m = rs1 | Mapping([2, 0, 1]) | rs2
assert_allclose((0, 1, 2), m.inverse(*m(0, 1, 2)), atol=1e-08)
def test_identity_input():
"""
Test a case where an Identity (or Mapping) model is the first in a chain
of composite models and thus is responsible for handling input broadcasting
properly.
Regression test for https://github.com/astropy/astropy/pull/3362
"""
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=90)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), [-3.0, 1.0])
def test_invalid_operands():
"""
Test that certain operators do not work with models whose inputs/outputs do
not match up correctly.
"""
with pytest.raises(ModelDefinitionError):
Rotation2D(90) | Gaussian1D(1, 0, 0.1)
with pytest.raises(ModelDefinitionError):
Rotation2D(90) + Gaussian1D(1, 0, 0.1)
@pytest.mark.parametrize('poly', [Chebyshev2D(1, 2), Polynomial2D(2), Legendre2D(1, 2)])
def test_compound_with_polynomials_2d(poly):
"""
Tests that polynomials are scaled when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x, y = np.mgrid[:20, :37]
result_compound = model(x, y)
result = shift(poly(x, y))
assert_allclose(result, result_compound)
def test_fix_inputs():
g1 = Gaussian2D(1, 0, 0, 1, 2)
g2 = Gaussian2D(1.5, .5, -.2, .5, .3)
sg1_1 = fix_inputs(g1, {1: 0})
assert_allclose(sg1_1(0), g1(0, 0))
assert_allclose(sg1_1([0, 1, 3]), g1([0, 1, 3], [0, 0, 0]))
sg1_2 = fix_inputs(g1, {'x': 1})
assert_allclose(sg1_2(1.5), g1(1, 1.5))
gg1 = g1 & g2
sgg1_1 = fix_inputs(gg1, {1: 0.1, 3: 0.2})
assert_allclose(sgg1_1(0, 0), gg1(0, 0.1, 0, 0.2))
sgg1_2 = fix_inputs(gg1, {'x0': -.1, 2: .1})
assert_allclose(sgg1_2(1, 1), gg1(-0.1, 1, 0.1, 1))
assert_allclose(sgg1_2(y0=1, y1=1), gg1(-0.1, 1, 0.1, 1))
def test_fix_inputs_invalid():
g1 = Gaussian2D(1, 0, 0, 1, 2)
with pytest.raises(ValueError):
fix_inputs(g1, {'x0': 0, 0: 0})
with pytest.raises(ValueError):
fix_inputs(g1, (0, 1))
with pytest.raises(ValueError):
fix_inputs(g1, {3: 2})
with pytest.raises(ValueError):
fix_inputs(g1, {np.int32(3): 2})
with pytest.raises(ValueError):
fix_inputs(g1, {np.int64(3): 2})
with pytest.raises(ValueError):
fix_inputs(g1, {'w': 2})
with pytest.raises(ModelDefinitionError):
CompoundModel('#', g1, g1)
with pytest.raises(ValueError):
gg1 = fix_inputs(g1, {0: 1})
gg1(2, y=2)
with pytest.raises(ValueError):
gg1 = fix_inputs(g1, {np.int32(0): 1})
gg1(2, y=2)
with pytest.raises(ValueError):
gg1 = fix_inputs(g1, {np.int64(0): 1})
gg1(2, y=2)
def test_fix_inputs_with_bounding_box():
g1 = Gaussian2D(1, 0, 0, 1, 1)
g2 = Gaussian2D(1, 0, 0, 1, 1)
assert g1.bounding_box == ((-5.5, 5.5), (-5.5, 5.5))
gg1 = g1 & g2
gg1.bounding_box = ((-5.5, 5.5), (-5.4, 5.4), (-5.3, 5.3), (-5.2, 5.2))
assert gg1.bounding_box == ((-5.5, 5.5), (-5.4, 5.4), (-5.3, 5.3), (-5.2, 5.2))
sg = fix_inputs(gg1, {0: 0, 2: 0})
assert sg.bounding_box == ((-5.5, 5.5), (-5.3, 5.3))
g1 = Gaussian1D(10, 3, 1)
g = g1 & g1
g.bounding_box = ((1, 4), (6, 8))
gf = fix_inputs(g, {0: 1})
assert gf.bounding_box == (1, 4)
def test_indexing_on_instance():
"""Test indexing on compound model instances."""
m = Gaussian1D(1, 0, 0.1) + Const1D(2)
assert isinstance(m[0], Gaussian1D)
assert isinstance(m[1], Const1D)
assert m.param_names == ('amplitude_0', 'mean_0', 'stddev_0', 'amplitude_1')
# Test parameter equivalence
assert m[0].amplitude == 1 == m.amplitude_0
assert m[0].mean == 0 == m.mean_0
assert m[0].stddev == 0.1 == m.stddev_0
assert m[1].amplitude == 2 == m.amplitude_1
# Test that parameter value updates are symmetric between the compound
# model and the submodel returned by indexing
const = m[1]
m.amplitude_1 = 42
assert const.amplitude == 42
const.amplitude = 137
assert m.amplitude_1 == 137
# Similar couple of tests, but now where the compound model was created
# from model instances
g = Gaussian1D(1, 2, 3, name='g')
p = Polynomial1D(2, name='p')
m = g + p
assert m[0].name == 'g'
assert m[1].name == 'p'
assert m['g'].name == 'g'
assert m['p'].name == 'p'
poly = m[1]
m.c0_1 = 12345
assert poly.c0 == 12345
poly.c1 = 6789
assert m.c1_1 == 6789
# Test negative indexing
assert isinstance(m[-1], Polynomial1D)
assert isinstance(m[-2], Gaussian1D)
with pytest.raises(IndexError):
m[42]
with pytest.raises(IndexError):
m['foobar']
# Confirm index-by-name works with fix_inputs
g = Gaussian2D(1, 2, 3, 4, 5, name='g')
m = fix_inputs(g, {0: 1})
assert m['g'].name == 'g'
# Test string slicing
A = Const1D(1.1, name='A')
B = Const1D(2.1, name='B')
C = Const1D(3.1, name='C')
M = A + B * C
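    # M['B':'C'] selects the B * C sub-expression: 2.1 * 3.1 == 6.51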
    assert_allclose(M['B':'C'](1), 6.51)
class _ConstraintsTestA(Model):
stddev = Parameter(default=0, min=0, max=0.3)
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(stddev, mean):
return stddev, mean
class _ConstraintsTestB(Model):
mean = Parameter(default=0, fixed=True)
@staticmethod
def evaluate(mean):
return mean
def test_inherit_constraints():
"""
Various tests for copying of constraint values between compound models and
their members.
Regression test for https://github.com/astropy/astropy/issues/3481
"""
model = (Gaussian1D(bounds={'stddev': (0, 0.3)}, fixed={'mean': True}) +
Gaussian1D(fixed={'mean': True}))
# Lots of assertions in this test as there are multiple interfaces to
# parameter constraints
assert 'stddev_0' in model.bounds
assert model.bounds['stddev_0'] == (0, 0.3)
assert model.stddev_0.bounds == (0, 0.3)
assert 'mean_0' in model.fixed
assert model.fixed['mean_0'] is True
assert model.mean_0.fixed is True
assert 'mean_1' in model.fixed
assert model.fixed['mean_1'] is True
assert model.mean_1.fixed is True
assert model.stddev_0 is model[0].stddev
# Great, all the constraints were inherited properly
# Now what about if we update them through the sub-models?
model.stddev_0.bounds = (0, 0.4)
assert model[0].stddev.bounds == (0, 0.4)
assert model[0].bounds['stddev'] == (0, 0.4)
model.stddev_0.bounds = (0.1, 0.5)
assert model[0].stddev.bounds == (0.1, 0.5)
assert model[0].bounds['stddev'] == (0.1, 0.5)
model[1].mean.fixed = False
assert model.mean_1.fixed is False
assert model[1].mean.fixed is False
# Now turn off syncing of constraints
assert model.bounds['stddev_0'] == (0.1, 0.5)
model.sync_constraints = False
model[0].stddev.bounds = (0, 0.2)
assert model.bounds['stddev_0'] == (0.1, 0.5)
model.sync_constraints = True
assert model.bounds['stddev_0'] == (0, 0.2)
def test_compound_custom_inverse():
"""
Test that a compound model with a custom inverse has that inverse applied
when the inverse of another model, of which it is a component, is computed.
Regression test for https://github.com/astropy/astropy/issues/3542
"""
poly = Polynomial1D(1, c0=1, c1=2)
scale = Scale(1)
shift = Shift(1)
model1 = poly | scale
model1.inverse = poly
# model1 now has a custom inverse (the polynomial itself, ignoring the
# trivial scale factor)
model2 = shift | model1
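    # Since model2 = shift | model1, its inverse is
    # model1.inverse | shift.inverse, i.e. poly | shift.inverse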
assert_allclose(model2.inverse(1), (poly | shift.inverse)(1))
# Make sure an inverse is not allowed if the models were combined with the
# wrong operator, or if one of the models doesn't have an inverse defined
with pytest.raises(NotImplementedError):
(shift + model1).inverse
with pytest.raises(NotImplementedError):
(model1 & poly).inverse
def test_pickle_compound():
"""
Regression test for
https://github.com/astropy/astropy/issues/3867#issuecomment-114547228
"""
# Test pickling a compound model instance
g1 = Gaussian1D(1.0, 0.0, 0.1)
g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3])
m = g1 + g2
m2 = pickle.loads(pickle.dumps(m))
assert m.param_names == m2.param_names
assert m.__class__.__name__ == m2.__class__.__name__
assert np.all(m.parameters == m2.parameters)
assert np.all(m(0) == m2(0))
def test_update_parameters():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
assert m(1) == 4
offx.offset = 42
assert m(1) == 86
m.factor_1 = 100
assert m(1) == 4300
m2 = m | offx
assert m2(1) == 4342
def test_name():
offx = Shift(1)
scl = Scale(2)
m = offx | scl
scl.name = "scale"
assert m.submodel_names == ('None_0', 'scale')
assert m.name is None
m.name = "M"
assert m.name == "M"
m1 = m.rename("M1")
assert m.name == "M1"
assert m1.name == "M1"
def test_name_index():
g1 = Gaussian1D(1, 1, 1)
g2 = Gaussian1D(1, 2, 1)
g = g1 + g2
with pytest.raises(IndexError):
g['bozo']
g1.name = 'bozo'
assert g['bozo'].mean == 1
g2.name = 'bozo'
with pytest.raises(IndexError):
g['bozo']
@pytest.mark.skipif("not HAS_SCIPY")
def test_tabular_in_compound():
"""
Issue #7411 - evaluate should not change the shape of the output.
"""
t = Tabular1D(points=([1, 5, 7],), lookup_table=[12, 15, 19],
bounds_error=False)
rot = Rotation2D(2)
p = Polynomial1D(1)
x = np.arange(12).reshape((3, 4))
    # Create a compound model which does not execute Tabular.__call__ but
    # Tabular.evaluate, and is followed by a Rotation2D which checks the
    # exact shapes.
model = p & t | rot
x1, y1 = model(x, x)
assert x1.ndim == 2
assert y1.ndim == 2
def test_bounding_box():
g = Gaussian2D() + Gaussian2D(2, .5, .1, 2, 3, 0)
g.bounding_box = ((0, 1), (0, .5))
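    # Outside the bounding box, with_bounding_box=True fills the output with NaN.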
y, x = np.mgrid[0:10, 0:10]
y = y / 3.
x = x / 3.
val = g(x, y, with_bounding_box=True)
compare = np.array([
[2.93738984, 2.93792011, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.87857153, 2.88188761, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.70492922, 2.71529265, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[2.45969972, 2.47912103, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]])
mask = ~np.isnan(val)
assert_allclose(val[mask], compare[mask])
val2 = g(x+2, y+2, with_bounding_box=True)
assert np.isnan(val2).sum() == 100
@pytest.mark.skipif("not HAS_SCIPY")
def test_bounding_box_with_units():
points = np.arange(5) * u.pix
lt = np.arange(5) * u.AA
t = Tabular1D(points, lt)
assert t(1 * u.pix, with_bounding_box=True) == 1. * u.AA
@pytest.mark.parametrize('poly', [Chebyshev1D(5), Legendre1D(5), Polynomial1D(5)])
def test_compound_with_polynomials_1d(poly):
"""
Tests that polynomials are offset when used in compound models.
Issue #3699
"""
poly.parameters = [1, 2, 3, 4, 1, 2]
shift = Shift(3)
model = poly | shift
x = np.linspace(-5, 5, 10)
result_compound = model(x)
result = shift(poly(x))
assert_allclose(result, result_compound)
assert model.param_names == ('c0_0', 'c1_0', 'c2_0', 'c3_0', 'c4_0', 'c5_0', 'offset_1')
def test_replace_submodel():
"""
Replace a model in a Compound model
"""
S1 = Shift(2, name='shift2') | Scale(3, name='scale3') # First shift then scale
S2 = Scale(2, name='scale2') | Shift(3, name='shift3') # First scale then shift
m = S1 & S2
assert m(1, 2) == (9, 7)
m2 = m.replace_submodel('scale3', Scale(4, name='scale4'))
assert m2(1, 2) == (12, 7)
assert m(1, 2) == (9, 7)
# Check the inverse has been updated
assert m2.inverse(12, 7) == (1, 2)
# Produce the same result by replacing a single model with a compound
m3 = m.replace_submodel('shift2', Shift(2) | Scale(2))
assert m(1, 2) == (9, 7)
assert m3(1, 2) == (18, 7)
# Check the inverse has been updated
assert m3.inverse(18, 7) == (1, 2)
    # Test with arithmetic model compounding operator
m = S1 + S2
assert m(1) == 14
m2 = m.replace_submodel('scale2', Scale(4, name='scale4'))
assert m2(1) == 16
# Test with fix_inputs()
R = fix_inputs(Rotation2D(angle=90, name='rotate'), {0: 1})
m4 = S1 | R
assert_allclose(m4(0), (-6, 1))
m5 = m4.replace_submodel('rotate', Rotation2D(180))
assert_allclose(m5(0), (-1, -6))
    # Check we get a ValueError when the model name doesn't exist
with pytest.raises(ValueError):
m2 = m.replace_submodel('not_there', Scale(2))
# And now a model set
P = Polynomial1D(degree=1, n_models=2, name='poly')
S = Shift([1, 2], n_models=2)
m = P | S
assert_array_equal(m([0, 1]), (1, 2))
with pytest.raises(ValueError):
m2 = m.replace_submodel('poly', Polynomial1D(degree=1, c0=1))
m2 = m.replace_submodel('poly', Polynomial1D(degree=1, c0=[1, 2],
n_models=2))
assert_array_equal(m2([0, 1]), (2, 4))
# Ensure previous _user_inverse doesn't stick around
S1 = Shift(1)
S2 = Shift(2)
S3 = Shift(3, name='S3')
S23 = S2 | S3
S23.inverse = Shift(-4.9)
m = S1 & S23
# This should delete the S23._user_inverse
m2 = m.replace_submodel('S3', Shift(4))
assert m2(1, 2) == (2, 8)
assert m2.inverse(2, 8) == (1, 2)
@pytest.mark.parametrize(
"expr",
[
lambda m1, m2: m1 + m2,
lambda m1, m2: m1 - m2,
lambda m1, m2: m1 * m2,
lambda m1, m2: m1 / m2,
],
)
def test_compound_evaluate(expr):
"""
    Tests that the compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
    # Some evaluate functions, including Const1D's, assume that inputs are numpy arrays or quantities
p1 = np.array([1, 2, 3, 4, 1, 2])
p2 = np.array([1, 0, 0.5])
model1 = Polynomial1D(5)
model2 = Gaussian1D(2, 1, 5)
compound = expr(model1, model2)
assert_array_equal(
compound.evaluate(x, *p1, *p2),
expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)),
)
def test_compound_evaluate_power():
"""
    Tests that the compound evaluate function produces the same
result as the models with the power operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3])
model1 = Gaussian1D(2, 1, 5)
model2 = Const1D(2)
compound = model1 ** model2
assert_array_equal(
compound.evaluate(x, *p1, *p2),
model1.evaluate(x, *p1) ** model2.evaluate(x, *p2),
)
def test_compound_evaluate_double_shift():
x = np.linspace(-5, 5, 10)
y = np.linspace(-5, 5, 10)
m1 = Gaussian2D(1, 0, 0, 1, 1, 1)
m2 = Shift(1)
m3 = Shift(2)
m = Gaussian2D(1, 0, 0, 1, 1, 1) & Shift(1) & Shift(2)
assert_array_equal(
m.evaluate(x, y, x - 10, y + 20, 1, 0, 0, 1, 1, 1, 1, 2),
[
m1.evaluate(x, y, 1, 0, 0, 1, 1, 1),
m2.evaluate(x - 10, 1),
m3.evaluate(y + 20, 2),
],
)
@pytest.mark.parametrize(
"expr",
[
lambda m1, m2: m1 + m2,
lambda m1, m2: m1 - m2,
lambda m1, m2: m1 * m2,
lambda m1, m2: m1 / m2,
],
)
def test_compound_evaluate_named_param(expr):
"""
    Tests that the compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3, 0.5, 0.5])
model1 = Gaussian1D(2, 1, 5)
model2 = Gaussian1D(2, 1, 5)
compound = expr(model1, model2)
assert_array_equal(
compound.evaluate(
x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]
),
expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)),
)
def test_compound_evaluate_name_param_power():
"""
    Tests that the compound evaluate function produces the same
result as the models with the power operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0, 0.2])
p2 = np.array([3])
model1 = Gaussian1D(2, 1, 5)
model2 = Const1D(2)
compound = model1 ** model2
assert_array_equal(
compound.evaluate(
x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]
),
model1.evaluate(x, *p1) ** model2.evaluate(x, *p2),
)
def test_compound_evaluate_and():
"""
    Tests that the compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([1, 0.1, 0.5])
p2 = np.array([3])
model1 = Gaussian1D()
model2 = Shift()
compound = model1 & model2
assert_array_equal(
compound.evaluate(x, x, *p1, p2),
[model1.evaluate(x, *p1), model2.evaluate(x, p2)],
)
def test_compound_evaluate_or():
"""
    Tests that the compound evaluate function produces the same
result as the models with the operator applied
"""
x = np.linspace(-5, 5, 10)
p1 = np.array([0.5])
p2_amplitude = np.array([3])
p2_mean = np.array([0])
p2_std = np.array([0.1])
model1 = Shift(0.5)
model2 = Gaussian1D(1, 0, 0.5)
compound = model1 | model2
assert_array_equal(
compound.evaluate(x, p1, p2_amplitude, p2_mean, p2_std),
model2.evaluate(model1.evaluate(x, p1), p2_amplitude, p2_mean, p2_std),
)
def test_compound_evaluate_fix_inputs_by_keyword():
"""
    Tests that the compound evaluate function produces the same
    result as the model when the fix_inputs operator is applied
    using the input name
"""
y, x = np.mgrid[:10, :10]
model_params = [3, 0, 0.1, 1, 0.5, 0]
model = Gaussian2D(1, 2, 0, 0.5)
compound = fix_inputs(model, {"x": x + 5})
assert_array_equal(
compound.evaluate(x, y, *model_params),
model.evaluate(x + 5, y, *model_params),
)
def test_compound_evaluate_fix_inputs_by_position():
"""
    Tests that the compound evaluate function produces the same
    result as the model when the fix_inputs operator is applied
    using the input index
"""
y, x = np.mgrid[:10, :10]
model_params = [3, 0, 0.1, 1, 0.5, 0]
model = Gaussian2D(1, 2, 0, 0.5)
compound = fix_inputs(model, {0: x + 5})
assert_array_equal(
compound.evaluate(x, y, *model_params),
model.evaluate(x + 5, y, *model_params),
)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_multiplied_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.kg / u.s
m1 = Linear1D(slope=5*u.m/u.s/u.s, intercept=1.0*u.m/u.s)
m2 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
truth = m1 * m2
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_multiplied_recursive_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.m * u.kg / u.s
m1 = Linear1D(slope=5*u.m/u.s/u.s, intercept=1.0*u.m/u.s)
m2 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
truth = m1 * m2 * m3
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.m * u.m * u.kg * u.kg / u.s
m1 = Linear1D(slope=5*u.m/u.s/u.s, intercept=1.0*u.m/u.s)
m2 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
m4 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m11 = m1 * m2
m22 = m3 * m4
truth = m11 * m22
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_divided_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.m / u.s
m1 = Linear1D(slope=5*u.kg*u.m/u.s, intercept=1.0*u.kg*u.m)
m2 = Linear1D(slope=0.0*u.s/u.s, intercept=10.0*u.s)
truth = m1 / m2
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_mixed_recursive_compound_model_with_mixed_units():
"""
Regression test for issue #12320
"""
fitter = LevMarLSQFitter()
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.m * u.m / u.s
m1 = Linear1D(slope=5*u.kg*u.m/u.s, intercept=1.0*u.kg*u.m)
m2 = Linear1D(slope=0.0*u.s/u.s, intercept=10.0*u.s)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
truth = m1 / m2 * m3
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
x = np.linspace(0, 1, 101) * u.s
y = np.linspace(5, 10, 101) * u.kg * u.kg * u.m * u.m / u.s
m1 = Linear1D(slope=5*u.kg*u.m/u.s, intercept=1.0*u.kg*u.m)
m2 = Linear1D(slope=0.0*u.s/u.s, intercept=10.0*u.s)
m3 = Linear1D(slope=0.0*u.m/u.s, intercept=10.0*u.m)
m4 = Linear1D(slope=0.0*u.kg/u.s, intercept=10.0*u.kg)
m11 = m1 / m2
m22 = m3 * m4
truth = m11 * m22
fit = fitter(truth, x, y)
unfit_output = truth(x)
fit_output = fit(x)
assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s)
assert_allclose(unfit_output, fit_output)
for name in truth.param_names:
assert getattr(truth, name) == getattr(fit, name)
|
86f4da667fa947af33d41f011ebf8db746b20110fd8c66224c59012c34b49f69 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import numpy as np
import pytest
from astropy.convolution import convolve_models_fft
from astropy.modeling.models import Const1D, Const2D
try:
import scipy # pylint: disable=W0611 # noqa
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
@pytest.mark.skipif('not HAS_SCIPY')
def test_clear_cache():
m1 = Const1D()
m2 = Const1D()
model = convolve_models_fft(m1, m2, (-1, 1), 0.01)
assert model._kwargs is None
assert model._convolution is None
results = model(0)
    assert np.allclose(results, 1.0)
assert model._kwargs is not None
assert model._convolution is not None
model.clear_cache()
assert model._kwargs is None
assert model._convolution is None
@pytest.mark.skipif('not HAS_SCIPY')
def test_input_shape_1d():
m1 = Const1D()
m2 = Const1D()
model = convolve_models_fft(m1, m2, (-1, 1), 0.01)
results = model(0)
assert results.shape == (1,)
x = np.arange(-1, 1, 0.1)
results = model(x)
assert results.shape == x.shape
@pytest.mark.skipif('not HAS_SCIPY')
def test_input_shape_2d():
m1 = Const2D()
m2 = Const2D()
model = convolve_models_fft(m1, m2, ((-1, 1), (-1, 1)), 0.01)
results = model(0, 0)
assert results.shape == (1,)
x = np.arange(-1, 1, 0.1)
results = model(x, 0)
assert results.shape == x.shape
results = model(0, x)
assert results.shape == x.shape
grid = np.meshgrid(x, x)
results = model(*grid)
assert results.shape == grid[0].shape
assert results.shape == grid[1].shape
@pytest.mark.skipif('not HAS_SCIPY')
def test__convolution_inputs():
m1 = Const2D()
m2 = Const2D()
model = convolve_models_fft(m1, m2, ((-1, 1), (-1, 1)), 0.01)
x = np.arange(-1, 1, 0.1)
y = np.arange(-2, 2, 0.1)
grid0 = np.meshgrid(x, x)
grid1 = np.meshgrid(y, y)
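    # _convolution_inputs flattens the coordinate grids into an (N, n_inputs) array
    # and also returns the original grid shape, as the asserts below verify.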
# scalar inputs
assert (np.array([1]), (1,)) == model._convolution_inputs(1)
# Multiple inputs
assert np.all(model._convolution_inputs(*grid0)[0] ==
np.reshape([grid0[0], grid0[1]], (2, -1)).T)
assert model._convolution_inputs(*grid0)[1] == grid0[0].shape
assert np.all(model._convolution_inputs(*grid1)[0] ==
np.reshape([grid1[0], grid1[1]], (2, -1)).T)
assert model._convolution_inputs(*grid1)[1] == grid1[0].shape
# Error
with pytest.raises(ValueError) as err:
model._convolution_inputs(grid0[0], grid1[1])
assert str(err.value) ==\
"Values have differing shapes"
|
4d245c5965f7f9cd6b019cc29261b5a5acc26bb57026f332d2cd8a8d59e17aba | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling.core import Model, fix_inputs
from astropy.modeling.models import Polynomial1D
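# Minimal model with fixed input/return units, used to build the compound models below.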
class _ExampleModel(Model):
n_inputs = 1
n_outputs = 1
def __init__(self):
self._input_units = {"x": u.m}
self._return_units = {"y": u.m/u.s}
super().__init__()
def evaluate(self, input):
return input / u.Quantity(1, u.s)
def _models_with_units():
m1 = _ExampleModel() & _ExampleModel()
m2 = _ExampleModel() + _ExampleModel()
p = Polynomial1D(1)
p._input_units = {'x': u.m / u.s}
p._return_units = {'y': u.m / u.s}
m3 = _ExampleModel() | p
m4 = fix_inputs(m1, {'x0': 1})
m5 = fix_inputs(m1, {0: 1})
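    # m1..m5 cover '&', '+', and '|' composition, plus fix_inputs by name and by positional index.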
models = [m1, m2, m3, m4, m5]
input_units = [{'x0': u.Unit("m"), 'x1': u.Unit("m")},
{'x': u.Unit("m")},
{'x': u.Unit("m")},
{'x1': u.Unit("m")},
{'x1': u.Unit("m")}
]
return_units = [{'y0': u.Unit("m / s"), 'y1': u.Unit("m / s")},
{'y': u.Unit("m / s")},
{'y': u.Unit("m / s")},
{'y0': u.Unit("m / s"), 'y1': u.Unit("m / s")},
{'y0': u.Unit("m / s"), 'y1': u.Unit("m / s")}
]
return np.array([models, input_units, return_units], dtype=object).T
@pytest.mark.parametrize(("model", "input_units", "return_units"), _models_with_units())
def test_input_units(model, input_units, return_units):
""" Test input_units on various compound models."""
assert model.input_units == input_units
assert model.return_units == return_units
|
ef784c7260a1e6f18db952e553b42e3bc06670c0e999108c088367909c9159cb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import os
import subprocess
import sys
import unittest.mock as mk
from inspect import signature
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
import astropy
import astropy.modeling.core as core
import astropy.units as u
from astropy.convolution import convolve_models
from astropy.modeling import models
from astropy.modeling.bounding_box import CompoundBoundingBox, ModelBoundingBox
from astropy.modeling.core import (SPECIAL_OPERATORS, CompoundModel, Model, _add_special_operator,
bind_bounding_box, bind_compound_bounding_box, custom_model,
fix_inputs)
from astropy.modeling.parameters import Parameter
from astropy.modeling.separable import separability_matrix
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
class NonFittableModel(Model):
"""An example class directly subclassing Model for testing."""
a = Parameter()
def __init__(self, a, model_set_axis=None):
super().__init__(a, model_set_axis=model_set_axis)
@staticmethod
def evaluate():
pass
def test_Model_instance_repr_and_str():
m = NonFittableModel(42.5)
assert repr(m) == "<NonFittableModel(a=42.5)>"
assert (str(m) ==
"Model: NonFittableModel\n"
"Inputs: ()\n"
"Outputs: ()\n"
"Model set size: 1\n"
"Parameters:\n"
" a \n"
" ----\n"
" 42.5")
assert len(m) == 1
def test_Model_array_parameter():
model = models.Gaussian1D(4, 2, 1)
assert_allclose(model.param_sets, [[4], [2], [1]])
def test_inputless_model():
"""
Regression test for
https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
"""
class TestModel(Model):
n_outputs = 1
a = Parameter()
@staticmethod
def evaluate(a):
return a
m = TestModel(1)
assert m.a == 1
assert m() == 1
# Test array-like output
m = TestModel([1, 2, 3], model_set_axis=False)
assert len(m) == 1
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[1, 2, 3], model_set_axis=0)
assert len(m) == 3
assert np.all(m() == [1, 2, 3])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
# Test a model set
m = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=np.int64(0))
assert len(m) == 2
assert np.all(m() == [[1, 2, 3], [4, 5, 6]])
def test_ParametricModel():
with pytest.raises(TypeError):
models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature():
"""
Tests that the signatures for the __init__ and __call__
methods of custom models are useful.
"""
@custom_model
def model_a(x):
return x
assert model_a.param_names == ()
assert model_a.n_inputs == 1
sig = signature(model_a.__init__)
assert list(sig.parameters.keys()) == ['self', 'args', 'meta', 'name', 'kwargs']
sig = signature(model_a.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
@custom_model
def model_b(x, a=1, b=2):
return x + a + b
assert model_b.param_names == ('a', 'b')
assert model_b.n_inputs == 1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
@custom_model
def model_c(x, y, a=1, b=2):
return x + y + a + b
assert model_c.param_names == ('a', 'b')
assert model_c.n_inputs == 2
sig = signature(model_c.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
assert [x.default for x in sig.parameters.values()] == [sig.empty, 1, 2, sig.empty]
sig = signature(model_c.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
def test_custom_model_subclass():
"""Test that custom models can be subclassed."""
@custom_model
def model_a(x, a=1):
return x * a
class model_b(model_a):
# Override the evaluate from model_a
@classmethod
def evaluate(cls, x, a):
return -super().evaluate(x, a)
b = model_b()
assert b.param_names == ('a',)
assert b.a == 1
assert b(1) == -1
sig = signature(model_b.__init__)
assert list(sig.parameters.keys()) == ['self', 'a', 'kwargs']
sig = signature(model_b.__call__)
assert list(sig.parameters.keys()) == ['self', 'inputs', 'model_set_axis',
'with_bounding_box', 'fill_value',
'equivalencies', 'inputs_map', 'new_inputs']
def test_custom_model_parametrized_decorator():
"""Tests using custom_model as a decorator with parameters."""
def cosine(x, amplitude=1):
return [amplitude * np.cos(x)]
@custom_model(fit_deriv=cosine)
def sine(x, amplitude=1):
return amplitude * np.sin(x)
assert issubclass(sine, Model)
s = sine(2)
assert_allclose(s(np.pi / 2), 2)
assert_allclose(s.fit_deriv(0, 2), 2)
def test_custom_model_n_outputs():
"""
Test creating a custom_model which has more than one output, which
requires special handling.
    Demonstrates that the ``n_outputs`` error from issue #11791 has been solved.
"""
@custom_model
def model(x, y, n_outputs=2):
return x+1, y+1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 2
assert m.outputs == ('x0', 'x1')
assert (separability_matrix(m) == [[True, True],
[True, True]]).all()
@custom_model
def model(x, y, z, n_outputs=3):
return x+1, y+1, z+1
m = model()
assert not isinstance(m.n_outputs, Parameter)
assert isinstance(m.n_outputs, int)
assert m.n_outputs == 3
assert m.outputs == ('x0', 'x1', 'x2')
assert (separability_matrix(m) == [[True, True, True],
[True, True, True],
[True, True, True]]).all()
def test_custom_model_settable_parameters():
"""
Test creating a custom_model which specifically sets adjustable model
parameters.
    Demonstrates part of issue #11791's discussion of which passed parameters
    should and should not be allowed. In this case, settable parameters
    should be allowed to have defaults set.
"""
@custom_model
def model(x, y, n_outputs=2, bounding_box=((1, 2), (3, 4))):
return x+1, y+1
m = model()
assert m.n_outputs == 2
assert m.bounding_box == ((1, 2), (3, 4))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
m = model(bounding_box=((5, 6), (7, 8)))
assert m.n_outputs == 2
assert m.bounding_box == ((5, 6), (7, 8))
m.bounding_box = ((9, 10), (11, 12))
assert m.bounding_box == ((9, 10), (11, 12))
@custom_model
def model(x, y, n_outputs=2, outputs=('z0', 'z1')):
return x+1, y+1
m = model()
assert m.n_outputs == 2
assert m.outputs == ('z0', 'z1')
m.outputs = ('a0', 'a1')
assert m.outputs == ('a0', 'a1')
m = model(outputs=('w0', 'w1'))
assert m.n_outputs == 2
assert m.outputs == ('w0', 'w1')
m.outputs = ('a0', 'a1')
assert m.outputs == ('a0', 'a1')
def test_custom_model_rejected_parameters():
    """
    Test creating a custom_model which attempts to override non-overridable
    parameters.
    Demonstrates part of issue #11791's discussion of which passed parameters
    should and should not be allowed. In this case, non-settable parameters
    should raise an error (otherwise unexpected behavior may occur).
"""
with pytest.raises(ValueError,
match=r"Parameter 'n_inputs' cannot be a model property: *"):
@custom_model
def model(x, y, n_outputs=2, n_inputs=3):
return x+1, y+1
with pytest.raises(ValueError,
match=r"Parameter 'uses_quantity' cannot be a model property: *"):
@custom_model
def model(x, y, n_outputs=2, uses_quantity=True):
return x+1, y+1
def test_custom_inverse():
"""Test setting a custom inverse on a model."""
p = models.Polynomial1D(1, c0=-2, c1=3)
# A trivial inverse for a trivial polynomial
inv = models.Polynomial1D(1, c0=(2./3.), c1=(1./3.))
with pytest.raises(NotImplementedError):
p.inverse
p.inverse = inv
x = np.arange(100)
assert_allclose(x, p(p.inverse(x)))
assert_allclose(x, p.inverse(p(x)))
p.inverse = None
with pytest.raises(NotImplementedError):
p.inverse
def test_custom_inverse_reset():
"""Test resetting a custom inverse to the model's default inverse."""
class TestModel(Model):
n_inputs = 0
outputs = ('y',)
@property
def inverse(self):
return models.Shift()
@staticmethod
def evaluate():
return 0
# The above test model has no meaning, nor does its inverse--this just
# tests that setting an inverse and resetting to the default inverse works
m = TestModel()
assert isinstance(m.inverse, models.Shift)
m.inverse = models.Scale()
assert isinstance(m.inverse, models.Scale)
del m.inverse
assert isinstance(m.inverse, models.Shift)
def test_render_model_2d():
imshape = (71, 141)
image = np.zeros(imshape)
coords = y, x = np.indices(imshape)
model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)
# test points for edges
ye, xe = [0, 35, 70], [0, 70, 140]
# test points for floating point positions
yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]
test_pts = [(a, b) for a in xe for b in ye]
test_pts += [(a, b) for a in xf for b in yf]
for x0, y0 in test_pts:
model.x_mean = x0
model.y_mean = y0
expected = model(x, y)
for xy in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (xy is None):
# this case is tested in Fittable2DModelTester
continue
actual = model.render(out=im, coords=xy)
if im is None:
assert_allclose(actual, model.render(coords=xy))
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, y0) == (70, 35):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
    # test an error is raised when the bounding box is larger than the input array
    with pytest.raises(ValueError):
        model.render(out=np.zeros((1, 1)))
def test_render_model_1d():
npix = 101
image = np.zeros(npix)
coords = np.arange(npix)
model = models.Gaussian1D()
# test points
test_pts = [0, 49.1, 49.5, 49.9, 100]
# test widths
test_stdv = np.arange(5.5, 6.7, .2)
for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
model.mean = x0
model.stddev = stdv
expected = model(coords)
for x in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (x is None):
# this case is tested in Fittable1DModelTester
continue
actual = model.render(out=im, coords=x)
# assert images match
assert_allclose(expected, actual, atol=3e-7)
# assert model fully captured
if (x0, stdv) == (49.5, 5.5):
boxed = model.render()
flux = np.sum(expected)
assert ((flux - np.sum(boxed)) / flux) < 1e-7
@pytest.mark.filterwarnings('ignore:invalid value encountered in less')
def test_render_model_3d():
imshape = (17, 21, 27)
image = np.zeros(imshape)
coords = np.indices(imshape)
def ellipsoid(x, y, z, x0=13., y0=10., z0=8., a=4., b=3., c=2., amp=1.):
rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
val = (rsq < 1) * amp
return val
class Ellipsoid3D(custom_model(ellipsoid)):
@property
def bounding_box(self):
return ((self.z0 - self.c, self.z0 + self.c),
(self.y0 - self.b, self.y0 + self.b),
(self.x0 - self.a, self.x0 + self.a))
model = Ellipsoid3D()
# test points for edges
ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
# test points for floating point positions
zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]
test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
test_pts += [(x, y, z) for x in xf for y in yf for z in zf]
for x0, y0, z0 in test_pts:
model.x0 = x0
model.y0 = y0
model.z0 = z0
expected = model(*coords[::-1])
for c in [coords, None]:
for im in [image.copy(), None]:
                if (im is None) and (c is None):
continue
actual = model.render(out=im, coords=c)
# assert images match
assert_allclose(expected, actual)
# assert model fully captured
if (z0, y0, x0) == (8, 10, 13):
boxed = model.render()
assert (np.sum(expected) - np.sum(boxed)) == 0
def test_render_model_out_dtype():
"""Test different out.dtype for model.render."""
for model in [models.Gaussian2D(), models.Gaussian2D() + models.Planar2D()]:
for dtype in [np.float64, np.float32, np.complex64]:
im = np.zeros((40, 40), dtype=dtype)
imout = model.render(out=im)
assert imout is im
assert imout.sum() != 0
with pytest.raises(TypeError):
im = np.zeros((40, 40), dtype=np.int32)
imout = model.render(out=im)
def test_custom_bounding_box_1d():
"""
Tests that the bounding_box setter works.
"""
# 1D models
g1 = models.Gaussian1D()
bb = g1.bounding_box
expected = g1.render()
# assign the same bounding_box, now through the bounding_box setter
g1.bounding_box = bb
assert_allclose(g1.render(), expected)
# 2D models
g2 = models.Gaussian2D()
bb = g2.bounding_box
expected = g2.render()
# assign the same bounding_box, now through the bounding_box setter
g2.bounding_box = bb
assert_allclose(g2.render(), expected)
def test_n_submodels_in_single_models():
assert models.Gaussian1D().n_submodels == 1
assert models.Gaussian2D().n_submodels == 1
def test_compound_deepcopy():
model = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3)
new_model = model.deepcopy()
assert id(model) != id(new_model)
assert id(model._leaflist) != id(new_model._leaflist)
assert id(model[0]) != id(new_model[0])
assert id(model[1]) != id(new_model[1])
assert id(model[2]) != id(new_model[2])
@pytest.mark.skipif('not HAS_SCIPY')
def test_units_with_bounding_box():
points = np.arange(10, 20)
table = np.arange(10) * u.Angstrom
t = models.Tabular1D(points, lookup_table=table)
assert isinstance(t(10), u.Quantity)
assert isinstance(t(10, with_bounding_box=True), u.Quantity)
assert_quantity_allclose(t(10), t(10, with_bounding_box=True))
RENAMED_MODEL = models.Gaussian1D.rename('CustomGaussian')
MODEL_RENAME_CODE = """
from astropy.modeling.models import Gaussian1D
print(repr(Gaussian1D))
print(repr(Gaussian1D.rename('CustomGaussian')))
""".strip()
MODEL_RENAME_EXPECTED = b"""
<class 'astropy.modeling.functional_models.Gaussian1D'>
Name: Gaussian1D
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
<class '__main__.CustomGaussian'>
Name: CustomGaussian (Gaussian1D)
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
""".strip()
def test_rename_path(tmpdir):
# Regression test for a bug that caused the path to the class to be
# incorrect in a renamed model's __repr__.
assert repr(RENAMED_MODEL).splitlines()[0] == "<class 'astropy.modeling.tests.test_core.CustomGaussian'>"
# Make sure that when called from a user script, the class name includes
# __main__.
env = os.environ.copy()
paths = [os.path.dirname(astropy.__path__[0])] + sys.path
env['PYTHONPATH'] = os.pathsep.join(paths)
script = tmpdir.join('rename.py').strpath
with open(script, 'w') as f:
f.write(MODEL_RENAME_CODE)
output = subprocess.check_output([sys.executable, script], env=env)
assert output.splitlines() == MODEL_RENAME_EXPECTED.splitlines()
@pytest.mark.parametrize('model_class',
[models.Gaussian1D, models.Polynomial1D,
models.Shift, models.Tabular1D])
def test_rename_1d(model_class):
new_model = model_class.rename(name='Test1D')
assert new_model.name == 'Test1D'
@pytest.mark.parametrize('model_class',
[models.Gaussian2D, models.Polynomial2D, models.Tabular2D])
def test_rename_2d(model_class):
new_model = model_class.rename(name='Test2D')
assert new_model.name == 'Test2D'
def test_fix_inputs_integer():
"""
Tests that numpy integers can be passed as dictionary keys to fix_inputs
Issue #11358
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {1: 22})
assert mf(1) == (1, 22)
mf_int32 = models.fix_inputs(m, {np.int32(1): 33})
assert mf_int32(1) == (1, 33)
mf_int64 = models.fix_inputs(m, {np.int64(1): 44})
assert mf_int64(1) == (1, 44)
def test_fix_inputs_empty_dict():
"""
Tests that empty dictionary can be passed to fix_inputs
Issue #11355
"""
m = models.Identity(2)
mf = models.fix_inputs(m, {})
assert mf(1, 2) == (1, 2)
def test_rename_inputs_outputs():
g2 = models.Gaussian2D(10, 2, 3, 1, 2)
assert g2.inputs == ("x", "y")
assert g2.outputs == ("z",)
with pytest.raises(ValueError):
g2.inputs = ("w", )
with pytest.raises(ValueError):
g2.outputs = ("w", "e")
def test__prepare_output_single_model():
model = models.Gaussian1D()
# No broadcast
assert (np.array([1, 2]) ==
model._prepare_output_single_model(np.array([1, 2]), None)).all()
# Broadcast to scalar
assert 1 == model._prepare_output_single_model(np.array([1]), ())
assert 2 == model._prepare_output_single_model(np.asanyarray(2), ())
# Broadcast reshape
output = np.array([[1, 2, 3],
[4, 5, 6]])
reshape = np.array([[1, 2],
[3, 4],
[5, 6]])
assert (output == model._prepare_output_single_model(output, (2, 3))).all()
assert (reshape == model._prepare_output_single_model(output, (3, 2))).all()
# Broadcast reshape scalar
assert 1 == model._prepare_output_single_model(np.array([1]), (1, 2))
assert 2 == model._prepare_output_single_model(np.asanyarray(2), (3, 4))
# Fail to broadcast
assert (output == model._prepare_output_single_model(output, (1, 2))).all()
assert (output == model._prepare_output_single_model(output, (3, 4))).all()
def test_prepare_outputs_mixed_broadcast():
"""
Tests that _prepare_outputs_single_model does not fail when a smaller
array is passed as first input, but output is broadcast to larger
array.
Issue #10170
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model([1, 2], 3)
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.9692332344763441, 1.0])
output = model(4, [5, 6])
assert output.shape == (2,)
np.testing.assert_array_equal(output, [0.8146473164114145, 0.7371233743916278])
def test_prepare_outputs_complex_reshape():
x = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]])
y = np.array([[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
[26, 27, 28, 29, 30]])
m = models.Identity(3) | models.Mapping((2, 1, 0))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((2, 1), n_inputs=3)
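    # Mapping((2, 1)) keeps only the last two of mf's three outputs, in reversed order.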
output = mf(1, 2)
assert output == (22, 2, 1)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
m = models.Identity(3) | models.Mapping((0, 1, 2))
m.bounding_box = ((0, 100), (0, 200), (0, 50))
mf = models.fix_inputs(m, {2: 22})
t = mf | models.Mapping((0, 1), n_inputs=3)
output = mf(1, 2)
assert output == (1, 2, 22)
output = t(1, 2)
assert output == (1, 2)
output = t(x, y)
assert len(output) == 2
np.testing.assert_array_equal(output[0], x)
np.testing.assert_array_equal(output[1], y)
def test_prepare_outputs_single_entry_vector():
"""
    jwst and gwcs both require that single-entry vectors produce single-entry
    output vectors, not scalars. This tests for that behavior.
"""
model = models.Gaussian2D(1, 2, 3, 4, 5)
output = model(np.array([1]), np.array([2]))
assert output.shape == (1,)
np.testing.assert_allclose(output, [0.9500411305585278])
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings('ignore: Using a non-tuple')
def test_prepare_outputs_sparse_grid():
"""
Test to show that #11060 has been solved.
"""
shape = (3, 3)
    data = np.arange(np.prod(shape)).reshape(shape) * u.m / u.s
points_unit = u.pix
points = [np.arange(size) * points_unit for size in shape]
kwargs = {
'bounds_error': False,
'fill_value': np.nan,
'method': 'nearest',
}
transform = models.Tabular2D(points, data, **kwargs)
truth = np.array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]]) * u.m / u.s
points = np.meshgrid(np.arange(3), np.arange(3), indexing='ij', sparse=True)
x = points[0] * u.pix
y = points[1] * u.pix
value = transform(x, y)
assert (value == truth).all()
points = np.meshgrid(np.arange(3), np.arange(3), indexing='ij', sparse=False) * u.pix
value = transform(*points)
assert (value == truth).all()
def test_coerce_units():
model = models.Polynomial1D(1, c0=1, c1=2)
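    # The bare polynomial has no input units, so feeding it a Quantity must raise.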
with pytest.raises(u.UnitsError):
model(u.Quantity(10, u.m))
with_input_units = model.coerce_units({"x": u.m})
result = with_input_units(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_input_units_tuple = model.coerce_units((u.m,))
result = with_input_units_tuple(u.Quantity(10, u.m))
assert np.isclose(result, 21.0)
with_return_units = model.coerce_units(return_units={"y": u.s})
result = with_return_units(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_return_units_tuple = model.coerce_units(return_units=(u.s,))
result = with_return_units_tuple(10)
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with_both = model.coerce_units({"x": u.m}, {"y": u.s})
result = with_both(u.Quantity(10, u.m))
assert np.isclose(result.value, 21.0)
assert result.unit == u.s
with pytest.raises(ValueError, match=r"input_units keys.*do not match model inputs"):
model.coerce_units({"q": u.m})
with pytest.raises(ValueError, match=r"input_units length does not match n_inputs"):
model.coerce_units((u.m, u.s))
model_with_existing_input_units = models.BlackBody()
with pytest.raises(ValueError, match=r"Cannot specify input_units for model with existing input units"):
model_with_existing_input_units.coerce_units({"x": u.m})
with pytest.raises(ValueError, match=r"return_units keys.*do not match model outputs"):
model.coerce_units(return_units={"q": u.m})
with pytest.raises(ValueError, match=r"return_units length does not match n_outputs"):
model.coerce_units(return_units=(u.m, u.s))
def test_bounding_box_general_inverse():
model = NonFittableModel(42.5)
with pytest.raises(NotImplementedError):
model.bounding_box
model.bounding_box = ()
assert model.bounding_box.bounding_box() == ()
model.inverse = NonFittableModel(3.14)
inverse_model = model.inverse
with pytest.raises(NotImplementedError):
inverse_model.bounding_box
def test__add_special_operator():
sop_name = 'name'
sop = 'value'
    key = _add_special_operator(sop_name, sop)
assert key[0] == sop_name
assert key[1] == SPECIAL_OPERATORS._unique_id
assert key in SPECIAL_OPERATORS
assert SPECIAL_OPERATORS[key] == sop
def test_print_special_operator_CompoundModel(capsys):
"""
Test that issue #11310 has been fixed
"""
model = convolve_models(models.Sersic2D(), models.Gaussian2D())
with astropy.conf.set_temp('max_width', 80):
assert str(model) == "Model: CompoundModel\n" +\
"Inputs: ('x', 'y')\n" +\
"Outputs: ('z',)\n" +\
"Model set size: 1\n" +\
"Expression: convolve_fft (([0]), ([1]))\n" +\
"Components: \n" +\
" [0]: <Sersic2D(amplitude=1., r_eff=1., n=4., x_0=0., y_0=0., ellip=0., theta=0.)>\n" +\
"\n" +\
" [1]: <Gaussian2D(amplitude=1., x_mean=0., y_mean=0., x_stddev=1., y_stddev=1., theta=0.)>\n" +\
"Parameters:\n" +\
" amplitude_0 r_eff_0 n_0 x_0_0 y_0_0 ... y_mean_1 x_stddev_1 y_stddev_1 theta_1\n" +\
" ----------- ------- --- ----- ----- ... -------- ---------- ---------- -------\n" +\
" 1.0 1.0 4.0 0.0 0.0 ... 0.0 1.0 1.0 0.0"
def test__validate_input_shape():
model = models.Gaussian1D()
model._n_models = 2
_input = np.array([[1, 2, 3],
[4, 5, 6]])
# Successful validation
assert model._validate_input_shape(_input, 0, model.inputs, 1, False) == (2, 3)
# Fail number of axes
with pytest.raises(ValueError) as err:
model._validate_input_shape(_input, 0, model.inputs, 2, True)
assert str(err.value) == \
"For model_set_axis=2, all inputs must be at least 3-dimensional."
# Fail number of models (has argname)
with pytest.raises(ValueError) as err:
model._validate_input_shape(_input, 0, model.inputs, 1, True)
assert str(err.value) == \
"Input argument 'x' does not have the correct dimensions in model_set_axis=1 " +\
"for a model set with n_models=2."
# Fail number of models (no argname)
with pytest.raises(ValueError) as err:
model._validate_input_shape(_input, 0, [], 1, True)
assert str(err.value) == \
"Input argument '0' does not have the correct dimensions in model_set_axis=1 " +\
"for a model set with n_models=2."
def test__validate_input_shapes():
model = models.Gaussian1D()
model._n_models = 2
inputs = [mk.MagicMock() for _ in range(3)]
argnames = mk.MagicMock()
model_set_axis = mk.MagicMock()
all_shapes = [mk.MagicMock() for _ in inputs]
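    # Mock out the per-input validation and the broadcast check so only the
    # dispatch logic of _validate_input_shapes is exercised.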
# Successful validation
with mk.patch.object(Model, '_validate_input_shape',
autospec=True, side_effect=all_shapes) as mkValidate:
with mk.patch.object(core, 'check_broadcast',
autospec=True) as mkCheck:
assert mkCheck.return_value == \
model._validate_input_shapes(inputs, argnames, model_set_axis)
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == \
[mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)]
# Fail check_broadcast
with mk.patch.object(Model, '_validate_input_shape',
autospec=True, side_effect=all_shapes) as mkValidate:
with mk.patch.object(core, 'check_broadcast',
autospec=True, return_value=None) as mkCheck:
with pytest.raises(ValueError) as err:
model._validate_input_shapes(inputs, argnames, model_set_axis)
assert str(err.value) == \
"All inputs must have identical shapes or must be scalars."
assert mkCheck.call_args_list == [mk.call(*all_shapes)]
assert mkValidate.call_args_list == \
[mk.call(model, _input, idx, argnames, model_set_axis, True)
for idx, _input in enumerate(inputs)]
def test__remove_axes_from_shape():
model = models.Gaussian1D()
# len(shape) == 0
assert model._remove_axes_from_shape((), mk.MagicMock()) == ()
# axis < 0
assert model._remove_axes_from_shape((1, 2, 3), -1) == (1, 2)
assert model._remove_axes_from_shape((1, 2, 3), -2) == (1, 3)
assert model._remove_axes_from_shape((1, 2, 3), -3) == (2, 3)
# axis >= len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 3) == ()
assert model._remove_axes_from_shape((1, 2, 3), 4) == ()
# 0 <= axis < len(shape)
assert model._remove_axes_from_shape((1, 2, 3), 0) == (2, 3)
assert model._remove_axes_from_shape((1, 2, 3), 1) == (3,)
assert model._remove_axes_from_shape((1, 2, 3), 2) == ()
def test_get_bounding_box():
model = models.Const2D(2)
# No with_bbox
assert model.get_bounding_box(False) is None
# No bounding_box
with pytest.raises(NotImplementedError):
model.bounding_box
assert model.get_bounding_box(True) is None
# Normal bounding_box
model.bounding_box = ((0, 1), (0, 1))
assert not isinstance(model.bounding_box, CompoundBoundingBox)
assert model.get_bounding_box(True) == ((0, 1), (0, 1))
# CompoundBoundingBox with no removal
bbox = CompoundBoundingBox.validate(model, {(1,): ((-1, 0), (-1, 0)), (2,): ((0, 1), (0, 1))},
selector_args=[('y', False)])
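    # selector_args ('y', False): select the box on the value of 'y' without
    # removing 'y' from the bounding-box inputs.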
model.bounding_box = bbox
assert isinstance(model.bounding_box, CompoundBoundingBox)
    # Passing with_bbox=True returns the full compound bounding box
assert model.get_bounding_box(True) == bbox
    # Passing a selector key returns the matching component box
assert model.get_bounding_box((1,)) == ((-1, 0), (-1, 0))
assert model.get_bounding_box((2,)) == ((0, 1), (0, 1))
def test_compound_bounding_box():
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox1 = CompoundBoundingBox.validate(model, {(1,): (-1, 0), (2,): (0, 1)},
selector_args=[('x', False)])
bbox2 = CompoundBoundingBox.validate(model, {(-0.5,): (-1, 0), (0.5,): (0, 1)},
selector_args=[('x', False)])
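    # bbox1 keys (1,)/(2,) must be selected explicitly; bbox2 keys match the
    # evaluated input values, so with_bounding_box=True can resolve them implicitly.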
# Using with_bounding_box to pass a selector
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=(1,)) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=(2,)) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
# Using argument value to pass bounding_box
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
with pytest.raises(RuntimeError):
model(0, with_bounding_box=True)
model1 = models.Gaussian1D()
truth1 = models.Gaussian1D()
model2 = models.Const1D(2)
truth2 = models.Const1D(2)
model = model1 + model2
truth = truth1 + truth2
assert isinstance(model, CompoundModel)
model.bounding_box = bbox1
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=1) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=(2,)))
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=2) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(1,)))
model.bounding_box = bbox2
assert model(-0.5) == truth(-0.5)
assert model(-0.5, with_bounding_box=True) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0.5, with_bounding_box=True) == truth(0.5)
with pytest.raises(RuntimeError):
model(0, with_bounding_box=True)
def test_bind_bounding_box():
model = models.Polynomial2D(3)
bbox = ((-1, 1), (-2, 2))
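    # With the default order ('C') the bbox tuples map to the inputs in reverse
    # order; order='F' maps them in input order, as the asserts below show.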
bind_bounding_box(model, bbox)
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box['x'] == (-2, 2)
assert model.bounding_box['y'] == (-1, 1)
bind_bounding_box(model, bbox, order='F')
assert model.get_bounding_box() is not None
assert model.bounding_box == bbox
assert model.bounding_box['x'] == (-1, 1)
assert model.bounding_box['y'] == (-2, 2)
def test_bind_compound_bounding_box_using_with_bounding_box_select():
"""
This demonstrates how to bind multiple bounding_boxes which are
    selectable using the ``with_bounding_box`` argument; note that there must
    be a fall-back to the implicit selector.
"""
model = models.Gaussian1D()
truth = models.Gaussian1D()
bbox = (0, 1)
with pytest.raises(AttributeError):
bind_compound_bounding_box(model, bbox, 'x')
bbox = {0: (-1, 0), 1: (0, 1)}
bind_compound_bounding_box(model, bbox, [('x', False)])
# No bounding box
assert model(-0.5) == truth(-0.5)
assert model(0.5) == truth(0.5)
assert model(0) == truth(0)
assert model(1) == truth(1)
    # `with_bounding_box` selects the box explicitly, since `-0.5` is not itself a key
assert model(-0.5, with_bounding_box=0) == truth(-0.5)
assert np.isnan(model(-0.5, with_bounding_box=1))
    # `with_bounding_box` selects the box explicitly, since `0.5` is not itself a key
assert model(0.5, with_bounding_box=1) == truth(0.5)
assert np.isnan(model(0.5, with_bounding_box=(0,)))
# Fall back onto implicit selector
assert model(0, with_bounding_box=True) == truth(0)
assert model(1, with_bounding_box=True) == truth(1)
# Attempt to fall-back on implicit selector, but no bounding_box
with pytest.raises(RuntimeError):
model(0.5, with_bounding_box=True)
# Override implicit selector
assert np.isnan(model(1, with_bounding_box=0))
def test_fix_inputs_compound_bounding_box():
base_model = models.Gaussian2D(1, 2, 3, 4, 5)
bbox = {2.5: (-1, 1), 3.14: (-7, 3)}
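    # The dict keys (2.5, 3.14) are candidate values of the fixed input;
    # fixing it to 2.5 selects the (-1, 1) box.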
model = fix_inputs(base_model, {'y': 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'x': 2.5}, bounding_boxes=bbox)
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'y': 2.5}, bounding_boxes=bbox, selector_args=(('y', True),))
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'x': 2.5}, bounding_boxes=bbox, selector_args=(('x', True),))
assert model.bounding_box == (-1, 1)
model = fix_inputs(base_model, {'x': 2.5}, bounding_boxes=bbox, selector_args=((0, True),))
assert model.bounding_box == (-1, 1)
base_model = models.Identity(4)
bbox = {(2.5, 1.3): ((-1, 1), (-3, 3)), (2.5, 2.71): ((-3, 3), (-1, 1))}
model = fix_inputs(base_model, {'x0': 2.5, 'x1': 1.3}, bounding_boxes=bbox)
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(base_model, {'x0': 2.5, 'x1': 1.3}, bounding_boxes=bbox,
selector_args=(('x0', True), ('x1', True)))
assert model.bounding_box == ((-1, 1), (-3, 3))
model = fix_inputs(base_model, {'x0': 2.5, 'x1': 1.3}, bounding_boxes=bbox,
selector_args=((0, True), (1, True)))
assert model.bounding_box == ((-1, 1), (-3, 3))
def test_model_copy_with_bounding_box():
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5)), order='F')
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() == model.get_bounding_box() == None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
        assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() == model1.get_bounding_box() == None
def test_compound_model_copy_with_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order='F')
# No bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() == model.get_bounding_box() == None
# with bbox
model.bounding_box = bbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
for index, interval in model.bounding_box.intervals.items():
interval_copy = model_copy.bounding_box.intervals[index]
assert interval == interval_copy
        assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() == model1.get_bounding_box() == None
def test_model_copy_with_compound_bounding_box():
model = models.Polynomial2D(2)
bbox = {(0,): (-0.5, 1047.5),
(1,): (-0.5, 3047.5)}
cbbox = CompoundBoundingBox.validate(model, bbox, selector_args=[('x', True)], order='F')
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() == model.get_bounding_box() == None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(model.bounding_box.selector_args)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[index]
assert interval == interval_copy
            assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(1)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() == model1.get_bounding_box() == None
def test_compound_model_copy_with_compound_bounding_box():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = {(0,): ((-0.5, 1047.5), (-0.5, 2047.5)),
(1,): ((-0.5, 3047.5), (-0.5, 4047.5)), }
cbbox = CompoundBoundingBox.validate(model, bbox, selector_args=[('slit_id', True)], order='F')
# No cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert model_copy.get_bounding_box() == model.get_bounding_box() == None
# with cbbox
model.bounding_box = cbbox
model_copy = model.copy()
assert id(model_copy) != id(model)
assert id(model_copy.bounding_box) != id(model.bounding_box)
assert model_copy.bounding_box.selector_args == model.bounding_box.selector_args
assert id(model_copy.bounding_box.selector_args) != id(model.bounding_box.selector_args)
for selector, bbox in model.bounding_box.bounding_boxes.items():
for index, interval in bbox.intervals.items():
interval_copy = model_copy.bounding_box.bounding_boxes[selector].intervals[index]
assert interval == interval_copy
            assert id(interval) != id(interval_copy)
# add model to compound model
model1 = model | models.Identity(3)
model_copy = model1.copy()
assert id(model_copy) != id(model1)
assert model_copy.get_bounding_box() == model1.get_bounding_box() == None
def test_compound_model_copy_user_attribute():
"""Regression test for issue #12370"""
model = models.Gaussian2D(100, 25, 25, 5, 5) | models.Identity(1)
model.xname = 'x_mean' # user-defined attribute
assert hasattr(model, 'xname')
assert model.xname == 'x_mean'
model_copy = model.copy()
    model_copy.xname  # plain attribute access must not raise
assert hasattr(model_copy, 'xname')
assert model_copy.xname == 'x_mean'
def test_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Gaussian2D()
bbox = ModelBoundingBox.validate(model, ((-1, 1), (-np.inf, np.inf)), order='F')
model.bounding_box = bbox
x = np.array([-0.5, 0.5])
y = 0
    # Everything works when it's all in the bounding box
assert (model(x, y) == (model(x, y, with_bounding_box=True))).all()
def test_compound_model_mixed_array_scalar_bounding_box():
"""Regression test for issue #12319"""
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = ModelBoundingBox.validate(model, ((-0.5, 1047.5), (-0.5, 2047.5), (-np.inf, np.inf)), order='F')
model.bounding_box = bbox
x = np.array([1000, 1001])
y = np.array([2000, 2001])
slit_id = 0
    # Everything works when it's all in the bounding box
value0 = model(x, y, slit_id)
value1 = model(x, y, slit_id, with_bounding_box=True)
assert_equal(value0, value1)
def test_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,))
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [3, 4])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [3, 4])
def test_compound_model_with_bounding_box_true_and_single_output():
"""Regression test for issue #12373"""
model = models.Mapping((1,)) | models.Shift(1)
x = [1, 2]
y = [3, 4]
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
model.bounding_box = ((-np.inf, np.inf), (-np.inf, np.inf))
# Check baseline
assert_equal(model(x, y), [4, 5])
# Check with_bounding_box=True should be the same
assert_equal(model(x, y, with_bounding_box=True), [4, 5])
def test_bounding_box_pass_with_ignored():
"""Test the possiblity of setting ignored variables in bounding box"""
model = models.Polynomial2D(2)
bbox = ModelBoundingBox.validate(model, (-1, 1), ignored=['y'])
model.bounding_box = bbox
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
model = models.Polynomial2D(2)
bind_bounding_box(model, (-1, 1), ignored=['y'])
assert model.bounding_box.bounding_box() == (-1, 1)
assert model.bounding_box == bbox
def test_compound_bounding_box_pass_with_ignored():
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bbox = {(0,): (-0.5, 1047.5),
(1,): (-0.5, 2047.5), }
cbbox = CompoundBoundingBox.validate(model, bbox, selector_args=[('slit_id', True)],
ignored=['y'], order='F')
model.bounding_box = cbbox
model = models.Shift(1) & models.Shift(2) & models.Identity(1)
model.inputs = ('x', 'y', 'slit_id')
bind_compound_bounding_box(model, bbox, selector_args=[('slit_id', True)],
ignored=['y'], order='F')
assert model.bounding_box == cbbox
@pytest.mark.parametrize('int_type', [int, np.int32, np.int64, np.uint32, np.uint64])
def test_model_integer_indexing(int_type):
"""Regression for PR 12561; verify that compound model components
can be accessed by integer index"""
gauss = models.Gaussian2D()
airy = models.AiryDisk2D()
compound = gauss + airy
assert compound[int_type(0)] == gauss
assert compound[int_type(1)] == airy
def test_model_string_indexing():
"""Regression for PR 12561; verify that compound model components
can be accessed by indexing with model name"""
gauss = models.Gaussian2D()
gauss.name = 'Model1'
airy = models.AiryDisk2D()
airy.name = 'Model2'
compound = gauss + airy
assert compound['Model1'] == gauss
assert compound['Model2'] == airy
|
f53e0659b4084d1f20a52494ce2e39821de7906e9aa7bfb8394b838e270aecd0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for spline models and fitters"""
import unittest.mock as mk
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.modeling.core import FittableModel, ModelDefinitionError
from astropy.modeling.fitting import (SplineExactKnotsFitter, SplineInterpolateFitter,
SplineSmoothingFitter, SplineSplrepFitter)
from astropy.modeling.parameters import Parameter
from astropy.modeling.spline import Spline1D, _Spline, _SplineFitter
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
# pylint: disable=invalid-name
from astropy.utils.exceptions import AstropyUserWarning
npts = 50
nknots = 10
np.random.seed(42)
test_w = np.random.rand(npts)
test_t = [-1, 0, 1]
noise = np.random.randn(npts)
degree_tests = [1, 2, 3, 4, 5]
wieght_tests = [None, test_w]
smoothing_tests = [None, 0.01]
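# Shared test data: 50 noisy samples, 10 knots, and reproducible random weights/noise (seed 42).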
class TestSpline:
def setup_class(self):
self.num_opt = 3
self.optional_inputs = {f'test{i}': mk.MagicMock() for i in range(self.num_opt)}
self.extra_kwargs = {f'new{i}': mk.MagicMock() for i in range(self.num_opt)}
class Spline(_Spline):
optional_inputs = {'test': 'test'}
def _init_parameters(self):
super()._init_parameters()
def _init_data(self, knots, coeffs, bounds=None):
super()._init_data(knots, coeffs, bounds=bounds)
self.Spline = Spline
def test___init__(self):
# empty spline
spl = self.Spline()
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
assert not hasattr(spl, 'degree')
# Call _init_spline
with mk.patch.object(_Spline, '_init_spline',
autospec=True) as mkInit:
# No call (knots=None)
spl = self.Spline()
assert mkInit.call_args_list == []
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
spl = self.Spline(knots=knots, coeffs=coeffs, bounds=bounds)
assert mkInit.call_args_list == \
[mk.call(spl, knots, coeffs, bounds)]
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
# Coeffs but no knots
with pytest.raises(ValueError) as err:
self.Spline(coeffs=mk.MagicMock())
assert str(err.value) == \
"If one passes a coeffs vector one needs to also pass knots!"
def test_param_names(self):
# no parameters
spl = self.Spline()
assert spl.param_names == ()
knot_names = tuple([mk.MagicMock() for _ in range(3)])
spl._knot_names = knot_names
assert spl.param_names == knot_names
coeff_names = tuple([mk.MagicMock() for _ in range(3)])
spl._coeff_names = coeff_names
assert spl.param_names == knot_names + coeff_names
def test__optional_arg(self):
spl = self.Spline()
assert spl._optional_arg('test') == '_test'
def test__create_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert hasattr(spl, attribute)
assert getattr(spl, attribute) is None
with pytest.raises(ValueError,
match=r"Optional argument .* already exists in this class!"):
spl._create_optional_inputs()
def test__intercept_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
new_kwargs = spl._intercept_optional_inputs(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
assert new_kwargs == self.extra_kwargs
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
new_kwargs = spl._intercept_optional_inputs(**kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is not None
assert getattr(spl, attribute) == kwargs[arg]
assert getattr(spl, attribute) != value
assert arg not in new_kwargs
assert new_kwargs == self.extra_kwargs
assert kwargs != self.extra_kwargs
with pytest.raises(RuntimeError,
match=r".* has already been set, something has gone wrong!"):
spl._intercept_optional_inputs(**kwargs)
def test_evaluate(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
spl = Spline()
# No options passed in and No options set
new_kwargs = spl.evaluate(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
assert new_kwargs[arg] == value
for arg, value in self.extra_kwargs.items():
assert new_kwargs[arg] == value
assert len(new_kwargs) == (len(self.optional_inputs) + len(self.extra_kwargs))
# No options passed in and Options set
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**kwargs)
new_kwargs = spl.evaluate(**self.extra_kwargs)
assert new_kwargs == kwargs
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
# Options passed in
set_kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
            set_kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**set_kwargs)
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
assert set_kwargs != kwargs
new_kwargs = spl.evaluate(**kwargs)
assert new_kwargs == kwargs
def test___call__(self):
spl = self.Spline()
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, "_intercept_optional_inputs",
autospec=True, return_value=new_kwargs) as mkIntercept:
with mk.patch.object(FittableModel, "__call__",
autospec=True) as mkCall:
assert mkCall.return_value == spl(*args, **kwargs)
assert mkCall.call_args_list == \
[mk.call(spl, *args, **new_kwargs)]
assert mkIntercept.call_args_list == \
[mk.call(spl, **kwargs)]
def test__create_parameter(self):
np.random.seed(37)
base_vec = np.random.random(20)
test = base_vec.copy()
fixed_test = base_vec.copy()
class Spline(self.Spline):
@property
def test(self):
return test
@property
def fixed_test(self):
return fixed_test
spl = Spline()
assert (spl.test == test).all()
assert (spl.fixed_test == fixed_test).all()
for index in range(20):
name = f"test_name{index}"
spl._create_parameter(name, index, 'test')
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is False
assert param.value == test[index] == spl.test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.test[index] == new_set
assert spl.test[index] != base_vec[index]
new_get = np.random.random()
spl.test[index] = new_get
assert param.value == new_get
assert param.value != new_set
for index in range(20):
name = f"fixed_test_name{index}"
spl._create_parameter(name, index, 'fixed_test', True)
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is True
assert param.value == fixed_test[index] == spl.fixed_test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.fixed_test[index] == new_set
assert spl.fixed_test[index] != base_vec[index]
new_get = np.random.random()
spl.fixed_test[index] = new_get
assert param.value == new_get
assert param.value != new_set
def test__create_parameters(self):
np.random.seed(37)
test = np.random.random(20)
class Spline(self.Spline):
@property
def test(self):
return test
spl = Spline()
fixed = mk.MagicMock()
with mk.patch.object(_Spline, '_create_parameter',
autospec=True) as mkCreate:
params = spl._create_parameters("test_param", "test", fixed)
assert params == tuple([f"test_param{idx}" for idx in range(20)])
assert mkCreate.call_args_list == \
[mk.call(spl, f"test_param{idx}", idx, 'test', fixed) for idx in range(20)]
def test__init_parameters(self):
spl = self.Spline()
with pytest.raises(NotImplementedError) as err:
spl._init_parameters()
assert str(err.value) == \
"This needs to be implemented"
def test__init_data(self):
spl = self.Spline()
with pytest.raises(NotImplementedError) as err:
spl._init_data(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) == \
"This needs to be implemented"
with pytest.raises(NotImplementedError) as err:
spl._init_data(mk.MagicMock(), mk.MagicMock())
assert str(err.value) == \
"This needs to be implemented"
def test__init_spline(self):
spl = self.Spline()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
with mk.patch.object(_Spline, "_init_parameters",
autospec=True) as mkParameters:
with mk.patch.object(_Spline, "_init_data",
autospec=True) as mkData:
main = mk.MagicMock()
main.attach_mock(mkParameters, 'parameters')
main.attach_mock(mkData, 'data')
spl._init_spline(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.data(spl, knots, coeffs, bounds=bounds),
mk.call.parameters(spl)
]
def test__init_tck(self):
spl = self.Spline()
assert spl._c is None
assert spl._t is None
assert spl._degree is None
spl = self.Spline(degree=4)
assert spl._c is None
assert spl._t is None
assert spl._degree == 4
@pytest.mark.skipif('not HAS_SCIPY')
class TestSpline1D:
def setup_class(self):
def func(x, noise=0):
return np.exp(-x**2) + 0.1*noise
self.x = np.linspace(-3, 3, npts)
self.y = func(self.x, noise)
self.truth = func(self.x)
arg_sort = np.argsort(self.x)
np.random.shuffle(arg_sort)
self.x_s = self.x[arg_sort]
self.y_s = func(self.x_s, noise[arg_sort])
self.npts_out = 1000
self.xs = np.linspace(-3, 3, self.npts_out)
self.t = np.linspace(-3, 3, nknots)[1:-1]
def check_parameter(self, spl, base_name, name, index, value, fixed):
assert base_name in name
assert index == int(name.split(base_name)[-1])
knot_name = f"{base_name}{index}"
assert knot_name == name
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.name == name
assert param.value == value(index)
assert param.model == spl
assert param.fixed is fixed
def check_parameters(self, spl, params, base_name, value, fixed):
for idx, name in enumerate(params):
self.check_parameter(spl, base_name, name, idx, value, fixed)
def update_parameters(self, spl, knots, value):
for name in knots:
param = getattr(spl, name)
param.value = value
assert param.value == value
def test___init__with_no_knot_information(self):
spl = Spline1D()
assert spl._degree == 3
assert spl._user_knots is False
assert spl._t is None
assert spl._c is None
assert spl._nu is None
# Check no parameters created
assert len(spl._knot_names) == 0
assert len(spl._coeff_names) == 0
def test___init__with_number_of_knots(self):
spl = Spline1D(knots=10)
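        # The 10 requested knots are stored with degree + 1 = 4 boundary knots
        # at each end, giving the 18 entries checked below.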
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is False
assert spl._nu is None
# Check vector data
assert len(spl._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert len(spl._c) == 18
assert (spl._c == np.zeros(18)).all()
# Check all parameter names created:
assert len(spl._knot_names) == 18
assert len(spl._coeff_names) == 18
# Check knot values:
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values:
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_full_custom_knots(self):
t = 17*np.arange(20) - 32
spl = Spline1D(knots=t)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == np.zeros(20)).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_interior_custom_knots(self):
t = np.arange(1, 20)
spl = Spline1D(knots=t, bounds=[0, 20])
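        # 19 interior knots plus 4 boundary knots at each bound (0 and 20)
        # give the 27 stored knots checked below.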
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert len(spl._t) == 27
assert (spl._t[4:-4] == t).all()
assert (spl._t[:4] == 0).all()
assert (spl._t[-4:] == 20).all()
assert len(spl._c) == 27
assert (spl._c == np.zeros(27)).all()
# Check knot values:
def value0(idx):
if idx < 4:
return 0
elif idx >= 19 + 4:
return 20
else:
return t[idx-4]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_user_knots_and_coefficients(self):
t = 17*np.arange(20) - 32
c = np.linspace(-1, 1, 20)
spl = Spline1D(knots=t, coeffs=c)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == c).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__errors(self):
# Bad knot type
knots = 3.5
with pytest.raises(ValueError) as err:
Spline1D(knots=knots)
assert str(err.value) ==\
f"Knots: {knots} must be iterable or value"
# Not enough knots
for idx in range(8):
with pytest.raises(ValueError) as err:
Spline1D(knots=np.arange(idx))
assert str(err.value) ==\
"Must have at least 8 knots."
# Bad scipy spline
t = np.arange(20)[::-1]
with pytest.raises(ValueError):
Spline1D(knots=t)
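    # The next tests exercise the two-way link between the raw knot/coeff
    # vectors (spl._t / spl._c) and the generated Parameter objects: a write
    # on either side must be visible from the other.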
def test_parameter_array_link(self):
spl = Spline1D(10)
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check knot vector -> knot parameter link
t = np.arange(18)
spl._t = t.copy()
def value1(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value1, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl, spl._knot_names, 3)
assert (spl._t[:] == 3).all()
# Check coeff base values
def value2(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value2, False)
# Check coeff vector -> coeff parameter link
c = 5 * np.arange(18) + 18
spl._c = c.copy()
def value3(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value3, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl, spl._coeff_names, 4)
assert (spl._c[:] == 4).all()
def test_two_splines(self):
spl0 = Spline1D(knots=10)
spl1 = Spline1D(knots=15, degree=2)
assert spl0._degree == 3
assert len(spl0._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl0._t == t).all()
assert len(spl0._c) == 18
assert (spl0._c == np.zeros(18)).all()
assert spl1._degree == 2
assert len(spl1._t) == 21
t = np.zeros(21)
t[-3:] = 1
assert (spl1._t == t).all()
assert len(spl1._c) == 21
assert (spl1._c == np.zeros(21)).all()
# Check all knot names created
assert len(spl0._knot_names) == 18
assert len(spl1._knot_names) == 21
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl0, spl0._knot_names, "knot", value0, True)
def value1(idx):
if idx < 21 - 3:
return 0
else:
return 1
self.check_parameters(spl1, spl1._knot_names, "knot", value1, True)
# Check knot vector -> knot parameter link
t0 = 7 * np.arange(18) + 27
t1 = 11 * np.arange(21) + 19
spl0._t[:] = t0.copy()
spl1._t[:] = t1.copy()
def value2(idx):
return t0[idx]
self.check_parameters(spl0, spl0._knot_names, "knot", value2, True)
def value3(idx):
return t1[idx]
self.check_parameters(spl1, spl1._knot_names, "knot", value3, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl0, spl0._knot_names, 3)
self.update_parameters(spl1, spl1._knot_names, 4)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
# Check all coeff names created
assert len(spl0._coeff_names) == 18
assert len(spl1._coeff_names) == 21
# Check coeff base values
def value4(idx):
return 0
self.check_parameters(spl0, spl0._coeff_names, "coeff", value4, False)
self.check_parameters(spl1, spl1._coeff_names, "coeff", value4, False)
# Check coeff vector -> coeff parameter link
c0 = 17 * np.arange(18) + 14
c1 = 37 * np.arange(21) + 47
spl0._c[:] = c0.copy()
spl1._c[:] = c1.copy()
def value5(idx):
return c0[idx]
self.check_parameters(spl0, spl0._coeff_names, "coeff", value5, False)
def value6(idx):
return c1[idx]
self.check_parameters(spl1, spl1._coeff_names, "coeff", value6, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl0, spl0._coeff_names, 5)
self.update_parameters(spl1, spl1._coeff_names, 6)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
assert (spl0._c[:] == 5).all()
assert (spl1._c[:] == 6).all()
def test__knot_names(self):
# no parameters
spl = Spline1D()
assert spl._knot_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._knot_names == tuple(knot_names)
def test__coeff_names(self):
# no parameters
spl = Spline1D()
assert spl._coeff_names == ()
# some parameters
coeff_names = [f"coeff{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._coeff_names == tuple(coeff_names)
def test_param_names(self):
# no parameters
spl = Spline1D()
assert spl.param_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
coeff_names = [f"coeff{idx}" for idx in range(18)]
param_names = knot_names + coeff_names
spl = Spline1D(10)
assert spl.param_names == tuple(param_names)
def test_t(self):
# no parameters
spl = Spline1D()
# test get
assert spl._t is None
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
# test set
with pytest.raises(ValueError) as err:
spl.t = mk.MagicMock()
assert str(err.value) ==\
"The model parameters must be initialized before setting knots."
# with parameters
spl = Spline1D(10)
# test get
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert (spl.t == t).all()
# test set
spl.t = (np.arange(18) + 15)
assert (spl._t == (np.arange(18) + 15)).all()
assert (spl.t == (np.arange(18) + 15)).all()
assert (spl.t != t).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.t = np.arange(idx)
assert str(err.value) == \
"There must be exactly as many knots as previously defined."
def test_c(self):
# no parameters
spl = Spline1D()
# test get
assert spl._c is None
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
# test set
with pytest.raises(ValueError) as err:
spl.c = mk.MagicMock()
assert str(err.value) ==\
"The model parameters must be initialized before setting coeffs."
# with parameters
spl = Spline1D(10)
# test get
assert (spl._c == np.zeros(18)).all()
assert (spl.c == np.zeros(18)).all()
# test set
spl.c = (np.arange(18) + 15)
assert (spl._c == (np.arange(18) + 15)).all()
assert (spl.c == (np.arange(18) + 15)).all()
assert (spl.c != np.zeros(18)).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.c = np.arange(idx)
assert str(err.value) == \
"There must be exactly as many coeffs as previously defined."
def test_degree(self):
# default degree
spl = Spline1D()
# test get
assert spl._degree == 3
assert spl.degree == 3
# test set
# non-default degree
spl = Spline1D(degree=2)
# test get
assert spl._degree == 2
assert spl.degree == 2
def test__initialized(self):
# no parameters
spl = Spline1D()
assert spl._initialized is False
# with parameters
spl = Spline1D(knots=10, degree=2)
assert spl._initialized is True
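    # spl.tck mirrors scipy's (t, c, k) tuple form of a B-spline: knot vector,
    # coefficient vector, and degree. A minimal sketch of the round trip
    # exercised below:
    #
    #     spl = Spline1D()
    #     spl.tck = (t, c, 3)   # populates _t and _c and creates Parameters
    #     assert spl.tck[2] == spl.degree == 3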
def test_tck(self):
# no parameters
spl = Spline1D()
# test get
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
assert spl.degree == 3
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
spl.tck = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
# test get
t = np.zeros(16)
t[-3:] = 1
assert (spl.t == t).all()
assert (spl.c == np.zeros(16)).all()
assert spl.degree == 2
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
t = 5*np.arange(16) + 11
c = 7*np.arange(16) + 13
k = 2
spl.tck = (t, c, k)
assert (spl.t == t).all()
assert (spl.c == c).all()
assert spl.degree == k
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# Error
with pytest.raises(ValueError) as err:
spl.tck = (t, c, 4)
assert str(err.value) ==\
"tck has incompatible degree!"
def test_bspline(self):
from scipy.interpolate import BSpline
# no parameters
spl = Spline1D()
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
def value0(idx):
return t[idx]
def value1(idx):
return c[idx]
# set (bspline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
bspline = BSpline(t, c, k)
spl.bspline = bspline
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# set (tuple spline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
spl.bspline = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
def test_knots(self):
# no parameters
spl = Spline1D()
assert spl.knots == []
# with parameters
spl = Spline1D(10)
knots = spl.knots
assert len(knots) == 18
for knot in knots:
assert isinstance(knot, Parameter)
assert hasattr(spl, knot.name)
assert getattr(spl, knot.name) == knot
def test_coeffs(self):
# no parameters
spl = Spline1D()
assert spl.coeffs == []
# with parameters
spl = Spline1D(10)
coeffs = spl.coeffs
assert len(coeffs) == 18
for coeff in coeffs:
assert isinstance(coeff, Parameter)
assert hasattr(spl, coeff.name)
assert getattr(spl, coeff.name) == coeff
def test__init_parameters(self):
spl = Spline1D()
with mk.patch.object(Spline1D, '_create_parameters',
autospec=True) as mkCreate:
spl._init_parameters()
assert mkCreate.call_args_list == [
mk.call(spl, "knot", "t", fixed=True),
mk.call(spl, "coeff", "c")
]
def test__init_bounds(self):
spl = Spline1D()
has_bounds, lower, upper = spl._init_bounds()
assert has_bounds is False
assert (lower == [0, 0, 0, 0]).all()
assert (upper == [1, 1, 1, 1]).all()
assert spl._user_bounding_box is None
has_bounds, lower, upper = spl._init_bounds((-5, 5))
assert has_bounds is True
assert (lower == [-5, -5, -5, -5]).all()
assert (upper == [5, 5, 5, 5]).all()
assert spl._user_bounding_box == (-5, 5)
def test__init_knots(self):
np.random.seed(19)
lower = np.random.random(4)
upper = np.random.random(4)
# Integer
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._t is None
spl._init_knots(10, mk.MagicMock(), lower, upper)
t = np.concatenate((lower, np.zeros(10), upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, True, lower, upper)
t = np.concatenate((lower, knots, upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with no bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, False, lower, upper)
assert (spl._t == knots).all()
assert mkBspline.call_args_list == [mk.call()]
# error
for num in range(8):
knots = np.random.random(num)
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(knots, False, lower, upper)
assert str(err.value) == \
"Must have at least 8 knots."
# Error
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(0.5, False, lower, upper)
assert str(err.value) ==\
"Knots: 0.5 must be iterable or value"
def test__init_coeffs(self):
np.random.seed(492)
# No coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._c is None
spl._t = [1, 2, 3, 4]
spl._init_coeffs()
assert (spl._c == [0, 0, 0, 0]).all()
assert mkBspline.call_args_list == [mk.call()]
# Some coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
coeffs = np.random.random(10)
spl = Spline1D()
assert spl._c is None
spl._init_coeffs(coeffs)
assert (spl._c == coeffs).all()
assert mkBspline.call_args_list == [mk.call()]
def test__init_data(self):
spl = Spline1D()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
has_bounds = mk.MagicMock()
lower = mk.MagicMock()
upper = mk.MagicMock()
with mk.patch.object(Spline1D, '_init_bounds', autospec=True,
return_value=(has_bounds, lower, upper)) as mkBounds:
with mk.patch.object(Spline1D, '_init_knots',
autospec=True) as mkKnots:
with mk.patch.object(Spline1D, '_init_coeffs',
autospec=True) as mkCoeffs:
main = mk.MagicMock()
main.attach_mock(mkBounds, 'bounds')
main.attach_mock(mkKnots, 'knots')
main.attach_mock(mkCoeffs, 'coeffs')
spl._init_data(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.bounds(spl, bounds),
mk.call.knots(spl, knots, has_bounds, lower, upper),
mk.call.coeffs(spl, coeffs)
]
def test_evaluate(self):
spl = Spline1D()
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value=new_kwargs) as mkEval:
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
assert mkBspline.return_value.return_value == spl.evaluate(*args, **kwargs)
assert mkBspline.return_value.call_args_list == \
[mk.call(args[0], **new_kwargs)]
assert mkBspline.call_args_list == [mk.call()]
assert mkEval.call_args_list == \
[mk.call(spl, *args, **kwargs)]
# Error
for idx in range(5, 8):
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value={'nu': idx}):
with pytest.raises(RuntimeError) as err:
spl.evaluate(*args, **kwargs)
assert str(err.value) == \
"Cannot evaluate a derivative of order higher than 4"
def check_knots_created(self, spl, k):
def value0(idx):
return self.x[0]
def value1(idx):
return self.x[-1]
for idx in range(k + 1):
name = f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value0, True)
index = len(spl.t) - (k + 1) + idx
name = f"knot{index}"
self.check_parameter(spl, "knot", name, index, value1, True)
def value3(idx):
return spl.t[idx]
assert len(spl._knot_names) == len(spl.t)
for idx, name in enumerate(spl._knot_names):
assert name == f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value3, True)
def check_coeffs_created(self, spl):
def value(idx):
return spl.c[idx]
assert len(spl._coeff_names) == len(spl.c)
for idx, name in enumerate(spl._coeff_names):
assert name == f"coeff{idx}"
self.check_parameter(spl, "coeff", name, idx, value, False)
@staticmethod
def check_base_spline(spl, t, c, k):
"""Check the base spline form"""
if t is None:
assert spl._t is None
else:
assert_allclose(spl._t, t)
if c is None:
assert spl._c is None
else:
assert_allclose(spl._c, c)
assert spl.degree == k
assert spl._bounding_box is None
def check_spline_fit(self, fit_spl, spline, fitter, atol_fit, atol_truth):
"""Check the spline fit"""
assert_allclose(fit_spl.t, spline._eval_args[0])
assert_allclose(fit_spl.c, spline._eval_args[1])
assert_allclose(fitter.fit_info['spline']._eval_args[0], spline._eval_args[0])
assert_allclose(fitter.fit_info['spline']._eval_args[1], spline._eval_args[1])
# check that _parameters are correct
assert len(fit_spl._parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl._parameters[:len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl._parameters[len(fit_spl.t):], fit_spl.c)
# check that parameters are correct
assert len(fit_spl.parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl.parameters[:len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl.parameters[len(fit_spl.t):], fit_spl.c)
assert_allclose(spline.get_residual(), fitter.fit_info['resid'])
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), fitter.fit_info['spline'](self.x))
assert_allclose(fit_spl(self.x), self.y, atol=atol_fit)
assert_allclose(fit_spl(self.x), self.truth, atol=atol_truth)
def check_bbox(self, spl, fit_spl, fitter, w, **kwargs):
"""Check the spline fit with bbox option"""
bbox = [self.x[0], self.x[-1]]
bbox_spl = fitter(spl, self.x, self.y, weights=w, bbox=bbox, **kwargs)
assert bbox_spl.bounding_box == tuple(bbox)
assert_allclose(fit_spl.t, bbox_spl.t)
assert_allclose(fit_spl.c, bbox_spl.c)
def check_knots_warning(self, fitter, knots, k, w, **kwargs):
"""Check that the knots warning is raised"""
spl = Spline1D(knots=knots, degree=k)
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, weights=w, **kwargs)
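    # Illustrative fitting workflow (a hedged sketch, not executed by these
    # tests; assumes x and y are 1D data arrays and the fitters are imported
    # as at the top of this module):
    #
    #     spl = Spline1D(degree=3)
    #     fitter = SplineSmoothingFitter()
    #     fit_spl = fitter(spl, x, y, s=0.5)   # returns a new fitted model
    #
    # Each test below checks that the input model is left untouched
    # (check_base_spline before and after the fit) and that the fitted model
    # matches the equivalent direct scipy.interpolate fit.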
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_interpolate_fitter(self, w, k):
fitter = SplineInterpolateFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, None, None, k)
assert len(fit_spl.t) == (len(self.x) + k + 1) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline
spline = InterpolatedUnivariateSpline(self.x, self.y, w=w, k=k)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
assert spline.get_residual() == 0
self.check_spline_fit(fit_spl, spline, fitter, 0, 1)
self.check_bbox(spl, fit_spl, fitter, w)
knots = np.linspace(self.x[0], self.x[-1], len(self.x) + k + 1)
self.check_knots_warning(fitter, knots, k, w)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_smoothing_fitter(self, w, k, s):
fitter = SplineSmoothingFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(self.x, self.y, w=w, k=k, s=s)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
# test warning
knots = fit_spl.t.copy()
self.check_knots_warning(fitter, knots, k, w, s=s)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_exact_knots_fitter(self, w, k):
fitter = SplineExactKnotsFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
assert len(fit_spl.t) == len(t) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
spline = LSQUnivariateSpline(self.x, self.y, knots, w=w, k=k)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
assert_allclose(spline.get_residual(), 0.1, atol=1)
assert_allclose(fitter.fit_info['spline'].get_residual(), 0.1, atol=1)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w)
# Pass knots via fitter function
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# pass no knots
spl = Spline1D(degree=k)
with pytest.raises(RuntimeError) as err:
fitter(spl, self.x, self.y, weights=w)
assert str(err.value) ==\
"No knots have been provided"
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_splrep_fitter_no_knots(self, w, k, s):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
w=w, k=k, s=s, full_output=1)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info['fp'] == spline_fp
assert fitter.fit_info['ier'] == spline_ier
assert fitter.fit_info['msg'] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
@pytest.mark.parametrize('w', wieght_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_splrep_fitter_with_knots(self, w, k):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
w=w, k=k, t=knots, full_output=1)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info['fp'] == spline_fp
assert fitter.fit_info['ier'] == spline_ier
assert fitter.fit_info['msg'] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w)
# test warning
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
        # With no knots preset
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, t=knots, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import BSpline, splrep
tck = splrep(self.x, self.y, w=w, k=k, t=knots)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, t=knots)
    def generate_spline(self, w=None, bbox=(None, None), k=None, s=None, t=None):
if k is None:
k = 3
from scipy.interpolate import BSpline, splrep
tck = splrep(self.x, self.y, w=w, xb=bbox[0], xe=bbox[1],
k=k, s=s, t=t)
return BSpline(*tck)
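    # Differentiating a degree-k B-spline nu times yields a spline of degree
    # k - nu, so a cubic supports at most nu == 3; the error branch below
    # checks that limit.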
def test_derivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
assert_allclose(spl.t, bspline.t)
assert_allclose(spl.c, bspline.c)
assert spl.degree == bspline.k
# 1st derivative
d_bspline = bspline.derivative(nu=1)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=1))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=3), bspline(self.xs, nu=4))
der = spl.derivative()
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 2
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=1))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=3), spl.evaluate(self.xs, nu=4))
# 2nd derivative
d_bspline = bspline.derivative(nu=2)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=4))
der = spl.derivative(nu=2)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 1
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=4))
# 3rd derivative
d_bspline = bspline.derivative(nu=3)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=4))
der = spl.derivative(nu=3)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 0
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=4))
# Too many derivatives
for nu in range(4, 9):
with pytest.raises(ValueError) as err:
spl.derivative(nu=nu)
assert str(err.value) == \
"Must have nu <= 3"
def test_antiderivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
# 1st antiderivative
a_bspline = bspline.antiderivative(nu=1)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=1))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=5))
anti = spl.antiderivative()
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 4
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=1))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=5))
# 2nd antiderivative
a_bspline = bspline.antiderivative(nu=2)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=5))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=6))
anti = spl.antiderivative(nu=2)
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 5
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=5))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=6))
        # Too many antiderivatives
for nu in range(3, 9):
with pytest.raises(ValueError) as err:
spl.antiderivative(nu=nu)
assert str(err.value) == \
f"Supported splines can have max degree 5, antiderivative degree will be {nu + 3}"
def test__SplineFitter_error(self):
spl = Spline1D()
class SplineFitter(_SplineFitter):
def _fit_method(self, model, x, y, **kwargs):
super()._fit_method(model, x, y, **kwargs)
fitter = SplineFitter()
with pytest.raises(ValueError) as err:
fitter(spl, mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"1D model can only have 2 data points."
with pytest.raises(ModelDefinitionError) as err:
fitter(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"Only spline models are compatible with this fitter."
with pytest.raises(NotImplementedError) as err:
fitter(spl, mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"This has not been implemented for _SplineFitter."
0bc763ca47ceca946d25a119e7f4ee67a9b4c1b41c838e94fef3bc794f51f960
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test separability of models.
"""
import numpy as np
# pylint: disable=invalid-name
import pytest
from numpy.testing import assert_allclose
from astropy.modeling import custom_model, models
from astropy.modeling.core import ModelDefinitionError
from astropy.modeling.models import Mapping
from astropy.modeling.separable import (_arith_oper, _cdot, _coord_matrix, _cstack, is_separable,
separability_matrix)
sh1 = models.Shift(1, name='shift1')
sh2 = models.Shift(2, name='sh2')
scl1 = models.Scale(1, name='scl1')
scl2 = models.Scale(2, name='scl2')
map1 = Mapping((0, 1, 0, 1), name='map1')
map2 = Mapping((0, 0, 1), name='map2')
map3 = Mapping((0, 0), name='map3')
rot = models.Rotation2D(2, name='rotation')
p2 = models.Polynomial2D(1, name='p2')
p22 = models.Polynomial2D(2, name='p22')
p1 = models.Polynomial1D(1, name='p1')
cm_4d_expected = (np.array([False, False, True, True]),
np.array([[True, True, False, False],
[True, True, False, False],
[False, False, True, False],
[False, False, False, True]]))
compound_models = {
'cm1': (map3 & sh1 | rot & sh1 | sh1 & sh2 & sh1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm2': (sh1 & sh2 | rot | map1 | p2 & p22,
(np.array([False, False]),
np.array([[True, True], [True, True]]))
),
'cm3': (map2 | rot & scl1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm4': (sh1 & sh2 | map2 | rot & scl1,
(np.array([False, False, True]),
np.array([[True, False], [True, False], [False, True]]))
),
'cm5': (map3 | sh1 & sh2 | scl1 & scl2,
(np.array([False, False]),
np.array([[True], [True]]))
),
'cm7': (map2 | p2 & sh1,
(np.array([False, True]),
np.array([[True, False], [False, True]]))
),
'cm8': (rot & (sh1 & sh2), cm_4d_expected),
'cm9': (rot & sh1 & sh2, cm_4d_expected),
'cm10': ((rot & sh1) & sh2, cm_4d_expected),
'cm11': (rot & sh1 & (scl1 & scl2),
(np.array([False, False, True, True, True]),
np.array([[True, True, False, False, False],
[True, True, False, False, False],
[False, False, True, False, False],
[False, False, False, True, False],
[False, False, False, False, True]]))),
}
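# A minimal illustration (not one of the cases above): for a purely parallel
# combination the separability matrix is diagonal, e.g.
#
#     >>> separability_matrix(sh1 & sh2)
#     array([[ True, False],
#            [False,  True]])
#
# whereas a coupled model such as Rotation2D contributes an all-True block.
# The helpers tested below (_coord_matrix, _cstack, _cdot, _arith_oper) build
# these matrices for single models, "&" combinations, and "|" compositions.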
def test_coord_matrix():
c = _coord_matrix(p2, 'left', 2)
assert_allclose(np.array([[1, 1], [0, 0]]), c)
c = _coord_matrix(p2, 'right', 2)
assert_allclose(np.array([[0, 0], [1, 1]]), c)
c = _coord_matrix(p1, 'left', 2)
assert_allclose(np.array([[1], [0]]), c)
c = _coord_matrix(p1, 'left', 1)
assert_allclose(np.array([[1]]), c)
c = _coord_matrix(sh1, 'left', 2)
assert_allclose(np.array([[1], [0]]), c)
c = _coord_matrix(sh1, 'right', 2)
assert_allclose(np.array([[0], [1]]), c)
c = _coord_matrix(sh1, 'right', 3)
assert_allclose(np.array([[0], [0], [1]]), c)
c = _coord_matrix(map3, 'left', 2)
assert_allclose(np.array([[1], [1]]), c)
c = _coord_matrix(map3, 'left', 3)
assert_allclose(np.array([[1], [1], [0]]), c)
def test_cdot():
result = _cdot(sh1, scl1)
assert_allclose(result, np.array([[1]]))
result = _cdot(rot, p2)
assert_allclose(result, np.array([[2, 2]]))
result = _cdot(rot, rot)
assert_allclose(result, np.array([[2, 2], [2, 2]]))
result = _cdot(Mapping((0, 0)), rot)
assert_allclose(result, np.array([[2], [2]]))
with pytest.raises(ModelDefinitionError,
match=r"Models cannot be combined with the \"|\" operator; .*"):
_cdot(sh1, map1)
def test_cstack():
result = _cstack(sh1, scl1)
assert_allclose(result, np.array([[1, 0], [0, 1]]))
result = _cstack(sh1, rot)
assert_allclose(result,
np.array([[1, 0, 0],
[0, 1, 1],
[0, 1, 1]])
)
result = _cstack(rot, sh1)
assert_allclose(result,
np.array([[1, 1, 0],
[1, 1, 0],
[0, 0, 1]])
)
def test_arith_oper():
# Models as inputs
result = _arith_oper(sh1, scl1)
assert_allclose(result, np.array([[1]]))
result = _arith_oper(rot, rot)
assert_allclose(result, np.array([[1, 1], [1, 1]]))
# ndarray
result = _arith_oper(np.array([[1, 2], [3, 4]]), np.array([[1, 2], [3, 4]]))
assert_allclose(result, np.array([[1, 1], [1, 1]]))
# Error
with pytest.raises(ModelDefinitionError, match=r"Unsupported operands for arithmetic operator: .*"):
_arith_oper(sh1, map1)
@pytest.mark.parametrize(('compound_model', 'result'), compound_models.values())
def test_separable(compound_model, result):
assert_allclose(is_separable(compound_model), result[0])
assert_allclose(separability_matrix(compound_model), result[1])
def test_custom_model_separable():
@custom_model
def model_a(x):
return x
assert model_a().separable
@custom_model
def model_c(x, y):
return x + y
assert not model_c().separable
assert np.all(separability_matrix(model_c()) == [True, True])
e842abda0f8dd05b6d12f66ac75d55ad95f91b850f059d83d9a420ad6744cfe2
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests models.parameters
"""
# pylint: disable=invalid-name
import functools
import itertools
import unittest.mock as mk
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling import fitting, models
from astropy.modeling.core import FittableModel, Model
from astropy.modeling.parameters import InputParameterError, Parameter, _tofloat, param_repr_oneline
from astropy.utils.data import get_pkg_data_filename
from . import irafutil
def setter1(val):
return val
def setter2(val, model):
model.do_something(val)
return val * model.p
class SetterModel(FittableModel):
n_inputs = 2
n_outputs = 1
xc = Parameter(default=1, setter=setter1)
yc = Parameter(default=1, setter=setter2)
def do_something(self, v):
pass
def __init__(self, xc, yc, p):
self.p = p # p is a value intended to be used by the setter
super().__init__()
self.xc = xc
self.yc = yc
def evaluate(self, x, y, xc, yc):
return (x - xc)**2 + (y - yc)**2
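# Note on the setters above: a one-argument setter such as setter1 is applied
# to the value directly, while a two-argument setter such as setter2 also
# receives the model, letting it use model state (here model.p). The wrapper
# creation rules are exercised in test__create_value_wrapper below.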
class TParModel(Model):
"""
A toy model to test parameters machinery
"""
coeff = Parameter()
e = Parameter()
def __init__(self, coeff, e, **kwargs):
super().__init__(coeff=coeff, e=e, **kwargs)
@staticmethod
def evaluate(coeff, e):
pass
class MockModel(FittableModel):
alpha = Parameter(name='alpha', default=42)
@staticmethod
def evaluate(*args):
pass
def test__tofloat():
# iterable
value = _tofloat([1, 2, 3])
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
value = _tofloat(np.array([1, 2, 3]))
assert isinstance(value, np.ndarray)
assert (value == np.array([1, 2, 3])).all()
assert np.all([isinstance(val, float) for val in value])
with pytest.raises(InputParameterError) as err:
_tofloat('test')
assert str(err.value) == \
"Parameter of <class 'str'> could not be converted to float"
# quantity
assert _tofloat(1 * u.m) == 1 * u.m
# dimensions/scalar array
value = _tofloat(np.asanyarray(3))
assert isinstance(value, float)
assert value == 3
# A regular number
value = _tofloat(3)
assert isinstance(value, float)
assert value == 3
value = _tofloat(3.0)
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.float64(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int32(3))
assert isinstance(value, float)
assert value == 3
value = _tofloat(np.int64(3))
assert isinstance(value, float)
assert value == 3
# boolean
message = "Expected parameter to be of numerical type, not boolean"
with pytest.raises(InputParameterError) as err:
_tofloat(True)
assert str(err.value) == message
with pytest.raises(InputParameterError) as err:
_tofloat(False)
assert str(err.value) == message
# other
    class Value:
pass
with pytest.raises(InputParameterError) as err:
_tofloat(Value)
assert str(err.value) == \
"Don't know how to convert parameter of <class 'type'> to float"
def test_parameter_properties():
"""Test if getting / setting of Parameter properties works."""
p = Parameter('alpha', default=1)
assert p.name == 'alpha'
# Parameter names are immutable
with pytest.raises(AttributeError):
p.name = 'beta'
assert p.fixed is False
p.fixed = True
assert p.fixed is True
assert p.tied is False
p.tied = lambda _: 0
p.tied = False
assert p.tied is False
assert p.min is None
p.min = 42
assert p.min == 42
p.min = None
assert p.min is None
assert p.max is None
p.max = 41
assert p.max == 41
def test_parameter_operators():
"""Test if the parameter arithmetic operators work."""
par = Parameter('alpha', default=42)
num = 42.
val = 3
assert par - val == num - val
assert val - par == val - num
assert par / val == num / val
assert val / par == val / num
assert par ** val == num ** val
assert val ** par == val ** num
assert par < 45
assert par > 41
assert par <= par
assert par >= par
assert par == par
assert -par == -num
assert abs(par) == abs(num)
# Test inherited models
class M1(Model):
m1a = Parameter(default=1.)
m1b = Parameter(default=5.)
def evaluate():
pass
class M2(M1):
m2c = Parameter(default=11.)
class M3(M2):
m3d = Parameter(default=20.)
def test_parameter_inheritance():
mod = M3()
assert mod.m1a == 1.
assert mod.m1b == 5.
assert mod.m2c == 11.
assert mod.m3d == 20.
for key in ['m1a', 'm1b', 'm2c', 'm3d']:
assert key in mod.__dict__
assert mod.param_names == ('m1a', 'm1b', 'm2c', 'm3d')
def test_param_metric():
mod = M3()
assert mod._param_metrics['m1a']['slice'] == slice(0, 1)
assert mod._param_metrics['m1b']['slice'] == slice(1, 2)
assert mod._param_metrics['m2c']['slice'] == slice(2, 3)
assert mod._param_metrics['m3d']['slice'] == slice(3, 4)
mod._parameters_to_array()
assert (mod._parameters == np.array([1., 5., 11., 20], dtype=np.float64)).all()
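# _param_metrics maps each parameter name to its slice of the model's flat
# _parameters array, which is how the parameters of a multi-parameter model
# share one contiguous storage buffer.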
class TestParameters:
def setup_class(self):
"""
        Unit tests for parameters.
        Read an IRAF database file created by onedspec.identify, use the
        information to create a 1D Chebyshev model, and perform the same
        fit. Also create a Gaussian model.
"""
test_file = get_pkg_data_filename('data/idcompspec.fits')
        with open(test_file) as f:
            lines = f.read()
        reclist = lines.split("begin")
record = irafutil.IdentifyRecord(reclist[1])
self.icoeff = record.coeff
order = int(record.fields['order'])
self.model = models.Chebyshev1D(order - 1)
self.gmodel = models.Gaussian1D(2, mean=3, stddev=4)
self.linear_fitter = fitting.LinearLSQFitter()
self.x = record.x
self.y = record.z
self.yy = np.array([record.z, record.z])
def test_set_parameters_as_list(self):
"""Tests updating parameters using a list."""
self.model.parameters = [30, 40, 50, 60, 70]
assert (self.model.parameters == [30., 40., 50., 60, 70]).all()
def test_set_parameters_as_array(self):
"""Tests updating parameters using an array."""
self.model.parameters = np.array([3, 4, 5, 6, 7])
assert (self.model.parameters == [3., 4., 5., 6., 7.]).all()
def test_set_as_tuple(self):
"""Tests updating parameters using a tuple."""
self.model.parameters = (1, 2, 3, 4, 5)
assert (self.model.parameters == [1, 2, 3, 4, 5]).all()
def test_set_model_attr_seq(self):
"""
Tests updating the parameters attribute when a model's
parameter (in this case coeff) is updated.
"""
self.model.parameters = [0, 0., 0., 0, 0]
self.model.c0 = 7
assert (self.model.parameters == [7, 0., 0., 0, 0]).all()
def test_set_model_attr_num(self):
"""Update the parameter list when a model's parameter is updated."""
self.gmodel.amplitude = 7
assert (self.gmodel.parameters == [7, 3, 4]).all()
def test_set_item(self):
"""Update the parameters using indexing."""
self.model.parameters = [1, 2, 3, 4, 5]
tpar = self.model.parameters
tpar[0] = 10.
self.model.parameters = tpar
assert (self.model.parameters == [10, 2, 3, 4, 5]).all()
assert self.model.c0 == 10
def test_wrong_size1(self):
"""
Tests raising an error when attempting to reset the parameters
using a list of a different size.
"""
with pytest.raises(InputParameterError):
self.model.parameters = [1, 2, 3]
def test_wrong_size2(self):
"""
Tests raising an exception when attempting to update a model's
parameter (in this case coeff) with a sequence of the wrong size.
"""
with pytest.raises(InputParameterError):
self.model.c0 = [1, 2, 3]
def test_wrong_shape(self):
"""
Tests raising an exception when attempting to update a model's
parameter and the new value has the wrong shape.
"""
with pytest.raises(InputParameterError):
self.gmodel.amplitude = [1, 2]
def test_par_against_iraf(self):
"""
Test the fitter modifies model.parameters.
Uses an iraf example.
"""
new_model = self.linear_fitter(self.model, self.x, self.y)
np.testing.assert_allclose(
new_model.parameters,
np.array([4826.1066602783685, 952.8943813407858, 12.641236013982386,
-1.7910672553339604, 0.90252884366711317]),
rtol=10 ** (-2))
    def test_polynomial1d(self):
d = {'c0': 11, 'c1': 12, 'c2': 13, 'c3': 14}
p1 = models.Polynomial1D(3, **d)
np.testing.assert_equal(p1.parameters, [11, 12, 13, 14])
def test_poly1d_multiple_sets(self):
p1 = models.Polynomial1D(3, n_models=3)
np.testing.assert_equal(p1.parameters, [0.0, 0.0, 0.0, 0, 0, 0,
0, 0, 0, 0, 0, 0])
np.testing.assert_array_equal(p1.c0, [0, 0, 0])
p1.c0 = [10, 10, 10]
np.testing.assert_equal(p1.parameters, [10.0, 10.0, 10.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_par_slicing(self):
"""
Test assigning to a parameter slice
"""
p1 = models.Polynomial1D(3, n_models=3)
p1.c0[:2] = [10, 10]
np.testing.assert_equal(p1.parameters, [10.0, 10.0, 0.0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
def test_poly2d(self):
p2 = models.Polynomial2D(degree=3)
p2.c0_0 = 5
np.testing.assert_equal(p2.parameters, [5, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def test_poly2d_multiple_sets(self):
kw = {'c0_0': [2, 3], 'c1_0': [1, 2], 'c2_0': [4, 5],
'c0_1': [1, 1], 'c0_2': [2, 2], 'c1_1': [5, 5]}
p2 = models.Polynomial2D(2, **kw)
np.testing.assert_equal(p2.parameters, [2, 3, 1, 2, 4, 5,
1, 1, 2, 2, 5, 5])
def test_shift_model_parameters1d(self):
sh1 = models.Shift(2)
sh1.offset = 3
assert sh1.offset == 3
assert sh1.offset.value == 3
def test_scale_model_parametersnd(self):
sc1 = models.Scale([2, 2])
sc1.factor = [3, 3]
assert np.all(sc1.factor == [3, 3])
np.testing.assert_array_equal(sc1.factor.value, [3, 3])
def test_bounds(self):
# Valid __init__
param = Parameter(bounds=(1, 2))
assert param.bounds == (1, 2)
param = Parameter(min=1, max=2)
assert param.bounds == (1, 2)
# Errors __init__
message = "bounds may not be specified simultaneously with min or max" +\
" when instantiating Parameter test"
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), min=1, name='test')
assert str(err.value) == message
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), max=2, name='test')
assert str(err.value) == message
with pytest.raises(ValueError) as err:
Parameter(bounds=(1, 2), min=1, max=2, name='test')
assert str(err.value) == message
# Setters
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.bounds == (None, None) == param._bounds
# Set errors
with pytest.raises(TypeError) as err:
param.bounds = ('test', None)
assert str(err.value) == \
"Min value must be a number or a Quantity"
with pytest.raises(TypeError) as err:
param.bounds = (None, 'test')
assert str(err.value) == \
"Max value must be a number or a Quantity"
# Set number
param.bounds = (1, 2)
assert param.bounds == (1, 2) == param._bounds
# Set Quantity
param.bounds = (1 * u.m, 2 * u.m)
assert param.bounds == (1, 2) == param._bounds
def test_modify_value(self):
param = Parameter(name='test', default=[1, 2, 3])
assert (param.value == [1, 2, 3]).all()
# Errors
with pytest.raises(InputParameterError) as err:
param[slice(0, 0)] = 2
assert str(err.value) == \
"Slice assignment outside the parameter dimensions for 'test'"
with pytest.raises(InputParameterError) as err:
param[3] = np.array([5])
assert str(err.value) == \
"Input dimension 3 invalid for 'test' parameter with dimension 1"
# assignment of a slice
param[slice(0, 2)] = [4, 5]
assert (param.value == [4, 5, 3]).all()
# assignment of a value
param[2] = 6
assert (param.value == [4, 5, 6]).all()
def test__set_unit(self):
param = Parameter(name='test', default=[1, 2, 3])
assert param.unit is None
# No force Error (no existing unit)
with pytest.raises(ValueError) as err:
param._set_unit(u.m)
assert str(err.value) == \
"Cannot attach units to parameters that were not initially specified with units"
# Force
param._set_unit(u.m, True)
assert param.unit == u.m
# No force Error (existing unit)
with pytest.raises(ValueError) as err:
param._set_unit(u.K)
assert str(err.value) == \
"Cannot change the unit attribute directly, instead change the parameter to a new quantity"
def test_quantity(self):
param = Parameter(name='test', default=[1, 2, 3])
assert param.unit is None
assert param.quantity is None
param = Parameter(name='test', default=[1, 2, 3], unit=u.m)
assert param.unit == u.m
assert (param.quantity == np.array([1, 2, 3]) * u.m).all()
def test_shape(self):
# Array like
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.shape == (4,)
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == \
"cannot reshape array of size 4 into shape (5,)"
# Reshape success
param.shape = (2, 2)
assert param.shape == (2, 2)
assert (param.value == [[1, 2], [3, 4]]).all()
# Scalar
param = Parameter(name='test', default=1)
assert param.shape == ()
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == \
"Cannot assign this shape to a scalar quantity"
param.shape = (1,)
# single value
param = Parameter(name='test', default=np.array([1]))
assert param.shape == (1,)
# Reshape error
with pytest.raises(ValueError) as err:
param.shape = (5,)
assert str(err.value) == \
"Cannot assign this shape to a scalar quantity"
param.shape = ()
def test_size(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.size == 4
param = Parameter(name='test', default=[1])
assert param.size == 1
param = Parameter(name='test', default=1)
assert param.size == 1
def test_std(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.std == None == param._std
param.std = 5
assert param.std == 5 == param._std
def test_fixed(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.fixed == False == param._fixed
# Set error
with pytest.raises(ValueError) as err:
param.fixed = 3
assert str(err.value) == \
"Value must be boolean"
# Set
param.fixed = True
assert param.fixed == True == param._fixed
def test_tied(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.tied == False == param._tied
# Set error
with pytest.raises(TypeError) as err:
param.tied = mk.NonCallableMagicMock()
assert str(err.value) == \
"Tied must be a callable or set to False or None"
# Set None
param.tied = None
assert param.tied == None == param._tied
# Set False
param.tied = False
assert param.tied == False == param._tied
# Set other
tied = mk.MagicMock()
param.tied = tied
assert param.tied == tied == param._tied
def test_validator(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param._validator is None
valid = mk.MagicMock()
param.validator(valid)
assert param._validator == valid
with pytest.raises(ValueError) as err:
param.validator(mk.NonCallableMagicMock())
assert str(err.value) == \
"This decorator method expects a callable.\n" +\
"The use of this method as a direct validator is\n" +\
"deprecated; use the new validate method instead\n"
def test_validate(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param._validator is None
assert param.model is None
# Run without validator
param.validate(mk.MagicMock())
# Run with validator but no Model
validator = mk.MagicMock()
param.validator(validator)
assert param._validator == validator
param.validate(mk.MagicMock())
assert validator.call_args_list == []
# Full validate
param._model = mk.MagicMock()
value = mk.MagicMock()
param.validate(value)
assert validator.call_args_list == [mk.call(param._model, value)]
def test_copy(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
copy_param = param.copy()
assert (param == copy_param).all()
assert id(param) != id(copy_param)
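    # test_model below drives Parameter.model assignment with mocks: attaching
    # a model re-wraps the getter/setter via _create_value_wrapper, and when
    # _model_required is True the stored value is refreshed through the new
    # setter (or left as None when there is no default).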
def test_model(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param.model == None == param._model
assert param._model_required == False
assert (param._value == [1, 2, 3, 4]).all()
setter = mk.MagicMock()
getter = mk.MagicMock()
param._setter = setter
param._getter = getter
# No Model Required
param._value = [5, 6, 7, 8]
model0 = mk.MagicMock()
setter0 = mk.MagicMock()
getter0 = mk.MagicMock()
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter0, getter0]) as mkCreate:
param.model = model0
assert param.model == model0 == param._model
assert param._setter == setter0
assert param._getter == getter0
assert mkCreate.call_args_list == [
mk.call(setter, model0),
mk.call(getter, model0)
]
assert param._value == [5, 6, 7, 8]
param._setter = setter
param._getter = getter
# Model required
param._model_required = True
model1 = mk.MagicMock()
setter1 = mk.MagicMock()
getter1 = mk.MagicMock()
setter1.return_value = [9, 10, 11, 12]
getter1.return_value = [9, 10, 11, 12]
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter1, getter1]) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1)
]
assert (param.value == [9, 10, 11, 12]).all()
param._setter = setter
param._getter = getter
param._default = None
with mk.patch.object(Parameter, '_create_value_wrapper',
side_effect=[setter1, getter1]) as mkCreate:
param.model = model1
assert param.model == model1 == param._model
assert param._setter == setter1
assert param._getter == getter1
assert mkCreate.call_args_list == [
mk.call(setter, model1),
mk.call(getter, model1)
]
assert param._value is None
def test_raw_value(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
# Normal case
assert (param._raw_value == param.value).all()
# Bad setter
param._setter = True
param._internal_value = 4
assert param._raw_value == 4
def test__create_value_wrapper(self):
param = Parameter(name='test', default=[1, 2, 3, 4])
# Bad ufunc
with pytest.raises(TypeError) as err:
param._create_value_wrapper(np.add, mk.MagicMock())
assert str(err.value) == \
"A numpy.ufunc used for Parameter getter/setter may only take one input argument"
# Good ufunc
assert param._create_value_wrapper(np.negative, mk.MagicMock()) == np.negative
# None
assert param._create_value_wrapper(None, mk.MagicMock()) is None
# wrapper with one argument
def wrapper1(a):
pass
assert param._create_value_wrapper(wrapper1, mk.MagicMock()) == wrapper1
        # wrapper with two arguments
def wrapper2(a, b):
pass
# model is None
assert param._model_required == False
assert param._create_value_wrapper(wrapper2, None) == wrapper2
assert param._model_required == True
# model is not None
param._model_required = False
model = mk.MagicMock()
with mk.patch.object(functools, 'partial', autospec=True) as mkPartial:
assert param._create_value_wrapper(wrapper2, model) == mkPartial.return_value
# wrapper with more than 2 arguments
def wrapper3(a, b, c):
pass
with pytest.raises(TypeError) as err:
param._create_value_wrapper(wrapper3, mk.MagicMock())
assert str(err.value) == \
"Parameter getter/setter must be a function of either one or two arguments"
def test_bool(self):
# single value is true
param = Parameter(name='test', default=1)
assert param.value == 1
assert np.all(param)
if param:
assert True
else:
assert False
# single value is false
param = Parameter(name='test', default=0)
assert param.value == 0
assert not np.all(param)
if param:
assert False
else:
assert True
# vector value all true
param = Parameter(name='test', default=[1, 2, 3, 4])
assert np.all(param.value == [1, 2, 3, 4])
assert np.all(param)
if param:
assert True
else:
assert False
# vector value at least one false
param = Parameter(name='test', default=[1, 2, 0, 3, 4])
assert np.all(param.value == [1, 2, 0, 3, 4])
assert not np.all(param)
if param:
assert False
else:
assert True
def test_param_repr_oneline(self):
# Single value no units
param = Parameter(name='test', default=1)
assert param_repr_oneline(param) == '1.'
# Vector value no units
param = Parameter(name='test', default=[1, 2, 3, 4])
assert param_repr_oneline(param) == '[1., 2., 3., 4.]'
# Single value units
param = Parameter(name='test', default=1*u.m)
assert param_repr_oneline(param) == '1. m'
# Vector value units
param = Parameter(name='test', default=[1, 2, 3, 4] * u.m)
assert param_repr_oneline(param) == '[1., 2., 3., 4.] m'
class TestMultipleParameterSets:
def setup_class(self):
self.x1 = np.arange(1, 10, .1)
self.y, self.x = np.mgrid[:10, :7]
self.x11 = np.array([self.x1, self.x1]).T
self.gmodel = models.Gaussian1D([12, 10], [3.5, 5.2], stddev=[.4, .7],
n_models=2)
def test_change_par(self):
"""
Test that a change to one parameter as a set propagates to param_sets.
"""
self.gmodel.amplitude = [1, 10]
        np.testing.assert_almost_equal(
            self.gmodel.param_sets,
            np.array([[1., 10], [3.5, 5.2], [0.4, 0.7]]))
        assert np.all(self.gmodel.parameters == [1.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_par2(self):
"""
Test that a change to one single parameter in a set propagates to
param_sets.
"""
self.gmodel.amplitude[0] = 11
        np.testing.assert_almost_equal(
            self.gmodel.param_sets,
            np.array([[11., 10], [3.5, 5.2], [0.4, 0.7]]))
        assert np.all(self.gmodel.parameters == [11.0, 10.0, 3.5, 5.2, 0.4, 0.7])
def test_change_parameters(self):
self.gmodel.parameters = [13, 10, 9, 5.2, 0.4, 0.7]
np.testing.assert_almost_equal(self.gmodel.amplitude.value, [13., 10.])
np.testing.assert_almost_equal(self.gmodel.mean.value, [9., 5.2])
class TestParameterInitialization:
"""
This suite of tests checks most if not all cases if instantiating a model
with parameters of different shapes/sizes and with different numbers of
parameter sets.
"""
def test_single_model_scalar_parameters(self):
t = TParModel(10, 1)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[10], [1]])
assert np.all(t.parameters == [10, 1])
assert t.coeff.shape == ()
assert t.e.shape == ()
def test_single_model_scalar_and_array_parameters(self):
t = TParModel(10, [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert len(t.param_sets) == 2
assert np.all(t.param_sets[0] == [10])
assert np.all(t.param_sets[1] == [[1, 2]])
assert np.all(t.parameters == [10, 1, 2])
assert t.coeff.shape == ()
assert t.e.shape == (2,)
def test_single_model_1d_array_parameters(self):
t = TParModel([10, 20], [1, 2])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[10, 20]], [[1, 2]]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
def test_single_model_1d_array_different_length_parameters(self):
with pytest.raises(InputParameterError):
# Not broadcastable
t = TParModel([1, 2], [3, 4, 5])
def test_single_model_2d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]])
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40]]],
[[[1, 2], [3, 4]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
def test_single_model_2d_non_square_parameters(self):
coeff = np.array([[10, 20], [30, 40], [50, 60]])
e = np.array([[1, 2], [3, 4], [5, 6]])
t = TParModel(coeff, e)
assert len(t) == 1
assert t.model_set_axis is False
assert np.all(t.param_sets == [[[[10, 20], [30, 40], [50, 60]]],
[[[1, 2], [3, 4], [5, 6]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t.coeff.shape == (3, 2)
assert t.e.shape == (3, 2)
t2 = TParModel(coeff.T, e.T)
assert len(t2) == 1
assert t2.model_set_axis is False
assert np.all(t2.param_sets == [[[[10, 30, 50], [20, 40, 60]]],
[[[1, 3, 5], [2, 4, 6]]]])
assert np.all(t2.parameters == [10, 30, 50, 20, 40, 60,
1, 3, 5, 2, 4, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
# Not broadcastable
with pytest.raises(InputParameterError):
TParModel(coeff, e.T)
with pytest.raises(InputParameterError):
TParModel(coeff.T, e)
def test_single_model_2d_broadcastable_parameters(self):
t = TParModel([[10, 20, 30], [40, 50, 60]], [1, 2, 3])
assert len(t) == 1
assert t.model_set_axis is False
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20, 30], [40, 50, 60]]])
assert np.all(t.param_sets[1] == [[1, 2, 3]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 1, 2, 3])
@pytest.mark.parametrize(('p1', 'p2'), [
(1, 2), (1, [2, 3]), ([1, 2], 3), ([1, 2, 3], [4, 5]),
([1, 2], [3, 4, 5])])
def test_two_model_incorrect_scalar_parameters(self, p1, p2):
with pytest.raises(InputParameterError):
TParModel(p1, p2, n_models=2)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_parameters(self, kwargs):
t = TParModel([10, 20], [1, 2], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[10, 20], [1, 2]])
assert np.all(t.parameters == [10, 20, 1, 2])
assert t.coeff.shape == (2,)
assert t.e.shape == (2,)
@pytest.mark.parametrize('kwargs', [
{'n_models': 2}, {'model_set_axis': 0},
{'n_models': 2, 'model_set_axis': 0}])
def test_two_model_scalar_and_array_parameters(self, kwargs):
t = TParModel([10, 20], [[1, 2], [3, 4]], **kwargs)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[10], [20]])
assert np.all(t.param_sets[1] == [[1, 2], [3, 4]])
assert np.all(t.parameters == [10, 20, 1, 2, 3, 4])
assert t.coeff.shape == (2,)
assert t.e.shape == (2, 2)
def test_two_model_1d_array_parameters(self):
t = TParModel([[10, 20], [30, 40]], [[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[10, 20], [30, 40]],
[[1, 2], [3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 1, 2, 3, 4])
assert t.coeff.shape == (2, 2)
assert t.e.shape == (2, 2)
t2 = TParModel([[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]], n_models=2)
assert len(t2) == 2
assert t2.model_set_axis == 0
assert np.all(t2.param_sets == [[[10, 20, 30], [40, 50, 60]],
[[1, 2, 3], [4, 5, 6]]])
assert np.all(t2.parameters == [10, 20, 30, 40, 50, 60,
1, 2, 3, 4, 5, 6])
assert t2.coeff.shape == (2, 3)
assert t2.e.shape == (2, 3)
def test_two_model_mixed_dimension_array_parameters(self):
with pytest.raises(InputParameterError):
# Can't broadcast different array shapes
TParModel([[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[9, 10, 11], [12, 13, 14]], n_models=2)
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[1, 2], [3, 4]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 20], [30, 40]],
[[50, 60], [70, 80]]])
assert np.all(t.param_sets[1] == [[[1, 2]], [[3, 4]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2)
def test_two_model_2d_array_parameters(self):
t = TParModel([[[10, 20], [30, 40]], [[50, 60], [70, 80]]],
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], n_models=2)
assert len(t) == 2
assert t.model_set_axis == 0
assert np.all(t.param_sets == [[[[10, 20], [30, 40]],
[[50, 60], [70, 80]]],
[[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]])
assert np.all(t.parameters == [10, 20, 30, 40, 50, 60, 70, 80,
1, 2, 3, 4, 5, 6, 7, 8])
assert t.coeff.shape == (2, 2, 2)
assert t.e.shape == (2, 2, 2)
def test_two_model_nonzero_model_set_axis(self):
# An example where the model set axis is the *last* axis of the
# parameter arrays
coeff = np.array([[[10, 20, 30], [30, 40, 50]], [[50, 60, 70], [70, 80, 90]]])
coeff = np.rollaxis(coeff, 0, 3)
e = np.array([[1, 2, 3], [3, 4, 5]])
e = np.rollaxis(e, 0, 2)
t = TParModel(coeff, e, n_models=2, model_set_axis=-1)
assert len(t) == 2
assert t.model_set_axis == -1
assert len(t.param_sets) == 2
assert np.issubdtype(t.param_sets.dtype, np.object_)
assert np.all(t.param_sets[0] == [[[10, 50], [20, 60], [30, 70]],
[[30, 70], [40, 80], [50, 90]]])
assert np.all(t.param_sets[1] == [[[1, 3], [2, 4], [3, 5]]])
assert np.all(t.parameters == [10, 50, 20, 60, 30, 70, 30, 70, 40, 80,
50, 90, 1, 3, 2, 4, 3, 5])
        assert t.coeff.shape == (2, 3, 2)  # full array shape, including the model-set axis
        assert t.e.shape == (3, 2)  # full array shape, including the model-set axis
def test_wrong_number_of_params(self):
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), n_models=2)
with pytest.raises(InputParameterError):
TParModel(coeff=[[1, 2], [3, 4]], e=(2, 3, 4), model_set_axis=0)
def test_wrong_number_of_params2(self):
with pytest.raises(InputParameterError):
m = TParModel(coeff=[[1, 2], [3, 4]], e=4, n_models=2)
with pytest.raises(InputParameterError):
m = TParModel(coeff=[[1, 2], [3, 4]], e=4, model_set_axis=0)
def test_array_parameter1(self):
with pytest.raises(InputParameterError):
t = TParModel(np.array([[1, 2], [3, 4]]), 1, model_set_axis=0)
def test_array_parameter2(self):
with pytest.raises(InputParameterError):
m = TParModel(np.array([[1, 2], [3, 4]]), (1, 1, 11),
model_set_axis=0)
def test_array_parameter4(self):
"""
Test multiple parameter model with array-valued parameters of the same
size as the number of parameter sets.
"""
t4 = TParModel([[1, 2], [3, 4]], [5, 6], model_set_axis=False)
assert len(t4) == 1
assert t4.coeff.shape == (2, 2)
assert t4.e.shape == (2,)
assert np.issubdtype(t4.param_sets.dtype, np.object_)
assert np.all(t4.param_sets[0] == [[1, 2], [3, 4]])
assert np.all(t4.param_sets[1] == [5, 6])
def test_non_broadcasting_parameters():
"""
    Tests that a model with 3 parameters that do not all mutually broadcast
    is detected as invalid regardless of the order in which the parameters
    are given.
"""
a = 3
b = np.array([[1, 2, 3], [4, 5, 6]])
c = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
class TestModel(Model):
p1 = Parameter()
p2 = Parameter()
p3 = Parameter()
def evaluate(self, *args):
return
# a broadcasts with both b and c, but b does not broadcast with c
for args in itertools.permutations((a, b, c)):
with pytest.raises(InputParameterError):
TestModel(*args)
def test_setter():
pars = np.random.rand(20).reshape((10, 2))
model = SetterModel(xc=-1, yc=3, p=np.pi)
for x, y in pars:
np.testing.assert_almost_equal(
model(x, y),
(x + 1)**2 + (y - np.pi * 3)**2)
|
d212ee183cbb50ddf8e45a63a3a0ac2127e91a281900ab1ebeceb36f4f33dcc7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for physical functions."""
# pylint: disable=no-member, invalid-name
import numpy as np
import pytest
from astropy import cosmology
from astropy import units as u
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.physical_models import NFW, BlackBody
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.exceptions import AstropyUserWarning
__doctest_skip__ = ["*"]
# BlackBody tests
@pytest.mark.parametrize("temperature", (3000 * u.K, 2726.85 * u.deg_C))
def test_blackbody_evaluate(temperature):
b = BlackBody(temperature=temperature, scale=1.0)
assert_quantity_allclose(b(1.4 * u.micron), 486787299458.15656 * u.MJy / u.sr)
assert_quantity_allclose(b(214.13747 * u.THz), 486787299458.15656 * u.MJy / u.sr)
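# Editor's sketch (not part of the original suite): the two parametrized
# temperatures are the same physical value (2726.85 deg_C + 273.15 = 3000 K),
# and the two evaluation points are the same photon, since c / 1.4 um
# ~ 214.137 THz.  A quick check of that spectral equivalence:
def _spectral_equivalence_sketch():
    from astropy.constants import c
    return (c / (1.4 * u.micron)).to(u.THz)  # ~214.137 THz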
def test_blackbody_wiens_law():
b = BlackBody(293.0 * u.K)
assert_quantity_allclose(b.lambda_max, 9.890006672986939 * u.micron)
assert_quantity_allclose(b.nu_max, 17.22525080856469 * u.THz)
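# Editor's sketch (not part of the original suite): the value asserted above
# follows directly from Wien's displacement law, lambda_max = b / T with
# b ~ 2.8978e-3 m K, so a 293 K blackbody peaks near 9.89 um:
def _wien_displacement_sketch():
    from astropy.constants import b_wien
    return (b_wien / (293.0 * u.K)).to(u.micron)  # ~9.890 um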
def test_blackbody_stefan_boltzmann_law():
b = BlackBody(293.0 * u.K)
assert_quantity_allclose(b.bolometric_flux, 133.02471751812573 * u.W / (u.m * u.m))
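# Editor's sketch (not part of the original suite): with a dimensionless scale
# of 1, the bolometric surface brightness is sigma_sb * T**4 / pi (the
# Stefan-Boltzmann flux per steradian), which reproduces the value above:
def _stefan_boltzmann_sketch():
    from astropy.constants import sigma_sb
    T = 293.0 * u.K
    return (sigma_sb * T ** 4 / np.pi).to(u.W / u.m ** 2)  # ~133.02 W / m2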
def test_blackbody_input_units():
SLAM = u.erg / (u.cm ** 2 * u.s * u.AA * u.sr)
SNU = u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)
b_lam = BlackBody(3000*u.K, scale=1*SLAM)
    assert b_lam.input_units['x'] == u.AA
    b_nu = BlackBody(3000*u.K, scale=1*SNU)
    assert b_nu.input_units['x'] == u.Hz
def test_blackbody_return_units():
# return of evaluate has no units when temperature has no units
b = BlackBody(1000.0 * u.K, scale=1.0)
assert not isinstance(b.evaluate(1.0 * u.micron, 1000.0, 1.0), u.Quantity)
# return has "standard" units when scale has no units
b = BlackBody(1000.0 * u.K, scale=1.0)
assert isinstance(b(1.0 * u.micron), u.Quantity)
assert b(1.0 * u.micron).unit == u.erg / (u.cm ** 2 * u.s * u.Hz * u.sr)
# return has scale units when scale has units
b = BlackBody(1000.0 * u.K, scale=1.0 * u.MJy / u.sr)
assert isinstance(b(1.0 * u.micron), u.Quantity)
assert b(1.0 * u.micron).unit == u.MJy / u.sr
# scale has units but evaluate scale has no units
assert_quantity_allclose(b.evaluate(1.0 * u.micron, 1000.0 * u.K, 4.0), 89668184.86321202 * u.MJy / u.sr)
@pytest.mark.skipif("not HAS_SCIPY")
def test_blackbody_fit():
fitter = LevMarLSQFitter()
b = BlackBody(3000 * u.K, scale=5e-17 * u.Jy / u.sr)
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy / u.sr
b_fit = fitter(b, wav, fnu, maxiter=1000)
assert_quantity_allclose(b_fit.temperature, 2840.7438355865065 * u.K)
assert_quantity_allclose(b_fit.scale, 5.803783292762381e-17)
def test_blackbody_overflow():
"""Test Planck function with overflow."""
photlam = u.photon / (u.cm ** 2 * u.s * u.AA)
wave = [0.0, 1000.0, 100000.0, 1e55] # Angstrom
temp = 10000.0 # Kelvin
bb = BlackBody(temperature=temp * u.K, scale=1.0)
with pytest.warns(
AstropyUserWarning,
match=r'Input contains invalid wavelength/frequency value\(s\)'):
with np.errstate(all="ignore"):
bb_lam = bb(wave) * u.sr
flux = bb_lam.to(photlam, u.spectral_density(wave * u.AA)) / u.sr
# First element is NaN, last element is very small, others normal
assert np.isnan(flux[0])
with np.errstate(all="ignore"):
assert np.log10(flux[-1].value) < -134
np.testing.assert_allclose(
flux.value[1:-1], [0.00046368, 0.04636773], rtol=1e-3
) # 0.1% accuracy in PHOTLAM/sr
with np.errstate(all="ignore"):
flux = bb(1.0 * u.AA)
assert flux.value == 0
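# Editor's note (sketch, not part of the original suite): at 1 Angstrom and
# 10000 K the Planck exponent h*c / (lambda * k_B * T) is ~1.4e4, so exp(x)
# overflows and the Wien tail underflows to exactly zero, which is what the
# final assertion above checks:
def _planck_exponent_sketch():
    from astropy.constants import c, h, k_B
    x = h * c / (1.0 * u.AA * k_B * 10000.0 * u.K)
    return x.to(u.dimensionless_unscaled)  # ~1.44e4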
def test_blackbody_exceptions_and_warnings():
"""Test exceptions."""
# Negative temperature
with pytest.raises(
ValueError,
match="Temperature should be positive: \\[-100.\\] K"):
bb = BlackBody(-100 * u.K)
bb(1.0 * u.micron)
bb = BlackBody(5000 * u.K)
# Zero wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match='invalid') as w:
bb(0 * u.AA)
assert len(w) == 3 # 2 of these are RuntimeWarning from zero divide
# Negative wavelength given for conversion to Hz
with pytest.warns(AstropyUserWarning, match='invalid') as w:
bb(-1.0 * u.AA)
assert len(w) == 1
# Test that a non surface brightness convertible scale unit raises an error
with pytest.raises(
ValueError,
match="scale units not dimensionless or in surface brightness: Jy"):
bb = BlackBody(5000 * u.K, scale=1.0 * u.Jy)
def test_blackbody_array_temperature():
"""Regression test to make sure that the temperature can be an array."""
multibb = BlackBody([100, 200, 300] * u.K)
flux = multibb(1.2 * u.mm)
np.testing.assert_allclose(
flux.value, [1.804908e-12, 3.721328e-12, 5.638513e-12], rtol=1e-5
)
flux = multibb([2, 4, 6] * u.mm)
np.testing.assert_allclose(
flux.value, [6.657915e-13, 3.420677e-13, 2.291897e-13], rtol=1e-5
)
multibb = BlackBody(np.ones(4) * u.K)
flux = multibb(np.ones((3, 4)) * u.mm)
assert flux.shape == (3, 4)
def test_blackbody_dimensionless():
"""Test support for dimensionless (but not unscaled) units for scale"""
T = 3000 * u.K
r = 1e14 * u.cm
DL = 100 * u.Mpc
scale = np.pi * (r / DL)**2
bb1 = BlackBody(temperature=T, scale=scale)
# even though we passed scale with units, we should be able to evaluate with unitless
bb1.evaluate(0.5, T.value, scale.to_value(u.dimensionless_unscaled))
bb2 = BlackBody(temperature=T, scale=scale.to_value(u.dimensionless_unscaled))
bb2.evaluate(0.5, T.value, scale.to_value(u.dimensionless_unscaled))
# bolometric flux for both cases should be equivalent
    assert bb1.bolometric_flux == bb2.bolometric_flux
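# Editor's sketch (not part of the original suite): the scale used above is
# the usual geometric dilution factor for a sphere of radius r seen from a
# distance DL, pi * (r / DL)**2, which is genuinely dimensionless:
def _dilution_factor_sketch():
    r = 1e14 * u.cm
    DL = 100 * u.Mpc
    return (np.pi * (r / DL) ** 2).to(u.dimensionless_unscaled)  # ~3.3e-25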
@pytest.mark.skipif("not HAS_SCIPY")
def test_blackbody_dimensionless_fit():
T = 3000 * u.K
r = 1e14 * u.cm
DL = 100 * u.Mpc
scale = np.pi * (r / DL)**2
bb1 = BlackBody(temperature=T, scale=scale)
bb2 = BlackBody(temperature=T, scale=scale.to_value(u.dimensionless_unscaled))
fitter = LevMarLSQFitter()
wav = np.array([0.5, 5, 10]) * u.micron
fnu = np.array([1, 10, 5]) * u.Jy / u.sr
bb1_fit = fitter(bb1, wav, fnu, maxiter=1000)
bb2_fit = fitter(bb2, wav, fnu, maxiter=1000)
    assert bb1_fit.temperature == bb2_fit.temperature
@pytest.mark.parametrize("mass", (2.0000000000000E15 * u.M_sun, 3.976819741e+45 * u.kg))
def test_NFW_evaluate(mass):
"""Evaluation, density, and radii validation of NFW model."""
# Test parameters
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
# Parsec tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3.0 * u.Mpc), (3.709693508e+12 * (u.solMass / u.Mpc ** 3),
7.376391187e+42 * (u.kg / u.Mpc ** 3)))
assert_quantity_allclose(n200c.rho_scale, (7800150779863018.0 * (u.solMass / u.Mpc ** 3)))
assert_quantity_allclose(n200c.r_s, (0.24684627641195428 * u.Mpc))
assert_quantity_allclose(n200c.r_virial, (2.0981933495016114 * u.Mpc))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(3.0 * u.Mpc), (3.626093406e+12 * (u.solMass / u.Mpc**3),
7.210159921e+42 * (u.kg / u.Mpc**3)))
assert_quantity_allclose(n200m.rho_scale, (5118547639858115.0 * (u.solMass / u.Mpc ** 3)))
assert_quantity_allclose(n200m.r_s, (0.2840612517326848 * u.Mpc))
assert_quantity_allclose(n200m.r_virial, (2.414520639727821 * u.Mpc))
# Virial mass
massfactor = ("virial")
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(3.0 * u.Mpc), (3.646475546e+12 * (u.solMass / u.Mpc**3),
7.250687967e+42 * (u.kg / u.Mpc**3)))
assert_quantity_allclose(nvir.rho_scale, (5649367524651067.0 * (u.solMass / u.Mpc ** 3)))
assert_quantity_allclose(nvir.r_s, (0.2748701862303786 * u.Mpc))
assert_quantity_allclose(nvir.r_virial, (2.3363965829582183 * u.Mpc))
# kpc tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3141 * u.kpc), (3254.373619264334 * (u.solMass / u.kpc ** 3),
6.471028627484543e+33 * (u.kg / u.kpc ** 3)))
assert_quantity_allclose(n200c.rho_scale, (7800150.779863021 * (u.solMass / u.kpc ** 3)))
assert_quantity_allclose(n200c.r_s, (246.84627641195425 * u.kpc))
assert_quantity_allclose(n200c.r_virial, (2098.193349501611 * u.kpc))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(3141 * u.kpc), (3184.0370866188623 * (u.solMass / u.kpc**3),
6.33117077170161e+33 * (u.kg / u.kpc**3)))
assert_quantity_allclose(n200m.rho_scale, (5118547.639858116 * (u.solMass / u.kpc ** 3)))
assert_quantity_allclose(n200m.r_s, (284.0612517326848 * u.kpc))
assert_quantity_allclose(n200m.r_virial, (2414.5206397278207 * u.kpc))
# Virial mass
massfactor = ("virial")
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(3141 * u.kpc), (3201.1946851294997 * (u.solMass / u.kpc**3),
6.365287109937637e+33 * (u.kg / u.kpc**3)))
assert_quantity_allclose(nvir.rho_scale, (5649367.5246510655 * (u.solMass / u.kpc ** 3)))
assert_quantity_allclose(nvir.r_s, (274.87018623037864 * u.kpc))
assert_quantity_allclose(nvir.r_virial, (2336.3965829582185 * u.kpc))
# Meter tests
# 200c Overdensity
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(4.2e+23 * u.m), (1.527649658673012e-57 * (u.solMass / u.m ** 3),
3.0375936602739256e-27 * (u.kg / u.m ** 3)))
assert_quantity_allclose(n200c.rho_scale, (2.654919529637763e-52 * (u.solMass / u.m ** 3)))
assert_quantity_allclose(n200c.r_s, (7.616880211930209e+21 * u.m))
assert_quantity_allclose(n200c.r_virial, (6.474348180140678e+22 * u.m))
# 200m Overdensity
massfactor = ("mean", 200)
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(4.2e+23 * u.m), (1.5194778058079436e-57 * (u.solMass / u.m ** 3),
3.0213446673751314e-27 * (u.kg / u.m ** 3)))
assert_quantity_allclose(n200m.rho_scale, (1.742188385322371e-52 * (u.solMass / u.m ** 3)))
assert_quantity_allclose(n200m.r_s, (8.76521436235054e+21 * u.m))
assert_quantity_allclose(n200m.r_virial, (7.450432207997959e+22 * u.m))
# Virial mass
massfactor = ("virial")
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(4.2e+23 * u.m), (1.5214899184117633e-57 * (u.solMass / u.m ** 3),
3.0253455719375224e-27 * (u.kg / u.m ** 3)))
assert_quantity_allclose(nvir.rho_scale, (1.922862338766335e-52 * (u.solMass / u.m ** 3)))
assert_quantity_allclose(nvir.r_s, (8.481607714647913e+21 * u.m))
assert_quantity_allclose(nvir.r_virial, (7.209366557450727e+22 * u.m))
# Verify string input of overdensity type
# 200c Overdensity
massfactor = "200c"
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3.0 * u.Mpc), (3.709693508e+12 * (u.solMass / u.Mpc ** 3),
7.376391187e+42 * (u.kg / u.Mpc ** 3)))
# 200m Overdensity
massfactor = "200m"
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200m(3.0 * u.Mpc), (3.626093406e+12 * (u.solMass / u.Mpc**3),
7.210159921e+42 * (u.kg / u.Mpc**3)))
# Virial mass
massfactor = "virial"
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(nvir(3.0 * u.Mpc), (3.646475546e+12 * (u.solMass / u.Mpc**3),
7.250687967e+42 * (u.kg / u.Mpc**3)))
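# Editor's sketch (not part of the original suite): the densities asserted
# above follow the NFW profile rho(r) = rho_s / ((r/r_s) * (1 + r/r_s)**2).
# Re-deriving the 200c density at 3 Mpc from the scale values the test itself
# asserts (rho_s ~ 7.80e15 Msun/Mpc^3, r_s ~ 0.2468 Mpc):
def _nfw_profile_sketch():
    rho_s = 7800150779863018.0 * u.solMass / u.Mpc ** 3
    r_s = 0.24684627641195428 * u.Mpc
    x = (3.0 * u.Mpc / r_s).to_value(u.dimensionless_unscaled)
    return rho_s / (x * (1 + x) ** 2)  # ~3.7097e+12 solMass / Mpc3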
@pytest.mark.skipif("not HAS_SCIPY")
def test_NFW_fit():
"""Test linear fitting of NFW model."""
# Fixed parameters
redshift = 0.63
cosmo = cosmology.Planck15
# Radial set
r = np.array([1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04]) * u.kpc
# 200c Overdensity
massfactor = ("critical", 200)
density_r = np.array([1.77842761e+08, 9.75233623e+06, 2.93789626e+06, 1.90107238e+06,
1.30776878e+06, 7.01004140e+05, 4.20678479e+05, 1.57421880e+05,
7.54669701e+04, 2.56319769e+04, 6.21976562e+03, 3.96522424e+02,
7.39336808e+01]) * (u.solMass / u.kpc ** 3)
fitter = LevMarLSQFitter()
n200c = NFW(mass=1.8E15 * u.M_sun, concentration=7.0, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
n200c.redshift.fixed = True
n_fit = fitter(n200c, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
# 200m Overdensity
massfactor = ("mean", 200)
density_r = np.array([1.35677282e+08, 7.95392979e+06, 2.50352599e+06, 1.64535870e+06,
1.14642248e+06, 6.26805453e+05, 3.81691731e+05, 1.46294819e+05,
7.11559560e+04, 2.45737796e+04, 6.05459585e+03, 3.92183991e+02,
7.34674416e+01]) * (u.solMass / u.kpc ** 3)
fitter = LevMarLSQFitter()
n200m = NFW(mass=1.8E15 * u.M_sun, concentration=7.0, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
n200m.redshift.fixed = True
n_fit = fitter(n200m, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
# Virial mass
massfactor = ("virial", 200)
density_r = np.array([1.44573515e+08, 8.34873998e+06, 2.60137484e+06, 1.70348738e+06,
1.18337370e+06, 6.43994654e+05, 3.90800249e+05, 1.48930537e+05,
7.21856397e+04, 2.48289464e+04, 6.09477095e+03, 3.93248818e+02,
7.35821787e+01]) * (u.solMass / u.kpc ** 3)
fitter = LevMarLSQFitter()
nvir = NFW(mass=1.8E15 * u.M_sun, concentration=7.0, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
nvir.redshift.fixed = True
n_fit = fitter(nvir, r, density_r, maxiter=1000)
assert_quantity_allclose(n_fit.mass, 2.0000000000000E15 * u.M_sun)
assert_quantity_allclose(n_fit.concentration, 8.5)
def test_NFW_circular_velocity():
"""Test circular velocity and radial validation of NFW model."""
# Test parameters
mass = 2.0000000000000E15 * u.M_sun
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
r_r = np.array([0.01, 0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.75, 1.0, 1.5, 2.5, 6.5, 11.5]) * u.Mpc
# 200c Overdensity tests
massfactor = ("critical", 200)
n200c = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
circ_v_200c = np.array([702.45487454, 1812.4138346, 2150.50929296, 2231.5802568, 2283.96950242,
2338.45989696, 2355.78876772, 2332.41766543, 2276.89433811,
2154.53909153, 1950.07947819, 1512.37442943,
1260.94034541]) * (u.km / u.s)
assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
assert_quantity_allclose(n200c.r_max, (0.5338248204429641 * u.Mpc))
assert_quantity_allclose(n200c.v_max, (2356.7204380904027 * (u.km / u.s)))
# 200m Overdensity tests
massfactor = ("mean", 200)
mass = 1.0e14 * u.M_sun
concentration = 12.3
redshift = 1.5
n200m = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
circ_v_200m = np.array([670.18236647, 1088.9843324, 1046.82334367, 1016.88890732, 987.97273478,
936.00207134, 891.80115232, 806.63307977, 744.91002191, 659.33401039,
557.82823549, 395.9735786, 318.29863006]) * (u.km / u.s)
assert_quantity_allclose(n200m.circular_velocity(r_r), circ_v_200m)
assert_quantity_allclose(n200m.r_max, (0.10196917920081808 * u.Mpc))
assert_quantity_allclose(n200m.v_max, (1089.0224395818727 * (u.km / u.s)))
# Virial Overdensity tests
massfactor = ("virial")
mass = 1.2e+45 * u.kg
concentration = 2.4
redshift = 0.34
r_r = np.array([3.08567758e+20, 3.08567758e+21, 6.17135516e+21, 7.71419395e+21,
9.25703274e+21, 1.23427103e+22, 1.54283879e+22, 2.31425819e+22,
3.08567758e+22, 4.62851637e+22, 7.71419395e+22, 2.00569043e+23,
3.54852922e+23]) * u.m
nvir = NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
circ_v_vir = np.array([205.87461783, 604.65091823, 793.9190629, 857.52516521, 908.90280843,
986.53582718, 1041.69089845, 1124.19719446, 1164.58270747, 1191.33193561,
1174.02934755, 1023.69360527, 895.52206321]) * (u.km / u.s)
assert_quantity_allclose(nvir.circular_velocity(r_r), circ_v_vir)
assert_quantity_allclose(nvir.r_max, (1.6484542328623448 * u.Mpc))
assert_quantity_allclose(nvir.v_max, (1192.3130989914962 * (u.km / u.s)))
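# Editor's note (sketch, not part of the original suite): for an NFW halo the
# circular-velocity curve peaks at r_max ~ 2.16258 * r_s regardless of mass or
# concentration; with the 200c scale radius asserted in test_NFW_evaluate
# (r_s ~ 0.24685 Mpc) this reproduces the r_max checked above:
def _nfw_rmax_sketch():
    r_s = 0.24684627641195428 * u.Mpc
    return 2.16258 * r_s  # ~0.53382 Mpc, matching n200c.r_max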
def test_NFW_exceptions_and_warnings_and_misc():
"""Test NFW exceptions."""
# Arbitrary Test parameters
mass = 2.0000000000000E15 * u.M_sun
concentration = 8.5
redshift = 0.63
cosmo = cosmology.Planck15
massfactor = ("critical", 200)
r_r = np.array([1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04]) * u.kpc
# Massfactor exception tests
with pytest.raises(ValueError) as exc:
NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=("not", "virial"))
assert exc.value.args[0] == "Massfactor 'not' not one of 'critical', 'mean', or 'virial'"
with pytest.raises(ValueError) as exc:
NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor="not virial")
assert exc.value.args[0] == "Massfactor not virial string not of the form '#m', '#c', " \
"or 'virial'"
with pytest.raises(TypeError) as exc:
NFW(mass=mass, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=200)
assert exc.value.args[0] == "Massfactor 200 not a tuple or string"
# Verify unitless mass
# Density test
n200c = NFW(mass=mass.value, concentration=concentration, redshift=redshift, cosmo=cosmo,
massfactor=massfactor)
assert_quantity_allclose(n200c(3000.0), (3.709693508e+12 * (u.solMass / u.Mpc ** 3),
7.376391187e+42 * (u.kg / u.Mpc ** 3)))
# Circular velocity test with unitless mass
circ_v_200c = np.array([702.45487454, 1812.4138346, 2150.50929296, 2231.5802568, 2283.96950242,
2338.45989696, 2355.78876772, 2332.41766543, 2276.89433811,
2154.53909153, 1950.07947819, 1512.37442943,
1260.94034541]) * (u.km / u.s)
assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
# test with unitless input velocity
assert_quantity_allclose(n200c.circular_velocity(r_r.value), circ_v_200c)
# Test Default Cosmology
ncos = NFW(mass=mass, concentration=concentration, redshift=redshift)
assert_quantity_allclose(ncos.A_NFW(concentration), 1.356554956501232)
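# Editor's sketch (not part of the original suite): A_NFW is the standard NFW
# mass integral A(c) = ln(1 + c) - c / (1 + c), so for c = 8.5 it is
# ln(9.5) - 8.5/9.5 ~ 2.25129 - 0.89474 = 1.35655, the value asserted above:
def _a_nfw_sketch(c=8.5):
    return np.log(1 + c) - c / (1 + c)  # ~1.3565549565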
|
1091a76bb39ea6925bdc1f899c9b6abff869930b31f95ac748bacf9c0a583665 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import unittest.mock as mk
from math import cos, sin
import numpy as np
import pytest
from numpy.testing import assert_allclose
import astropy.units as u
from astropy.modeling import models, rotations
from astropy.tests.helper import assert_quantity_allclose
from astropy.wcs import wcs
@pytest.mark.parametrize(('inp'), [(0, 0), (4000, -20.56), (-2001.5, 45.9),
(0, 90), (0, -90), (np.mgrid[:4, :6]),
([[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]),
([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]],
[[[25, 26, 27, 28],
[29, 30, 31, 32],
[33, 34, 35, 36]],
[[37, 38, 39, 40],
[41, 42, 43, 44],
[45, 46, 47, 48]]])])
def test_against_wcslib(inp):
w = wcs.WCS()
crval = [202.4823228, 47.17511893]
w.wcs.crval = crval
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
lonpole = 180
tan = models.Pix2Sky_TAN()
n2c = models.RotateNative2Celestial(crval[0], crval[1], lonpole)
c2n = models.RotateCelestial2Native(crval[0], crval[1], lonpole)
m = tan | n2c
minv = c2n | tan.inverse
radec = w.wcs_pix2world(inp[0], inp[1], 1)
xy = w.wcs_world2pix(radec[0], radec[1], 1)
assert_allclose(m(*inp), radec, atol=1e-12)
assert_allclose(minv(*radec), xy, atol=1e-12)
@pytest.mark.parametrize(('inp'), [(1e-5, 1e-4), (40, -20.56), (21.5, 45.9),
([[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]),
([[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]]],
[[[25, 26, 27, 28],
[29, 30, 31, 32],
[33, 34, 35, 36]],
[[37, 38, 39, 40],
[41, 42, 43, 44],
[45, 46, 47, 48]]])])
def test_roundtrip_sky_rotation(inp):
lon, lat, lon_pole = 42, 43, 44
n2c = models.RotateNative2Celestial(lon, lat, lon_pole)
c2n = models.RotateCelestial2Native(lon, lat, lon_pole)
assert_allclose(n2c.inverse(*n2c(*inp)), inp, atol=1e-13)
assert_allclose(c2n.inverse(*c2n(*inp)), inp, atol=1e-13)
def test_native_celestial_lat90():
n2c = models.RotateNative2Celestial(1, 90, 0)
alpha, delta = n2c(1, 1)
assert_allclose(delta, 1)
assert_allclose(alpha, 182)
def test_Rotation2D():
model = models.Rotation2D(angle=90)
x, y = model(1, 0)
assert_allclose([x, y], [0, 1], atol=1e-10)
def test_Rotation2D_quantity():
model = models.Rotation2D(angle=90*u.deg)
x, y = model(1*u.deg, 0*u.arcsec)
assert_quantity_allclose([x, y], [0, 1]*u.deg, atol=1e-10*u.deg)
def test_Rotation2D_inverse():
model = models.Rotation2D(angle=234.23494)
x, y = model.inverse(*model(1, 0))
assert_allclose([x, y], [1, 0], atol=1e-10)
def test_Rotation2D_errors():
model = models.Rotation2D(angle=90*u.deg)
# Bad evaluation input shapes
x = np.array([1, 2])
y = np.array([1, 2, 3])
message = "Expected input arrays to have the same shape"
with pytest.raises(ValueError) as err:
model.evaluate(x, y, model.angle)
assert str(err.value) == message
with pytest.raises(ValueError) as err:
model.evaluate(y, x, model.angle)
assert str(err.value) == message
# Bad evaluation units
x = np.array([1, 2])
y = np.array([1, 2])
message = "x and y must have compatible units"
with pytest.raises(u.UnitsError) as err:
model.evaluate(x * u.m, y, model.angle)
assert str(err.value) == message
def test_euler_angle_rotations():
x = (0, 0)
y = (90, 0)
z = (0, 90)
negx = (180, 0)
negy = (-90, 0)
# rotate y into minus z
model = models.EulerAngleRotation(0, 90, 0, 'zxz')
assert_allclose(model(*z), y, atol=10**-12)
# rotate z into minus x
model = models.EulerAngleRotation(0, 90, 0, 'zyz')
assert_allclose(model(*z), negx, atol=10**-12)
# rotate x into minus y
model = models.EulerAngleRotation(0, 90, 0, 'yzy')
assert_allclose(model(*x), negy, atol=10**-12)
euler_axes_order = ['zxz', 'zyz', 'yzy', 'yxy', 'xyx', 'xzx']
@pytest.mark.parametrize(('axes_order'), euler_axes_order)
def test_euler_angles(axes_order):
"""
Tests against all Euler sequences.
The rotation matrices definitions come from Wikipedia.
"""
phi = np.deg2rad(23.4)
theta = np.deg2rad(12.2)
psi = np.deg2rad(34)
c1 = cos(phi)
c2 = cos(theta)
c3 = cos(psi)
s1 = sin(phi)
s2 = sin(theta)
s3 = sin(psi)
matrices = {'zxz': np.array([[(c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1), (s1*s2)],
[(c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3), (-c1*s2)],
[(s2*s3), (c3*s2), (c2)]]),
'zyz': np.array([[(c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3), (c1*s2)],
[(c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3), (s1*s2)],
[(-c3*s2), (s2*s3), (c2)]]),
'yzy': np.array([[(c1*c2*c3 - s1*s3), (-c1*s2), (c3*s1+c1*c2*s3)],
[(c3*s2), (c2), (s2*s3)],
[(-c1*s3 - c2*c3*s1), (s1*s2), (c1*c3-c2*s1*s3)]]),
'yxy': np.array([[(c1*c3 - c2*s1*s3), (s1*s2), (c1*s3+c2*c3*s1)],
[(s2*s3), (c2), (-c3*s2)],
[(-c3*s1 - c1*c2*s3), (c1*s2), (c1*c2*c3 - s1*s3)]]),
'xyx': np.array([[(c2), (s2*s3), (c3*s2)],
[(s1*s2), (c1*c3 - c2*s1*s3), (-c1*s3 - c2*c3*s1)],
[(-c1*s2), (c3*s1 + c1*c2*s3), (c1*c2*c3 - s1*s3)]]),
'xzx': np.array([[(c2), (-c3*s2), (s2*s3)],
[(c1*s2), (c1*c2*c3 - s1*s3), (-c3*s1 - c1*c2*s3)],
[(s1*s2), (c1*s3 + c2*c3*s1), (c1*c3 - c2*s1*s3)]])
}
mat = rotations._create_matrix([phi, theta, psi], axes_order)
    assert_allclose(mat.T, matrices[axes_order])
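# Editor's sketch (not part of the original suite): each matrix in the dict
# above is a product of elementary rotations taken in axes_order, e.g.
# 'zxz' -> Rz(phi) @ Rx(theta) @ Rz(psi).  The test compares against mat.T,
# apparently because _create_matrix rotates frames rather than vectors.
def _zxz_matrix_sketch(phi, theta, psi):
    def rz(a):
        return np.array([[np.cos(a), -np.sin(a), 0],
                         [np.sin(a), np.cos(a), 0],
                         [0, 0, 1]])
    def rx(a):
        return np.array([[1, 0, 0],
                         [0, np.cos(a), -np.sin(a)],
                         [0, np.sin(a), np.cos(a)]])
    # matches matrices['zxz'] from test_euler_angles
    return rz(phi) @ rx(theta) @ rz(psi)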
def test_rotation_3d():
"""
A sanity test - when V2_REF = 0 and V3_REF = 0,
for V2, V3 close to the origin
    ROLL_REF should be approximately PA_V3.
(Test taken from JWST SIAF report.)
"""
def _roll_angle_from_matrix(matrix, v2, v3):
X = -(matrix[2, 0] * np.cos(v2) + matrix[2, 1] * np.sin(v2)) * \
np.sin(v3) + matrix[2, 2] * np.cos(v3)
Y = (matrix[0, 0] * matrix[1, 2] - matrix[1, 0] * matrix[0, 2]) * np.cos(v2) + \
(matrix[0, 1] * matrix[1, 2] - matrix[1, 1] * matrix[0, 2]) * np.sin(v2)
new_roll = np.rad2deg(np.arctan2(Y, X))
if new_roll < 0:
new_roll += 360
return new_roll
# reference points on sky and in a coordinate frame associated
# with the telescope
ra_ref = 165 # in deg
dec_ref = 54 # in deg
v2_ref = 0
v3_ref = 0
pa_v3 = 37 # in deg
    v2 = np.deg2rad(2.7e-6)  # 2.7e-6 deg, i.e. ~0.01 arcsec
    v3 = np.deg2rad(2.7e-6)  # 2.7e-6 deg, i.e. ~0.01 arcsec
angles = [v2_ref, -v3_ref, pa_v3, dec_ref, -ra_ref]
axes = "zyxyz"
M = rotations._create_matrix(np.deg2rad(angles) * u.deg, axes)
roll_angle = _roll_angle_from_matrix(M, v2, v3)
assert_allclose(roll_angle, pa_v3, atol=1e-3)
def test_spherical_rotation():
"""
Test taken from JWST INS report - converts
JWST telescope (V2, V3) coordinates to RA, DEC.
"""
ra_ref = 165 # in deg
dec_ref = 54 # in deg
v2_ref = -503.654472 / 3600 # in deg
v3_ref = -318.742464 / 3600 # in deg
r0 = 37 # in deg
v2 = 210 # in deg
v3 = -75 # in deg
expected_ra_dec = (107.12810484789563, -35.97940247128502) # in deg
angles = np.array([v2_ref, -v3_ref, r0, dec_ref, -ra_ref])
axes = "zyxyz"
v2s = rotations.RotationSequence3D(angles, axes_order=axes)
x, y, z = rotations.spherical2cartesian(v2, v3)
x1, y1, z1 = v2s(x, y, z)
radec = rotations.cartesian2spherical(x1, y1, z1)
assert_allclose(radec, expected_ra_dec, atol=1e-10)
v2s = rotations.SphericalRotationSequence(angles, axes_order=axes)
radec = v2s(v2, v3)
assert_allclose(radec, expected_ra_dec, atol=1e-10)
#assert_allclose(v2s.inverse(*v2s(v2, v3)), (v2, v3))
def test_RotationSequence3D_errors():
# Bad axes_order labels
with pytest.raises(ValueError, match=r"Unrecognized axis label .* should be one of .*"):
rotations.RotationSequence3D(mk.MagicMock(), axes_order="abc")
# Bad number of angles
with pytest.raises(ValueError) as err:
rotations.RotationSequence3D([1, 2, 3, 4], axes_order="zyx")
assert str(err.value) ==\
"The number of angles 4 should match the number of axes 3."
# Bad evaluation input shapes
model = rotations.RotationSequence3D([1, 2, 3], axes_order="zyx")
message = "Expected input arrays to have the same shape"
with pytest.raises(ValueError) as err:
model.evaluate(np.array([1, 2, 3]),
np.array([1, 2]),
np.array([1, 2]),
[1, 2, 3])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
model.evaluate(np.array([1, 2]),
np.array([1, 2, 3]),
np.array([1, 2]),
[1, 2, 3])
assert str(err.value) == message
with pytest.raises(ValueError) as err:
model.evaluate(np.array([1, 2]),
np.array([1, 2]),
np.array([1, 2, 3]),
[1, 2, 3])
assert str(err.value) == message
def test_RotationSequence3D_inverse():
model = rotations.RotationSequence3D([1, 2, 3], axes_order="zyx")
assert_allclose(model.inverse.angles.value, [-3, -2, -1])
assert model.inverse.axes_order == "xyz"
def test_EulerAngleRotation_errors():
# Bad length of axes_order
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(mk.MagicMock(), mk.MagicMock(), mk.MagicMock(),
axes_order="xyzx")
assert str(err.value) ==\
"Expected axes_order to be a character sequence of length 3, got xyzx"
# Bad axes_order labels
with pytest.raises(ValueError, match=r"Unrecognized axis label .* should be one of .*"):
rotations.EulerAngleRotation(mk.MagicMock(), mk.MagicMock(), mk.MagicMock(),
axes_order="abc")
# Bad units
message = "All parameters should be of the same type - float or Quantity."
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(1 * u.m, 2, 3,
axes_order="xyz")
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(1, 2 * u.m, 3,
axes_order="xyz")
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations.EulerAngleRotation(1, 2, 3 * u.m,
axes_order="xyz")
assert str(err.value) == message
def test_EulerAngleRotation_inverse():
model = rotations.EulerAngleRotation(1, 2, 3, "xyz")
assert_allclose(model.inverse.phi, -3)
assert_allclose(model.inverse.theta, -2)
assert_allclose(model.inverse.psi, -1)
assert model.inverse.axes_order == "zyx"
def test__SkyRotation_errors():
# Bad units
message = "All parameters should be of the same type - float or Quantity."
with pytest.raises(TypeError) as err:
rotations._SkyRotation(1 * u.m, 2, 3)
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations._SkyRotation(1, 2 * u.m, 3)
assert str(err.value) == message
with pytest.raises(TypeError) as err:
rotations._SkyRotation(1, 2, 3 * u.m)
assert str(err.value) == message
def test__SkyRotation__evaluate():
model = rotations._SkyRotation(1, 2, 3)
phi = mk.MagicMock()
theta = mk.MagicMock()
lon = mk.MagicMock()
lat = mk.MagicMock()
lon_pole = mk.MagicMock()
alpha = 5
delta = mk.MagicMock()
with mk.patch.object(rotations._EulerRotation, 'evaluate',
autospec=True, return_value=(alpha, delta)) as mkEval:
assert (365, delta) == model._evaluate(phi, theta, lon, lat, lon_pole)
assert mkEval.call_args_list ==\
[mk.call(model, phi, theta, lon, lat, lon_pole, 'zxz')]
|
b2f2cb96d6f0d2ef8aa1aed8bb52f3fba4ad3722a769b4f40841cebdc8f5e2d8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import types
import warnings
import numpy as np
import pytest
from numpy.random import default_rng
from numpy.testing import assert_allclose
from astropy.modeling import fitting, models
from astropy.modeling.core import Fittable1DModel
from astropy.modeling.parameters import Parameter
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.exceptions import AstropyUserWarning
class TestNonLinearConstraints:
def setup_class(self):
self.g1 = models.Gaussian1D(10, 14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, 13, stddev=.4)
self.x = np.arange(10, 20, .1)
self.y1 = self.g1(self.x)
self.y2 = self.g2(self.x)
rsn = default_rng(1234567890)
self.n = rsn.standard_normal(100)
self.ny1 = self.y1 + 2 * self.n
self.ny2 = self.y2 + 2 * self.n
@pytest.mark.skipif('not HAS_SCIPY')
def test_fixed_par(self):
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3,
fixed={'amplitude': True})
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, self.ny1)
assert model.amplitude.value == 10
@pytest.mark.skipif('not HAS_SCIPY')
def test_tied_par(self):
def tied(model):
mean = 50 * model.stddev
return mean
g1 = models.Gaussian1D(10, mean=14.9, stddev=.3, tied={'mean': tied})
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, self.ny1)
assert_allclose(model.mean.value, 50 * model.stddev,
rtol=10 ** (-5))
@pytest.mark.skipif('not HAS_SCIPY')
def test_joint_fitter(self):
from scipy import optimize
g1 = models.Gaussian1D(10, 14.9, stddev=.3)
g2 = models.Gaussian1D(10, 13, stddev=.4)
jf = fitting.JointFitter([g1, g2], {g1: ['amplitude'],
g2: ['amplitude']}, [9.8])
x = np.arange(10, 20, .1)
y1 = g1(x)
y2 = g2(x)
n = np.random.randn(100)
ny1 = y1 + 2 * n
ny2 = y2 + 2 * n
jf(x, ny1, x, ny2)
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def compmodel(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errf(p, x1, y1, x2, y2):
return np.ravel(
np.r_[compmodel(p[0], p[1:3], x1) - y1,
compmodel(p[0], p[3:], x2) - y2])
fitparams, _ = optimize.leastsq(errf, p, args=(x, ny1, x, ny2))
assert_allclose(jf.fitparams, fitparams, rtol=10 ** (-5))
assert_allclose(g1.amplitude.value, g2.amplitude.value)
@pytest.mark.skipif('not HAS_SCIPY')
def test_no_constraints(self):
from scipy import optimize
g1 = models.Gaussian1D(9.9, 14.5, stddev=.3)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errf(p, x, y):
return func(p, x) - y
p0 = [9.9, 14.5, 0.3]
y = g1(self.x)
n = np.random.randn(100)
ny = y + n
fitpar, s = optimize.leastsq(errf, p0, args=(self.x, ny))
fitter = fitting.LevMarLSQFitter()
model = fitter(g1, self.x, ny)
assert_allclose(model.parameters, fitpar, rtol=5 * 10 ** (-3))
@pytest.mark.skipif('not HAS_SCIPY')
class TestBounds:
def setup_class(self):
A = -2.0
B = 0.5
self.x = np.linspace(-1.0, 1.0, 100)
self.y = A * self.x + B + np.random.normal(scale=0.1, size=100)
data = np.array([505.0, 556.0, 630.0, 595.0, 561.0, 553.0, 543.0, 496.0, 460.0, 469.0,
426.0, 518.0, 684.0, 798.0, 830.0, 794.0, 649.0, 706.0, 671.0, 545.0,
479.0, 454.0, 505.0, 700.0, 1058.0, 1231.0, 1325.0, 997.0, 1036.0, 884.0,
610.0, 487.0, 453.0, 527.0, 780.0, 1094.0, 1983.0, 1993.0, 1809.0, 1525.0,
1056.0, 895.0, 604.0, 466.0, 510.0, 678.0, 1130.0, 1986.0, 2670.0, 2535.0,
1878.0, 1450.0, 1200.0, 663.0, 511.0, 474.0, 569.0, 848.0, 1670.0, 2611.0,
3129.0, 2507.0, 1782.0, 1211.0, 723.0, 541.0, 511.0, 518.0, 597.0, 1137.0,
1993.0, 2925.0, 2438.0, 1910.0, 1230.0, 738.0, 506.0, 461.0, 486.0, 597.0,
733.0, 1262.0, 1896.0, 2342.0, 1792.0, 1180.0, 667.0, 482.0, 454.0, 482.0,
504.0, 566.0, 789.0, 1194.0, 1545.0, 1361.0, 933.0, 562.0, 418.0, 463.0,
435.0, 466.0, 528.0, 487.0, 664.0, 799.0, 746.0, 550.0, 478.0, 535.0, 443.0,
416.0, 439.0, 472.0, 472.0, 492.0, 523.0, 569.0, 487.0, 441.0, 428.0])
self.data = data.reshape(11, 11)
def test_bounds_lsq(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_slsqp(self):
guess_slope = 1.1
guess_intercept = 0.0
bounds = {'slope': (-1.5, 5.0), 'intercept': (-1.0, 1.0)}
line_model = models.Linear1D(guess_slope, guess_intercept,
bounds=bounds)
fitter = fitting.SLSQPLSQFitter()
with pytest.warns(AstropyUserWarning, match='consider using linear fitting methods'):
model = fitter(line_model, self.x, self.y)
slope = model.slope.value
intercept = model.intercept.value
assert slope + 10 ** -5 >= bounds['slope'][0]
assert slope - 10 ** -5 <= bounds['slope'][1]
assert intercept + 10 ** -5 >= bounds['intercept'][0]
assert intercept - 10 ** -5 <= bounds['intercept'][1]
def test_bounds_gauss2d_lsq(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.LevMarLSQFitter()
with pytest.warns(AstropyUserWarning,
match='The fit may be unsuccessful'):
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
def test_bounds_gauss2d_slsqp(self):
X, Y = np.meshgrid(np.arange(11), np.arange(11))
bounds = {"x_mean": [0., 11.],
"y_mean": [0., 11.],
"x_stddev": [1., 4],
"y_stddev": [1., 4]}
gauss = models.Gaussian2D(amplitude=10., x_mean=5., y_mean=5.,
x_stddev=4., y_stddev=4., theta=0.5,
bounds=bounds)
gauss_fit = fitting.SLSQPLSQFitter()
# Warning does not appear in all the CI jobs.
# TODO: Rewrite the test for more consistent warning behavior.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=r'.*The fit may be unsuccessful.*',
category=AstropyUserWarning)
model = gauss_fit(gauss, X, Y, self.data)
x_mean = model.x_mean.value
y_mean = model.y_mean.value
x_stddev = model.x_stddev.value
y_stddev = model.y_stddev.value
assert x_mean + 10 ** -5 >= bounds['x_mean'][0]
assert x_mean - 10 ** -5 <= bounds['x_mean'][1]
assert y_mean + 10 ** -5 >= bounds['y_mean'][0]
assert y_mean - 10 ** -5 <= bounds['y_mean'][1]
assert x_stddev + 10 ** -5 >= bounds['x_stddev'][0]
assert x_stddev - 10 ** -5 <= bounds['x_stddev'][1]
assert y_stddev + 10 ** -5 >= bounds['y_stddev'][0]
assert y_stddev - 10 ** -5 <= bounds['y_stddev'][1]
class TestLinearConstraints:
def setup_class(self):
self.p1 = models.Polynomial1D(4)
self.p1.c0 = 0
self.p1.c1 = 0
self.p1.window = [0., 9.]
self.x = np.arange(10)
self.y = self.p1(self.x)
rsn = default_rng(1234567890)
self.n = rsn.standard_normal(10)
self.ny = self.y + self.n
def test(self):
self.p1.c0.fixed = True
self.p1.c1.fixed = True
pfit = fitting.LinearLSQFitter()
model = pfit(self.p1, self.x, self.y)
assert_allclose(self.y, model(self.x))
# Test constraints as parameter properties
def test_set_fixed_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.mean.fixed = True
assert gauss.fixed == {'amplitude': False, 'mean': True, 'stddev': False}
def test_set_fixed_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
assert gauss.mean.fixed is True
def test_set_tied_1():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.amplitude.tied = tie_amplitude
assert gauss.amplitude.tied is not False
assert isinstance(gauss.tied['amplitude'], types.FunctionType)
def test_set_tied_2():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
assert gauss.amplitude.tied
def test_unset_fixed():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
fixed={'mean': True})
gauss.mean.fixed = False
assert gauss.fixed == {'amplitude': False, 'mean': False, 'stddev': False}
def test_unset_tied():
def tie_amplitude(model):
return 50 * model.stddev
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
tied={'amplitude': tie_amplitude})
gauss.amplitude.tied = False
assert gauss.tied == {'amplitude': False, 'mean': False, 'stddev': False}
def test_set_bounds_1():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, None)})
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_set_bounds_2():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1)
gauss.stddev.min = 0.
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (0.0, None)}
def test_unset_bounds():
gauss = models.Gaussian1D(amplitude=20, mean=2, stddev=1,
bounds={'stddev': (0, 2)})
gauss.stddev.min = None
gauss.stddev.max = None
assert gauss.bounds == {'amplitude': (None, None),
'mean': (None, None),
'stddev': (None, None)}
def test_default_constraints():
"""Regression test for https://github.com/astropy/astropy/issues/2396
Ensure that default constraints defined on parameters are carried through
to instances of the models those parameters are defined for.
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=0, min=0, fixed=True)
@staticmethod
def evaluate(x, a, b):
return x * a + b
assert MyModel.a.default == 1
assert MyModel.b.default == 0
assert MyModel.b.min == 0
assert MyModel.b.bounds == (0, None)
assert MyModel.b.fixed is True
m = MyModel()
assert m.a.value == 1
assert m.b.value == 0
assert m.b.min == 0
assert m.b.bounds == (0, None)
assert m.b.fixed is True
assert m.bounds == {'a': (None, None), 'b': (0, None)}
assert m.fixed == {'a': False, 'b': True}
# Make a model instance that overrides the default constraints and values
m = MyModel(3, 4, bounds={'a': (1, None), 'b': (2, None)},
fixed={'a': True, 'b': False})
assert m.a.value == 3
assert m.b.value == 4
assert m.a.min == 1
assert m.b.min == 2
assert m.a.bounds == (1, None)
assert m.b.bounds == (2, None)
assert m.a.fixed is True
assert m.b.fixed is False
assert m.bounds == {'a': (1, None), 'b': (2, None)}
assert m.fixed == {'a': True, 'b': False}
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_with_fixed_and_bound_constraints():
"""
Regression test for https://github.com/astropy/astropy/issues/2235
Currently doesn't test that the fit is any *good*--just that parameters
stay within their given constraints.
"""
m = models.Gaussian1D(amplitude=3, mean=4, stddev=1,
bounds={'mean': (4, 5)},
fixed={'amplitude': True})
x = np.linspace(0, 10, 10)
y = np.exp(-x ** 2 / 2)
f = fitting.LevMarLSQFitter()
fitted_1 = f(m, x, y)
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
assert fitted_1.amplitude == 3.0
m.amplitude.fixed = False
_ = f(m, x, y)
# It doesn't matter anymore what the amplitude ends up as so long as the
# bounds constraint was still obeyed
assert fitted_1.mean >= 4
assert fitted_1.mean <= 5
@pytest.mark.skipif('not HAS_SCIPY')
def test_fit_with_bound_constraints_estimate_jacobian():
"""
Regression test for https://github.com/astropy/astropy/issues/2400
Checks that bounds constraints are obeyed on a custom model that does not
define fit_deriv (and thus its Jacobian must be estimated for non-linear
fitting).
"""
class MyModel(Fittable1DModel):
a = Parameter(default=1)
b = Parameter(default=2)
@staticmethod
def evaluate(x, a, b):
return a * x + b
m_real = MyModel(a=1.5, b=-3)
x = np.arange(100)
y = m_real(x)
m = MyModel()
f = fitting.LevMarLSQFitter()
fitted_1 = f(m, x, y)
# This fit should be trivial so even without constraints on the bounds it
# should be right
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
m2 = MyModel()
m2.a.bounds = (-2, 2)
f2 = fitting.LevMarLSQFitter()
_ = f2(m2, x, y)
assert np.allclose(fitted_1.a, 1.5)
assert np.allclose(fitted_1.b, -3)
# Check that the estimated Jacobian was computed (it doesn't matter what
    # the values are, so long as they're not all zero).
assert np.any(f2.fit_info['fjac'] != 0)
# https://github.com/astropy/astropy/issues/6014
@pytest.mark.skipif('not HAS_SCIPY')
def test_gaussian2d_positive_stddev():
# This is 2D Gaussian with noise to be fitted, as provided by @ysBach
test = [
[-54.33, 13.81, -34.55, 8.95, -143.71, -0.81, 59.25, -14.78, -204.9,
-30.87, -124.39, 123.53, 70.81, -109.48, -106.77, 35.64, 18.29],
[-126.19, -89.13, 63.13, 50.74, 61.83, 19.06, 65.7, 77.94, 117.14,
139.37, 52.57, 236.04, 100.56, 242.28, -180.62, 154.02, -8.03],
[91.43, 96.45, -118.59, -174.58, -116.49, 80.11, -86.81, 14.62, 79.26,
7.56, 54.99, 260.13, -136.42, -20.77, -77.55, 174.52, 134.41],
[33.88, 7.63, 43.54, 70.99, 69.87, 33.97, 273.75, 176.66, 201.94,
336.34, 340.54, 163.77, -156.22, 21.49, -148.41, 94.88, 42.55],
[82.28, 177.67, 26.81, 17.66, 47.81, -31.18, 353.23, 589.11, 553.27,
242.35, 444.12, 186.02, 140.73, 75.2, -87.98, -18.23, 166.74],
[113.09, -37.01, 134.23, 71.89, 107.88, 198.69, 273.88, 626.63, 551.8,
547.61, 580.35, 337.8, 139.8, 157.64, -1.67, -26.99, 37.35],
[106.47, 31.97, 84.99, -125.79, 195.0, 493.65, 861.89, 908.31, 803.9,
781.01, 532.59, 404.67, 115.18, 111.11, 28.08, 122.05, -58.36],
[183.62, 45.22, 40.89, 111.58, 425.81, 321.53, 545.09, 866.02, 784.78,
731.35, 609.01, 405.41, -19.65, 71.2, -140.5, 144.07, 25.24],
[137.13, -86.95, 15.39, 180.14, 353.23, 699.01, 1033.8, 1014.49,
814.11, 647.68, 461.03, 249.76, 94.8, 41.17, -1.16, 183.76, 188.19],
[35.39, 26.92, 198.53, -37.78, 638.93, 624.41, 816.04, 867.28, 697.0,
491.56, 378.21, -18.46, -65.76, 98.1, 12.41, -102.18, 119.05],
[190.73, 125.82, 311.45, 369.34, 554.39, 454.37, 755.7, 736.61, 542.43,
188.24, 214.86, 217.91, 7.91, 27.46, -172.14, -82.36, -80.31],
[-55.39, 80.18, 267.19, 274.2, 169.53, 327.04, 488.15, 437.53, 225.38,
220.94, 4.01, -92.07, 39.68, 57.22, 144.66, 100.06, 34.96],
[130.47, -4.23, 46.3, 101.49, 115.01, 217.38, 249.83, 115.9, 87.36,
105.81, -47.86, -9.94, -82.28, 144.45, 83.44, 23.49, 183.9],
[-110.38, -115.98, 245.46, 103.51, 255.43, 163.47, 56.52, 33.82,
-33.26, -111.29, 88.08, 193.2, -100.68, 15.44, 86.32, -26.44, -194.1],
[109.36, 96.01, -124.89, -16.4, 84.37, 114.87, -65.65, -58.52, -23.22,
42.61, 144.91, -209.84, 110.29, 66.37, -117.85, -147.73, -122.51],
[10.94, 45.98, 118.12, -46.53, -72.14, -74.22, 21.22, 0.39, 86.03,
23.97, -45.42, 12.05, -168.61, 27.79, 61.81, 84.07, 28.79],
[46.61, -104.11, 56.71, -90.85, -16.51, -66.45, -141.34, 0.96, 58.08,
285.29, -61.41, -9.01, -323.38, 58.35, 80.14, -101.22, 145.65]]
g_init = models.Gaussian2D(x_mean=8, y_mean=8)
fitter = fitting.LevMarLSQFitter()
y, x = np.mgrid[:17, :17]
g_fit = fitter(g_init, x, y, test)
# Compare with @ysBach original result:
# - x_stddev was negative, so its abs value is used for comparison here.
# - theta is beyond (-90, 90) deg, which doesn't make sense, so ignored.
assert_allclose([g_fit.amplitude.value, g_fit.y_stddev.value],
[984.7694929790363, 3.1840618351417307], rtol=1.5e-6)
assert_allclose(g_fit.x_mean.value, 7.198391516587464)
assert_allclose(g_fit.y_mean.value, 7.49720660088511, rtol=5e-7)
assert_allclose(g_fit.x_stddev.value, 1.9840185107597297, rtol=2e-6)
# Issue #6403
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
def test_2d_model():
from astropy.utils import NumpyRNGContext
# 2D model with LevMarLSQFitter
gauss2d = models.Gaussian2D(10.2, 4.3, 5, 2, 1.2, 1.4)
fitter = fitting.LevMarLSQFitter()
X = np.linspace(-1, 7, 200)
Y = np.linspace(-1, 7, 200)
x, y = np.meshgrid(X, Y)
z = gauss2d(x, y)
w = np.ones(x.size)
w.shape = x.shape
with NumpyRNGContext(1234567890):
n = np.random.randn(x.size)
n.shape = x.shape
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# 2D model with LevMarLSQFitter, fixed constraint
gauss2d.x_stddev.fixed = True
m = fitter(gauss2d, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
m = fitter(gauss2d, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, gauss2d.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False
p2 = models.Polynomial2D(1, c0_0=1, c1_0=1.2, c0_1=3.2)
z = p2(x, y)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
# Polynomial2D, col_fit_deriv=False, fixed constraint
p2.c1_0.fixed = True
m = fitter(p2, x, y, z + 2 * n, weights=w)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
m = fitter(p2, x, y, z + 2 * n, weights=None)
assert_allclose(m.parameters, p2.parameters, rtol=0.05)
def test_set_prior_posterior():
model = models.Polynomial1D(1)
model.c0.prior = models.Gaussian1D(2.3, 2, .1)
assert model.c0.prior(2) == 2.3
model.c0.posterior = models.Linear1D(1, .2)
assert model.c0.posterior(1) == 1.2
def test_set_constraints():
g = models.Gaussian1D()
p = models.Polynomial1D(1)
# Set bounds before model combination
g.stddev.bounds = (0, 3)
m = g + p
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (0.0, 3.0),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set bounds on the compound model
m.stddev_0.bounds = (1, 3)
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (1.0, 3.0),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set the bounds of a Parameter directly in the bounds dict
m.bounds['stddev_0'] = (4, 5)
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (4, 5),
'c0_1': (None, None),
'c1_1': (None, None)}
# Set the bounds of a Parameter on the child model bounds dict
g.bounds['stddev'] = (1, 5)
m = g + p
assert m.bounds == {'amplitude_0': (None, None),
'mean_0': (None, None),
'stddev_0': (1, 5),
'c0_1': (None, None),
'c1_1': (None, None)}
|
21cb56c9a7e2ccaeb3e6e9d4845f779379037c18372bcf1219059e41eb52852c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.models import Gaussian1D, Identity, Mapping, Rotation2D, Shift, UnitsMapping
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
def test_swap_axes():
x = np.zeros((2, 3))
y = np.ones((2, 3))
mapping = Mapping((1, 0))
assert mapping(1, 2) == (2.0, 1.0)
assert mapping.inverse(2, 1) == (1, 2)
assert_array_equal(mapping(x, y), (y, x))
assert_array_equal(mapping.inverse(y, x), (x, y))
def test_duplicate_axes():
mapping = Mapping((0, 1, 0, 1))
assert mapping(1, 2) == (1.0, 2., 1., 2)
assert mapping.inverse(1, 2, 1, 2) == (1, 2)
assert mapping.inverse.n_inputs == 4
assert mapping.inverse.n_outputs == 2
def test_drop_axes_1():
mapping = Mapping((0,), n_inputs=2)
    assert mapping(1, 2) == 1.
def test_drop_axes_2():
mapping = Mapping((1, ))
    assert mapping(1, 2) == 2.
with pytest.raises(NotImplementedError):
mapping.inverse
def test_drop_axes_3():
mapping = Mapping((1,), n_inputs=2)
assert mapping.n_inputs == 2
rotation = Rotation2D(60)
model = rotation | mapping
assert_allclose(model(1, 2), 1.86602540378)
@pytest.mark.parametrize('name', [None, 'test_name'])
def test_bad_inputs(name):
mapping = Mapping((1, 0), name=name)
if name is None:
name = "Mapping"
x = [np.ones((2, 3))*idx for idx in range(5)]
for idx in range(1, 6):
if idx == 2:
continue
with pytest.raises(TypeError) as err:
mapping.evaluate(*x[:idx])
assert str(err.value) == \
f"{name} expects 2 inputs; got {idx}"
def test_identity():
x = np.zeros((2, 3))
y = np.ones((2, 3))
ident1 = Identity(1)
shift = Shift(1)
rotation = Rotation2D(angle=60)
model = ident1 & shift | rotation
assert_allclose(model(1, 2), (-2.098076211353316, 2.3660254037844393))
res_x, res_y = model(x, y)
assert_allclose((res_x, res_y),
(np.array([[-1.73205081, -1.73205081, -1.73205081],
[-1.73205081, -1.73205081, -1.73205081]]),
np.array([[1., 1., 1.],
[1., 1., 1.]])))
assert_allclose(model.inverse(res_x, res_y), (x, y), atol=1.e-10)
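# Editor's sketch (not part of the original suite): a worked check of the
# scalar case above.  Identity(1) & Shift(1) maps (1, 2) -> (1, 3); rotating
# that by 60 deg gives x' = cos60 - 3*sin60 ~ -2.0981 and
# y' = sin60 + 3*cos60 ~ 2.3660, the values asserted in test_identity:
def _identity_shift_rotate_sketch():
    a = np.deg2rad(60)
    x, y = 1.0, 2.0 + 1.0  # Identity leaves x alone, Shift(1) adds 1 to y
    return (np.cos(a) * x - np.sin(a) * y,
            np.sin(a) * x + np.cos(a) * y)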
# https://github.com/astropy/astropy/pull/6018
@pytest.mark.skipif('not HAS_SCIPY')
def test_fittable_compound():
m = Identity(1) | Mapping((0, )) | Gaussian1D(1, 5, 4)
x = np.arange(10)
y_real = m(x)
dy = 0.005
with NumpyRNGContext(1234567):
n = np.random.normal(0., dy, x.shape)
y_noisy = y_real + n
pfit = LevMarLSQFitter()
new_model = pfit(m, x, y_noisy)
y_fit = new_model(x)
assert_allclose(y_fit, y_real, atol=dy)
def test_identity_repr():
m = Identity(1, name='foo')
assert repr(m) == "<Identity(1, name='foo')>"
m = Identity(1)
assert repr(m) == "<Identity(1)>"
def test_mapping_repr():
m = Mapping([0, 1], name='foo')
assert repr(m) == "<Mapping([0, 1], name='foo')>"
m = Mapping([0, 1])
assert repr(m) == "<Mapping([0, 1])>"
class TestUnitsMapping:
def test___init__(self):
# Set values
model = UnitsMapping(((u.m, None),),
input_units_equivalencies='test_eqiv',
input_units_allow_dimensionless=True,
name='test')
assert model._mapping == ((u.m, None),)
assert model._input_units_strict == {'x': True}
assert model.input_units_equivalencies == 'test_eqiv'
assert model.input_units_allow_dimensionless == {'x': True}
assert model.name == 'test'
assert model._input_units == {'x': u.m}
# Default values
model = UnitsMapping(((u.K, None),))
assert model._mapping == ((u.K, None),)
assert model._input_units_strict == {'x': True}
assert model.input_units_equivalencies is None
assert model.input_units_allow_dimensionless == {'x': False}
assert model.name is None
assert model._input_units == {'x': u.K}
# Error
with pytest.raises(ValueError) as err:
UnitsMapping(((u.m, None), (u.m, u.K)))
assert str(err.value) == \
"If one return unit is None, then all must be None"
def test_evaluate(self):
model = UnitsMapping(((u.m, None),))
assert model(10*u.m) == 10
model = UnitsMapping(((u.m, u.K),))
assert model(10*u.m) == 10 * u.K
model = UnitsMapping(((u.m, None), (u.K, None)),)
assert model(10*u.m, 20*u.K) == (10, 20)
model = UnitsMapping(((u.m, u.K), (u.K, u.m)),)
assert model(10*u.m, 20*u.K) == (10*u.K, 20*u.m)
def test_repr(self):
model = UnitsMapping(((u.m, None),), name='foo')
assert repr(model) == f"<UnitsMapping((({repr(u.m)}, None),), name='foo')>"
model = UnitsMapping(((u.m, None),))
assert repr(model) == f"<UnitsMapping((({repr(u.m)}, None),))>"
|
ac5a57825aa3d045dfdd70a64ddc4eb14eef3c0c0f5aabfc3f900357019082d6 | # Various tests of models not related to evaluation, fitting, or parameters
# pylint: disable=invalid-name, no-member
import warnings
import pytest
from astropy import units as u
from astropy.modeling import models
from astropy.modeling.core import _ModelMeta
from astropy.modeling.models import Gaussian1D, Mapping, Pix2Sky_TAN
from astropy.tests.helper import assert_quantity_allclose
def test_gaussian1d_bounding_box():
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
bbox = g.bounding_box.bounding_box()
assert_quantity_allclose(bbox[0], 2.835 * u.m)
assert_quantity_allclose(bbox[1], 3.165 * u.m)
def test_gaussian1d_n_models():
g = Gaussian1D(
amplitude=[1 * u.J, 2. * u.J],
mean=[1 * u.m, 5000 * u.AA],
stddev=[0.1 * u.m, 100 * u.AA],
n_models=2)
assert_quantity_allclose(g(1.01 * u.m), [0.99501248, 0.] * u.J)
assert_quantity_allclose(
g(u.Quantity([1.01 * u.m, 5010 * u.AA])), [0.99501248, 1.990025] * u.J)
# FIXME: The following doesn't work as np.asanyarray doesn't work with a
# list of quantity objects.
# assert_quantity_allclose(g([1.01 * u.m, 5010 * u.AA]),
# [ 0.99501248, 1.990025] * u.J)
"""
Test the "rules" of model units.
"""
def test_quantity_call():
"""
    Test that models constructed with Quantities must be called with Quantities.
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g(10 * u.m)
with pytest.raises(u.UnitsError):
g(10)
def test_no_quantity_call():
"""
    Test that models not constructed with Quantities can be called without Quantities.
"""
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert isinstance(g, Gaussian1D)
g(10)
def test_default_parameters():
# Test that calling with a quantity works when one of the parameters
# defaults to dimensionless
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm)
assert isinstance(g, Gaussian1D)
g(10*u.m)
def test_uses_quantity():
"""
Test Quantity
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
assert g.uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
assert not g.uses_quantity
g.mean = 3 * u.m
assert g.uses_quantity
def test_uses_quantity_compound():
"""
Test Quantity
"""
g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
g2 = Gaussian1D(mean=5 * u.m, stddev=5 * u.cm, amplitude=5 * u.Jy)
assert (g | g2).uses_quantity
g = Gaussian1D(mean=3, stddev=3, amplitude=3)
g2 = Gaussian1D(mean=5, stddev=5, amplitude=5)
comp = g | g2
assert not (comp).uses_quantity
def test_uses_quantity_no_param():
comp = Mapping((0, 1)) | Pix2Sky_TAN()
assert comp.uses_quantity
def _allmodels():
allmodels = []
for name in dir(models):
model = getattr(models, name)
if type(model) is _ModelMeta:
try:
m = model()
            except Exception:
                # Skip models that cannot be instantiated with defaults,
                # rather than appending a stale or undefined instance.
                continue
            allmodels.append(m)
return allmodels
@pytest.mark.parametrize("m", _allmodels())
def test_read_only(m):
"""
input_units
return_units
input_units_allow_dimensionless
input_units_strict
"""
with pytest.raises(AttributeError):
m.input_units = {}
with pytest.raises(AttributeError):
m.return_units = {}
with pytest.raises(AttributeError):
m.input_units_allow_dimensionless = {}
with pytest.raises(AttributeError):
m.input_units_strict = {}
|
b120103ca5561cb5f75a15034382e469d0631b2fd929738ff2ec310fc99e6cd5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name, no-member
import numpy as np
import pytest
from astropy import units as u
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.modeling.core import fix_inputs
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.modeling.functional_models import (AiryDisk2D, ArcCosine1D, ArcSine1D, ArcTangent1D,
Box1D, Box2D, Const1D, Const2D, Cosine1D, Disk2D,
Ellipse2D, Exponential1D, Gaussian1D, Gaussian2D,
KingProjectedAnalytic1D, Linear1D, Logarithmic1D,
Lorentz1D, Moffat1D, Moffat2D, Multiply, Planar2D,
RickerWavelet1D, RickerWavelet2D, Ring2D, Scale,
Sersic1D, Sersic2D, Sine1D, Tangent1D, Trapezoid1D,
TrapezoidDisk2D, Voigt1D)
from astropy.modeling.parameters import InputParameterError
from astropy.modeling.physical_models import Drude1D, Plummer1D
from astropy.modeling.polynomial import Polynomial1D, Polynomial2D
from astropy.modeling.powerlaws import (BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D,
LogParabola1D, PowerLaw1D, SmoothlyBrokenPowerLaw1D)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
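# Each entry below names a model class, gives Quantity-valued parameters,
# one or more evaluation tuples of (input(s)..., expected output), and the
# expected bounding box (False meaning bounding_box should raise
# NotImplementedError; see test_models_bounding_box).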
FUNC_MODELS_1D = [
{'class': Gaussian1D,
'parameters': {'amplitude': 3 * u.Jy, 'mean': 2 * u.m, 'stddev': 30 * u.cm},
'evaluation': [(2600 * u.mm, 3 * u.Jy * np.exp(-2))],
'bounding_box': [0.35, 3.65] * u.m},
{'class': Sersic1D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'r_eff': 2 * u.arcsec, 'n': 4},
'evaluation': [(3 * u.arcsec, 1.3237148119468918 * u.MJy/u.sr)],
'bounding_box': False},
{'class': Sine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': False},
{'class': Cosine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.25},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': False},
{'class': Tangent1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.125 * u.Hz, 'phase': 0.25},
'evaluation': [(1 * u.s, -3 * u.km / u.s)],
'bounding_box': [-4, 0] / u.Hz},
{'class': ArcSine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(0 * u.km / u.s, -2 * u.s)],
'bounding_box': [-3, 3] * u.km / u.s},
{'class': ArcCosine1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.25 * u.Hz, 'phase': 0.5},
'evaluation': [(0 * u.km / u.s, -1 * u.s)],
'bounding_box': [-3, 3] * u.km / u.s},
{'class': ArcTangent1D,
'parameters': {'amplitude': 3 * u.km / u.s, 'frequency': 0.125 * u.Hz, 'phase': 0.25},
'evaluation': [(0 * u.km / u.s, -2 * u.s)],
'bounding_box': False},
{'class': Linear1D,
'parameters': {'slope': 3 * u.km / u.s, 'intercept': 5000 * u.m},
'evaluation': [(6000 * u.ms, 23 * u.km)],
'bounding_box': False},
{'class': Lorentz1D,
'parameters': {'amplitude': 2 * u.Jy, 'x_0': 505 * u.nm, 'fwhm': 100 * u.AA},
'evaluation': [(0.51 * u.micron, 1 * u.Jy)],
'bounding_box': [255, 755] * u.nm},
{'class': Voigt1D,
'parameters': {'amplitude_L': 2 * u.Jy, 'x_0': 505 * u.nm,
'fwhm_L': 100 * u.AA, 'fwhm_G': 50 * u.AA},
'evaluation': [(0.51 * u.micron, 1.0621795524 * u.Jy)],
'bounding_box': False},
{'class': Const1D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 3 * u.Jy)],
'bounding_box': False},
{'class': Box1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.9, 4.9] * u.um},
{'class': Trapezoid1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'width': 1 * u.um, 'slope': 5 * u.Jy / u.um},
'evaluation': [(4200 * u.nm, 3 * u.Jy), (1 * u.m, 0 * u.Jy)],
'bounding_box': [3.3, 5.5] * u.um},
{'class': RickerWavelet1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'sigma': 1e-3 * u.mm},
'evaluation': [(1000 * u.nm, -0.09785050 * u.Jy)],
'bounding_box': [-5.6, 14.4] * u.um},
{'class': Moffat1D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 0.238853503 * u.Jy)],
'bounding_box': False},
{'class': KingProjectedAnalytic1D,
'parameters': {'amplitude': 1. * u.Msun/u.pc**2, 'r_core': 1. * u.pc, 'r_tide': 2. * u.pc},
'evaluation': [(0.5 * u.pc, 0.2 * u.Msun/u.pc**2)],
'bounding_box': [0. * u.pc, 2. * u.pc]},
{'class': Logarithmic1D,
'parameters': {'amplitude': 5*u.m, 'tau': 2 * u.m},
'evaluation': [(4 * u.m, 3.4657359027997265 * u.m)],
'bounding_box': False},
{'class': Exponential1D,
'parameters': {'amplitude': 5*u.m, 'tau': 2 * u.m},
'evaluation': [(4 * u.m, 36.945280494653254 * u.m)],
'bounding_box': False}
]
SCALE_MODELS = [
{'class': Scale,
'parameters': {'factor': 2*u.m},
'evaluation': [(1*u.m, 2*u.m)],
'bounding_box': False},
{'class': Multiply,
'parameters': {'factor': 2*u.m},
'evaluation': [(1 * u.m/u.m, 2*u.m)],
'bounding_box': False},
]
PHYS_MODELS_1D = [
{'class': Plummer1D,
'parameters': {'mass': 3 * u.kg, 'r_plum': 0.5 * u.m},
     'evaluation': [(1 * u.m, 0.10249381 * u.kg / (u.m ** 3))],
'bounding_box': False},
{'class': Drude1D,
'parameters': {'amplitude': 1.0 * u.m, 'x_0': 2175. * u.AA, 'fwhm': 400. * u.AA},
'evaluation': [(2000*u.AA, 0.5452317018423869 * u.m)],
'bounding_box': [-17825, 22175] * u.AA},
]
FUNC_MODELS_2D = [
{'class': Gaussian2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
'bounding_box': [[-13.02230366, 15.02230366],
[-12.02230366, 16.02230366]] * u.m},
{'class': Const2D,
'parameters': {'amplitude': 3 * u.Jy},
'evaluation': [(0.6 * u.micron, 0.2 * u.m, 3 * u.Jy)],
'bounding_box': False},
{'class': Disk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'R_0': 300 * u.cm},
'evaluation': [(5.8 * u.m, 201 * u.cm, 3 * u.Jy)],
'bounding_box': [[-1, 5], [0, 6]] * u.m},
{'class': TrapezoidDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
'evaluation': [(3.5 * u.m, 2 * u.m, 1.5 * u.Jy)],
'bounding_box': [[-2, 6], [-3, 5]] * u.m},
{'class': Ellipse2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
'evaluation': [(4 * u.m, 300 * u.cm, 3 * u.Jy)],
'bounding_box': [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m},
{'class': Ring2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
'evaluation': [(302.05 * u.cm, 2 * u.m + 10 * u.um, 3 * u.Jy)],
'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m},
{'class': Box2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.s,
'x_width': 4 * u.cm, 'y_width': 3 * u.s},
'evaluation': [(301 * u.cm, 3 * u.s, 3 * u.Jy)],
'bounding_box': [[0.5 * u.s, 3.5 * u.s], [2.98 * u.m, 3.02 * u.m]]},
{'class': RickerWavelet2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'sigma': 1 * u.m},
'evaluation': [(4 * u.m, 2.5 * u.m, 0.602169107 * u.Jy)],
'bounding_box': False},
{'class': AiryDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'radius': 1 * u.m},
'evaluation': [(4 * u.m, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
'bounding_box': False},
{'class': Moffat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 2 * u.um, 0.202565833 * u.Jy)],
'bounding_box': False},
{'class': Sersic2D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
'ellip': 0, 'theta': 0},
'evaluation': [(3 * u.arcsec, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
'bounding_box': False},
{'class': Planar2D,
'parameters': {'slope_x': 2*u.m, 'slope_y': 3*u.m, 'intercept': 4*u.m},
'evaluation': [(5*u.m/u.m, 6*u.m/u.m, 32*u.m)],
'bounding_box': False},
]
POWERLAW_MODELS = [
{'class': PowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1},
'evaluation': [(1 * u.m, 500 * u.g)],
'bounding_box': False},
{'class': BrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1},
'evaluation': [(1 * u.m, 50 * u.kg), (1 * u.cm, 50 * u.kg)],
'bounding_box': False},
{'class': SmoothlyBrokenPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_break': 10 * u.cm, 'alpha_1': 1, 'alpha_2': -1, 'delta': 1},
'evaluation': [(1 * u.cm, 15.125 * u.kg), (1 * u.m, 15.125 * u.kg)],
'bounding_box': False},
{'class': ExponentialCutoffPowerLaw1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'x_cutoff': 1 * u.m},
'evaluation': [(1 * u.um, 499999.5 * u.kg), (10 * u.m, 50 * np.exp(-10) * u.g)],
'bounding_box': False},
{'class': LogParabola1D,
'parameters': {'amplitude': 5 * u.kg, 'x_0': 10 * u.cm, 'alpha': 1, 'beta': 2},
'evaluation': [(1 * u.cm, 5 * 0.1 ** (-1 - 2 * np.log(0.1)) * u.kg)],
'bounding_box': False}
]
POLY_MODELS = [
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.one, 'c1': 2 / u.m, 'c2': 3 / u.m**2},
'evaluation': [(3 * u.m, 36 * u.one)],
'bounding_box': False},
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg / u.m, 'c2': 3 * u.kg / u.m**2},
'evaluation': [(3 * u.m, 36 * u.kg)],
'bounding_box': False},
{'class': Polynomial1D,
'parameters': {'degree': 2, 'c0': 3 * u.kg, 'c1': 2 * u.kg, 'c2': 3 * u.kg},
'evaluation': [(3 * u.one, 36 * u.kg)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.one, 'c1_0': 2 / u.m, 'c2_0': 3 / u.m**2,
'c0_1': 3 / u.s, 'c0_2': -2 / u.s**2, 'c1_1': 5 / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.one)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg / u.m, 'c2_0': 3 * u.kg / u.m**2,
'c0_1': 3 * u.kg / u.s, 'c0_2': -2 * u.kg / u.s**2, 'c1_1': 5 * u.kg / u.m / u.s},
'evaluation': [(3 * u.m, 2 * u.s, 64 * u.kg)],
'bounding_box': False},
{'class': Polynomial2D,
'parameters': {'degree': 2, 'c0_0': 3 * u.kg, 'c1_0': 2 * u.kg, 'c2_0': 3 * u.kg,
'c0_1': 3 * u.kg, 'c0_2': -2 * u.kg, 'c1_1': 5 * u.kg},
'evaluation': [(3 * u.one, 2 * u.one, 64 * u.kg)],
'bounding_box': False},
]
MODELS = FUNC_MODELS_1D + SCALE_MODELS + FUNC_MODELS_2D + POWERLAW_MODELS +\
PHYS_MODELS_1D + POLY_MODELS
SCIPY_MODELS = set([Sersic1D, Sersic2D, AiryDisk2D])
# These models will fail the fitting test because the built-in fitting
# data would produce non-finite values
NON_FINITE_MODELS = [
Sersic1D,
PowerLaw1D,
ExponentialCutoffPowerLaw1D,
LogParabola1D
]
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_without_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
kwargs = dict(zip(('x', 'y'), args))
else:
kwargs = dict(zip(('x', 'y', 'z'), args))
if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
mnu = m.without_units_for_data(**kwargs)
args = [x.value for x in kwargs.values()]
assert_quantity_allclose(mnu(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
assert_quantity_allclose(m(*args[:-1]), args[-1])
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_x_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
@pytest.mark.parametrize('model', MODELS)
def test_models_evaluate_with_units_param_array(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
params = {}
for key, value in model['parameters'].items():
if value is None or key == 'degree':
params[key] = value
else:
params[key] = np.repeat(value, 2)
params['n_models'] = 2
m = model['class'](**params)
for args in model['evaluation']:
if len(args) == 2:
x, y = args
x_arr = u.Quantity([x, x])
result = m(x_arr)
assert_quantity_allclose(result, u.Quantity([y, y]))
else:
x, y, z = args
x_arr = u.Quantity([x, x])
y_arr = u.Quantity([y, y])
result = m(x_arr, y_arr)
assert_quantity_allclose(result, u.Quantity([z, z]))
if model['class'] == Drude1D:
params['x_0'][-1] = 0 * u.AA
with pytest.raises(InputParameterError) as err:
model['class'](**params)
assert str(err.value) ==\
'0 is not an allowed value for x_0'
@pytest.mark.parametrize('model', MODELS)
def test_models_bounding_box(model):
# In some cases, having units in parameters caused bounding_box to break,
# so this is to ensure that it works correctly.
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
m = model['class'](**model['parameters'])
# In the following we need to explicitly test that the value is False
    # since Quantities no longer evaluate as True
if model['bounding_box'] is False:
# Check that NotImplementedError is raised, so that if bounding_box is
# implemented we remember to set bounding_box=True in the list of models
# above
with pytest.raises(NotImplementedError):
m.bounding_box
else:
# A bounding box may have inhomogeneous units so we need to check the
# values one by one.
for i in range(len(model['bounding_box'])):
bbox = m.bounding_box
if isinstance(bbox, ModelBoundingBox):
bbox = bbox.bounding_box()
assert_quantity_allclose(bbox[i], model['bounding_box'][i])
@pytest.mark.parametrize('model', MODELS)
def test_compound_model_input_units_equivalencies_defaults(model):
m = model['class'](**model['parameters'])
assert m.input_units_equivalencies is None
compound_model = m + m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x':1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m - m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x':1})
assert fixed_input_model.input_units_equivalencies is None
compound_model = m & m
assert compound_model.inputs_map()['x1'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x0':1})
assert fixed_input_model.inputs_map()['x1'][0].input_units_equivalencies is None
assert fixed_input_model.input_units_equivalencies is None
if m.n_outputs == m.n_inputs:
compound_model = m | m
assert compound_model.inputs_map()['x'][0].input_units_equivalencies is None
fixed_input_model = fix_inputs(compound_model, {'x':1})
assert fixed_input_model.input_units_equivalencies is None
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings(r'ignore:.*:RuntimeWarning')
@pytest.mark.filterwarnings(r'ignore:Model is linear in parameters.*')
@pytest.mark.filterwarnings(r'ignore:The fit may be unsuccessful.*')
@pytest.mark.parametrize('model', MODELS)
def test_models_fitting(model):
if model['class'] in NON_FINITE_MODELS:
return
m = model['class'](**model['parameters'])
if len(model['evaluation'][0]) == 2:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.exp(-x.value ** 2) * model['evaluation'][0][1].unit
args = [x, y]
else:
x = np.linspace(1, 3, 100) * model['evaluation'][0][0].unit
y = np.linspace(1, 3, 100) * model['evaluation'][0][1].unit
z = np.exp(-x.value**2 - y.value**2) * model['evaluation'][0][2].unit
args = [x, y, z]
# Test that the model fits even if it has units on parameters
fitter = LevMarLSQFitter()
m_new = fitter(m, *args)
# Check that units have been put back correctly
for param_name in m.param_names:
par_bef = getattr(m, param_name)
par_aft = getattr(m_new, param_name)
if par_bef.unit is None:
            # If the parameter had no unit before the fit, allow it to come
            # back either without a unit or with a radian unit (e.g. angles).
assert par_aft.unit is None or par_aft.unit is u.rad
else:
assert par_aft.unit.is_equivalent(par_bef.unit)
unit_mismatch_models = [
{'class': Gaussian2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_mean': 2 * u.m, 'y_mean': 1 * u.m,
'x_stddev': 3 * u.m, 'y_stddev': 2 * u.m, 'theta': 45 * u.deg},
'evaluation': [(412.1320343 * u.cm, 3.121320343 * u.K, 3 * u.Jy * np.exp(-0.5)),
(412.1320343 * u.K, 3.121320343 * u.m, 3 * u.Jy * np.exp(-0.5))],
'bounding_box': [[-14.18257445, 16.18257445], [-10.75693665, 14.75693665]] * u.m},
{'class': Ellipse2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'a': 300 * u.cm, 'b': 200 * u.cm, 'theta': 45 * u.deg},
'evaluation': [(4 * u.m, 300 * u.K, 3 * u.Jy),
(4 * u.K, 300 * u.cm, 3 * u.Jy)],
'bounding_box': [[-0.76046808, 4.76046808], [0.68055697, 5.31944302]] * u.m},
{'class': Disk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'R_0': 300 * u.cm},
'evaluation': [(5.8 * u.m, 201 * u.K, 3 * u.Jy),
(5.8 * u.K, 201 * u.cm, 3 * u.Jy)],
'bounding_box': [[-1, 5], [0, 6]] * u.m},
{'class': Ring2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'r_in': 2 * u.cm, 'r_out': 2.1 * u.cm},
'evaluation': [(302.05 * u.cm, 2 * u.K + 10 * u.K, 3 * u.Jy),
(302.05 * u.K, 2 * u.m + 10 * u.um, 3 * u.Jy)],
'bounding_box': [[1.979, 2.021], [2.979, 3.021]] * u.m},
{'class': TrapezoidDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 1 * u.m, 'y_0': 2 * u.m,
'R_0': 100 * u.cm, 'slope': 1 * u.Jy / u.m},
'evaluation': [(3.5 * u.m, 2 * u.K, 1.5 * u.Jy),
(3.5 * u.K, 2 * u.m, 1.5 * u.Jy)],
'bounding_box': [[-2, 6], [-3, 5]] * u.m},
{'class': RickerWavelet2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'sigma': 1 * u.m},
'evaluation': [(4 * u.m, 2.5 * u.K, 0.602169107 * u.Jy),
(4 * u.K, 2.5 * u.m, 0.602169107 * u.Jy)],
'bounding_box': False},
{'class': AiryDisk2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 3 * u.m, 'y_0': 2 * u.m,
'radius': 1 * u.m},
'evaluation': [(4 * u.m, 2.1 * u.K, 4.76998480e-05 * u.Jy),
(4 * u.K, 2.1 * u.m, 4.76998480e-05 * u.Jy)],
'bounding_box': False},
{'class': Moffat2D,
'parameters': {'amplitude': 3 * u.Jy, 'x_0': 4.4 * u.um, 'y_0': 3.5 * u.um,
'gamma': 1e-3 * u.mm, 'alpha': 1},
'evaluation': [(1000 * u.nm, 2 * u.K, 0.202565833 * u.Jy),
(1000 * u.K, 2 * u.um, 0.202565833 * u.Jy)],
'bounding_box': False},
{'class': Sersic2D,
'parameters': {'amplitude': 3 * u.MJy / u.sr, 'x_0': 1 * u.arcsec,
'y_0': 2 * u.arcsec, 'r_eff': 2 * u.arcsec, 'n': 4,
'ellip': 0, 'theta': 0},
'evaluation': [(3 * u.arcsec, 2.5 * u.m, 2.829990489 * u.MJy/u.sr),
(3 * u.m, 2.5 * u.arcsec, 2.829990489 * u.MJy/u.sr)],
'bounding_box': False},
]
@pytest.mark.parametrize('model', unit_mismatch_models)
def test_input_unit_mismatch_error(model):
if not HAS_SCIPY and model['class'] in SCIPY_MODELS:
pytest.skip()
message = "Units of 'x' and 'y' inputs should match"
m = model['class'](**model['parameters'])
for args in model['evaluation']:
if len(args) == 2:
kwargs = dict(zip(('x', 'y'), args))
else:
kwargs = dict(zip(('x', 'y', 'z'), args))
if kwargs['x'].unit.is_equivalent(kwargs['y'].unit):
kwargs['x'] = kwargs['x'].to(kwargs['y'].unit)
with pytest.raises(u.UnitsError) as err:
m.without_units_for_data(**kwargs)
assert str(err.value) == message
|
fdf4236005bd1a784bfd5488459c94d8f487bb3613a2ddf7ee88f04461556f80 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.utils.iers package provides access to the tables provided by
the International Earth Rotation and Reference Systems Service, in
particular allowing interpolation of published UT1-UTC values for given
times. These are used in `astropy.time` to provide UT1 values. The polar
motions are also used for determining earth orientation for
celestial-to-terrestrial coordinate transformations
(in `astropy.coordinates`).
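
A minimal usage sketch with the bundled IERS-B table (the exact value
depends on the table version)::

    >>> from astropy.utils import iers
    >>> from astropy.time import Time
    >>> iers_b = iers.IERS_B.open()  # doctest: +SKIP
    >>> iers_b.ut1_utc(Time('2010-01-01'))  # doctest: +SKIP
    <Quantity 0.1... s>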
"""
import re
from datetime import datetime
from warnings import warn
from urllib.parse import urlparse
import numpy as np
import erfa
from astropy.time import Time, TimeDelta
from astropy import config as _config
from astropy import units as u
from astropy.table import QTable, MaskedColumn
from astropy.utils.data import (get_pkg_data_filename, clear_download_cache,
is_url_in_cache, get_readable_fileobj)
from astropy.utils.state import ScienceState
from astropy import utils
from astropy.utils.exceptions import AstropyWarning
__all__ = ['Conf', 'conf', 'earth_orientation_table',
'IERS', 'IERS_B', 'IERS_A', 'IERS_Auto',
'FROM_IERS_B', 'FROM_IERS_A', 'FROM_IERS_A_PREDICTION',
'TIME_BEFORE_IERS_RANGE', 'TIME_BEYOND_IERS_RANGE',
'IERS_A_FILE', 'IERS_A_URL', 'IERS_A_URL_MIRROR', 'IERS_A_README',
'IERS_B_FILE', 'IERS_B_URL', 'IERS_B_README',
'IERSRangeError', 'IERSStaleWarning',
'LeapSeconds', 'IERS_LEAP_SECOND_FILE', 'IERS_LEAP_SECOND_URL',
'IETF_LEAP_SECOND_URL']
# IERS-A default file name, URL, and ReadMe with content description
IERS_A_FILE = 'finals2000A.all'
IERS_A_URL = 'https://maia.usno.navy.mil/ser7/finals2000A.all'
IERS_A_URL_MIRROR = 'https://datacenter.iers.org/data/9/finals2000A.all'
IERS_A_README = get_pkg_data_filename('data/ReadMe.finals2000A')
# IERS-B default file name, URL, and ReadMe with content description
IERS_B_FILE = get_pkg_data_filename('data/eopc04_IAU2000.62-now')
IERS_B_URL = 'http://hpiers.obspm.fr/iers/eop/eopc04/eopc04_IAU2000.62-now'
IERS_B_README = get_pkg_data_filename('data/ReadMe.eopc04_IAU2000')
# LEAP SECONDS default file name, URL, and alternative format/URL
IERS_LEAP_SECOND_FILE = get_pkg_data_filename('data/Leap_Second.dat')
IERS_LEAP_SECOND_URL = 'https://hpiers.obspm.fr/iers/bul/bulc/Leap_Second.dat'
IETF_LEAP_SECOND_URL = 'https://www.ietf.org/timezones/data/leap-seconds.list'
# Status/source values returned by IERS.ut1_utc
FROM_IERS_B = 0
FROM_IERS_A = 1
FROM_IERS_A_PREDICTION = 2
TIME_BEFORE_IERS_RANGE = -1
TIME_BEYOND_IERS_RANGE = -2
MJD_ZERO = 2400000.5
INTERPOLATE_ERROR = """\
interpolating from IERS_Auto using predictive values that are more
than {0} days old.
Normally you should not see this error because this class
automatically downloads the latest IERS-A table. Perhaps you are
offline? If you understand what you are doing then this error can be
suppressed by setting the auto_max_age configuration variable to
``None``:
from astropy.utils.iers import conf
conf.auto_max_age = None
"""
MONTH_ABBR = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug',
'Sep', 'Oct', 'Nov', 'Dec']
def download_file(*args, **kwargs):
"""
Overload astropy.utils.data.download_file within iers module to use a
custom (longer) wait time. This just passes through ``*args`` and
``**kwargs`` after temporarily setting the download_file remote timeout to
the local ``iers.conf.remote_timeout`` value.
"""
kwargs.setdefault('http_headers', {'User-Agent': 'astropy/iers',
'Accept': '*/*'})
with utils.data.conf.set_temp('remote_timeout', conf.remote_timeout):
return utils.data.download_file(*args, **kwargs)
def _none_to_float(value):
"""
Convert None to a valid floating point value. Especially
for auto_max_age = None.
"""
return (value if value is not None else np.finfo(float).max)
class IERSStaleWarning(AstropyWarning):
pass
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.utils.iers`.
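
    Examples
    --------
    A minimal sketch of temporarily disabling auto-download (``set_temp`` is
    inherited from `astropy.config.ConfigNamespace`)::

        >>> from astropy.utils import iers
        >>> with iers.conf.set_temp('auto_download', False):  # doctest: +SKIP
        ...     iers_tab = iers.IERS_Auto.open()  # bundled IERS-B is used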
"""
auto_download = _config.ConfigItem(
True,
'Enable auto-downloading of the latest IERS data. If set to False '
'then the local IERS-B file will be used by default (even if the '
'full IERS file with predictions was already downloaded and cached). '
'This parameter also controls whether internet resources will be '
'queried to update the leap second table if the installed version is '
'out of date. Default is True.')
auto_max_age = _config.ConfigItem(
30.0,
'Maximum age (days) of predictive data before auto-downloading. '
'See "Auto refresh behavior" in astropy.utils.iers documentation for details. '
'Default is 30.')
iers_auto_url = _config.ConfigItem(
IERS_A_URL,
'URL for auto-downloading IERS file data.')
iers_auto_url_mirror = _config.ConfigItem(
IERS_A_URL_MIRROR,
'Mirror URL for auto-downloading IERS file data.')
remote_timeout = _config.ConfigItem(
10.0,
'Remote timeout downloading IERS file data (seconds).')
system_leap_second_file = _config.ConfigItem(
'',
'System file with leap seconds.')
iers_leap_second_auto_url = _config.ConfigItem(
IERS_LEAP_SECOND_URL,
'URL for auto-downloading leap seconds.')
ietf_leap_second_auto_url = _config.ConfigItem(
IETF_LEAP_SECOND_URL,
'Alternate URL for auto-downloading leap seconds.')
conf = Conf()
class IERSRangeError(IndexError):
"""
    Any error raised when dates are outside the range covered by the IERS table.
"""
class IERS(QTable):
"""Generic IERS table class, defining interpolation functions.
Sub-classed from `astropy.table.QTable`. The table should hold columns
'MJD', 'UT1_UTC', 'dX_2000A'/'dY_2000A', and 'PM_x'/'PM_y'.
"""
iers_table = None
"""Cached table, returned if ``open`` is called without arguments."""
@classmethod
def open(cls, file=None, cache=False, **kwargs):
"""Open an IERS table, reading it from a file if not loaded before.
Parameters
----------
file : str or None
full local or network path to the ascii file holding IERS data,
for passing on to the ``read`` class methods (further optional
arguments that are available for some IERS subclasses can be added).
If None, use the default location from the ``read`` class method.
cache : bool
Whether to use cache. Defaults to False, since IERS files
are regularly updated.
Returns
-------
IERS
An IERS table class instance
Notes
-----
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table if ``file=None`` (the default).
If a table needs to be re-read from disk, pass on an explicit file
location or use the (sub-class) close method and re-open.
If the location is a network location it is first downloaded via
download_file.
For the IERS class itself, an IERS_B sub-class instance is opened.
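
        Examples
        --------
        A minimal sketch; calls without arguments return the memoized table::

            >>> from astropy.utils import iers
            >>> tab = iers.IERS.open()  # doctest: +SKIP
            >>> tab is iers.IERS.open()  # doctest: +SKIP
            True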
"""
if file is not None or cls.iers_table is None:
if file is not None:
if urlparse(file).netloc:
kwargs.update(file=download_file(file, cache=cache))
else:
kwargs.update(file=file)
# TODO: the below is really ugly and probably a bad idea. Instead,
# there should probably be an IERSBase class, which provides
# useful methods but cannot really be used on its own, and then
# *perhaps* an IERS class which provides best defaults. But for
# backwards compatibility, we use the IERS_B reader for IERS here.
if cls is IERS:
cls.iers_table = IERS_B.read(**kwargs)
else:
cls.iers_table = cls.read(**kwargs)
return cls.iers_table
@classmethod
def close(cls):
"""Remove the IERS table from the class.
This allows the table to be re-read from disk during one's session
(e.g., if one finds it is out of date and has updated the file).
"""
cls.iers_table = None
def mjd_utc(self, jd1, jd2=0.):
"""Turn a time to MJD, returning integer and fractional parts.
Parameters
----------
jd1 : float, array, or `~astropy.time.Time`
first part of two-part JD, or Time object
jd2 : float or array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
Returns
-------
mjd : float or array
integer part of MJD
utc : float or array
fractional part of MJD
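
        Examples
        --------
        A minimal sketch; splitting is pure arithmetic relative to
        ``MJD_ZERO`` and needs no table data::

            >>> from astropy.utils import iers
            >>> iers.IERS_B.open().mjd_utc(2450000.5, 0.25)  # doctest: +SKIP
            (50000.0, 0.25)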
"""
try: # see if this is a Time object
jd1, jd2 = jd1.utc.jd1, jd1.utc.jd2
except Exception:
pass
mjd = np.floor(jd1 - MJD_ZERO + jd2)
utc = jd1 - (MJD_ZERO+mjd) + jd2
return mjd, utc
def ut1_utc(self, jd1, jd2=0., return_status=False):
"""Interpolate UT1-UTC corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
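
        Examples
        --------
        A minimal sketch (the interpolated value is illustrative and depends
        on the table version)::

            >>> from astropy.utils import iers
            >>> iers_tab = iers.IERS_B.open()  # doctest: +SKIP
            >>> iers_tab.ut1_utc(2455000.5)  # doctest: +SKIP
            <Quantity 0.1... s>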
"""
return self._interpolate(jd1, jd2, ['UT1_UTC'],
self.ut1_utc_source if return_status else None)
def dcip_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate CIP corrections in IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD (default 0., ignored if jd1 is Time)
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
D_x : `~astropy.units.Quantity` ['angle']
x component of CIP correction for the requested times.
D_y : `~astropy.units.Quantity` ['angle']
y component of CIP correction for the requested times
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['dX_2000A', 'dY_2000A'],
self.dcip_source if return_status else None)
def pm_xy(self, jd1, jd2=0., return_status=False):
"""Interpolate polar motions from IERS Table for given dates.
Parameters
----------
jd1 : float, array of float, or `~astropy.time.Time` object
first part of two-part JD, or Time object
jd2 : float or float array, optional
second part of two-part JD.
Default is 0., ignored if jd1 is `~astropy.time.Time`.
return_status : bool
Whether to return status values. If False (default),
raise ``IERSRangeError`` if any time is out of the range covered
by the IERS table.
Returns
-------
PM_x : `~astropy.units.Quantity` ['angle']
x component of polar motion for the requested times.
PM_y : `~astropy.units.Quantity` ['angle']
y component of polar motion for the requested times.
status : int or int array
Status values (if ``return_status``=``True``)::
``iers.FROM_IERS_B``
``iers.FROM_IERS_A``
``iers.FROM_IERS_A_PREDICTION``
``iers.TIME_BEFORE_IERS_RANGE``
``iers.TIME_BEYOND_IERS_RANGE``
"""
return self._interpolate(jd1, jd2, ['PM_x', 'PM_y'],
self.pm_source if return_status else None)
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""
Check that the indices from interpolation match those after clipping
to the valid table range. This method gets overridden in the IERS_Auto
class because it has different requirements.
"""
if np.any(indices_orig != indices_clipped):
raise IERSRangeError('(some) times are outside of range covered '
'by IERS table.')
def _interpolate(self, jd1, jd2, columns, source=None):
mjd, utc = self.mjd_utc(jd1, jd2)
# enforce array
is_scalar = not hasattr(mjd, '__array__') or mjd.ndim == 0
if is_scalar:
mjd = np.array([mjd])
utc = np.array([utc])
elif mjd.size == 0:
# Short-cut empty input.
return np.array([])
self._refresh_table_as_needed(mjd)
# For typical format, will always find a match (since MJD are integer)
# hence, important to define which side we will be; this ensures
# self['MJD'][i-1]<=mjd<self['MJD'][i]
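        # Worked example: for table MJDs [100, 101, 102] and mjd = 101,
        # side='right' gives i = 2, so below i0, i1 = 1, 2 and the bracketing
        # condition MJD[1] <= mjd < MJD[2] holds.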
i = np.searchsorted(self['MJD'].value, mjd, side='right')
# Get index to MJD at or just below given mjd, clipping to ensure we
# stay in range of table (status will be set below for those outside)
i1 = np.clip(i, 1, len(self) - 1)
i0 = i1 - 1
mjd_0, mjd_1 = self['MJD'][i0].value, self['MJD'][i1].value
results = []
for column in columns:
val_0, val_1 = self[column][i0], self[column][i1]
d_val = val_1 - val_0
if column == 'UT1_UTC':
# Check & correct for possible leap second (correcting diff.,
# not 1st point, since jump can only happen right at 2nd point)
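                    # E.g., if UT1-UTC steps from -0.6 s to +0.4 s across a
                    # leap second, d_val = 1.0 s, and subtracting
                    # round(d_val) = 1 s removes the artificial jump.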
d_val -= d_val.round()
# Linearly interpolate (which is what TEMPO does for UT1-UTC, but
# may want to follow IERS gazette #13 for more precise
# interpolation and correction for tidal effects;
# https://maia.usno.navy.mil/iers-gaz13)
val = val_0 + (mjd - mjd_0 + utc) / (mjd_1 - mjd_0) * d_val
# Do not extrapolate outside range, instead just propagate last values.
val[i == 0] = self[column][0]
val[i == len(self)] = self[column][-1]
if is_scalar:
val = val[0]
results.append(val)
if source:
# Set status to source, using the routine passed in.
status = source(i1)
# Check for out of range
status[i == 0] = TIME_BEFORE_IERS_RANGE
status[i == len(self)] = TIME_BEYOND_IERS_RANGE
if is_scalar:
status = status[0]
results.append(status)
return results
else:
self._check_interpolate_indices(i1, i, np.max(mjd))
return results[0] if len(results) == 1 else results
def _refresh_table_as_needed(self, mjd):
"""
Potentially update the IERS table in place depending on the requested
        time values in ``mjd`` and the time span of the table. The base behavior
is not to update the table. ``IERS_Auto`` overrides this method.
"""
pass
def ut1_utc_source(self, i):
"""Source for UT1-UTC. To be overridden by subclass."""
return np.zeros_like(i)
def dcip_source(self, i):
"""Source for CIP correction. To be overridden by subclass."""
return np.zeros_like(i)
def pm_source(self, i):
"""Source for polar motion. To be overridden by subclass."""
return np.zeros_like(i)
@property
def time_now(self):
"""
Property to provide the current time, but also allow for explicitly setting
the _time_now attribute for testing purposes.
"""
try:
return self._time_now
except Exception:
return Time.now()
def _convert_col_for_table(self, col):
# Fill masked columns with units to avoid dropped-mask warnings
# when converting to Quantity.
# TODO: Once we support masked quantities, we can drop this and
# in the code below replace b_bad with table['UT1_UTC_B'].mask, etc.
if (getattr(col, 'unit', None) is not None and
isinstance(col, MaskedColumn)):
col = col.filled(np.nan)
return super()._convert_col_for_table(col)
class IERS_A(IERS):
"""IERS Table class targeted to IERS A, provided by USNO.
These include rapid turnaround and predicted times.
See https://datacenter.iers.org/eop.php
Notes
-----
The IERS A file is not part of astropy. It can be downloaded from
``iers.IERS_A_URL`` or ``iers.IERS_A_URL_MIRROR``. See ``iers.__doc__``
for instructions on use in ``Time``, etc.
"""
iers_table = None
@classmethod
def _combine_a_b_columns(cls, iers_a):
"""
Return a new table with appropriate combination of IERS_A and B columns.
"""
# IERS A has some rows at the end that hold nothing but dates & MJD
# presumably to be filled later. Exclude those a priori -- there
# should at least be a predicted UT1-UTC and PM!
table = iers_a[np.isfinite(iers_a['UT1_UTC_A']) &
(iers_a['PolPMFlag_A'] != '')]
# This does nothing for IERS_A, but allows IERS_Auto to ensure the
# IERS B values in the table are consistent with the true ones.
table = cls._substitute_iers_b(table)
# Combine A and B columns, using B where possible.
b_bad = np.isnan(table['UT1_UTC_B'])
table['UT1_UTC'] = np.where(b_bad, table['UT1_UTC_A'], table['UT1_UTC_B'])
table['UT1Flag'] = np.where(b_bad, table['UT1Flag_A'], 'B')
# Repeat for polar motions.
b_bad = np.isnan(table['PM_X_B']) | np.isnan(table['PM_Y_B'])
table['PM_x'] = np.where(b_bad, table['PM_x_A'], table['PM_X_B'])
table['PM_y'] = np.where(b_bad, table['PM_y_A'], table['PM_Y_B'])
table['PolPMFlag'] = np.where(b_bad, table['PolPMFlag_A'], 'B')
b_bad = np.isnan(table['dX_2000A_B']) | np.isnan(table['dY_2000A_B'])
table['dX_2000A'] = np.where(b_bad, table['dX_2000A_A'], table['dX_2000A_B'])
table['dY_2000A'] = np.where(b_bad, table['dY_2000A_A'], table['dY_2000A_B'])
table['NutFlag'] = np.where(b_bad, table['NutFlag_A'], 'B')
# Get the table index for the first row that has predictive values
# PolPMFlag_A IERS (I) or Prediction (P) flag for
# Bull. A polar motion values
# UT1Flag_A IERS (I) or Prediction (P) flag for
# Bull. A UT1-UTC values
# Since only 'P' and 'I' are possible and 'P' is guaranteed to come
        # after 'I', we can use searchsorted for a roughly hundred-fold
        # speed-up over finding the first index where the flag equals 'P'.
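        # E.g., for flags ['I', 'I', 'P', 'P'], np.searchsorted(..., 'P')
        # returns 2, the index of the first predictive row.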
p_index = min(np.searchsorted(table['UT1Flag_A'], 'P'),
np.searchsorted(table['PolPMFlag_A'], 'P'))
table.meta['predictive_index'] = p_index
table.meta['predictive_mjd'] = table['MJD'][p_index].value
return table
@classmethod
def _substitute_iers_b(cls, table):
# See documentation in IERS_Auto.
return table
@classmethod
def read(cls, file=None, readme=None):
"""Read IERS-A table from a finals2000a.* file provided by USNO.
Parameters
----------
file : str
full path to ascii file holding IERS-A data.
Defaults to ``iers.IERS_A_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_A_README``.
Returns
-------
``IERS_A`` class instance
"""
if file is None:
file = IERS_A_FILE
if readme is None:
readme = IERS_A_README
iers_a = super().read(file, format='cds', readme=readme)
# Combine the A and B data for UT1-UTC and PM columns
table = cls._combine_a_b_columns(iers_a)
table.meta['data_path'] = file
table.meta['readme_path'] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
ut1flag = self['UT1Flag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[ut1flag == 'I'] = FROM_IERS_A
source[ut1flag == 'P'] = FROM_IERS_A_PREDICTION
return source
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
nutflag = self['NutFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[nutflag == 'I'] = FROM_IERS_A
source[nutflag == 'P'] = FROM_IERS_A_PREDICTION
return source
def pm_source(self, i):
"""Set polar motion source flag for entries in IERS table"""
pmflag = self['PolPMFlag'][i]
source = np.ones_like(i) * FROM_IERS_B
source[pmflag == 'I'] = FROM_IERS_A
source[pmflag == 'P'] = FROM_IERS_A_PREDICTION
return source
class IERS_B(IERS):
"""IERS Table class targeted to IERS B, provided by IERS itself.
These are final values; see https://www.iers.org/IERS/EN/Home/home_node.html
Notes
-----
If the package IERS B file (```iers.IERS_B_FILE``) is out of date, a new
version can be downloaded from ``iers.IERS_B_URL``.
"""
iers_table = None
@classmethod
def read(cls, file=None, readme=None, data_start=14):
"""Read IERS-B table from a eopc04_iau2000.* file provided by IERS.
Parameters
----------
file : str
full path to ascii file holding IERS-B data.
Defaults to package version, ``iers.IERS_B_FILE``.
readme : str
full path to ascii file holding CDS-style readme.
Defaults to package version, ``iers.IERS_B_README``.
data_start : int
starting row. Default is 14, appropriate for standard IERS files.
Returns
-------
``IERS_B`` class instance
"""
if file is None:
file = IERS_B_FILE
if readme is None:
readme = IERS_B_README
table = super().read(file, format='cds', readme=readme,
data_start=data_start)
table.meta['data_path'] = file
table.meta['readme_path'] = readme
return table
def ut1_utc_source(self, i):
"""Set UT1-UTC source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def dcip_source(self, i):
"""Set CIP correction source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
def pm_source(self, i):
"""Set PM source flag for entries in IERS table"""
return np.ones_like(i) * FROM_IERS_B
class IERS_Auto(IERS_A):
"""
Provide most-recent IERS data and automatically handle downloading
of updated values as necessary.
"""
iers_table = None
@classmethod
def open(cls):
"""If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to True (default), then open a recent version of the IERS-A
table with predictions for UT1-UTC and polar motion out to
approximately one year from now. If the available version of this file
is older than ``astropy.utils.iers.conf.auto_max_age`` days old
(or non-existent) then it will be downloaded over the network and cached.
If the configuration setting ``astropy.utils.iers.conf.auto_download``
is set to False then ``astropy.utils.iers.IERS()`` is returned. This
is normally the IERS-B table that is supplied with astropy.
On the first call in a session, the table will be memoized (in the
``iers_table`` class attribute), and further calls to ``open`` will
return this stored table.
Returns
-------
`~astropy.table.QTable` instance
With IERS (Earth rotation) data columns
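
        Examples
        --------
        A minimal sketch (may download the IERS-A file on first use)::

            >>> from astropy.utils import iers
            >>> iers_tab = iers.IERS_Auto.open()  # doctest: +SKIP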
"""
if not conf.auto_download:
cls.iers_table = IERS_B.open()
return cls.iers_table
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
if cls.iers_table is not None:
# If the URL has changed, we need to redownload the file, so we
# should ignore the internally cached version.
if cls.iers_table.meta.get('data_url') in all_urls:
return cls.iers_table
try:
filename = download_file(all_urls[0], sources=all_urls, cache=True)
except Exception as err:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning(
f'failed to download {" and ".join(all_urls)}, '
f'using local IERS-B: {err}'))
cls.iers_table = IERS_B.open()
return cls.iers_table
cls.iers_table = cls.read(file=filename)
cls.iers_table.meta['data_url'] = all_urls[0]
return cls.iers_table
def _check_interpolate_indices(self, indices_orig, indices_clipped, max_input_mjd):
"""Check that the indices from interpolation match those after clipping to the
valid table range. The IERS_Auto class is exempted as long as it has
sufficiently recent available data so the clipped interpolation is
always within the confidence bounds of current Earth rotation
knowledge.
"""
predictive_mjd = self.meta['predictive_mjd']
# See explanation in _refresh_table_as_needed for these conditions
auto_max_age = _none_to_float(conf.auto_max_age)
if (max_input_mjd > predictive_mjd and
self.time_now.mjd - predictive_mjd > auto_max_age):
raise ValueError(INTERPOLATE_ERROR.format(auto_max_age))
def _refresh_table_as_needed(self, mjd):
"""Potentially update the IERS table in place depending on the requested
time values in ``mjd`` and the time span of the table.
For IERS_Auto the behavior is that the table is refreshed from the IERS
        server if both of the following apply:
- Any of the requested IERS values are predictive. The IERS-A table
contains predictive data out for a year after the available
definitive values.
        - The first predictive values are at least ``conf.auto_max_age`` days old.
In other words the IERS-A table was created by IERS long enough
ago that it can be considered stale for predictions.
"""
max_input_mjd = np.max(mjd)
now_mjd = self.time_now.mjd
# IERS-A table contains predictive data out for a year after
# the available definitive values.
fpi = self.meta['predictive_index']
predictive_mjd = self.meta['predictive_mjd']
# Update table in place if necessary
auto_max_age = _none_to_float(conf.auto_max_age)
        # If auto_max_age is smaller than the IERS update interval then
        # repeated downloads may occur without getting updated values
        # (giving an IERSStaleWarning).
if auto_max_age < 10:
raise ValueError('IERS auto_max_age configuration value must be larger than 10 days')
if (max_input_mjd > predictive_mjd and
(now_mjd - predictive_mjd) > auto_max_age):
all_urls = (conf.iers_auto_url, conf.iers_auto_url_mirror)
# Get the latest version
try:
filename = download_file(
all_urls[0], sources=all_urls, cache="update")
except Exception as err:
# Issue a warning here, perhaps user is offline. An exception
# will be raised downstream when actually trying to interpolate
# predictive values.
warn(AstropyWarning(
f'failed to download {" and ".join(all_urls)}: {err}.\n'
'A coordinate or time-related '
'calculation might be compromised or fail because the dates are '
'not covered by the available IERS file. See the '
'"IERS data access" section of the astropy documentation '
'for additional information on working offline.'))
return
new_table = self.__class__.read(file=filename)
new_table.meta['data_url'] = str(all_urls[0])
# New table has new values?
if new_table['MJD'][-1] > self['MJD'][-1]:
                # Replace current values from the first predictive index
                # through the end of the current table, in place. This is
                # much faster than deleting those rows and re-adding the
                # whole span with add_row.
new_fpi = np.searchsorted(new_table['MJD'].value, predictive_mjd, side='right')
n_replace = len(self) - fpi
self[fpi:] = new_table[new_fpi:new_fpi + n_replace]
# Sanity check for continuity
if new_table['MJD'][new_fpi + n_replace] - self['MJD'][-1] != 1.0 * u.d:
raise ValueError('unexpected gap in MJD when refreshing IERS table')
# Now add new rows in place
for row in new_table[new_fpi + n_replace:]:
self.add_row(row)
self.meta.update(new_table.meta)
else:
warn(IERSStaleWarning(
'IERS_Auto predictive values are older than {} days but downloading '
'the latest table did not find newer values'.format(conf.auto_max_age)))
@classmethod
def _substitute_iers_b(cls, table):
"""Substitute IERS B values with those from a real IERS B table.
IERS-A has IERS-B values included, but for reasons unknown these
do not match the latest IERS-B values (see comments in #4436).
Here, we use the bundled astropy IERS-B table to overwrite the values
in the downloaded IERS-A table.
"""
iers_b = IERS_B.open()
# Substitute IERS-B values for existing B values in IERS-A table
mjd_b = table['MJD'][np.isfinite(table['UT1_UTC_B'])]
i0 = np.searchsorted(iers_b['MJD'], mjd_b[0], side='left')
i1 = np.searchsorted(iers_b['MJD'], mjd_b[-1], side='right')
iers_b = iers_b[i0:i1]
n_iers_b = len(iers_b)
# If there is overlap then replace IERS-A values from available IERS-B
if n_iers_b > 0:
# Sanity check that we are overwriting the correct values
if not u.allclose(table['MJD'][:n_iers_b], iers_b['MJD']):
raise ValueError('unexpected mismatch when copying '
'IERS-B values into IERS-A table.')
# Finally do the overwrite
table['UT1_UTC_B'][:n_iers_b] = iers_b['UT1_UTC']
table['PM_X_B'][:n_iers_b] = iers_b['PM_x']
table['PM_Y_B'][:n_iers_b] = iers_b['PM_y']
table['dX_2000A_B'][:n_iers_b] = iers_b['dX_2000A']
table['dY_2000A_B'][:n_iers_b] = iers_b['dY_2000A']
return table
class earth_orientation_table(ScienceState):
"""Default IERS table for Earth rotation and reference systems service.
These tables are used to calculate the offsets between ``UT1`` and ``UTC``
and for conversion to Earth-based coordinate systems.
The state itself is an IERS table, as an instance of one of the
`~astropy.utils.iers.IERS` classes. The default, the auto-updating
`~astropy.utils.iers.IERS_Auto` class, should suffice for most
purposes.
Examples
--------
To temporarily use the IERS-B file packaged with astropy::
>>> from astropy.utils import iers
>>> from astropy.time import Time
>>> iers_b = iers.IERS_B.open(iers.IERS_B_FILE)
>>> with iers.earth_orientation_table.set(iers_b):
... print(Time('2000-01-01').ut1.isot)
2000-01-01T00:00:00.355
To use the most recent IERS-A file for the whole session::
>>> iers_a = iers.IERS_A.open(iers.IERS_A_URL) # doctest: +SKIP
>>> iers.earth_orientation_table.set(iers_a) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_A length=17463>...>
To go back to the default (of `~astropy.utils.iers.IERS_Auto`)::
>>> iers.earth_orientation_table.set(None) # doctest: +SKIP
<ScienceState earth_orientation_table: <IERS_Auto length=17428>...>
"""
_value = None
@classmethod
def validate(cls, value):
if value is None:
value = IERS_Auto.open()
if not isinstance(value, IERS):
raise ValueError("earth_orientation_table requires an IERS Table.")
return value
class LeapSeconds(QTable):
"""Leap seconds class, holding TAI-UTC differences.
The table should hold columns 'year', 'month', 'tai_utc'.
Methods are provided to initialize the table from IERS ``Leap_Second.dat``,
IETF/ntp ``leap-seconds.list``, or built-in ERFA/SOFA, and to update the
list used by ERFA.
Notes
-----
    Astropy has a built-in ``iers.IERS_LEAP_SECOND_FILE``. Up to date versions
    can be downloaded from ``iers.IERS_LEAP_SECOND_URL`` or
    ``iers.IETF_LEAP_SECOND_URL``. Many systems also store a version
of ``leap-seconds.list`` for use with ``ntp`` (e.g., on Debian/Ubuntu
systems, ``/usr/share/zoneinfo/leap-seconds.list``).
To prevent querying internet resources if the available local leap second
file(s) are out of date, set ``iers.conf.auto_download = False``. This
must be done prior to performing any ``Time`` scale transformations related
to UTC (e.g. converting from UTC to TAI).
"""
# Note: Time instances in this class should use scale='tai' to avoid
# needing leap seconds in their creation or interpretation.
_re_expires = re.compile(r'^#.*File expires on[:\s]+(\d+\s\w+\s\d+)\s*$')
_expires = None
_auto_open_files = ['erfa',
IERS_LEAP_SECOND_FILE,
'system_leap_second_file',
'iers_leap_second_auto_url',
'ietf_leap_second_auto_url']
"""Files or conf attributes to try in auto_open."""
@classmethod
def open(cls, file=None, cache=False):
"""Open a leap-second list.
Parameters
----------
file : path-like or None
Full local or network path to the file holding leap-second data,
for passing on to the various ``from_`` class methods.
If 'erfa', return the data used by the ERFA library.
If `None`, use default locations from file and configuration to
find a table that is not expired.
cache : bool
Whether to use cache. Defaults to False, since leap-second files
are regularly updated.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Table with 'year', 'month', and 'tai_utc' columns, plus possibly
others.
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
        introduced, i.e., mid-January or mid-July. Expiration dates are thus
        generally at least 150 days after the present. For the auto-loading,
        a list comprising the table shipped with astropy plus the files and
        URLs in `~astropy.utils.iers.Conf` is tried, returning the first
        that is sufficiently new, or the newest among them all.
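
        Examples
        --------
        A minimal sketch using the file bundled with astropy (column names
        follow ``from_iers_leap_seconds``)::

            >>> from astropy.utils import iers
            >>> ls = iers.LeapSeconds.open(iers.IERS_LEAP_SECOND_FILE)  # doctest: +SKIP
            >>> ls.colnames  # doctest: +SKIP
            ['mjd', 'day', 'month', 'year', 'tai_utc']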
"""
if file is None:
return cls.auto_open()
if file.lower() == 'erfa':
return cls.from_erfa()
if urlparse(file).netloc:
file = download_file(file, cache=cache)
# Just try both reading methods.
try:
return cls.from_iers_leap_seconds(file)
except Exception:
return cls.from_leap_seconds_list(file)
@staticmethod
def _today():
# Get current day in scale='tai' without going through a scale change
# (so we do not need leap seconds).
s = '{0.year:04d}-{0.month:02d}-{0.day:02d}'.format(datetime.utcnow())
return Time(s, scale='tai', format='iso', out_subfmt='date')
@classmethod
def auto_open(cls, files=None):
"""Attempt to get an up-to-date leap-second list.
The routine will try the files in sequence until it finds one
whose expiration date is "good enough" (see below). If none
are good enough, it returns the one with the most recent expiration
date, warning if that file is expired.
For remote files that are cached already, the cached file is tried
first before attempting to retrieve it again.
Parameters
----------
files : list of path-like, optional
List of files/URLs to attempt to open. By default, uses
``cls._auto_open_files``.
Returns
-------
leap_seconds : `~astropy.utils.iers.LeapSeconds`
Up to date leap-second table
Notes
-----
Bulletin C is released about 10 days after a possible leap second is
        introduced, i.e., mid-January or mid-July. Expiration dates are thus
        generally at least 150 days after the present. We look for a file
        that expires more than 180 - `~astropy.utils.iers.Conf.auto_max_age`
        days after the present.
"""
offset = 180 - (30 if conf.auto_max_age is None else conf.auto_max_age)
good_enough = cls._today() + TimeDelta(offset, format='jd')
if files is None:
# Basic files to go over (entries in _auto_open_files can be
# configuration items, which we want to be sure are up to date).
files = [getattr(conf, f, f) for f in cls._auto_open_files]
# Remove empty entries.
files = [f for f in files if f]
# Our trials start with normal files and remote ones that are
# already in cache. The bools here indicate that the cache
# should be used.
trials = [(f, True) for f in files
if not urlparse(f).netloc or is_url_in_cache(f)]
# If we are allowed to download, we try downloading new versions
# if none of the above worked.
if conf.auto_download:
trials += [(f, False) for f in files if urlparse(f).netloc]
self = None
err_list = []
# Go through all entries, and return the first one that
# is not expired, or the most up to date one.
for f, allow_cache in trials:
if not allow_cache:
clear_download_cache(f)
try:
trial = cls.open(f, cache=True)
except Exception as exc:
err_list.append(exc)
continue
if self is None or trial.expires > self.expires:
self = trial
self.meta['data_url'] = str(f)
if self.expires > good_enough:
break
if self is None:
raise ValueError('none of the files could be read. The '
'following errors were raised:\n' + str(err_list))
if self.expires < self._today() and conf.auto_max_age is not None:
warn('leap-second file is expired.', IERSStaleWarning)
return self
@property
def expires(self):
"""The limit of validity of the table."""
return self._expires
@classmethod
def _read_leap_seconds(cls, file, **kwargs):
"""Read a file, identifying expiration by matching 'File expires'"""
expires = None
# Find expiration date.
with get_readable_fileobj(file) as fh:
lines = fh.readlines()
for line in lines:
match = cls._re_expires.match(line)
if match:
day, month, year = match.groups()[0].split()
month_nb = MONTH_ABBR.index(month[:3]) + 1
expires = Time(f'{year}-{month_nb:02d}-{day}',
scale='tai', out_subfmt='date')
break
else:
raise ValueError(f'did not find expiration date in {file}')
self = cls.read(lines, format='ascii.no_header', **kwargs)
self._expires = expires
return self
@classmethod
def from_iers_leap_seconds(cls, file=IERS_LEAP_SECOND_FILE):
"""Create a table from a file like the IERS ``Leap_Second.dat``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IERS. By default, uses
``iers.IERS_LEAP_SECOND_FILE``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on 28 June 2020'
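Examples
--------
A sketch reading the file bundled with astropy (which may be out of
date, hence ``# doctest: +SKIP``)::
    >>> from astropy.utils.iers import LeapSeconds
    >>> table = LeapSeconds.from_iers_leap_seconds()  # doctest: +SKIP
    >>> table.expires  # doctest: +SKIP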
"""
return cls._read_leap_seconds(
file, names=['mjd', 'day', 'month', 'year', 'tai_utc'])
@classmethod
def from_leap_seconds_list(cls, file):
"""Create a table from a file like the IETF ``leap-seconds.list``.
Parameters
----------
file : path-like, optional
Full local or network path to the file holding leap-second data
in a format consistent with that used by IETF. Up to date versions
can be retrieved from ``iers.IETF_LEAP_SECOND_URL``.
Notes
-----
The file *must* contain the expiration date in a comment line, like
'# File expires on: 28 June 2020'
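Examples
--------
A sketch assuming a locally downloaded copy of the IETF list (the
file name here is a placeholder)::
    >>> from astropy.utils.iers import LeapSeconds
    >>> table = LeapSeconds.from_leap_seconds_list('leap-seconds.list')  # doctest: +SKIP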
"""
from astropy.io.ascii import convert_numpy # Here to avoid circular import
names = ['ntp_seconds', 'tai_utc', 'comment', 'day', 'month', 'year']
# Note: ntp_seconds does not fit in 32 bits, so it causes problems on
# 32-bit systems without the np.int64 converter.
self = cls._read_leap_seconds(
file, names=names, include_names=names[:2],
converters={'ntp_seconds': [convert_numpy(np.int64)]})
self['mjd'] = (self['ntp_seconds']/86400 + 15020).round()
# Note: cannot use Time.ymdhms, since that might require leap seconds.
isot = Time(self['mjd'], format='mjd', scale='tai').isot
ymd = np.array([[int(part) for part in t.partition('T')[0].split('-')]
for t in isot])
self['year'], self['month'], self['day'] = ymd.T
return self
@classmethod
def from_erfa(cls, built_in=False):
"""Create table from the leap-second list in ERFA.
Parameters
----------
built_in : bool
If `False` (default), retrieve the list currently used by ERFA,
which may have been updated. If `True`, retrieve the list shipped
with erfa.
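Examples
--------
A sketch comparing the current and built-in ERFA tables::
    >>> from astropy.utils.iers import LeapSeconds
    >>> current = LeapSeconds.from_erfa()  # doctest: +SKIP
    >>> built_in = LeapSeconds.from_erfa(built_in=True)  # doctest: +SKIP
    >>> len(current) >= len(built_in)  # doctest: +SKIP
    True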
"""
current = cls(erfa.leap_seconds.get())
current._expires = Time('{0.year:04d}-{0.month:02d}-{0.day:02d}'
.format(erfa.leap_seconds.expires),
scale='tai')
if not built_in:
return current
try:
erfa.leap_seconds.set(None) # reset to defaults
return cls.from_erfa(built_in=False)
finally:
erfa.leap_seconds.set(current)
def update_erfa_leap_seconds(self, initialize_erfa=False):
"""Add any leap seconds not already present to the ERFA table.
This method matches leap seconds with those present in the ERFA table,
and extends the latter as necessary.
Parameters
----------
initialize_erfa : bool, or 'only', or 'empty'
Initialize the ERFA leap second table to its built-in value before
trying to expand it. This is generally not needed but can help
in case it somehow got corrupted. If equal to 'only', the ERFA
table is reinitialized and no attempt is made to update it.
If 'empty', the leap second table is emptied before updating, i.e.,
it is overwritten altogether (note that this may break things in
surprising ways, as most leap second tables do not include pre-1970
pseudo leap-seconds; you were warned).
Returns
-------
n_update : int
Number of items updated.
Raises
------
ValueError
If the leap seconds in the table are not on 1st of January or July,
or if the matches are inconsistent. This would normally suggest
a corrupted leap second table, but might also indicate that the
ERFA table was corrupted. If needed, the ERFA table can be reset
by calling this method with an appropriate value for
``initialize_erfa``.
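Examples
--------
A minimal sketch: extend the ERFA table from an auto-loaded list.
The number returned depends on how current the ERFA table already
is, so no output is shown here::
    >>> from astropy.utils.iers import LeapSeconds
    >>> table = LeapSeconds.auto_open()  # doctest: +SKIP
    >>> n_update = table.update_erfa_leap_seconds()  # doctest: +SKIP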
"""
if initialize_erfa == 'empty':
# Initializing to empty and then updating is the same as overwriting.
erfa.leap_seconds.set(self)
return len(self)
if initialize_erfa:
erfa.leap_seconds.set()
if initialize_erfa == 'only':
return 0
return erfa.leap_seconds.update(self)
|
fe0be15627ea3cb94470f39f46b00aacfda90708bb272d7d4dc36cd4d33f6957 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Functions to do XML schema and DTD validation. At the moment, this
makes a subprocess call to xmllint. This could use a Python-based
library at some point in the future, if something appropriate could be
found.
"""
import os
import subprocess
def validate_schema(filename, schema_file):
"""
Validates an XML file against a schema or DTD.
Parameters
----------
filename : str
The path to the XML file to validate
schema_file : str
The path to the XML schema or DTD
Returns
-------
returncode, stdout, stderr : int, str, str
Returns the returncode from xmllint and the stdout and stderr
as strings
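Examples
--------
A minimal sketch; this requires ``xmllint`` on the PATH, and the
file names below are placeholders. A zero return code means the
document validated::
    >>> rc, stdout, stderr = validate_schema('doc.xml', 'doc.xsd')  # doctest: +SKIP
    >>> rc == 0  # doctest: +SKIP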
"""
base, ext = os.path.splitext(schema_file)
if ext == '.xsd':
schema_part = '--schema'
elif ext == '.dtd':
schema_part = '--dtdvalid'
else:
raise TypeError("schema_file must be a path to an XML Schema or DTD")
p = subprocess.Popen(
["xmllint", "--noout", "--nonet", schema_part, schema_file, filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode == 127:
raise OSError(
"xmllint not found, so can not validate schema")
elif p.returncode < 0:
from astropy.utils.misc import signal_number_to_name
raise OSError(
"xmllint was terminated by signal '{}'".format(
signal_number_to_name(-p.returncode)))
return p.returncode, stdout, stderr
|
c8bf24c56732fe144ba59d4a097ac5d020c46256d6ef4430b75844fb96a9b35d | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import sys
import stat
import errno
import base64
import random
import shutil
import hashlib
import pathlib
import platform
import tempfile
import warnings
import itertools
import contextlib
import urllib.error
import urllib.parse
import urllib.request
from itertools import islice
from concurrent.futures import ThreadPoolExecutor
from tempfile import NamedTemporaryFile, TemporaryDirectory
import py.path
import pytest
from astropy import units as _u # u is taken
from astropy.config import paths
import astropy.utils.data
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.data import (
CacheMissingWarning,
CacheDamaged,
conf,
_deltemps,
compute_hash,
download_file,
cache_contents,
_tempfilestodel,
get_cached_urls,
is_url_in_cache,
cache_total_size,
get_file_contents,
check_download_cache,
clear_download_cache,
get_pkg_data_fileobj,
get_readable_fileobj,
import_file_to_cache,
export_download_cache,
get_pkg_data_contents,
get_pkg_data_filename,
import_download_cache,
get_free_space_in_dir,
check_free_space_in_dir,
_get_download_cache_loc,
download_files_in_parallel,
is_url,
get_pkg_data_path
)
CI = os.environ.get('CI', False) == "true"
TESTURL = "http://www.astropy.org"
TESTURL2 = "http://www.astropy.org/about.html"
TESTURL_SSL = "https://www.astropy.org"
TESTLOCAL = get_pkg_data_filename(os.path.join("data", "local.dat"))
# NOTE: Python can be built without bz2 or lzma.
from astropy.utils.compat.optional_deps import HAS_BZ2, HAS_LZMA
# For when we need "some" test URLs
FEW = 5
# For stress testing the locking system using multiprocessing
N_PARALLEL_HAMMER = 5 # as high as 500 to replicate a bug
# For stress testing the locking system using threads
# (cheaper, works with coverage)
N_THREAD_HAMMER = 10 # as high as 1000 to replicate a bug
def can_rename_directory_in_use():
with TemporaryDirectory() as d:
d1 = os.path.join(d, "a")
d2 = os.path.join(d, "b")
f1 = os.path.join(d1, "file")
os.mkdir(d1)
with open(f1, "wt") as f:
f.write("some contents\n")
try:
with open(f1, "rt"):
os.rename(d1, d2)
except PermissionError:
return False
else:
return True
CAN_RENAME_DIRECTORY_IN_USE = can_rename_directory_in_use()
def url_to(path):
return pathlib.Path(path).resolve().as_uri()
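# For example (illustrative, POSIX path):
#     url_to("/tmp/f.txt") -> "file:///tmp/f.txt"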
@pytest.fixture
def valid_urls(tmpdir):
def _valid_urls(tmpdir):
for i in itertools.count():
c = os.urandom(16).hex()
fn = os.path.join(tmpdir, "valid_" + str(i))
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
yield u, c
return _valid_urls(tmpdir)
@pytest.fixture
def invalid_urls(tmpdir):
def _invalid_urls(tmpdir):
for i in itertools.count():
fn = os.path.join(tmpdir, "invalid_" + str(i))
if not os.path.exists(fn):
yield url_to(fn)
return _invalid_urls(tmpdir)
@pytest.fixture
def temp_cache(tmpdir):
with paths.set_temp_cache(tmpdir):
yield None
check_download_cache()
def change_tree_permission(d, writable=False):
if writable:
dirperm = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR | stat.S_IWUSR
else:
dirperm = stat.S_IRUSR | stat.S_IXUSR
fileperm = stat.S_IRUSR
for dirpath, dirnames, filenames in os.walk(d):
os.chmod(dirpath, dirperm)
for f in filenames:
os.chmod(os.path.join(dirpath, f), fileperm)
def is_dir_readonly(d):
try:
with NamedTemporaryFile(dir=d):
return False
except PermissionError:
return True
@contextlib.contextmanager
def readonly_dir(d):
try:
change_tree_permission(d, writable=False)
yield
finally:
change_tree_permission(d, writable=True)
@pytest.fixture
def readonly_cache(tmpdir, valid_urls):
with TemporaryDirectory(dir=tmpdir) as d:
# other fixtures use the same tmpdir so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = set(u for u, c in islice(valid_urls, FEW))
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
with readonly_dir(d):
if not is_dir_readonly(d):
pytest.skip("Unable to make directory readonly")
yield urls
assert set(d.iterdir()) == files
check_download_cache()
@pytest.fixture
def fake_readonly_cache(tmpdir, valid_urls, monkeypatch):
def no_mkdir(path, mode=None):
raise OSError(errno.EPERM,
"os.mkdir monkeypatched out")
def no_mkdtemp(*args, **kwargs):
"""On Windows, mkdtemp uses mkdir in a loop and therefore hangs
with it monkeypatched out.
"""
raise OSError(errno.EPERM,
"os.mkdtemp monkeypatched out")
def no_TemporaryDirectory(*args, **kwargs):
raise OSError(errno.EPERM,
"_SafeTemporaryDirectory monkeypatched out")
with TemporaryDirectory(dir=tmpdir) as d:
# other fixtures use the same tmpdir so we need a subdirectory
# to make into the cache
d = pathlib.Path(d)
with paths.set_temp_cache(d):
us = set(u for u, c in islice(valid_urls, FEW))
urls = {u: download_file(u, cache=True) for u in us}
files = set(d.iterdir())
monkeypatch.setattr(os, "mkdir", no_mkdir)
monkeypatch.setattr(tempfile, "mkdtemp", no_mkdtemp)
monkeypatch.setattr(astropy.utils.data,
"_SafeTemporaryDirectory",
no_TemporaryDirectory)
yield urls
assert set(d.iterdir()) == files
check_download_cache()
def test_download_file_basic(valid_urls, temp_cache):
u, c = next(valid_urls)
assert get_file_contents(download_file(u, cache=False)) == c
assert not is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache miss
assert is_url_in_cache(u)
assert get_file_contents(download_file(u, cache=True)) == c # Cache hit
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_download_file_absolute_path(valid_urls, temp_cache):
def is_abs(p):
return p == os.path.abspath(p)
u, c = next(valid_urls)
assert is_abs(download_file(u, cache=False)) # no cache
assert is_abs(download_file(u, cache=True)) # not in cache
assert is_abs(download_file(u, cache=True)) # in cache
for k, v in cache_contents().items():
assert is_abs(v)
def test_unicode_url(valid_urls, temp_cache):
u, c = next(valid_urls)
unicode_url = "http://é—☃—è.com"
download_file(unicode_url, cache=False, sources=[u])
download_file(unicode_url, cache=True, sources=[u])
download_file(unicode_url, cache=True, sources=[])
assert is_url_in_cache(unicode_url)
assert unicode_url in cache_contents()
def test_too_long_url(valid_urls, temp_cache):
u, c = next(valid_urls)
long_url = "http://"+"a"*256+".com"
download_file(long_url, cache=False, sources=[u])
download_file(long_url, cache=True, sources=[u])
download_file(long_url, cache=True, sources=[])
def test_case_collision(valid_urls, temp_cache):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f1 = download_file("http://example.com/thing", cache=True, sources=[u])
f2 = download_file("http://example.com/THING", cache=True, sources=[u2])
assert f1 != f2
assert get_file_contents(f1) != get_file_contents(f2)
def test_domain_name_case(valid_urls, temp_cache):
u, c = next(valid_urls)
download_file("http://Example.com/thing", cache=True, sources=[u])
assert is_url_in_cache("http://EXAMPLE.com/thing")
download_file("http://EXAMPLE.com/thing", cache=True, sources=[])
assert is_url_in_cache("Http://example.com/thing")
download_file("Http://example.com/thing", cache=True, sources=[])
@pytest.mark.remote_data(source="astropy")
def test_download_nocache_from_internet():
fnout = download_file(TESTURL, cache=False)
assert os.path.isfile(fnout)
@pytest.fixture
def a_binary_file(tmp_path):
fn = tmp_path / "file"
b_contents = b"\xde\xad\xbe\xef"
with open(fn, "wb") as f:
f.write(b_contents)
yield fn, b_contents
@pytest.fixture
def a_file(tmp_path):
fn = tmp_path / "file.txt"
contents = "contents\n"
with open(fn, "w") as f:
f.write(contents)
yield fn, contents
def test_temp_cache(tmpdir):
dldir0 = _get_download_cache_loc()
check_download_cache()
with paths.set_temp_cache(tmpdir):
dldir1 = _get_download_cache_loc()
check_download_cache()
assert dldir1 != dldir0
dldir2 = _get_download_cache_loc()
check_download_cache()
assert dldir2 != dldir1
assert dldir2 == dldir0
# Check that things are okay even if we exit via an exception
class Special(Exception):
pass
try:
with paths.set_temp_cache(tmpdir):
dldir3 = _get_download_cache_loc()
check_download_cache()
assert dldir3 == dldir1
raise Special
except Special:
pass
dldir4 = _get_download_cache_loc()
check_download_cache()
assert dldir4 != dldir3
assert dldir4 == dldir0
@pytest.mark.parametrize("parallel", [False, True])
def test_download_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache, parallel):
# This is a combined test because the parallel version triggered a nasty
# bug and I was trying to track it down by comparing with the non-parallel
# version. I think the bug was that the parallel downloader didn't respect
# temporary cache settings.
# Make a big list of test URLs
u, c = next(valid_urls)
# as tuples (URL, right_content, wrong_content)
urls = [(u, c, None)]
# where to download the contents
sources = {}
# Set up some URLs to download where the "true" URL is not in the sources
# list; make the true URL valid with different contents so we can tell if
# it was loaded by mistake.
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
# For many of them the sources list starts with invalid URLs
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
# Now fetch them all
if parallel:
rs = download_files_in_parallel([u for (u, c, c_bad) in urls],
cache=True,
sources=sources)
else:
rs = [
download_file(u, cache=True, sources=sources.get(u, None))
for (u, c, c_bad) in urls
]
assert len(rs) == len(urls)
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
assert is_url_in_cache(u)
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_download_file_threaded_many(temp_cache, valid_urls):
"""Hammer download_file with multiple threaded requests.
The goal is to stress-test the locking system. Normal parallel downloading
also does this but coverage tools lose track of which paths are explored.
"""
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(lambda u: download_file(u, cache=True),
[u for (u, c) in urls]))
check_download_cache()
assert len(r) == len(urls)
for r, (u, c) in zip(r, urls):
assert get_file_contents(r) == c
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_threaded_segfault(valid_urls):
"""Demonstrate urllib's segfault."""
def slurp_url(u):
with urllib.request.urlopen(u) as remote:
block = True
while block:
block = remote.read(1024)
urls = list(islice(valid_urls, N_THREAD_HAMMER))
with ThreadPoolExecutor(max_workers=len(urls)) as P:
list(P.map(lambda u: slurp_url(u),
[u for (u, c) in urls]))
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_download_file_threaded_many_partial_success(
temp_cache, valid_urls, invalid_urls):
"""Hammer download_file with multiple threaded requests.
Because some of these requests fail, the locking context manager is
exercised with exceptions as well as success returns. I do not expect many
surprises from the threaded version, but the process version gave trouble
here.
"""
urls = []
contents = {}
for (u, c), i in islice(zip(valid_urls, invalid_urls), N_THREAD_HAMMER):
urls.append(u)
contents[u] = c
urls.append(i)
def get(u):
try:
return download_file(u, cache=True)
except OSError:
return None
with ThreadPoolExecutor(max_workers=len(urls)) as P:
r = list(P.map(get, urls))
check_download_cache()
assert len(r) == len(urls)
for r, u in zip(r, urls):
if u in contents:
assert get_file_contents(r) == contents[u]
else:
assert r is None
def test_clear_download_cache(valid_urls):
u1, c1 = next(valid_urls)
download_file(u1, cache=True)
u2, c2 = next(valid_urls)
download_file(u2, cache=True)
assert is_url_in_cache(u2)
clear_download_cache(u2)
assert not is_url_in_cache(u2)
assert is_url_in_cache(u1)
u3, c3 = next(valid_urls)
f3 = download_file(u3, cache=True)
assert is_url_in_cache(u3)
clear_download_cache(f3)
assert not is_url_in_cache(u3)
assert is_url_in_cache(u1)
u4, c4 = next(valid_urls)
f4 = download_file(u4, cache=True)
assert is_url_in_cache(u4)
clear_download_cache(compute_hash(f4))
assert not is_url_in_cache(u4)
assert is_url_in_cache(u1)
def test_clear_download_multiple_references_doesnt_corrupt_storage(temp_cache, tmpdir):
"""Check that files with the same hash don't confuse the storage."""
content = "Test data; doesn't matter much.\n"
def make_url():
with NamedTemporaryFile("w", dir=str(tmpdir), delete=False) as f:
f.write(content)
url = url_to(f.name)
clear_download_cache(url)
filename = download_file(url, cache=True)
return url, filename
a_url, a_filename = make_url()
clear_download_cache(a_filename)
assert not is_url_in_cache(a_url)
f_url, f_filename = make_url()
g_url, g_filename = make_url()
assert f_url != g_url
assert is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
clear_download_cache(f_url)
assert not is_url_in_cache(f_url)
assert is_url_in_cache(g_url)
assert os.path.exists(
g_filename
), "Contents should not be deleted while a reference exists"
clear_download_cache(g_url)
assert not os.path.exists(
g_filename
), "No reference exists any more, file should be deleted"
@pytest.mark.parametrize("use_cache", [False, True])
def test_download_file_local_cache_survives(tmpdir, temp_cache, use_cache):
"""Confirm that downloading a local file does not delete it.
When implemented with urlretrieve (rather than urlopen) local files are
not copied to create temporaries, so importing them to the cache deleted
the original from wherever it was in the filesystem. I lost some built-in
astropy data.
"""
fn = tmpdir / "file"
contents = "some text"
with open(fn, "w") as f:
f.write(contents)
u = url_to(fn)
f = download_file(u, cache=use_cache)
assert fn not in _tempfilestodel, "File should not be deleted!"
assert os.path.isfile(fn), "File should not be deleted!"
assert get_file_contents(f) == contents
def test_sources_normal(temp_cache, valid_urls, invalid_urls):
primary, contents = next(valid_urls)
fallback1 = next(invalid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_fallback(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_ignore_primary(temp_cache, valid_urls, invalid_urls):
primary, bogus = next(valid_urls)
fallback1, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[fallback1])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
def test_sources_multiple(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2, contents = next(valid_urls)
f = download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert get_file_contents(f) == contents
assert is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_sources_multiple_missing(temp_cache, valid_urls, invalid_urls):
primary = next(invalid_urls)
fallback1 = next(invalid_urls)
fallback2 = next(invalid_urls)
with pytest.raises(urllib.error.URLError):
download_file(primary, cache=True, sources=[primary, fallback1, fallback2])
assert not is_url_in_cache(primary)
assert not is_url_in_cache(fallback1)
assert not is_url_in_cache(fallback2)
def test_update_url(tmpdir, temp_cache):
with TemporaryDirectory(dir=tmpdir) as d:
f_name = os.path.join(d, "f")
with open(f_name, "w") as f:
f.write("old")
f_url = url_to(f.name)
assert get_file_contents(download_file(f_url, cache=True)) == "old"
with open(f_name, "w") as f:
f.write("new")
assert get_file_contents(download_file(f_url, cache=True)) == "old"
assert get_file_contents(download_file(f_url, cache="update")) == "new"
# Now the URL doesn't exist any more.
assert not os.path.exists(f_name)
with pytest.raises(urllib.error.URLError):
# Direct download should fail
download_file(f_url, cache=False)
assert get_file_contents(download_file(f_url, cache=True)) == "new", \
"Cached version should still exist"
with pytest.raises(urllib.error.URLError):
# cannot download new version to check for updates
download_file(f_url, cache="update")
assert get_file_contents(download_file(f_url, cache=True)) == "new", \
"Failed update should not remove the current version"
@pytest.mark.remote_data(source="astropy")
def test_download_noprogress():
fnout = download_file(TESTURL, cache=False, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_cache():
download_dir = _get_download_cache_loc()
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache("http://this_was_never_downloaded_before.com")
# Make sure lockdir was released
lockdir = os.path.join(download_dir, "lock")
assert not os.path.isdir(lockdir), "Cache dir lock was not released!"
@pytest.mark.remote_data(source="astropy")
def test_download_certificate_verification_failed():
"""Tests for https://github.com/astropy/astropy/pull/10434"""
# First test the expected exception when download fails due to a
# certificate verification error; we simulate this by passing a bogus
# CA directory to the ssl_context argument
ssl_context = {'cafile': None, 'capath': '/does/not/exist'}
msg = f'Verification of TLS/SSL certificate at {TESTURL_SSL} failed'
with pytest.raises(urllib.error.URLError, match=msg):
download_file(TESTURL_SSL, cache=False, ssl_context=ssl_context)
with pytest.warns(AstropyWarning, match=msg) as warning_lines:
fnout = download_file(TESTURL_SSL, cache=False,
ssl_context=ssl_context, allow_insecure=True)
assert len(warning_lines) == 1
assert os.path.isfile(fnout)
def test_download_cache_after_clear(tmpdir, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
download_dir = _get_download_cache_loc()
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(testurl, cache=True)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_download_parallel_from_internet_works(temp_cache):
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = "intersphinx/README"
urls = []
sources = {}
for s in ["", fileloc]:
urls.append(main_url + s)
sources[urls[-1]] = [urls[-1], mirror_url+s]
fnout = download_files_in_parallel(urls, sources=sources)
assert all([os.path.isfile(f) for f in fnout]), fnout
@pytest.mark.parametrize("method", [None, "spawn"])
def test_download_parallel_fills_cache(tmpdir, valid_urls, method):
urls = []
# tmpdir is shared between many tests, and that can cause weird
# interactions if we set the temporary cache too directly
with paths.set_temp_cache(tmpdir):
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel(
[u for (u, c) in urls], multiprocessing_start_method=method
)
assert len(rs) == len(urls)
url_set = set(u for (u, c) in urls)
assert url_set <= set(get_cached_urls())
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
check_download_cache()
assert not url_set.intersection(get_cached_urls())
check_download_cache()
def test_download_parallel_with_empty_sources(valid_urls, temp_cache):
urls = []
sources = {}
for um, c in islice(valid_urls, FEW):
assert not is_url_in_cache(um)
urls.append((um, c))
rs = download_files_in_parallel([u for (u, c) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c) in urls)
# assert u <= set(get_cached_urls())
check_download_cache()
for r, (u, c) in zip(rs, urls):
assert get_file_contents(r) == c
def test_download_parallel_with_sources_and_bogus_original(
valid_urls, invalid_urls, temp_cache
):
u, c = next(valid_urls)
urls = [(u, c, None)]
sources = {}
for i, (um, c_bad) in enumerate(islice(valid_urls, FEW)):
assert not is_url_in_cache(um)
sources[um] = []
for iu in islice(invalid_urls, i):
sources[um].append(iu)
u, c = next(valid_urls)
sources[um].append(u)
urls.append((um, c, c_bad))
rs = download_files_in_parallel([u for (u, c, c_bad) in urls], sources=sources)
assert len(rs) == len(urls)
# u = set(u for (u, c, c_bad) in urls)
# assert u <= set(get_cached_urls())
for r, (u, c, c_bad) in zip(rs, urls):
assert get_file_contents(r) == c
assert get_file_contents(r) != c_bad
def test_download_parallel_many(temp_cache, valid_urls):
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
r = download_files_in_parallel([u for (u, c) in td])
assert len(r) == len(td)
for r, (u, c) in zip(r, td):
assert get_file_contents(r) == c
def test_download_parallel_partial_success(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful download works.
Even in the presence of many requested URLs, presumably hitting all the
parallelism this system can manage, a download failure leads to a tidy
shutdown.
"""
td = list(islice(valid_urls, N_PARALLEL_HAMMER))
u_bad = next(invalid_urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel([u_bad] + [u for (u, c) in td])
# Actually some files may get downloaded, others not.
# Is this good? Should we stubbornly keep trying?
# assert not any([is_url_in_cache(u) for (u, c) in td])
def test_download_parallel_partial_success_lock_safe(temp_cache, valid_urls, invalid_urls):
"""Check that a partially successful parallel download leaves the cache unlocked.
This needs to be repeated many times because race conditions are what cause
this sort of thing, especially situations where a process might be forcibly
shut down while it holds the lock.
"""
s = random.getstate()
try:
random.seed(0)
for _ in range(N_PARALLEL_HAMMER):
td = list(islice(valid_urls, FEW))
u_bad = next(invalid_urls)
urls = [u_bad] + [u for (u, c) in td]
random.shuffle(urls)
with pytest.raises(urllib.request.URLError):
download_files_in_parallel(urls)
finally:
random.setstate(s)
def test_download_parallel_update(temp_cache, tmpdir):
td = []
for i in range(N_PARALLEL_HAMMER):
c = f"{i:04d}"
fn = os.path.join(tmpdir, c)
with open(fn, "w") as f:
f.write(c)
u = url_to(fn)
clear_download_cache(u)
td.append((fn, u, c))
r1 = download_files_in_parallel([u for (fn, u, c) in td])
assert len(r1) == len(td)
for r_1, (fn, u, c) in zip(r1, td):
assert get_file_contents(r_1) == c
td2 = []
for (fn, u, c) in td:
c_plus = c + " updated"
fn = os.path.join(tmpdir, c)
with open(fn, "w") as f:
f.write(c_plus)
td2.append((fn, u, c, c_plus))
r2 = download_files_in_parallel([u for (fn, u, c) in td], cache=True)
assert len(r2) == len(td)
for r_2, (fn, u, c, c_plus) in zip(r2, td2):
assert get_file_contents(r_2) == c
assert c != c_plus
r3 = download_files_in_parallel([u for (fn, u, c) in td], cache="update")
assert len(r3) == len(td)
for r_3, (fn, u, c, c_plus) in zip(r3, td2):
assert get_file_contents(r_3) != c
assert get_file_contents(r_3) == c_plus
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_update_parallel(temp_cache, valid_urls):
u, c = next(valid_urls)
u2, c2 = next(valid_urls)
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(i):
return download_file(u, cache="update", sources=[u2])
with ThreadPoolExecutor(max_workers=N_THREAD_HAMMER) as P:
r = set(P.map(update, range(N_THREAD_HAMMER)))
check_download_cache()
for f in r:
assert get_file_contents(f) == c2
@pytest.mark.skipif((sys.platform.startswith('win') and CI),
reason="flaky cache error on Windows CI")
def test_update_parallel_multi(temp_cache, valid_urls):
u, c = next(valid_urls)
iucs = list(islice(valid_urls, N_THREAD_HAMMER))
f = download_file(u, cache=True)
assert get_file_contents(f) == c
def update(uc):
u2, c2 = uc
return download_file(u, cache="update", sources=[u2]), c2
with ThreadPoolExecutor(max_workers=len(iucs)) as P:
r = list(P.map(update, iucs))
check_download_cache()
assert any(get_file_contents(f) == c for (f, c) in r)
@pytest.mark.remote_data(source="astropy")
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding="utf-8") as page:
assert page.read().find("Astropy") > -1
def test_find_by_hash(valid_urls, temp_cache):
testurl, contents = next(valid_urls)
p = download_file(testurl, cache=True)
hash = compute_hash(p)
hashstr = "hash/" + hash
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(fnout)
assert not os.path.isfile(fnout)
@pytest.mark.remote_data(source="astropy")
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename(
"kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli"
)
@pytest.mark.parametrize("package", [None, "astropy", "numpy"])
def test_get_invalid(package):
"""Test can create a file path to an invalid file."""
path = get_pkg_data_path("kjfrhgjkla", "hgiulrhgiu", package=package)
assert not os.path.isfile(path)
assert not os.path.isdir(path)
# Package data functions
@pytest.mark.parametrize(
("filename"), ["local.dat", "local.dat.gz", "local.dat.bz2", "local.dat.xz"]
)
def test_local_data_obj(filename):
if ((not HAS_BZ2 and "bz2" in filename) or
(not HAS_LZMA and "xz" in filename)):
with pytest.raises(ValueError) as e:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
assert " format files are not supported" in str(e.value)
else:
with get_pkg_data_fileobj(
os.path.join("data", filename), encoding="binary"
) as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.fixture(params=["invalid.dat.bz2", "invalid.dat.gz"])
def bad_compressed(request, tmpdir):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b"BZhinvalid"
gz_content = b"\x1f\x8b\x08invalid"
datafile = tmpdir.join(request.param)
filename = datafile.strpath
if filename.endswith(".bz2"):
contents = bz_content
elif filename.endswith(".gz"):
contents = gz_content
else:
contents = "invalid"
datafile.write(contents, mode="wb")
return filename
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith(".bz2")
is_xz = bad_compressed.endswith(".xz")
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_LZMA and is_xz):
with pytest.raises(ModuleNotFoundError,
match=r'does not provide the [lb]z[2m]a? module\.'):
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
f.read()
else:
with get_readable_fileobj(bad_compressed, encoding="binary") as f:
assert f.read().rstrip().endswith(b"invalid")
def test_local_data_name():
assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith("local.dat")
# TODO: if in the future, the root data/ directory is added in, the below
# test should be uncommented and the README.rst should be replaced with
# whatever file is there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), "data")
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert os.path.normcase(filename) == (
os.path.normcase(os.path.join(data_dir, "test_package", "data", "foo.txt"))
)
finally:
sys.path.pop(0)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
with pytest.raises(RuntimeError):
get_pkg_data_filename("../../../data/README.rst")
def test_compute_hash(tmpdir):
rands = b"1234567890abcdefghijklmnopqrstuvwxyz"
filename = tmpdir.join("tmp.dat").strpath
with open(filename, "wb") as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
def test_get_pkg_data_contents():
with get_pkg_data_fileobj("data/local.dat") as f:
contents1 = f.read()
contents2 = get_pkg_data_contents("data/local.dat")
assert contents1 == contents2
@pytest.mark.remote_data(source="astropy")
def test_data_noastropy_fallback(monkeypatch):
"""
Tests to make sure the default behavior when the cache directory can't
be located is correct
"""
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv("XDG_CONFIG_HOME", "foo")
monkeypatch.delenv("XDG_CONFIG_HOME")
monkeypatch.setenv("XDG_CACHE_HOME", "bar")
monkeypatch.delenv("XDG_CACHE_HOME")
monkeypatch.setattr(paths.set_temp_config, "_temp_path", None)
monkeypatch.setattr(paths.set_temp_cache, "_temp_path", None)
# make sure the _find_or_create_astropy_dir function fails as though the
# astropy dir could not be accessed
def osraiser(dirnm, linkto, pkgname=None):
raise OSError()
monkeypatch.setattr(paths, '_find_or_create_root_dir', osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir(rootname='astropy')
with pytest.warns(CacheMissingWarning) as warning_lines:
fnout = download_file(TESTURL, cache=True)
n_warns = len(warning_lines)
partial_warn_msgs = ['remote data cache could not be accessed', 'temporary file']
if n_warns == 4:
partial_warn_msgs.extend(['socket', 'socket'])
for wl in warning_lines:
cur_w = str(wl).lower()
for i, partial_msg in enumerate(partial_warn_msgs):
if partial_msg in cur_w:
del partial_warn_msgs[i]
break
assert len(partial_warn_msgs) == 0, f'Got some unexpected warnings: {partial_warn_msgs}'
assert n_warns in (2, 4), f'Expected 2 or 4 warnings, got {n_warns}'
assert os.path.isfile(fnout)
# clearing the cache should be a no-op that doesn't affect fnout
with pytest.warns(CacheMissingWarning,
match=r".*Not clearing data cache - cache inaccessible.*"):
clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
# now remove it so tests don't clutter up the temp dir this should get
# called at exit, anyway, but we do it here just to make sure it's working
# correctly
_deltemps()
assert not os.path.isfile(fnout)
# now try with no cache
fnnocache = download_file(TESTURL, cache=False)
with open(fnnocache, "rb") as page:
assert page.read().decode("utf-8").find("Astropy") > -1
# no warnings should be raised in fileobj because cache is unnecessary
@pytest.mark.parametrize(
("filename"),
[
"unicode.txt",
"unicode.txt.gz",
pytest.param(
"unicode.txt.bz2",
marks=pytest.mark.xfail(not HAS_BZ2, reason="no bz2 support"),
),
pytest.param(
"unicode.txt.xz",
marks=pytest.mark.xfail(not HAS_LZMA, reason="no lzma support"),
),
],
)
def test_read_unicode(filename):
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="utf-8")
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join("data", filename), encoding="binary")
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
assert x == (
b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0"
b"\xd7\x95\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:]
)
def test_compressed_stream():
gzipped_data = (
b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA=="
)
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b""
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding="binary") as f:
f.readline()
assert f.read().rstrip() == b"CONTENT"
@pytest.mark.remote_data(source="astropy")
def test_invalid_location_download_raises_urlerror():
"""
checks that download_file gives a URLError and not an AttributeError,
as its code pathway involves some fiddling with the exception.
"""
with pytest.raises(urllib.error.URLError):
download_file("http://www.astropy.org/nonexistentfile")
def test_invalid_location_download_noconnect():
"""
checks that download_file gives an OSError if the socket is blocked
"""
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file("http://astropy.org/nonexistentfile")
@pytest.mark.remote_data(source="astropy")
def test_is_url_in_cache_remote():
assert not is_url_in_cache("http://astropy.org/nonexistentfile")
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_is_url_in_cache_local(temp_cache, valid_urls, invalid_urls):
testurl, contents = next(valid_urls)
nonexistent = next(invalid_urls)
assert not is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
download_file(testurl, cache=True, show_progress=False)
assert is_url_in_cache(testurl)
assert not is_url_in_cache(nonexistent)
# If non-deterministic failure happens see
# https://github.com/astropy/astropy/issues/9765
def test_check_download_cache(tmpdir, temp_cache, valid_urls, invalid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = os.path.join(tmpdir, "the.zip")
clear_download_cache()
assert not check_download_cache()
download_file(testurl, cache=True)
check_download_cache()
download_file(testurl2, cache=True)
check_download_cache()
export_download_cache(zip_file_name, [testurl, testurl2])
check_download_cache()
clear_download_cache(testurl2)
check_download_cache()
import_download_cache(zip_file_name, [testurl])
check_download_cache()
def test_export_import_roundtrip_one(tmpdir, temp_cache, valid_urls):
testurl, contents = next(valid_urls)
f = download_file(testurl, cache=True, show_progress=False)
assert get_file_contents(f) == contents
initial_urls_in_cache = set(get_cached_urls())
zip_file_name = os.path.join(tmpdir, "the.zip")
export_download_cache(zip_file_name, [testurl])
clear_download_cache(testurl)
import_download_cache(zip_file_name)
assert is_url_in_cache(testurl)
assert set(get_cached_urls()) == initial_urls_in_cache
assert (
get_file_contents(download_file(testurl, cache=True, show_progress=False))
== contents
)
def test_export_url_not_present(temp_cache, valid_urls):
testurl, contents = next(valid_urls)
with NamedTemporaryFile("wb") as zip_file:
assert not is_url_in_cache(testurl)
with pytest.raises(KeyError):
export_download_cache(zip_file, [testurl])
def test_import_one(tmpdir, temp_cache, valid_urls):
testurl, testurl_contents = next(valid_urls)
testurl2, testurl2_contents = next(valid_urls)
zip_file_name = os.path.join(tmpdir, "the.zip")
download_file(testurl, cache=True)
download_file(testurl2, cache=True)
assert is_url_in_cache(testurl2)
export_download_cache(zip_file_name, [testurl, testurl2])
clear_download_cache(testurl)
clear_download_cache(testurl2)
import_download_cache(zip_file_name, [testurl])
assert is_url_in_cache(testurl)
assert not is_url_in_cache(testurl2)
def test_export_import_roundtrip(tmpdir, temp_cache, valid_urls):
zip_file_name = os.path.join(tmpdir, "the.zip")
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
export_download_cache(zip_file_name)
clear_download_cache()
import_download_cache(zip_file_name)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_import_roundtrip_stream(temp_cache, valid_urls):
for u, c in islice(valid_urls, FEW):
download_file(u, cache=True)
initial_urls_in_cache = set(get_cached_urls())
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
clear_download_cache()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == initial_urls_in_cache
def test_export_overwrite_flag_works(temp_cache, valid_urls, tmpdir):
fn = tmpdir / "f.zip"
c = b"Some contents\nto check later"
with open(fn, "wb") as f:
f.write(c)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with pytest.raises(FileExistsError):
export_download_cache(fn)
assert get_file_contents(fn, encoding='binary') == c
export_download_cache(fn, overwrite=True)
assert get_file_contents(fn, encoding='binary') != c
def test_export_import_roundtrip_different_location(tmpdir, valid_urls):
original_cache = tmpdir / "original"
os.mkdir(original_cache)
zip_file_name = tmpdir / "the.zip"
urls = list(islice(valid_urls, FEW))
initial_urls_in_cache = set(u for (u, c) in urls)
with paths.set_temp_cache(original_cache):
for u, c in urls:
download_file(u, cache=True)
assert set(get_cached_urls()) == initial_urls_in_cache
export_download_cache(zip_file_name)
new_cache = tmpdir / "new"
os.mkdir(new_cache)
with paths.set_temp_cache(new_cache):
import_download_cache(zip_file_name)
check_download_cache()
assert set(get_cached_urls()) == initial_urls_in_cache
for (u, c) in urls:
assert get_file_contents(download_file(u, cache=True)) == c
def test_cache_size_is_zero_when_empty(temp_cache):
assert not get_cached_urls()
assert cache_total_size() == 0
def test_cache_size_changes_correctly_when_files_are_added_and_removed(
temp_cache, valid_urls
):
u, c = next(valid_urls)
clear_download_cache(u)
s_i = cache_total_size()
download_file(u, cache=True)
assert cache_total_size() == s_i + len(c) + len(u.encode("utf-8"))
clear_download_cache(u)
assert cache_total_size() == s_i
def test_cache_contents_agrees_with_get_urls(temp_cache, valid_urls):
r = []
for a, a_c in islice(valid_urls, FEW):
a_f = download_file(a, cache=True)
r.append((a, a_c, a_f))
assert set(cache_contents().keys()) == set(get_cached_urls())
for (u, c, h) in r:
assert cache_contents()[u] == h
@pytest.mark.parametrize('desired_size',
[1_000_000_000_000_000_000, 1 * _u.Ebyte])
def test_free_space_checker_huge(tmpdir, desired_size):
with pytest.raises(OSError):
check_free_space_in_dir(str(tmpdir), desired_size)
def test_get_free_space_file_directory(tmpdir):
fn = tmpdir / "file"
with open(fn, "w"):
pass
with pytest.raises(OSError):
get_free_space_in_dir(str(fn))
free_space = get_free_space_in_dir(str(tmpdir))
assert free_space > 0 and not hasattr(free_space, 'unit')
# TODO: If unit=True starts to auto-guess prefix, this needs updating.
free_space = get_free_space_in_dir(str(tmpdir), unit=True)
assert free_space > 0 and free_space.unit == _u.byte
free_space = get_free_space_in_dir(str(tmpdir), unit=_u.Mbit)
assert free_space > 0 and free_space.unit == _u.Mbit
def test_download_file_bogus_settings(invalid_urls, temp_cache):
u = next(invalid_urls)
with pytest.raises(KeyError):
download_file(u, sources=[])
def test_download_file_local_directory(tmpdir):
"""Make sure we get a URLError rather than OSError even if it's a
local directory."""
with pytest.raises(urllib.request.URLError):
download_file(url_to(tmpdir))
def test_download_file_schedules_deletion(valid_urls):
u, c = next(valid_urls)
f = download_file(u)
assert f in _tempfilestodel
# how to test deletion actually occurs?
def test_clear_download_cache_refuses_to_delete_outside_the_cache(tmpdir):
fn = os.path.abspath(os.path.join(tmpdir, "file"))
with open(fn, "w") as f:
f.write("content")
assert os.path.exists(fn)
with pytest.raises(RuntimeError):
clear_download_cache(fn)
assert os.path.exists(fn)
def test_check_download_cache_finds_bogus_entries(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf = os.path.abspath(os.path.join(dldir, "bogus"))
with open(bf, "wt") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_finds_bogus_subentries(temp_cache, valid_urls):
u, c = next(valid_urls)
f = download_file(u, cache=True)
bf = os.path.abspath(os.path.join(os.path.dirname(f), "bogus"))
with open(bf, "wt") as f:
f.write("bogus file that exists")
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert bf in e.value.bad_files
clear_download_cache()
def test_check_download_cache_cleanup(temp_cache, valid_urls):
u, c = next(valid_urls)
fn = download_file(u, cache=True)
dldir = _get_download_cache_loc()
bf1 = os.path.abspath(os.path.join(dldir, "bogus1"))
with open(bf1, "wt") as f:
f.write("bogus file that exists")
bf2 = os.path.abspath(os.path.join(os.path.dirname(fn), "bogus2"))
with open(bf2, "wt") as f:
f.write("other bogus file that exists")
bf3 = os.path.abspath(os.path.join(dldir, "contents"))
with open(bf3, "wt") as f:
f.write("awkwardly-named bogus file that exists")
u2, c2 = next(valid_urls)
f2 = download_file(u, cache=True)
os.unlink(f2)
bf4 = os.path.dirname(f2)
with pytest.raises(CacheDamaged) as e:
check_download_cache()
assert set(e.value.bad_files) == set([bf1, bf2, bf3, bf4])
for bf in e.value.bad_files:
clear_download_cache(bf)
# download cache will be checked on exit
def test_download_cache_update_doesnt_damage_cache(temp_cache, valid_urls):
u, _ = next(valid_urls)
download_file(u, cache=True)
download_file(u, cache="update")
def test_cache_dir_is_actually_a_file(tmpdir, valid_urls):
"""Ensure that bogus cache settings are handled sensibly.
Because the user can specify the cache location in a config file, and
because they might try to deduce the location by looking around at what's
in their directory tree, and because the cache directory is actually several
tree levels down from the directory set in the config file, it's important
to check what happens if each of the steps in the path is wrong somehow.
"""
def check_quietly_ignores_bogus_cache():
"""We want a broken cache to produce a warning but then astropy should
act like there isn't a cache.
"""
with pytest.warns(CacheMissingWarning):
assert not get_cached_urls()
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache("http://www.example.com/")
with pytest.warns(CacheMissingWarning):
assert not cache_contents()
with pytest.warns(CacheMissingWarning):
u, c = next(valid_urls)
r = download_file(u, cache=True)
assert get_file_contents(r) == c
# check the filename r appears in a warning message?
# check r is added to the delete_at_exit list?
# in fact should there be testing of the delete_at_exit mechanism,
# as far as that is possible?
with pytest.warns(CacheMissingWarning):
assert not is_url_in_cache(u)
with pytest.warns(CacheMissingWarning):
with pytest.raises(OSError):
check_download_cache()
dldir = _get_download_cache_loc()
# set_temp_cache acts weird if it is pointed at a file (see below)
# but we want to see what happens when the cache is pointed
# at a file instead of a directory, so make a directory we can
# replace later.
fn = str(tmpdir / "file")
ct = "contents\n"
os.mkdir(fn)
with paths.set_temp_cache(fn):
shutil.rmtree(fn)
with open(fn, "w") as f:
f.write(ct)
with pytest.raises(OSError):
paths.get_cache_dir()
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(fn) == ct, "File should not be harmed."
# See what happens when set_temp_cache is pointed at a file
with pytest.raises(OSError):
with paths.set_temp_cache(fn):
pass
assert dldir == _get_download_cache_loc()
assert get_file_contents(str(fn)) == ct
# Now the cache directory is normal but the subdirectory it wants
# to make is a file
cd = str(tmpdir / "astropy")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto one level deeper
os.makedirs(cd)
cd = str(tmpdir / "astropy" / "download")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
# Ditto another level deeper
os.makedirs(cd)
cd = str(tmpdir / "astropy" / "download" / "url")
with open(cd, "w") as f:
f.write(ct)
with paths.set_temp_cache(tmpdir):
check_quietly_ignores_bogus_cache()
assert dldir == _get_download_cache_loc()
assert get_file_contents(cd) == ct
os.remove(cd)
def test_get_fileobj_str(a_file):
fn, c = a_file
with get_readable_fileobj(str(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_localpath(a_file):
fn, c = a_file
with get_readable_fileobj(py.path.local(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_pathlib(a_file):
fn, c = a_file
with get_readable_fileobj(pathlib.Path(fn)) as rf:
assert rf.read() == c
def test_get_fileobj_binary(a_binary_file):
fn, c = a_binary_file
with get_readable_fileobj(fn, encoding="binary") as rf:
assert rf.read() == c
def test_get_fileobj_already_open_text(a_file):
fn, c = a_file
with open(fn, "r") as f:
with get_readable_fileobj(f) as rf:
with pytest.raises(TypeError):
rf.read()
def test_get_fileobj_already_open_binary(a_file):
fn, c = a_file
with open(fn, "rb") as f:
with get_readable_fileobj(f) as rf:
assert rf.read() == c
def test_get_fileobj_binary_already_open_binary(a_binary_file):
fn, c = a_binary_file
with open(fn, "rb") as f:
with get_readable_fileobj(f, encoding="binary") as rf:
assert rf.read() == c
def test_cache_contents_not_writable(temp_cache, valid_urls):
c = cache_contents()
with pytest.raises(TypeError):
c["foo"] = 7
u, _ = next(valid_urls)
download_file(u, cache=True)
c = cache_contents()
assert u in c
with pytest.raises(TypeError):
c["foo"] = 7
def test_cache_relocatable(tmpdir, valid_urls):
u, c = next(valid_urls)
d1 = tmpdir / "1"
d2 = tmpdir / "2"
os.mkdir(d1)
with paths.set_temp_cache(d1):
p1 = download_file(u, cache=True)
assert is_url_in_cache(u)
assert get_file_contents(p1) == c
shutil.copytree(d1, d2)
clear_download_cache()
with paths.set_temp_cache(d2):
assert is_url_in_cache(u)
p2 = download_file(u, cache=True)
assert p1 != p2
assert os.path.exists(p2)
clear_download_cache(p2)
check_download_cache()
def test_get_readable_fileobj_cleans_up_temporary_files(tmpdir, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
url = url_to(TESTLOCAL)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, "tempdir", str(tmpdir))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url) as f:
f.read()
# Get listing of files in temporary directory
tempdir_listing = tmpdir.listdir()
# Assert that the temporary file was empty after get_readable_fileobj()
# context manager finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(TESTLOCAL)
with get_readable_fileobj(fpath) as f:
assert f.read().rstrip() == (
"This file is used in the test_local_data_* testing functions\nCONTENT"
)
def test_nested_get_readable_fileobj():
"""Ensure fileobj state is as expected when get_readable_fileobj()
is called inside another get_readable_fileobj().
"""
with get_readable_fileobj(TESTLOCAL, encoding="binary") as fileobj:
with get_readable_fileobj(fileobj, encoding="UTF-8") as fileobj2:
fileobj2.seek(1)
fileobj.seek(1)
# Theoretically, fileobj2 should be closed already here but it is not.
# See https://github.com/astropy/astropy/pull/8675.
# UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.
# assert fileobj2.closed
assert fileobj.closed and fileobj2.closed
def test_download_file_wrong_size(monkeypatch):
@contextlib.contextmanager
def mockurl(remote_url, timeout=None):
yield MockURL()
def mockurl_builder(*args, tlscontext=None, **kwargs):
mock_opener = type('MockOpener', (object,), {})()
mock_opener.open = mockurl
return mock_opener
class MockURL:
def __init__(self):
self.reader = io.BytesIO(b"a" * real_length)
def info(self):
return {"Content-Length": str(report_length)}
def read(self, length=None):
return self.reader.read(length)
monkeypatch.setattr(astropy.utils.data, "_build_urlopener", mockurl_builder)
with pytest.raises(urllib.error.ContentTooShortError):
report_length = 1024
real_length = 1023
download_file(TESTURL, cache=False)
with pytest.raises(urllib.error.URLError):
report_length = 1023
real_length = 1024
download_file(TESTURL, cache=False)
report_length = 1023
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
report_length = None
real_length = 1023
fn = download_file(TESTURL, cache=False)
with open(fn, "rb") as f:
assert f.read() == b"a" * real_length
def test_can_make_directories_readonly(tmpdir):
try:
with readonly_dir(tmpdir):
assert is_dir_readonly(tmpdir)
except AssertionError:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip(
"We are root, we can't make a directory un-writable with chmod."
)
elif platform.system() == "Windows":
pytest.skip(
"It seems we can't make a driectory un-writable under Windows "
"with chmod, in spite of the documentation."
)
else:
raise
def test_can_make_files_readonly(tmpdir):
fn = tmpdir / "test"
c = "contents\n"
with open(fn, "w") as f:
f.write(c)
with readonly_dir(tmpdir):
try:
with open(fn, "w+") as f:
f.write("more contents\n")
except PermissionError:
pass
else:
if hasattr(os, "geteuid") and os.geteuid() == 0:
pytest.skip("We are root, we can't make a file un-writable with chmod.")
assert get_file_contents(fn) == c
def test_read_cache_readonly(readonly_cache):
assert cache_contents() == readonly_cache
def test_download_file_cache_readonly(readonly_cache):
for u in readonly_cache:
f = download_file(u, cache=True)
assert f == readonly_cache[u]
def test_import_file_cache_readonly(readonly_cache, tmpdir):
filename = os.path.join(tmpdir, "test-file")
content = "Some text or other"
url = "http://example.com/"
with open(filename, "wt") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_readonly_cache_miss(readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert get_file_contents(f) == c
assert not is_url_in_cache(u)
def test_download_file_cache_readonly_update(readonly_cache):
for u in readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != readonly_cache[u]
assert compute_hash(f) == compute_hash(readonly_cache[u])
def test_check_download_cache_works_if_readonly(readonly_cache):
check_download_cache()
# On Windows I can't make directories readonly. On CircleCI I can't make
# anything readonly because the test suite runs as root. So on those platforms
# none of the "real" tests above can be run. I can use monkeypatch to trigger
# the readonly code paths, see the "fake" versions of the tests below, but I
# don't totally trust those to completely explore what happens either, so we
# have both. I couldn't see an easy way to parameterize over fixtures and share
# tests.
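# Not part of the original suite: a minimal sketch (using pytest's documented
# request.getfixturevalue API) of how the paired real/fake tests below could
# in principle be shared by parametrizing over fixture *names* and resolving
# them at runtime. Illustrative only; the explicit pairs are kept as-is.
@pytest.mark.parametrize("cache_fixture", ["readonly_cache", "fake_readonly_cache"])
def test_read_cache_any_readonly_sketch(cache_fixture, request):
    # Resolve the named fixture at runtime and compare against the live cache.
    contents = request.getfixturevalue(cache_fixture)
    assert cache_contents() == contents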
def test_read_cache_fake_readonly(fake_readonly_cache):
assert cache_contents() == fake_readonly_cache
def test_download_file_cache_fake_readonly(fake_readonly_cache):
for u in fake_readonly_cache:
f = download_file(u, cache=True)
assert f == fake_readonly_cache[u]
def test_mkdtemp_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
tempfile.mkdtemp()
def test_TD_cache_fake_readonly(fake_readonly_cache):
with pytest.raises(OSError):
with TemporaryDirectory():
pass
def test_import_file_cache_fake_readonly(fake_readonly_cache, tmpdir):
filename = os.path.join(tmpdir, "test-file")
content = "Some text or other"
url = "http://example.com/"
with open(filename, "wt") as f:
f.write(content)
with pytest.raises(OSError):
import_file_to_cache(url, filename, remove_original=True)
assert not is_url_in_cache(url)
def test_download_file_cache_fake_readonly_cache_miss(fake_readonly_cache, valid_urls):
u, c = next(valid_urls)
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache=True)
assert not is_url_in_cache(u)
assert get_file_contents(f) == c
def test_download_file_cache_fake_readonly_update(fake_readonly_cache):
for u in fake_readonly_cache:
with pytest.warns(CacheMissingWarning):
f = download_file(u, cache="update")
assert f != fake_readonly_cache[u]
assert compute_hash(f) == compute_hash(fake_readonly_cache[u])
def test_check_download_cache_works_if_fake_readonly(fake_readonly_cache):
check_download_cache()
def test_pkgname_isolation(temp_cache, valid_urls):
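    # Two caches coexist here: the default astropy cache and one under
    # pkgname=a. Downloads, lookups, and removals against one must not leak
    # into the other.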
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True, pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() < cache_total_size(pkgname=a)
for u, _ in islice(valid_urls, FEW+1):
download_file(u, cache=True)
assert len(get_cached_urls()) == FEW+1
assert len(get_cached_urls(pkgname=a)) == FEW
assert cache_total_size() > cache_total_size(pkgname=a)
assert set(get_cached_urls()) == set(cache_contents().keys())
assert set(get_cached_urls(pkgname=a)) == set(cache_contents(pkgname=a).keys())
for i in get_cached_urls():
assert is_url_in_cache(i)
assert not is_url_in_cache(i, pkgname=a)
for i in get_cached_urls(pkgname=a):
assert not is_url_in_cache(i)
assert is_url_in_cache(i, pkgname=a)
# FIXME: need to break a cache to test whether we check the right one
check_download_cache()
check_download_cache(pkgname=a)
# FIXME: check that cache='update' works
u = get_cached_urls()[0]
with pytest.raises(KeyError):
download_file(u, cache=True, sources=[], pkgname=a)
clear_download_cache(u, pkgname=a)
assert len(get_cached_urls()) == FEW+1, "wrong pkgname should do nothing"
assert len(get_cached_urls(pkgname=a)) == FEW, "wrong pkgname should do nothing"
f = download_file(u, sources=[], cache=True)
with pytest.raises(RuntimeError):
clear_download_cache(f, pkgname=a)
ua = get_cached_urls(pkgname=a)[0]
with pytest.raises(KeyError):
download_file(ua, cache=True, sources=[])
fa = download_file(ua, sources=[], cache=True, pkgname=a)
with pytest.raises(RuntimeError):
clear_download_cache(fa)
clear_download_cache(ua, pkgname=a)
assert len(get_cached_urls()) == FEW+1
assert len(get_cached_urls(pkgname=a)) == FEW-1
clear_download_cache(u)
assert len(get_cached_urls()) == FEW
assert len(get_cached_urls(pkgname=a)) == FEW-1
clear_download_cache(pkgname=a)
assert len(get_cached_urls()) == FEW
assert not get_cached_urls(pkgname=a)
clear_download_cache()
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
def test_transport_cache_via_zip(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
for u, _ in islice(valid_urls, FEW):
download_file(u, cache=True)
with io.BytesIO() as f:
export_download_cache(f)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f, pkgname=a)
check_download_cache()
check_download_cache(pkgname=a)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
cca = cache_contents(pkgname=a)
for k, v in cache_contents().items():
assert v != cca[k]
assert get_file_contents(v) == get_file_contents(cca[k])
clear_download_cache()
with io.BytesIO() as f:
export_download_cache(f, pkgname=a)
b = f.getvalue()
with io.BytesIO(b) as f:
import_download_cache(f)
assert set(get_cached_urls()) == set(get_cached_urls(pkgname=a))
def test_download_parallel_respects_pkgname(temp_cache, valid_urls):
a = "bogus_cache_name"
assert not get_cached_urls()
assert not get_cached_urls(pkgname=a)
download_files_in_parallel([u for (u, c) in islice(valid_urls, FEW)],
pkgname=a)
assert not get_cached_urls()
assert len(get_cached_urls(pkgname=a)) == FEW
@pytest.mark.skipif(not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.")
def test_removal_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
clear_download_cache(u)
assert not is_url_in_cache(u)
check_download_cache()
@pytest.mark.skipif(not CAN_RENAME_DIRECTORY_IN_USE,
reason="This platform is unable to rename directories that are in use.")
def test_update_of_open_files(temp_cache, valid_urls):
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
f = download_file(u, cache='update', sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert is_url_in_cache(u)
def test_removal_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
# This platform is able to remove files while in use.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
clear_download_cache(u)
def test_update_of_open_files_windows(temp_cache, valid_urls, monkeypatch):
def no_rmtree(*args, **kwargs):
warnings.warn(CacheMissingWarning("in use"))
raise PermissionError
if CAN_RENAME_DIRECTORY_IN_USE:
# This platform is able to remove files while in use.
monkeypatch.setattr(astropy.utils.data, "_rmtree", no_rmtree)
u, c = next(valid_urls)
with open(download_file(u, cache=True)):
u2, c2 = next(valid_urls)
with pytest.warns(CacheMissingWarning, match=r".*in use.*"):
f = download_file(u, cache='update', sources=[u2])
check_download_cache()
assert is_url_in_cache(u)
assert get_file_contents(f) == c2
assert get_file_contents(download_file(u, cache=True, sources=[])) == c
def test_no_allow_internet(temp_cache, valid_urls):
u, c = next(valid_urls)
with conf.set_temp('allow_internet', False):
with pytest.raises(urllib.error.URLError):
download_file(u)
assert not is_url_in_cache(u)
with pytest.raises(urllib.error.URLError):
# This will trigger the remote data error if it's allowed to touch the internet
download_file(TESTURL)
def test_clear_download_cache_not_too_aggressive(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file(u, cache=True)
dldir = _get_download_cache_loc()
bad_filename = os.path.join(dldir, "contents")
assert is_url_in_cache(u)
clear_download_cache(bad_filename)
assert is_url_in_cache(u)
def test_clear_download_cache_variants(temp_cache, valid_urls):
# deletion by contents filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(f)
assert not is_url_in_cache(u)
# deletion by url filename
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.join(os.path.dirname(f), 'url'))
assert not is_url_in_cache(u)
# deletion by hash directory name
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f))
assert not is_url_in_cache(u)
# deletion by directory name with trailing slash
u, c = next(valid_urls)
f = download_file(u, cache=True)
clear_download_cache(os.path.dirname(f)+'/')
assert not is_url_in_cache(u)
# deletion by hash of file contents
u, c = next(valid_urls)
f = download_file(u, cache=True)
h = compute_hash(f)
clear_download_cache(h)
assert not is_url_in_cache(u)
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_ftp_tls_auto(temp_cache):
url = "ftp://anonymous:mail%[email protected]/pub/products/iers/finals2000A.all" # noqa
download_file(url)
@pytest.mark.parametrize('base', ["http://example.com", "https://example.com"])
def test_url_trailing_slash(temp_cache, valid_urls, base):
slash = base + "/"
no_slash = base
u, c = next(valid_urls)
download_file(slash, cache=True, sources=[u])
assert is_url_in_cache(no_slash)
download_file(no_slash, cache=True, sources=[])
clear_download_cache(no_slash)
assert not is_url_in_cache(no_slash)
assert not is_url_in_cache(slash)
download_file(no_slash, cache=True, sources=[u])
# see if implicit check_download_cache squawks
def test_empty_url(temp_cache, valid_urls):
u, c = next(valid_urls)
download_file('file://', cache=True, sources=[u])
assert not is_url_in_cache('file:///')
@pytest.mark.remote_data
def test_download_ftp_file_properly_handles_socket_error():
faulty_url = "ftp://anonymous:mail%40astropy.org@nonexisting/pub/products/iers/finals2000A.all"
with pytest.raises(urllib.error.URLError) as excinfo:
download_file(faulty_url)
errmsg = excinfo.exconly()
found_msg = False
possible_msgs = ['Name or service not known',
'nodename nor servname provided, or not known',
'getaddrinfo failed',
'Temporary failure in name resolution',
'No address associated with hostname']
for cur_msg in possible_msgs:
if cur_msg in errmsg:
found_msg = True
break
assert found_msg, f'Got {errmsg}, expected one of these: {",".join(possible_msgs)}'
@pytest.mark.parametrize(
('s', 'ans'),
[('http://googlecom', True),
('https://google.com', True),
('ftp://google.com', True),
('sftp://google.com', True),
('ssh://google.com', True),
('file:///c:/path/to/the%20file.txt', True),
('google.com', False),
('C:\\\\path\\\\file.docx', False),
('data://file', False)])
def test_string_is_url_check(s, ans):
assert is_url(s) is ans
|
4adb06f561b78cbb286aaf9614ab45316d3387d183072825a105561f132f319a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
import warnings

from astropy.utils.exceptions import AstropyDeprecationWarning
# First, the top-level packages:
# TODO: This list is a duplicate of the dependencies in setup.cfg "all", but
# some of the package names are different from the pip-install name (e.g.,
# beautifulsoup4 -> bs4).
_optional_deps = ['asdf', 'asdf_astropy', 'bleach', 'bottleneck', 'bs4', 'bz2', 'h5py',
'html5lib', 'IPython', 'jplephem', 'lxml', 'matplotlib',
'mpmath', 'pandas', 'PIL', 'pytz', 'scipy', 'skyfield',
'sortedcontainers', 'lzma', 'pyarrow']
_formerly_optional_deps = ['yaml'] # for backward compatibility
_deps = {k.upper(): k for k in _optional_deps + _formerly_optional_deps}
# Any subpackages that have different import behavior:
_deps['PLT'] = 'matplotlib.pyplot'
__all__ = [f"HAS_{pkg}" for pkg in _deps]
def __getattr__(name):
if name in __all__:
module_name = name[4:]
if module_name == "YAML":
warnings.warn(
"PyYaml is now a strict dependency. HAS_YAML is deprecated as "
"of v5.0 and will be removed in a subsequent version.",
category=AstropyDeprecationWarning)
try:
importlib.import_module(_deps[module_name])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}.")
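# Illustrative usage (not part of this module): thanks to PEP 562, each HAS_*
# flag triggers an import attempt only on first attribute access, not when
# astropy itself is imported. A typical consumer looks like:
#
#     from astropy.utils.compat.optional_deps import HAS_SCIPY
#
#     if HAS_SCIPY:
#         from scipy import optimize
#     else:
#         ...  # fall back, or raise a helpful error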
|
de118a3ca7cb3d813cf02030f585a62da6414a50e68849d8d180c0588beecbb0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
import numpy as np
from astropy.utils import minversion
__all__ = ['NUMPY_LT_1_19', 'NUMPY_LT_1_19_1', 'NUMPY_LT_1_20',
'NUMPY_LT_1_21_1', 'NUMPY_LT_1_22', 'NUMPY_LT_1_22_1',
'NUMPY_LT_1_23']
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_19 = not minversion(np, '1.19')
NUMPY_LT_1_19_1 = not minversion(np, '1.19.1')
NUMPY_LT_1_20 = not minversion(np, '1.20')
NUMPY_LT_1_21_1 = not minversion(np, '1.21.1')
NUMPY_LT_1_22 = not minversion(np, '1.22')
NUMPY_LT_1_22_1 = not minversion(np, '1.22.1')
NUMPY_LT_1_23 = not minversion(np, '1.23dev0')
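# Illustrative usage (an assumed typical pattern, not from this file): the
# NUMPY_LT_* flags gate version-dependent workarounds, e.g.:
#
#     from astropy.utils.compat.numpycompat import NUMPY_LT_1_22
#
#     if NUMPY_LT_1_22:
#         ...  # apply the pre-1.22 workaround
#     else:
#         ...  # rely on the fixed/added NumPy behavior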
|
ce0940e9bc5c72bcee320894ee8f4412b72c918a0c58d5f442610d714394479d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import warnings
from pathlib import Path
import pytest
import numpy as np
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.iers import iers
from astropy import units as u
from astropy.table import QTable
from astropy.time import Time, TimeDelta
CI = os.environ.get('CI', False)
FILE_NOT_FOUND_ERROR = getattr(__builtins__, 'FileNotFoundError', OSError)
try:
iers.IERS_A.open('finals2000A.all') # check if IERS_A is available
except OSError:
HAS_IERS_A = False
else:
HAS_IERS_A = True
IERS_A_EXCERPT = get_pkg_data_filename(os.path.join('data', 'iers_a_excerpt'))
def setup_module():
# Need auto_download so that IERS_B won't be loaded and cause tests to
# fail. Files to be downloaded are handled appropriately in the tests.
iers.conf.auto_download = True
def teardown_module():
# This setting is to be consistent with astropy/conftest.py
iers.conf.auto_download = False
class TestBasic():
"""Basic tests that IERS_B returns correct values"""
@pytest.mark.parametrize('iers_cls', (iers.IERS_B, iers.IERS))
def test_simple(self, iers_cls):
"""Test the default behaviour for IERS_B and IERS."""
# Arguably, IERS itself should not be used at all, but it used to
# provide IERS_B by default so we check that it continues to do so.
# Eventually, IERS should probably be deprecated.
iers_cls.close()
assert iers_cls.iers_table is None
iers_tab = iers_cls.open()
assert iers_cls.iers_table is not None
assert iers_cls.iers_table is iers_tab
assert isinstance(iers_tab, QTable)
assert isinstance(iers_tab, iers.IERS_B)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc = iers_tab.ut1_utc(jd1, jd2)
assert isinstance(ut1_utc, u.Quantity)
assert (ut1_utc.unit / u.second).is_unity()
# IERS files change at the 0.1 ms level; see gh-6981
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# should be future-proof; surely we've moved to another planet by then
with pytest.raises(IndexError):
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0.)
# also check it returns the right status
ut1_utc2, status2 = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status2 == iers.FROM_IERS_B)
ut1_utc4, status4 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status4 == iers.TIME_BEYOND_IERS_RANGE
# check it works via Time too
t = Time(jd1, jd2, format='jd', scale='utc')
ut1_utc3 = iers_tab.ut1_utc(t)
assert_quantity_allclose(ut1_utc3, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
def test_open_filename(self):
iers.IERS_B.close()
iers.IERS_B.open(iers.IERS_B_FILE)
assert iers.IERS_B.iers_table is not None
assert isinstance(iers.IERS_B.iers_table, QTable)
iers.IERS_B.close()
with pytest.raises(FILE_NOT_FOUND_ERROR):
iers.IERS_B.open('surely this does not exist')
def test_open_network_url(self):
iers.IERS_A.close()
iers.IERS_A.open(Path(IERS_A_EXCERPT).as_uri())
assert iers.IERS_A.iers_table is not None
assert isinstance(iers.IERS_A.iers_table, QTable)
iers.IERS_A.close()
class TestIERS_AExcerpt():
def test_simple(self):
        # Test the IERS A reader. It is also a regression test that ensures
        # values do not get overridden by IERS B; see #4933.
iers_tab = iers.IERS_A.open(IERS_A_EXCERPT)
assert (iers_tab['UT1_UTC'].unit / u.second).is_unity()
assert 'P' in iers_tab['UT1Flag']
assert 'I' in iers_tab['UT1Flag']
assert 'B' in iers_tab['UT1Flag']
assert np.all((iers_tab['UT1Flag'] == 'I') |
(iers_tab['UT1Flag'] == 'P') |
(iers_tab['UT1Flag'] == 'B'))
assert (iers_tab['dX_2000A'].unit / u.marcsec).is_unity()
assert (iers_tab['dY_2000A'].unit / u.marcsec).is_unity()
assert 'P' in iers_tab['NutFlag']
assert 'I' in iers_tab['NutFlag']
assert 'B' in iers_tab['NutFlag']
assert np.all((iers_tab['NutFlag'] == 'P') |
(iers_tab['NutFlag'] == 'I') |
(iers_tab['NutFlag'] == 'B'))
assert (iers_tab['PM_x'].unit / u.arcsecond).is_unity()
assert (iers_tab['PM_y'].unit / u.arcsecond).is_unity()
assert 'P' in iers_tab['PolPMFlag']
assert 'I' in iers_tab['PolPMFlag']
assert 'B' in iers_tab['PolPMFlag']
assert np.all((iers_tab['PolPMFlag'] == 'P') |
(iers_tab['PolPMFlag'] == 'I') |
(iers_tab['PolPMFlag'] == 'B'))
t = Time([57053., 57054., 57055.], format='mjd')
ut1_utc, status = iers_tab.ut1_utc(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
assert_quantity_allclose(ut1_utc,
[-0.4916557, -0.4925323, -0.4934373] * u.s,
atol=0.1*u.ms)
dcip_x, dcip_y, status = iers_tab.dcip_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
# These values are *exactly* as given in the table, so they should
# match to double precision accuracy.
print(dcip_x)
print(dcip_y)
assert_quantity_allclose(dcip_x,
[-0.086, -0.093, -0.087] * u.marcsec,
atol=1.*u.narcsec)
assert_quantity_allclose(dcip_y,
[0.094, 0.081, 0.072] * u.marcsec,
atol=1*u.narcsec)
pm_x, pm_y, status = iers_tab.pm_xy(t, return_status=True)
assert status[0] == iers.FROM_IERS_B
assert np.all(status[1:] == iers.FROM_IERS_A)
assert_quantity_allclose(pm_x,
[0.003734, 0.004581, 0.004623] * u.arcsec,
atol=0.1*u.marcsec)
assert_quantity_allclose(pm_y,
[0.310824, 0.313150, 0.315517] * u.arcsec,
atol=0.1*u.marcsec)
# Table behaves properly as a table (e.g. can be sliced)
assert len(iers_tab[:2]) == 2
@pytest.mark.skipif('not HAS_IERS_A')
class TestIERS_A():
def test_simple(self):
"""Test that open() by default reads a 'finals2000A.all' file."""
# Ensure we remove any cached table (gh-5131).
iers.IERS_A.close()
iers_tab = iers.IERS_A.open()
jd1 = np.array([2456108.5, 2456108.5, 2456108.5, 2456109.5, 2456109.5])
jd2 = np.array([0.49999421, 0.99997685, 0.99998843, 0., 0.5])
ut1_utc, status = iers_tab.ut1_utc(jd1, jd2, return_status=True)
assert np.all(status == iers.FROM_IERS_B)
assert_quantity_allclose(ut1_utc, [-0.5868211, -0.5868184, -0.5868184,
0.4131816, 0.41328895] * u.s,
atol=0.1*u.ms)
ut1_utc2, status2 = iers_tab.ut1_utc(1e11, 0., return_status=True)
assert status2 == iers.TIME_BEYOND_IERS_RANGE
tnow = Time.now()
ut1_utc3, status3 = iers_tab.ut1_utc(tnow, return_status=True)
assert status3 == iers.FROM_IERS_A_PREDICTION
assert ut1_utc3 != 0.
class TestIERS_Auto():
def setup_class(self):
"""Set up useful data for the tests.
"""
self.N = 40
self.ame = 30.0
self.iers_a_file_1 = get_pkg_data_filename(
os.path.join('data', 'finals2000A-2016-02-30-test'))
self.iers_a_file_2 = get_pkg_data_filename(
os.path.join('data', 'finals2000A-2016-04-30-test'))
self.iers_a_url_1 = Path(self.iers_a_file_1).as_uri()
self.iers_a_url_2 = Path(self.iers_a_file_2).as_uri()
self.t = Time.now() + TimeDelta(10, format='jd') * np.arange(self.N)
def teardown_method(self, method):
"""Run this after every test.
"""
iers.IERS_Auto.close()
def test_interpolate_error_formatting(self):
"""Regression test: make sure the error message in
IERS_Auto._check_interpolate_indices() is formatted correctly.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('iers_auto_url_mirror', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', self.ame):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
with warnings.catch_warnings():
# Ignoring this if it comes up -- IERS_Auto predictive
# values are older than 30.0 days but downloading the
# latest table did not find newer values
warnings.simplefilter('ignore', iers.IERSStaleWarning)
iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == iers.INTERPOLATE_ERROR.format(self.ame)
def test_auto_max_age_none(self):
"""Make sure that iers.INTERPOLATE_ERROR's advice about setting
auto_max_age = None actually works.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', None):
iers_table = iers.IERS_Auto.open()
delta = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert isinstance(delta, np.ndarray)
assert delta.shape == (self.N,)
assert_quantity_allclose(delta, np.array([-0.2246227]*self.N)*u.s)
def test_auto_max_age_minimum(self):
"""Check that the minimum auto_max_age is enforced.
"""
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
with iers.conf.set_temp('auto_max_age', 5.0):
with pytest.raises(ValueError) as err:
iers_table = iers.IERS_Auto.open()
_ = iers_table.ut1_utc(self.t.jd1, self.t.jd2)
assert str(err.value) == 'IERS auto_max_age configuration value must be larger than 10 days'
def test_no_auto_download(self):
with iers.conf.set_temp('auto_download', False):
t = iers.IERS_Auto.open()
assert type(t) is iers.IERS_B
@pytest.mark.remote_data
def test_simple(self):
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_1):
dat = iers.IERS_Auto.open()
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == 57539.0 * u.d
# Pretend we are accessing at a time 7 days after start of predictive data
predictive_mjd = dat.meta['predictive_mjd']
dat._time_now = Time(predictive_mjd, format='mjd') + 7 * u.d
# Look at times before and after the test file begins. 0.1292905 is
# the IERS-B value from MJD=57359. The value in
# finals2000A-2016-02-30-test has been replaced at this point.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.2246227)
# Now pretend we are accessing at time 60 days after start of predictive data.
# There will be a warning when downloading the file doesn't give new data
# and an exception when extrapolating into the future with insufficient data.
dat._time_now = Time(predictive_mjd, format='mjd') + 60 * u.d
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
with pytest.warns(iers.IERSStaleWarning, match='IERS_Auto predictive '
'values are older') as warns, \
pytest.raises(ValueError, match='interpolating from IERS_Auto '
'using predictive values'):
dat.ut1_utc(Time(60000, format='mjd').jd)
assert len(warns) == 1
# Warning only if we are getting return status
with pytest.warns(iers.IERSStaleWarning, match='IERS_Auto '
'predictive values are older') as warns:
dat.ut1_utc(Time(60000, format='mjd').jd, return_status=True)
assert len(warns) == 1
# Now set auto_max_age = None which says that we don't care how old the
# available IERS-A file is. There should be no warnings or exceptions.
with iers.conf.set_temp('auto_max_age', None):
dat.ut1_utc(Time(60000, format='mjd').jd)
# Now point to a later file with same values but MJD increased by
# 60 days and see that things work. dat._time_now is still the same value
# as before, i.e. right around the start of predictive values for the new file.
# (In other words this is like downloading the latest file online right now).
with iers.conf.set_temp('iers_auto_url', self.iers_a_url_2):
# Look at times before and after the test file begins. This forces a new download.
assert np.allclose(dat.ut1_utc(Time(50000, format='mjd').jd).value, 0.1293286)
assert np.allclose(dat.ut1_utc(Time(60000, format='mjd').jd).value, -0.3)
# Now the time range should be different.
assert dat['MJD'][0] == 57359.0 * u.d
assert dat['MJD'][-1] == (57539.0 + 60) * u.d
@pytest.mark.remote_data
def test_IERS_B_parameters_loading_into_IERS_Auto():
A = iers.IERS_Auto.open()
B = iers.IERS_B.open()
ok_A = A["MJD"] <= B["MJD"][-1]
assert not np.all(ok_A), "IERS B covers all of IERS A: should not happen"
# We only overwrite IERS_B values in the IERS_A table that were already
# there in the first place. Better take that into account.
ok_A &= np.isfinite(A["UT1_UTC_B"])
i_B = np.searchsorted(B["MJD"], A["MJD"][ok_A])
assert np.all(np.diff(i_B) == 1), "Valid region not contiguous"
assert np.all(A["MJD"][ok_A] == B["MJD"][i_B])
# Check that values are copied correctly. Since units are not
# necessarily the same, we use allclose with very strict tolerance.
for name in ("UT1_UTC", "PM_x", "PM_y", "dX_2000A", "dY_2000A"):
assert_quantity_allclose(
A[name][ok_A], B[name][i_B], rtol=1e-15,
err_msg=("Bug #9206 IERS B parameter {} not copied over "
"correctly to IERS Auto".format(name)))
# Issue with FTP; rework this test into the previous one when it's fixed
@pytest.mark.skipif("CI", reason="Flaky on CI")
@pytest.mark.remote_data
def test_iers_a_dl():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert 'UT1_UTC_A' in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.xfail(reason="https://github.com/astropy/astropy/issues/12998")
@pytest.mark.remote_data
def test_iers_a_dl_mirror():
iersa_tab = iers.IERS_A.open(iers.IERS_A_URL_MIRROR, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersa_tab) > 0
assert 'UT1_UTC_A' in iersa_tab.colnames
finally:
iers.IERS_A.close()
@pytest.mark.remote_data
def test_iers_b_dl():
iersb_tab = iers.IERS_B.open(iers.IERS_B_URL, cache=False)
try:
# some basic checks to ensure the format makes sense
assert len(iersb_tab) > 0
assert 'UT1_UTC' in iersb_tab.colnames
finally:
iers.IERS_B.close()
|
e10bcc2d11a083e52de75878820d167b7b5761b99917fcbb027c6c0569bdfe44 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from packaging.version import Version
import pytest
import numpy as np
from numpy import ma
from numpy.testing import assert_allclose, assert_equal
from astropy.visualization.mpl_normalize import ImageNormalize, simple_norm, imshow_norm
from astropy.visualization.interval import ManualInterval, PercentileInterval
from astropy.visualization.stretch import LogStretch, PowerStretch, SqrtStretch
from astropy.utils.compat.optional_deps import HAS_MATPLOTLIB, HAS_PLT # noqa
if HAS_MATPLOTLIB:
import matplotlib
MATPLOTLIB_LT_32 = Version(matplotlib.__version__) < Version('3.2')
DATA = np.linspace(0., 15., 6)
DATA2 = np.arange(3)
DATA2SCL = 0.5 * DATA2
DATA3 = np.linspace(-3., 3., 7)
STRETCHES = (SqrtStretch(), PowerStretch(0.5), LogStretch())
INVALID = (None, -np.inf, -1)
@pytest.mark.skipif('HAS_MATPLOTLIB')
def test_normalize_error_message():
with pytest.raises(ImportError) as exc:
ImageNormalize()
assert (exc.value.args[0] == "matplotlib is required in order to use "
"this class.")
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestNormalize:
def test_invalid_interval(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., interval=ManualInterval,
clip=True)
def test_invalid_stretch(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch,
clip=True)
def test_stretch_none(self):
with pytest.raises(ValueError):
ImageNormalize(vmin=2., vmax=10., stretch=None)
def test_scalar(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(data=6, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
assert_allclose(norm(6), 0.70710678)
assert_allclose(norm(6), norm2(6))
def test_clip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(DATA)
expected = [0., 0.35355339, 0.70710678, 0.93541435, 1., 1.]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(DATA))
def test_noclip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False, invalid=None)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False,
invalid=None)
output = norm(DATA)
expected = [np.nan, 0.35355339, 0.70710678, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(DATA))
def test_implicit_autoscale(self):
norm = ImageNormalize(vmin=None, vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(None, 10),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == np.min(DATA)
assert norm.vmax == 10.
assert_allclose(output, norm2(DATA))
norm = ImageNormalize(vmin=2., vmax=None, stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, None),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == 2.
assert norm.vmax == np.max(DATA)
assert_allclose(output, norm2(DATA))
def test_call_clip(self):
"""Test that the clip keyword is used when calling the object."""
data = np.arange(5)
norm = ImageNormalize(vmin=1., vmax=3., clip=False)
output = norm(data, clip=True)
assert_equal(output.data, [0, 0, 0.5, 1.0, 1.0])
assert np.all(~output.mask)
output = norm(data, clip=False)
assert_equal(output.data, [-0.5, 0, 0.5, 1.0, 1.5])
assert np.all(~output.mask)
def test_masked_clip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(mdata)
expected = [0., 0.35355339, 1., 0.93541435, 1., 1.]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(mdata))
def test_masked_noclip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False, invalid=None)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False,
invalid=None)
output = norm(mdata)
expected = [np.nan, 0.35355339, -10, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 1, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(mdata))
def test_invalid_data(self):
data = np.arange(25.).reshape((5, 5))
data[2, 2] = np.nan
data[1, 2] = np.inf
percent = 85.0
interval = PercentileInterval(percent)
# initialized without data
norm = ImageNormalize(interval=interval)
norm(data) # sets vmin/vmax
assert_equal((norm.vmin, norm.vmax), (1.65, 22.35))
# initialized with data
norm2 = ImageNormalize(data, interval=interval)
assert_equal((norm2.vmin, norm2.vmax), (norm.vmin, norm.vmax))
norm3 = simple_norm(data, 'linear', percent=percent)
assert_equal((norm3.vmin, norm3.vmax), (norm.vmin, norm.vmax))
assert_allclose(norm(data), norm2(data))
assert_allclose(norm(data), norm3(data))
norm4 = ImageNormalize()
norm4(data) # sets vmin/vmax
assert_equal((norm4.vmin, norm4.vmax), (0, 24))
norm5 = ImageNormalize(data)
assert_equal((norm5.vmin, norm5.vmax), (norm4.vmin, norm4.vmax))
@pytest.mark.parametrize('stretch', STRETCHES)
def test_invalid_keyword(self, stretch):
norm1 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False,
invalid=None)
norm2 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False)
norm3 = ImageNormalize(DATA3, stretch=stretch, vmin=-1, vmax=1,
clip=False, invalid=-1.)
result1 = norm1(DATA3)
result2 = norm2(DATA3)
result3 = norm3(DATA3)
assert_equal(result1[0:2], (np.nan, np.nan))
assert_equal(result2[0:2], (-1., -1.))
assert_equal(result1[2:], result2[2:])
assert_equal(result2, result3)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestImageScaling:
def test_linear(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear')
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
def test_sqrt(self):
"""Test sqrt scaling."""
norm1 = simple_norm(DATA2, stretch='sqrt')
assert_allclose(norm1(DATA2), np.sqrt(DATA2SCL), atol=0, rtol=1.e-5)
@pytest.mark.parametrize('invalid', INVALID)
def test_sqrt_invalid_kw(self, invalid):
stretch = SqrtStretch()
norm1 = simple_norm(DATA3, stretch='sqrt', min_cut=-1, max_cut=1,
clip=False, invalid=invalid)
norm2 = ImageNormalize(stretch=stretch, vmin=-1, vmax=1, clip=False,
invalid=invalid)
assert_equal(norm1(DATA3), norm2(DATA3))
def test_power(self):
"""Test power scaling."""
power = 3.0
norm = simple_norm(DATA2, stretch='power', power=power)
assert_allclose(norm(DATA2), DATA2SCL ** power, atol=0, rtol=1.e-5)
def test_log(self):
"""Test log10 scaling."""
norm = simple_norm(DATA2, stretch='log')
ref = np.log10(1000 * DATA2SCL + 1.0) / np.log10(1001.0)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_log_with_log_a(self):
"""Test log10 scaling with a custom log_a."""
log_a = 100
norm = simple_norm(DATA2, stretch='log', log_a=log_a)
ref = np.log10(log_a * DATA2SCL + 1.0) / np.log10(log_a + 1)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh(self):
"""Test asinh scaling."""
norm = simple_norm(DATA2, stretch='asinh')
ref = np.arcsinh(10 * DATA2SCL) / np.arcsinh(10)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh_with_asinh_a(self):
"""Test asinh scaling with a custom asinh_a."""
asinh_a = 0.5
norm = simple_norm(DATA2, stretch='asinh', asinh_a=asinh_a)
ref = np.arcsinh(DATA2SCL / asinh_a) / np.arcsinh(1. / asinh_a)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_min(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear', min_cut=1., clip=True)
assert_allclose(norm(DATA2), [0., 0., 1.], atol=0, rtol=1.e-5)
def test_percent(self):
"""Test percent keywords."""
norm = simple_norm(DATA2, stretch='linear', percent=99., clip=True)
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
norm2 = simple_norm(DATA2, stretch='linear', min_percent=0.5,
max_percent=99.5, clip=True)
assert_allclose(norm(DATA2), norm2(DATA2), atol=0, rtol=1.e-5)
def test_invalid_stretch(self):
"""Test invalid stretch keyword."""
with pytest.raises(ValueError):
simple_norm(DATA2, stretch='invalid')
@pytest.mark.skipif('not HAS_PLT')
def test_imshow_norm():
import matplotlib.pyplot as plt
image = np.random.randn(10, 10)
plt.clf()
ax = plt.subplot(label='test_imshow_norm')
imshow_norm(image, ax=ax)
with pytest.raises(ValueError):
# X and data are the same, can't give both
imshow_norm(image, X=image, ax=ax)
with pytest.raises(ValueError):
# illegal to manually pass in normalization since that defeats the point
imshow_norm(image, ax=ax, norm=ImageNormalize())
plt.clf()
imshow_norm(image, ax=ax, vmin=0, vmax=1)
# make sure the pyplot version works
plt.clf()
imres, norm = imshow_norm(image, ax=None)
assert isinstance(norm, ImageNormalize)
plt.close('all')
|
aa5ba5cba1faacc5a63f5d34df00533c5449678761b9e05f32a968a4ac4ea710 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from packaging.version import Version
import pytest
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.contour import QuadContourSet
from astropy import units as u
from astropy.wcs import WCS
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs.wcsapi import SlicedLowLevelWCS, HighLevelWCSWrapper
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.visualization.wcsaxes.frame import (
EllipticalFrame, RectangularFrame, RectangularFrame1D)
from astropy.visualization.wcsaxes.utils import get_coord_meta
from astropy.visualization.wcsaxes.transforms import CurvedTransform
ft_version = Version(matplotlib.ft2font.__freetype_version__)
FREETYPE_261 = ft_version == Version("2.6.1")
TEX_UNAVAILABLE = not matplotlib.checkdep_usetex(True)
MATPLOTLIB_DEV = Version(matplotlib.__version__).is_devrelease
def teardown_function(function):
plt.close('all')
def test_grid_regression(ignore_matplotlibrc):
# Regression test for a bug that meant that if the rc parameter
# axes.grid was set to True, WCSAxes would crash upon initialization.
plt.rc('axes', grid=True)
fig = plt.figure(figsize=(3, 3))
WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
def test_format_coord_regression(ignore_matplotlibrc, tmpdir):
# Regression test for a bug that meant that if format_coord was called by
# Matplotlib before the axes were drawn, an error occurred.
fig = plt.figure(figsize=(3, 3))
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
assert ax.format_coord(10, 10) == ""
assert ax.coords[0].format_coord(10) == ""
assert ax.coords[1].format_coord(10) == ""
fig.savefig(tmpdir.join('nothing').strpath)
assert ax.format_coord(10, 10) == "10.0 10.0 (world)"
assert ax.coords[0].format_coord(10) == "10.0"
assert ax.coords[1].format_coord(10) == "10.0"
TARGET_HEADER = fits.Header.fromstring("""
NAXIS = 2
NAXIS1 = 200
NAXIS2 = 100
CTYPE1 = 'RA---MOL'
CRPIX1 = 500
CRVAL1 = 180.0
CDELT1 = -0.4
CUNIT1 = 'deg '
CTYPE2 = 'DEC--MOL'
CRPIX2 = 400
CRVAL2 = 0.0
CDELT2 = 0.4
CUNIT2 = 'deg '
COORDSYS= 'icrs '
""", sep='\n')
@pytest.mark.parametrize('grid_type', ['lines', 'contours'])
def test_no_numpy_warnings(ignore_matplotlibrc, tmpdir, grid_type):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.imshow(np.zeros((100, 200)))
ax.coords.grid(color='white', grid_type=grid_type)
# There should be no warnings raised if some pixels are outside WCS
# (since this is normal).
    # BUT our own catch_warnings helper used to ignore some warnings, so now
    # we have to catch them explicitly. Otherwise, the pytest
    # filterwarnings=error setting in setup.cfg will fail this test.
    # There are actually multiple warnings, but they are all similar.
with warnings.catch_warnings():
warnings.filterwarnings('ignore', message=r'.*converting a masked element to nan.*')
warnings.filterwarnings('ignore', message=r'.*No contour levels were found within the data range.*')
warnings.filterwarnings('ignore', message=r'.*np\.asscalar\(a\) is deprecated since NumPy v1\.16.*')
warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN will be required.*')
fig.savefig(tmpdir.join('test.png').strpath)
def test_invalid_frame_overlay(ignore_matplotlibrc):
# Make sure a nice error is returned if a frame doesn't exist
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
with pytest.raises(ValueError) as exc:
ax.get_coords_overlay('banana')
assert exc.value.args[0] == 'Frame banana not found'
with pytest.raises(ValueError) as exc:
get_coord_meta('banana')
assert exc.value.args[0] == 'Unknown frame: banana'
def test_plot_coord_transform(ignore_matplotlibrc):
twoMASS_k_header = get_pkg_data_filename('data/2MASS_k_header')
twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(359.76045223*u.deg, 0.26876217*u.deg)
with pytest.raises(TypeError):
ax.plot_coord(c, 'o', transform=ax.get_transform('galactic'))
def test_set_label_properties(ignore_matplotlibrc):
# Regression test to make sure that arguments passed to
# set_xlabel/set_ylabel are passed to the underlying coordinate helpers
ax = plt.subplot(1, 1, 1, projection=WCS(TARGET_HEADER))
ax.set_xlabel('Test x label', labelpad=2, color='red')
ax.set_ylabel('Test y label', labelpad=3, color='green')
assert ax.coords[0].axislabels.get_text() == 'Test x label'
assert ax.coords[0].axislabels.get_minpad('b') == 2
assert ax.coords[0].axislabels.get_color() == 'red'
assert ax.coords[1].axislabels.get_text() == 'Test y label'
assert ax.coords[1].axislabels.get_minpad('l') == 3
assert ax.coords[1].axislabels.get_color() == 'green'
assert ax.get_xlabel() == 'Test x label'
assert ax.get_ylabel() == 'Test y label'
GAL_HEADER = fits.Header.fromstring("""
SIMPLE = T / conforms to FITS standard
BITPIX = -32 / array data type
NAXIS = 3 / number of array dimensions
NAXIS1 = 31
NAXIS2 = 2881
NAXIS3 = 480
EXTEND = T
CTYPE1 = 'DISTMOD '
CRVAL1 = 3.5
CDELT1 = 0.5
CRPIX1 = 1.0
CTYPE2 = 'GLON-CAR'
CRVAL2 = 180.0
CDELT2 = -0.125
CRPIX2 = 1.0
CTYPE3 = 'GLAT-CAR'
CRVAL3 = 0.0
CDELT3 = 0.125
CRPIX3 = 241.0
""", sep='\n')
def test_slicing_warnings(ignore_matplotlibrc, tmpdir):
# Regression test to make sure that no warnings are emitted by the tick
# locator for the sliced axis when slicing a cube.
# Scalar case
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0., 0., 1.]
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN.*')
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
plt.savefig(tmpdir.join('test.png').strpath)
# Angle case
wcs3d = WCS(GAL_HEADER)
with warnings.catch_warnings():
# https://github.com/astropy/astropy/issues/9690
warnings.filterwarnings('ignore', message=r'.*PY_SSIZE_T_CLEAN.*')
plt.clf()
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 2))
plt.savefig(tmpdir.join('test.png').strpath)
def test_plt_xlabel_ylabel(tmpdir):
# Regression test for a bug that happened when using plt.xlabel
# and plt.ylabel with Matplotlib 3.0
plt.subplot(projection=WCS())
plt.xlabel('Galactic Longitude')
plt.ylabel('Galactic Latitude')
plt.savefig(tmpdir.join('test.png').strpath)
def test_grid_type_contours_transform(tmpdir):
# Regression test for a bug that caused grid_type='contours' to not work
# with custom transforms
class CustomTransform(CurvedTransform):
# We deliberately don't define the inverse, and has_inverse should
# default to False.
def transform(self, values):
return values * 1.3
transform = CustomTransform()
coord_meta = {'type': ('scalar', 'scalar'),
'unit': (u.m, u.s),
'wrap': (None, None),
'name': ('x', 'y')}
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8],
transform=transform, coord_meta=coord_meta)
fig.add_axes(ax)
ax.grid(grid_type='contours')
fig.savefig(tmpdir.join('test.png').strpath)
def test_plt_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# plt.imshow was called.
ax = plt.subplot(projection=WCS())
plt.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_ax_imshow_origin():
# Regression test for a bug that caused origin to be set to upper when
# ax.imshow was called with no origin
ax = plt.subplot(projection=WCS())
ax.imshow(np.ones((2, 2)))
assert ax.get_xlim() == (-0.5, 1.5)
assert ax.get_ylim() == (-0.5, 1.5)
def test_grid_contour_large_spacing(tmpdir):
# Regression test for a bug that caused a crash when grid was called and
# didn't produce grid lines (due e.g. to too large spacing) and was then
# called again.
filename = tmpdir.join('test.png').strpath
ax = plt.subplot(projection=WCS())
ax.set_xlim(-0.5, 1.5)
ax.set_ylim(-0.5, 1.5)
ax.coords[0].set_ticks(values=[] * u.one)
ax.coords[0].grid(grid_type='contours')
plt.savefig(filename)
ax.coords[0].grid(grid_type='contours')
plt.savefig(filename)
def test_contour_return():
# Regression test for a bug that caused contour and contourf to return None
# instead of the contour object.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
cset = ax.contour(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))
assert isinstance(cset, QuadContourSet)
cset = ax.contourf(np.arange(16).reshape(4, 4), transform=ax.get_transform('world'))
assert isinstance(cset, QuadContourSet)
def test_contour_empty():
# Regression test for a bug that caused contour to crash if no contours
# were present.
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
with pytest.warns(UserWarning, match='No contour levels were found within the data range'):
ax.contour(np.zeros((4, 4)), transform=ax.get_transform('world'))
def test_iterate_coords(ignore_matplotlibrc, tmpdir):
# Regression test for a bug that caused ax.coords to return too few axes
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
wcs3d.wcs.cunit = ['deg', 'deg', 'km/s']
wcs3d.wcs.crpix = [614.5, 856.5, 333]
wcs3d.wcs.cdelt = [6.25, 6.25, 23]
wcs3d.wcs.crval = [0., 0., 1.]
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
x, y, z = ax.coords
def test_invalid_slices_errors(ignore_matplotlibrc):
# Make sure that users get a clear message when specifying a WCS with
# >2 dimensions without giving the 'slices' argument, or if the 'slices'
# argument has too many/few elements.
wcs3d = WCS(naxis=3)
wcs3d.wcs.ctype = ['x', 'y', 'z']
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
with pytest.raises(ValueError) as exc:
plt.subplot(1, 1, 1, projection=wcs3d)
assert exc.value.args[0] == ("WCS has more than 2 pixel dimensions, so "
"'slices' should be set")
with pytest.raises(ValueError) as exc:
plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1, 2))
assert exc.value.args[0] == ("'slices' should have as many elements as "
"WCS has pixel dimensions (should be 3)")
wcs2d = WCS(naxis=2)
wcs2d.wcs.ctype = ['x', 'y']
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d)
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('x', 'y'))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=('y', 'x'))
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=['x', 'y'])
assert ax.frame_class is RectangularFrame
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'x'))
assert ax.frame_class is RectangularFrame1D
wcs1d = WCS(naxis=1)
wcs1d.wcs.ctype = ['x']
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs1d)
assert ax.frame_class is RectangularFrame1D
with pytest.raises(ValueError):
plt.subplot(1, 1, 1, projection=wcs2d, slices=(1, 'y'))
EXPECTED_REPR_1 = """
<CoordinatesMap with 3 world coordinates:
index aliases type unit wrap format_unit visible
----- ------------------------------ --------- ---- ---- ----------- -------
0 distmod dist scalar None no
1 pos.galactic.lon glon-car glon longitude deg 360 deg yes
2 pos.galactic.lat glat-car glat latitude deg None deg yes
>
""".strip()
EXPECTED_REPR_2 = """
<CoordinatesMap with 3 world coordinates:
index aliases type unit wrap format_unit visible
----- ------------------------------ --------- ---- ---- ----------- -------
0 distmod dist scalar None yes
1 pos.galactic.lon glon-car glon longitude deg 360 deg yes
2 pos.galactic.lat glat-car glat latitude deg None deg yes
>
""".strip()
def test_repr(ignore_matplotlibrc):
# Unit test to make sure __repr__ looks as expected
wcs3d = WCS(GAL_HEADER)
# Cube header has world coordinates as distance, lon, lat, so start off
# by slicing in a way that we select just lon,lat:
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=(1, 'x', 'y'))
assert repr(ax.coords) == EXPECTED_REPR_1
# Now slice in a way that all world coordinates are still present:
plt.clf()
ax = plt.subplot(1, 1, 1, projection=wcs3d, slices=('x', 'y', 1))
assert repr(ax.coords) == EXPECTED_REPR_2
@pytest.fixture
def time_spectral_wcs_2d():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['FREQ', 'TIME']
wcs.wcs.set()
return wcs
def test_time_wcs(time_spectral_wcs_2d):
# Regression test for a bug that caused WCSAxes to error when using a WCS
# with a time axis.
plt.subplot(projection=time_spectral_wcs_2d)
@pytest.mark.skipif('TEX_UNAVAILABLE')
def test_simplify_labels_usetex(ignore_matplotlibrc, tmpdir):
"""Regression test for https://github.com/astropy/astropy/issues/8004."""
plt.rc('text', usetex=True)
header = {
'NAXIS': 2,
'NAXIS1': 360,
'NAXIS2': 180,
'CRPIX1': 180.5,
'CRPIX2': 90.5,
'CRVAL1': 180.0,
'CRVAL2': 0.0,
'CDELT1': -2 * np.sqrt(2) / np.pi,
'CDELT2': 2 * np.sqrt(2) / np.pi,
'CTYPE1': 'RA---MOL',
'CTYPE2': 'DEC--MOL',
'RADESYS': 'ICRS'}
wcs = WCS(header)
fig, ax = plt.subplots(
subplot_kw=dict(frame_class=EllipticalFrame, projection=wcs))
ax.set_xlim(-0.5, header['NAXIS1'] - 0.5)
ax.set_ylim(-0.5, header['NAXIS2'] - 0.5)
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[0].set_ticks(spacing=45 * u.deg)
ax.coords[1].set_ticks(spacing=30 * u.deg)
ax.grid()
fig.savefig(tmpdir / 'plot.png')
@pytest.mark.parametrize('frame_class', [RectangularFrame, EllipticalFrame])
def test_set_labels_with_coords(ignore_matplotlibrc, frame_class):
"""Test if ``axis.set_xlabel()`` calls the correct ``coords[i]_set_axislabel()`` in a
WCS plot. Regression test for https://github.com/astropy/astropy/issues/10435.
"""
labels = ['RA', 'Declination']
header = {
'NAXIS': 2,
'NAXIS1': 360,
'NAXIS2': 180,
'CRPIX1': 180.5,
'CRPIX2': 90.5,
'CRVAL1': 180.0,
'CRVAL2': 0.0,
'CDELT1': -2 * np.sqrt(2) / np.pi,
'CDELT2': 2 * np.sqrt(2) / np.pi,
'CTYPE1': 'RA---AIT',
'CTYPE2': 'DEC--AIT'}
wcs = WCS(header)
fig, ax = plt.subplots(
subplot_kw=dict(frame_class=frame_class, projection=wcs))
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
assert ax.get_xlabel() == labels[0]
assert ax.get_ylabel() == labels[1]
for i in range(2):
assert ax.coords[i].get_axislabel() == labels[i]
@pytest.mark.parametrize('atol', [0.2, 1.0e-8])
def test_bbox_size(atol):
# Test for the size of a WCSAxes bbox (only have Matplotlib >= 3.0 now)
extents = [11.38888888888889, 3.5, 576.0, 432.0]
fig = plt.figure()
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8])
fig.add_axes(ax)
fig.canvas.draw()
renderer = fig.canvas.renderer
ax_bbox = ax.get_tightbbox(renderer)
# Enforce strict test only with reference Freetype version
if atol < 0.1 and not FREETYPE_261:
pytest.xfail("Exact BoundingBox dimensions are only ensured with FreeType 2.6.1")
assert np.allclose(ax_bbox.extents, extents, atol=atol)
def test_wcs_type_transform_regression():
wcs = WCS(TARGET_HEADER)
sliced_wcs = SlicedLowLevelWCS(wcs, np.s_[1:-1, 1:-1])
ax = plt.subplot(1, 1, 1, projection=wcs)
ax.get_transform(sliced_wcs)
high_wcs = HighLevelWCSWrapper(sliced_wcs)
    ax.get_transform(high_wcs)
def test_multiple_draws_grid_contours(tmpdir):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=WCS())
ax.grid(color='black', grid_type='contours')
fig.savefig(tmpdir / 'plot.png')
fig.savefig(tmpdir / 'plot.png')
|
7a482f67415bfe8a6ee901d4ab975580d1379a938179051402bc8d644afbe8e1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from textwrap import dedent
import pytest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D, IdentityTransform
from astropy.io import fits
from astropy import units as u
from astropy.wcs.wcsapi import BaseLowLevelWCS, SlicedLowLevelWCS
from astropy.coordinates import SkyCoord
from astropy.time import Time
from astropy.units import Quantity
from astropy.tests.image_tests import IMAGE_REFERENCE_DIR
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs import WCS
from astropy.visualization.wcsaxes.frame import RectangularFrame, RectangularFrame1D
from astropy.visualization.wcsaxes.wcsapi import (WCSWorld2PixelTransform,
transform_coord_meta_from_wcs,
apply_slices)
@pytest.fixture
def plt_close():
yield
plt.close('all')
WCS2D = WCS(naxis=2)
WCS2D.wcs.ctype = ['x', 'y']
WCS2D.wcs.cunit = ['km', 'km']
WCS2D.wcs.crpix = [614.5, 856.5]
WCS2D.wcs.cdelt = [6.25, 6.25]
WCS2D.wcs.crval = [0., 0.]
WCS3D = WCS(naxis=3)
WCS3D.wcs.ctype = ['x', 'y', 'z']
WCS3D.wcs.cunit = ['km', 'km', 'km']
WCS3D.wcs.crpix = [614.5, 856.5, 333]
WCS3D.wcs.cdelt = [6.25, 6.25, 23]
WCS3D.wcs.crval = [0., 0., 1.]
@pytest.fixture
def wcs_4d():
header = dedent("""\
WCSAXES = 4 / Number of coordinate axes
CRPIX1 = 0.0 / Pixel coordinate of reference point
CRPIX2 = 0.0 / Pixel coordinate of reference point
CRPIX3 = 0.0 / Pixel coordinate of reference point
CRPIX4 = 5.0 / Pixel coordinate of reference point
CDELT1 = 0.4 / [min] Coordinate increment at reference point
CDELT2 = 2E-11 / [m] Coordinate increment at reference point
CDELT3 = 0.0027777777777778 / [deg] Coordinate increment at reference point
CDELT4 = 0.0013888888888889 / [deg] Coordinate increment at reference point
CUNIT1 = 'min' / Units of coordinate increment and value
CUNIT2 = 'm' / Units of coordinate increment and value
CUNIT3 = 'deg' / Units of coordinate increment and value
CUNIT4 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'TIME' / Coordinate type code
CTYPE2 = 'WAVE' / Vacuum wavelength (linear)
CTYPE3 = 'HPLT-TAN' / Coordinate type codegnomonic projection
CTYPE4 = 'HPLN-TAN' / Coordinate type codegnomonic projection
CRVAL1 = 0.0 / [min] Coordinate value at reference point
CRVAL2 = 0.0 / [m] Coordinate value at reference point
CRVAL3 = 0.0 / [deg] Coordinate value at reference point
CRVAL4 = 0.0 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 0.0 / [deg] Native latitude of celestial pole
""")
return WCS(header=fits.Header.fromstring(header, sep='\n'))
@pytest.fixture
def cube_wcs():
cube_header = get_pkg_data_filename('data/cube_header')
header = fits.Header.fromtextfile(cube_header)
return WCS(header=header)
def test_shorthand_inversion():
"""
Test that the Matplotlib subtraction shorthand for composing and inverting
transformations works.
"""
w1 = WCS(naxis=2)
w1.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w1.wcs.crpix = [256.0, 256.0]
w1.wcs.cdelt = [-0.05, 0.05]
w1.wcs.crval = [120.0, -19.0]
w2 = WCS(naxis=2)
w2.wcs.ctype = ['RA---SIN', 'DEC--SIN']
w2.wcs.crpix = [256.0, 256.0]
w2.wcs.cdelt = [-0.05, 0.05]
w2.wcs.crval = [235.0, +23.7]
t1 = WCSWorld2PixelTransform(w1)
t2 = WCSWorld2PixelTransform(w2)
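    # In Matplotlib's transform algebra, ``a - b`` is shorthand for
    # ``a + b.inverted()`` (apply ``a``, then undo ``b``); composition is not
    # commutative, hence the asymmetric checks below.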
assert t1 - t2 == t1 + t2.inverted()
assert t1 - t2 != t2.inverted() + t1
assert t1 - t1 == IdentityTransform()
# We add Affine2D to catch the fact that in Matplotlib, having a composite
# transform can impose stricter requirements on the dimensionality.
def test_2d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS2D) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world, world_2)
def test_3d():
world = np.ones((10, 2))
w1 = WCSWorld2PixelTransform(WCS3D[:, 0, :]) + Affine2D()
pixel = w1.transform(world)
world_2 = w1.inverted().transform(pixel)
np.testing.assert_allclose(world[:, 0], world_2[:, 0])
np.testing.assert_allclose(world[:, 1], world_2[:, 1])
def test_coord_type_from_ctype(cube_wcs):
_, coord_meta = transform_coord_meta_from_wcs(cube_wcs, RectangularFrame,
slices=(50, 'y', 'x'))
axislabel_position = coord_meta['default_axislabel_position']
ticklabel_position = coord_meta['default_ticklabel_position']
ticks_position = coord_meta['default_ticks_position']
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ['l', 'r', 'b']
assert ticklabel_position == ['l', 'r', 'b']
assert ticks_position == ['l', 'r', 'b']
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['GLON-TAN', 'GLAT-TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cname = ['Longitude', '']
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.deg, u.deg]
assert coord_meta['wrap'] == [None, None]
assert coord_meta['default_axis_label'] == ['Longitude', 'pos.galactic.lat']
assert coord_meta['name'] == [('pos.galactic.lon', 'glon-tan', 'glon', 'Longitude'),
('pos.galactic.lat', 'glat-tan', 'glat')]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['HPLN-TAN', 'HPLT-TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.arcsec, u.arcsec]
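# Helioprojective longitude is conventionally wrapped at 180 deg.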
assert coord_meta['wrap'] == [180., None]
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame,
slices=('y', 'x'))
axislabel_position = coord_meta['default_axislabel_position']
ticklabel_position = coord_meta['default_ticklabel_position']
ticks_position = coord_meta['default_ticks_position']
# These axes should be swapped because of slices
assert axislabel_position == ['l', 'b']
assert ticklabel_position == ['l', 'b']
assert ticks_position == ['bltr', 'bltr']
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['HGLN-TAN', 'HGLT-TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.deg, u.deg]
assert coord_meta['wrap'] == [180., None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['CRLN-TAN', 'CRLT-TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.deg, u.deg]
assert coord_meta['wrap'] == [360., None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.hourangle, u.deg]
assert coord_meta['wrap'] == [None, None]
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['spam', 'spam']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame)
assert coord_meta['type'] == ['scalar', 'scalar']
assert coord_meta['format_unit'] == [u.one, u.one]
assert coord_meta['wrap'] == [None, None]
def test_coord_type_1d_1d_wcs():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ['WAVE']
wcs.wcs.crpix = [256.0]
wcs.wcs.cdelt = [-0.05]
wcs.wcs.crval = [50.0]
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame1D)
assert coord_meta['type'] == ['scalar']
assert coord_meta['format_unit'] == [u.m]
assert coord_meta['wrap'] == [None]
def test_coord_type_1d_2d_wcs_correlated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['GLON-TAN', 'GLAT-TAN']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame1D, slices=('x', 0))
assert coord_meta['type'] == ['longitude', 'latitude']
assert coord_meta['format_unit'] == [u.deg, u.deg]
assert coord_meta['wrap'] == [None, None]
assert coord_meta['visible'] == [True, True]
def test_coord_type_1d_2d_wcs_uncorrelated():
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['WAVE', 'UTC']
wcs.wcs.crpix = [256.0] * 2
wcs.wcs.cdelt = [-0.05] * 2
wcs.wcs.crval = [50.0] * 2
wcs.wcs.cunit = ['nm', 's']
wcs.wcs.set()
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame1D, slices=('x', 0))
assert coord_meta['type'] == ['scalar', 'scalar']
assert coord_meta['format_unit'] == [u.m, u.s]
assert coord_meta['wrap'] == [None, None]
assert coord_meta['visible'] == [True, False]
def test_coord_meta_4d(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(wcs_4d, RectangularFrame, slices=(0, 0, 'x', 'y'))
axislabel_position = coord_meta['default_axislabel_position']
ticklabel_position = coord_meta['default_ticklabel_position']
ticks_position = coord_meta['default_ticks_position']
assert axislabel_position == ['', '', 'b', 'l']
assert ticklabel_position == ['', '', 'b', 'l']
assert ticks_position == ['', '', 'bltr', 'bltr']
def test_coord_meta_4d_line_plot(wcs_4d):
_, coord_meta = transform_coord_meta_from_wcs(wcs_4d, RectangularFrame1D, slices=(0, 0, 0, 'x'))
axislabel_position = coord_meta['default_axislabel_position']
ticklabel_position = coord_meta['default_ticklabel_position']
ticks_position = coord_meta['default_ticks_position']
# These axes are swapped due to the pixel derivatives
assert axislabel_position == ['', '', 't', 'b']
assert ticklabel_position == ['', '', 't', 'b']
assert ticks_position == ['', '', 't', 'b']
@pytest.fixture
def sub_wcs(wcs_4d, wcs_slice):
return SlicedLowLevelWCS(wcs_4d, wcs_slice)
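# In the cases below, ``world_map`` lists the world-axis indices kept after
# applying the WCSAxes slices, and ``ndim`` is the world dimensionality of
# the resulting transform WCS.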
@pytest.mark.parametrize(("wcs_slice", "wcsaxes_slices", "world_map", "ndim"),
[
(np.s_[...], [0,0,'x','y'], (2, 3), 2),
(np.s_[...], [0,'x',0,'y'], (1, 2, 3), 3),
(np.s_[...], ['x',0,0,'y'], (0, 2, 3), 3),
(np.s_[...], ['x','y',0,0], (0, 1), 2),
(np.s_[:,:,0,:], [0, 'x', 'y'], (1, 2), 2),
(np.s_[:,:,0,:], ['x', 0, 'y'], (0, 1, 2), 3),
(np.s_[:,:,0,:], ['x', 'y', 0], (0, 1, 2), 3),
(np.s_[:,0,:,:], ['x', 'y', 0], (0, 1), 2),
])
def test_apply_slices(sub_wcs, wcs_slice, wcsaxes_slices, world_map, ndim):
transform_wcs, _, out_world_map = apply_slices(sub_wcs, wcsaxes_slices)
assert transform_wcs.world_n_dim == ndim
assert out_world_map == world_map
# parametrize here to pass to the fixture
@pytest.mark.parametrize("wcs_slice", [np.s_[:, :, 0, :]])
def test_sliced_ND_input(wcs_4d, sub_wcs, wcs_slice, plt_close):
slices_wcsaxes = [0, 'x', 'y']
for sub_wcs in (sub_wcs, SlicedLowLevelWCS(wcs_4d, wcs_slice)):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=FutureWarning)
_, coord_meta = transform_coord_meta_from_wcs(sub_wcs, RectangularFrame, slices=slices_wcsaxes)
assert all(len(x) == 3 for x in coord_meta.values())
assert coord_meta['name'] == ['time',
('custom:pos.helioprojective.lat', 'hplt-tan', 'hplt'),
('custom:pos.helioprojective.lon', 'hpln-tan', 'hpln')]
assert coord_meta['type'] == ['scalar', 'latitude', 'longitude']
assert coord_meta['wrap'] == [None, None, 180.0]
assert coord_meta['unit'] == [u.Unit("min"), u.Unit("deg"), u.Unit("deg")]
assert coord_meta['visible'] == [False, True, True]
assert coord_meta['format_unit'] == [u.Unit("min"), u.Unit("arcsec"), u.Unit("arcsec")]
assert coord_meta['default_axislabel_position'] == ['', 'b', 'l']
assert coord_meta['default_ticklabel_position'] == ['', 'b', 'l']
assert coord_meta['default_ticks_position'] == ['', 'bltr', 'bltr']
# Validate the axes initialize correctly
plt.clf()
plt.subplot(projection=sub_wcs, slices=slices_wcsaxes)
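# Minimal BaseLowLevelWCS subclass with more world axes (5) than pixel
# axes (2 by default), used to exercise WCSAxes with WCS API objects.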
class LowLevelWCS5D(BaseLowLevelWCS):
pixel_dim = 2
@property
def pixel_n_dim(self):
return self.pixel_dim
@property
def world_n_dim(self):
return 5
@property
def world_axis_physical_types(self):
return ['em.freq', 'time', 'pos.eq.ra', 'pos.eq.dec', 'phys.polarization.stokes']
@property
def world_axis_units(self):
return ['Hz', 'day', 'deg', 'deg', '']
@property
def world_axis_names(self):
return ['Frequency', '', 'RA', 'DEC', '']
def pixel_to_world_values(self, *pixel_arrays):
pixel_arrays = (list(pixel_arrays) * 3)[:-1] # make list have 5 elements
return [np.asarray(pix) * scale for pix, scale in zip(pixel_arrays, [10, 0.2, 0.4, 0.39, 2])]
def world_to_pixel_values(self, *world_arrays):
world_arrays = world_arrays[:2] # make list have 2 elements
return [np.asarray(world) / scale for world, scale in zip(world_arrays, [10, 0.2])]
@property
def world_axis_object_components(self):
return [('freq', 0, 'value'),
('time', 0, 'mjd'),
('celestial', 0, 'spherical.lon.degree'),
('celestial', 1, 'spherical.lat.degree'),
('stokes', 0, 'value')]
@property
def world_axis_object_classes(self):
return {'celestial': (SkyCoord, (), {'unit': 'deg'}),
'time': (Time, (), {'format': 'mjd'}),
'freq': (Quantity, (), {'unit': 'Hz'}),
'stokes': (Quantity, (), {'unit': 'one'})}
def test_edge_axes():
# Check that axes on the edge of a spherical projection are shown properly
# (see https://github.com/astropy/astropy/issues/10441)
shape = [180, 360]
data = np.random.rand(*shape)
header = {'wcsaxes': 2,
'crpix1': 180.5, 'crpix2': 90.5,
'cdelt1': 1.0, 'cdelt2': 1.0,
'cunit1': 'deg', 'cunit2': 'deg',
'ctype1': 'CRLN-CAR', 'ctype2': 'CRLT-CAR',
'crval1': 0.0, 'crval2': 0.0,
'lonpole': 0.0, 'latpole': 90.0,
}
wcs = WCS(header)
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=wcs)
ax.imshow(data, origin='lower')
# By default the x- and y- axes should be drawn
lon = ax.coords[0]
lat = ax.coords[1]
fig.canvas.draw()
np.testing.assert_equal(lon.ticks.world['b'],
np.array([90.0, 180.0, 180.0, 270.0, 0.0]))
np.testing.assert_equal(lat.ticks.world['l'],
np.array([-90.0, -60.0, -30.0, 0.0, 30.0, 60.0, 90.0]))
def test_coord_meta_wcsapi():
wcs = LowLevelWCS5D()
wcs.pixel_dim = 5
_, coord_meta = transform_coord_meta_from_wcs(wcs, RectangularFrame, slices=[0, 0, 'x', 'y', 0])
assert coord_meta['name'] == [('em.freq', 'Frequency'), 'time', ('pos.eq.ra', 'RA'), ('pos.eq.dec', 'DEC'), 'phys.polarization.stokes']
assert coord_meta['type'] == ['scalar', 'scalar', 'longitude', 'latitude', 'scalar']
assert coord_meta['wrap'] == [None, None, None, None, None]
assert coord_meta['unit'] == [u.Unit("Hz"), u.Unit("d"), u.Unit("deg"), u.Unit("deg"), u.one]
assert coord_meta['visible'] == [True, True, True, True, True]
assert coord_meta['format_unit'] == [u.Unit("Hz"), u.Unit("d"), u.Unit("hourangle"), u.Unit("deg"), u.one]
assert coord_meta['default_axislabel_position'] == ['b', 'l', 't', 'r', '']
assert coord_meta['default_ticklabel_position'] == ['b', 'l', 't', 'r', '']
assert coord_meta['default_ticks_position'] == ['b', 'l', 't', 'r', '']
assert coord_meta['default_axis_label'] == ['Frequency', 'time', 'RA', 'DEC', 'phys.polarization.stokes']
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_wcsapi_5d_with_names(plt_close):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=LowLevelWCS5D())
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
return fig
|
01abd89ba092a0abe2840d538b146f300ab59108e2104d355964ae90b39cd732 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
from contextlib import nullcontext
from datetime import datetime
from packaging.version import Version
import pytest
import numpy as np
from numpy.testing import (
assert_allclose, assert_array_almost_equal, assert_array_almost_equal_nulp,
assert_array_equal)
from astropy import wcs
from astropy.wcs import _wcs # noqa
from astropy import units as u
from astropy.utils.data import (
get_pkg_data_filenames, get_pkg_data_contents, get_pkg_data_filename)
from astropy.utils.misc import NumpyRNGContext
from astropy.utils.exceptions import (
AstropyUserWarning, AstropyWarning, AstropyDeprecationWarning)
from astropy.tests.helper import assert_quantity_allclose
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.nddata import Cutout2D
_WCSLIB_VER = Version(_wcs.__version__)
# NOTE: User can choose to use system wcslib instead of bundled.
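# wcslib 7.1 and 7.2 emit a FITSFixedWarning when 'datfix' fills in a
# missing DATE-REF from MJD-REF; later versions do not, so tests use this
# helper to expect the warning only where applicable.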
def ctx_for_v71_dateref_warnings():
if _WCSLIB_VER >= Version('7.1') and _WCSLIB_VER < Version('7.3'):
ctx = pytest.warns(
wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set DATE-REF to '1858-11-17' from MJD-REF'\.")
else:
ctx = nullcontext()
return ctx
class TestMaps:
def setup(self):
# get the list of the hdr files that we want to test
self._file_list = list(get_pkg_data_filenames(
"data/maps", pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 28
assert len(self._file_list) == n_data_files, (
"test_maps has wrong number of data files: found {}, expected "
"{}".format(len(self._file_list), n_data_files))
def test_maps(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "maps", filename), encoding='binary')
# finally run the test.
wcsobj = wcs.WCS(header)
world = wcsobj.wcs_pix2world([[97, 97]], 1)
assert_array_almost_equal(world, [[285.0, -66.25]], decimal=1)
pix = wcsobj.wcs_world2pix([[285.0, -66.25]], 1)
assert_array_almost_equal(pix, [[97, 97]], decimal=0)
class TestSpectra:
def setup(self):
self._file_list = list(get_pkg_data_filenames("data/spectra",
pattern="*.hdr"))
def test_consistency(self):
# Check to see that we actually have the list we expect, so that we
# do not get in a situation where the list is empty or incomplete and
# the tests still seem to pass correctly.
# how many do we expect to see?
n_data_files = 6
assert len(self._file_list) == n_data_files, (
"test_spectra has wrong number of data files: found {}, expected "
"{}".format(len(self._file_list), n_data_files))
def test_spectra(self):
for filename in self._file_list:
# use the base name of the file, so we get more useful messages
# for failing tests.
filename = os.path.basename(filename)
# Now find the associated file in the installed wcs test directory.
header = get_pkg_data_contents(
os.path.join("data", "spectra", filename), encoding='binary')
# finally run the test.
if _WCSLIB_VER >= Version('7.4'):
ctx = pytest.warns(
wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set MJD-OBS to 53925\.853472 from DATE-OBS'\.") # noqa
else:
ctx = nullcontext()
with ctx:
all_wcs = wcs.find_all_wcs(header)
assert len(all_wcs) == 9
def test_fixes():
"""
From github issue #36
"""
header = get_pkg_data_contents('data/nonstandard_units.hdr', encoding='binary')
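# The nonstandard units can be translated (producing 'unitfix' warnings),
# but the header still yields an invalid transform, so both the warnings
# and the exception are expected.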
with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning) as w:
wcs.WCS(header, translate_units='dhs')
if Version('7.4') <= _WCSLIB_VER < Version('7.6'):
assert len(w) == 3
assert "'datfix' made the change 'Success'." in str(w.pop().message)
else:
assert len(w) == 2
first_wmsg = str(w[0].message)
assert 'unitfix' in first_wmsg and 'Hz' in first_wmsg and 'M/S' in first_wmsg
assert 'plane angle' in str(w[1].message) and 'm/s' in str(w[1].message)
# Ignore "PV2_2 = 0.209028857410973 invalid keyvalue" warning seen on Windows.
@pytest.mark.filterwarnings(r'ignore:PV2_2')
def test_outside_sky():
"""
From github issue #107
"""
header = get_pkg_data_contents(
'data/outside_sky.hdr', encoding='binary')
w = wcs.WCS(header)
assert np.all(np.isnan(w.wcs_pix2world([[100., 500.]], 0))) # outside sky
assert np.all(np.isnan(w.wcs_pix2world([[200., 200.]], 0))) # outside sky
assert not np.any(np.isnan(w.wcs_pix2world([[1000., 1000.]], 0)))
def test_pix2world():
"""
From github issue #1463
"""
# TODO: write this to test the expected output behavior of pix2world,
# currently this just makes sure it doesn't error out in unexpected ways
# (and compares `wcs.pc` and `result` values?)
filename = get_pkg_data_filename('data/sip2.fits')
with pytest.warns(wcs.FITSFixedWarning) as caught_warnings:
# this raises a warning that is unimportant for testing pix2world:
# FITSFixedWarning(u'The WCS transformation has more axes (2) than
# the image it is associated with (0)')
ww = wcs.WCS(filename)
# might as well monitor for changing behavior
if Version('7.4') <= _WCSLIB_VER < Version('7.6'):
assert len(caught_warnings) == 2
else:
assert len(caught_warnings) == 1
n = 3
pixels = (np.arange(n) * np.ones((2, n))).T
result = ww.wcs_pix2world(pixels, 0, ra_dec_order=True)
# Catch #2791
ww.wcs_pix2world(pixels[..., 0], pixels[..., 1], 0, ra_dec_order=True)
# assuming that the data of sip2.fits doesn't change
answer = np.array([[0.00024976, 0.00023018],
[0.00023043, -0.00024997]])
assert np.allclose(ww.wcs.pc, answer, atol=1.e-8)
answer = np.array([[202.39265216, 47.17756518],
[202.39335826, 47.17754619],
[202.39406436, 47.1775272]])
assert np.allclose(result, answer, atol=1.e-8, rtol=1.e-10)
def test_load_fits_path():
fits_name = get_pkg_data_filename('data/sip.fits')
with pytest.warns(wcs.FITSFixedWarning):
wcs.WCS(fits_name)
def test_dict_init():
"""
Test that WCS can be initialized with a dict-like object
"""
# Dictionary with no actual WCS, returns identity transform
with ctx_for_v71_dateref_warnings():
w = wcs.WCS({})
xp, yp = w.wcs_world2pix(41., 2., 1)
assert_array_almost_equal_nulp(xp, 41., 10)
assert_array_almost_equal_nulp(yp, 2., 10)
# Valid WCS
hdr = {
'CTYPE1': 'GLON-CAR',
'CTYPE2': 'GLAT-CAR',
'CUNIT1': 'deg',
'CUNIT2': 'deg',
'CRPIX1': 1,
'CRPIX2': 1,
'CRVAL1': 40.,
'CRVAL2': 0.,
'CDELT1': -0.1,
'CDELT2': 0.1
}
if _WCSLIB_VER >= Version('7.1'):
hdr['DATEREF'] = '1858-11-17'
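# For wcslib >= 7.4, 'datfix' derives MJDREF from the DATEREF keyword and
# warns about the change.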
if _WCSLIB_VER >= Version('7.4'):
ctx = pytest.warns(
wcs.wcs.FITSFixedWarning,
match=r"'datfix' made the change 'Set MJDREF to 0\.000000 from DATEREF'\.")
else:
ctx = nullcontext()
with ctx:
w = wcs.WCS(hdr)
xp, yp = w.wcs_world2pix(41., 2., 0)
assert_array_almost_equal_nulp(xp, -10., 10)
assert_array_almost_equal_nulp(yp, 20., 10)
def test_extra_kwarg():
"""
Issue #444
"""
w = wcs.WCS()
with NumpyRNGContext(123456789):
data = np.random.rand(100, 2)
with pytest.raises(TypeError):
w.wcs_pix2world(data, origin=1)
def test_3d_shapes():
"""
Issue #444
"""
w = wcs.WCS(naxis=3)
with NumpyRNGContext(123456789):
data = np.random.rand(100, 3)
result = w.wcs_pix2world(data, 1)
assert result.shape == (100, 3)
result = w.wcs_pix2world(
data[..., 0], data[..., 1], data[..., 2], 1)
assert len(result) == 3
def test_preserve_shape():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((2, 3, 4))
xw, yw = w.wcs_pix2world(x, y, 1)
assert xw.shape == (2, 3, 4)
assert yw.shape == (2, 3, 4)
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_broadcasting():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = 1
xp, yp = w.wcs_world2pix(x, y, 1)
assert xp.shape == (2, 3, 4)
assert yp.shape == (2, 3, 4)
def test_shape_mismatch():
w = wcs.WCS(naxis=2)
x = np.random.random((2, 3, 4))
y = np.random.random((3, 2, 4))
with pytest.raises(ValueError) as exc:
xw, yw = w.wcs_pix2world(x, y, 1)
assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other"
with pytest.raises(ValueError) as exc:
xp, yp = w.wcs_world2pix(x, y, 1)
assert exc.value.args[0] == "Coordinate arrays are not broadcastable to each other"
# There are some ambiguities that need to be worked around when
# naxis == 1
w = wcs.WCS(naxis=1)
x = np.random.random((42, 1))
xw = w.wcs_pix2world(x, 1)
assert xw.shape == (42, 1)
x = np.random.random((42,))
xw, = w.wcs_pix2world(x, 1)
assert xw.shape == (42,)
def test_invalid_shape():
# Issue #1395
w = wcs.WCS(naxis=2)
xy = np.random.random((2, 3))
with pytest.raises(ValueError) as exc:
w.wcs_pix2world(xy, 1)
assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'
xy = np.random.random((2, 1))
with pytest.raises(ValueError) as exc:
w.wcs_pix2world(xy, 1)
assert exc.value.args[0] == 'When providing two arguments, the array must be of shape (N, 2)'
def test_warning_about_defunct_keywords():
header = get_pkg_data_contents('data/defunct_keywords.hdr', encoding='binary')
if Version('7.4') <= _WCSLIB_VER < Version('7.6'):
n_warn = 5
else:
n_warn = 4
# Make sure the warnings come out every time...
for _ in range(2):
with pytest.warns(wcs.FITSFixedWarning) as w:
wcs.WCS(header)
assert len(w) == n_warn
# 7.4 adds a fifth warning "'datfix' made the change 'Success'."
for item in w[:4]:
assert 'PCi_ja' in str(item.message)
def test_warning_about_defunct_keywords_exception():
header = get_pkg_data_contents('data/defunct_keywords.hdr', encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
wcs.WCS(header)
def test_to_header_string():
hdrstr = (
"WCSAXES = 2 / Number of coordinate axes ",
"CRPIX1 = 0.0 / Pixel coordinate of reference point ",
"CRPIX2 = 0.0 / Pixel coordinate of reference point ",
"CDELT1 = 1.0 / Coordinate increment at reference point ",
"CDELT2 = 1.0 / Coordinate increment at reference point ",
"CRVAL1 = 0.0 / Coordinate value at reference point ",
"CRVAL2 = 0.0 / Coordinate value at reference point ",
"LATPOLE = 90.0 / [deg] Native latitude of celestial pole ",
)
if _WCSLIB_VER >= Version('7.3'):
hdrstr += (
"MJDREF = 0.0 / [d] MJD of fiducial time ",
)
elif _WCSLIB_VER >= Version('7.1'):
hdrstr += (
"DATEREF = '1858-11-17' / ISO-8601 fiducial time ",
"MJDREFI = 0.0 / [d] MJD of fiducial time, integer part ",
"MJDREFF = 0.0 / [d] MJD of fiducial time, fractional part "
)
hdrstr += ("END", )
header_string = ''.join(hdrstr)
w = wcs.WCS()
h0 = fits.Header.fromstring(w.to_header_string().strip())
if 'COMMENT' in h0:
del h0['COMMENT']
if '' in h0:
del h0['']
h1 = fits.Header.fromstring(header_string.strip())
assert dict(h0) == dict(h1)
def test_to_fits():
if _WCSLIB_VER < Version('7.1'):
nrec = 8
elif _WCSLIB_VER < Version('7.3'):
nrec = 11
else:
nrec = 9
w = wcs.WCS()
header_string = w.to_header()
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert header_string == wfits[0].header[-nrec:]
def test_to_header_warning():
fits_name = get_pkg_data_filename('data/sip.fits')
with pytest.warns(wcs.FITSFixedWarning):
x = wcs.WCS(fits_name)
with pytest.warns(AstropyWarning, match='A_ORDER') as w:
x.to_header()
assert len(w) == 1
def test_no_comments_in_header():
w = wcs.WCS()
header = w.to_header()
assert w.wcs.alt not in header
assert 'COMMENT' + w.wcs.alt.strip() not in header
assert 'COMMENT' not in header
wkey = 'P'
header = w.to_header(key=wkey)
assert wkey not in header
assert 'COMMENT' not in header
assert 'COMMENT' + w.wcs.alt.strip() not in header
def test_find_all_wcs_crash():
"""
Causes a double free without a recent fix in wcslib_wrap.C
"""
with open(get_pkg_data_filename("data/too_many_pv.hdr")) as fd:
header = fd.read()
# We have to set fix=False here, because one of the fixing tasks is to
# remove redundant SCAMP distortion parameters when SIP distortion
# parameters are also present.
with pytest.raises(wcs.InvalidTransformError), pytest.warns(wcs.FITSFixedWarning):
wcs.find_all_wcs(header, fix=False)
# NOTE: Warning bubbles up from C layer during wcs.validate() and
# is hard to catch, so we just ignore it.
@pytest.mark.filterwarnings("ignore")
def test_validate():
results = wcs.validate(get_pkg_data_filename("data/validate.fits"))
results_txt = sorted(set([x.strip() for x in repr(results).splitlines()]))
if _WCSLIB_VER >= Version('7.6'):
filename = 'data/validate.7.6.txt'
elif _WCSLIB_VER >= Version('7.4'):
filename = 'data/validate.7.4.txt'
elif _WCSLIB_VER >= Version('6.0'):
filename = 'data/validate.6.txt'
elif _WCSLIB_VER >= Version('5.13'):
filename = 'data/validate.5.13.txt'
elif _WCSLIB_VER >= Version('5.0'):
filename = 'data/validate.5.0.txt'
else:
filename = 'data/validate.txt'
with open(get_pkg_data_filename(filename), "r") as fd:
lines = fd.readlines()
assert sorted(set([x.strip() for x in lines])) == results_txt
def test_validate_with_2_wcses():
# From Issue #2053
with pytest.warns(AstropyUserWarning):
results = wcs.validate(get_pkg_data_filename("data/2wcses.hdr"))
assert "WCS key 'A':" in str(results)
def test_crpix_maps_to_crval():
twcs = wcs.WCS(naxis=2)
twcs.wcs.crval = [251.29, 57.58]
twcs.wcs.cdelt = [1, 1]
twcs.wcs.crpix = [507, 507]
twcs.wcs.pc = np.array([[7.7e-6, 3.3e-5], [3.7e-5, -6.8e-6]])
twcs._naxis = [1014, 1014]
twcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
twcs.sip = wcs.Sip(a, b, None, None, twcs.wcs.crpix)
twcs.wcs.set()
pscale = np.sqrt(wcs.utils.proj_plane_pixel_area(twcs))
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.wcs_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
# test that CRPIX maps to CRVAL:
assert_allclose(
twcs.all_pix2world(*twcs.wcs.crpix, 1), twcs.wcs.crval,
rtol=0.0, atol=1e-6 * pscale
)
def test_all_world2pix(fname=None, ext=0,
tolerance=1.0e-4, origin=0,
random_npts=25000,
adaptive=False, maxiter=20,
detect_divergence=True):
"""Test all_world2pix, iterative inverse of all_pix2world"""
# Open test FITS file:
if fname is None:
fname = get_pkg_data_filename('data/j94f05bgq_flt.fits')
ext = ('SCI', 1)
if not os.path.isfile(fname):
raise OSError(f"Input file '{fname:s}' to 'test_all_world2pix' not found.")
h = fits.open(fname)
w = wcs.WCS(h[ext].header, h)
h.close()
del h
crpix = w.wcs.crpix
ncoord = crpix.shape[0]
# Assume that CRPIX is at the center of the image and that the image has
# a power-of-2 number of pixels along each axis. Only use the central
# 1/64 for this testing purpose:
naxesi_l = list((7. / 16 * crpix).astype(int))
naxesi_u = list((9. / 16 * crpix).astype(int))
# Generate integer indices of pixels (image grid):
img_pix = np.dstack([i.flatten() for i in
np.meshgrid(*map(range, naxesi_l, naxesi_u))])[0]
# Generate random data (in image coordinates):
with NumpyRNGContext(123456789):
rnd_pix = np.random.rand(random_npts, ncoord)
# Scale random data to cover the central part of the image
mwidth = 2 * (crpix * 1. / 8)
rnd_pix = crpix - 0.5 * mwidth + (mwidth - 1) * rnd_pix
# Reference pixel coordinates in image coordinate system (CS):
test_pix = np.append(img_pix, rnd_pix, axis=0)
# Reference pixel coordinates in sky CS using forward transformation:
all_world = w.all_pix2world(test_pix, origin)
try:
runtime_begin = datetime.now()
# Apply the inverse iterative process to pixels in world coordinates
# to recover the pixel coordinates in image space.
all_pix = w.all_world2pix(
all_world, origin, tolerance=tolerance, adaptive=adaptive,
maxiter=maxiter, detect_divergence=detect_divergence)
runtime_end = datetime.now()
except wcs.wcs.NoConvergence as e:
runtime_end = datetime.now()
ndiv = 0
if e.divergent is not None:
ndiv = e.divergent.shape[0]
print(f"There are {ndiv} diverging solutions.")
print(f"Indices of diverging solutions:\n{e.divergent}")
print(f"Diverging solutions:\n{e.best_solution[e.divergent]}\n")
print("Mean radius of the diverging solutions: {}"
.format(np.mean(
np.linalg.norm(e.best_solution[e.divergent], axis=1))))
print("Mean accuracy of the diverging solutions: {}\n"
.format(np.mean(
np.linalg.norm(e.accuracy[e.divergent], axis=1))))
else:
print("There are no diverging solutions.")
nslow = 0
if e.slow_conv is not None:
nslow = e.slow_conv.shape[0]
print(f"There are {nslow} slowly converging solutions.")
print(f"Indices of slowly converging solutions:\n{e.slow_conv}")
print(f"Slowly converging solutions:\n{e.best_solution[e.slow_conv]}\n")
else:
print("There are no slowly converging solutions.\n")
print("There are {} converged solutions."
.format(e.best_solution.shape[0] - ndiv - nslow))
print(f"Best solutions (all points):\n{e.best_solution}")
print(f"Accuracy:\n{e.accuracy}\n")
print("\nFinished running 'test_all_world2pix' with errors.\n"
"ERROR: {}\nRun time: {}\n"
.format(e.args[0], runtime_end - runtime_begin))
raise e
# Compute differences between reference pixel coordinates and
# pixel coordinates (in image space) recovered from reference
# pixels in world coordinates:
errors = np.sqrt(np.sum(np.power(all_pix - test_pix, 2), axis=1))
meanerr = np.mean(errors)
maxerr = np.amax(errors)
print("\nFinished running 'test_all_world2pix'.\n"
"Mean error = {:e} (Max error = {:e})\n"
"Run time: {}\n"
.format(meanerr, maxerr, runtime_end - runtime_begin))
assert maxerr < 2.0 * tolerance
def test_scamp_sip_distortion_parameters():
"""
Test parsing of WCS parameters with redundant SIP and SCAMP distortion
parameters.
"""
header = get_pkg_data_contents('data/validate.fits', encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(header)
# Just check that this doesn't raise an exception.
w.all_pix2world(0, 0, 0)
def test_fixes2():
"""
From github issue #1854
"""
header = get_pkg_data_contents(
'data/nonstandard_units.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError):
wcs.WCS(header, fix=False)
def test_unit_normalization():
"""
From github issue #1918
"""
header = get_pkg_data_contents(
'data/unit.hdr', encoding='binary')
w = wcs.WCS(header)
assert w.wcs.cunit[2] == 'm/s'
def test_footprint_to_file(tmpdir):
"""
From github issue #1912
"""
# Arbitrary keywords from real data
hdr = {'CTYPE1': 'RA---ZPN', 'CRUNIT1': 'deg',
'CRPIX1': -3.3495999e+02, 'CRVAL1': 3.185790700000e+02,
'CTYPE2': 'DEC--ZPN', 'CRUNIT2': 'deg',
'CRPIX2': 3.0453999e+03, 'CRVAL2': 4.388538000000e+01,
'PV2_1': 1., 'PV2_3': 220., 'NAXIS1': 2048, 'NAXIS2': 1024}
w = wcs.WCS(hdr)
testfile = str(tmpdir.join('test.txt'))
w.footprint_to_file(testfile)
with open(testfile, 'r') as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == 'ICRS\n'
assert 'color=green' in lines[3]
w.footprint_to_file(testfile, coordsys='FK5', color='red')
with open(testfile, 'r') as f:
lines = f.readlines()
assert len(lines) == 4
assert lines[2] == 'FK5\n'
assert 'color=red' in lines[3]
with pytest.raises(ValueError):
w.footprint_to_file(testfile, coordsys='FOO')
del hdr['NAXIS1']
del hdr['NAXIS2']
w = wcs.WCS(hdr)
with pytest.warns(AstropyUserWarning):
w.footprint_to_file(testfile)
# Ignore the FITSFixedWarning that keyrecords following the END keyrecord
# were ignored, which comes from src/astropy_wcs.c. Only a blind catch like
# this seems to work when pytest warnings are turned into exceptions.
@pytest.mark.filterwarnings('ignore')
def test_validate_faulty_wcs():
"""
From github issue #2053
"""
h = fits.Header()
# Illegal WCS:
h['RADESYSA'] = 'ICRS'
h['PV2_1'] = 1.0
hdu = fits.PrimaryHDU([[0]], header=h)
hdulist = fits.HDUList([hdu])
# Check that this doesn't raise a NameError exception
wcs.validate(hdulist)
def test_error_message():
header = get_pkg_data_contents(
'data/invalid_header.hdr', encoding='binary')
with pytest.raises(wcs.InvalidTransformError):
# Both lines are in here, because 0.4 calls .set within WCS.__init__,
# whereas 0.3 and earlier did not.
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(header, _do_set=False)
w.all_pix2world([[536.0, 894.0]], 0)
def test_out_of_bounds():
# See #2107
header = get_pkg_data_contents('data/zpn-hole.hdr', encoding='binary')
w = wcs.WCS(header)
ra, dec = w.wcs_pix2world(110, 110, 0)
assert np.isnan(ra)
assert np.isnan(dec)
ra, dec = w.wcs_pix2world(0, 0, 0)
assert not np.isnan(ra)
assert not np.isnan(dec)
def test_calc_footprint_1():
fits = get_pkg_data_filename('data/sip.fits')
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array([[202.39314493, 47.17753352],
[202.71885939, 46.94630488],
[202.94631893, 47.15855022],
[202.72053428, 47.37893142]])
footprint = w.calc_footprint(axes=axes)
assert_allclose(footprint, ref)
def test_calc_footprint_2():
""" Test calc_footprint without distortion. """
fits = get_pkg_data_filename('data/sip.fits')
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(fits)
axes = (1000, 1051)
ref = np.array([[202.39265216, 47.17756518],
[202.7469062, 46.91483312],
[203.11487481, 47.14359319],
[202.76092671, 47.40745948]])
footprint = w.calc_footprint(axes=axes, undistort=False)
assert_allclose(footprint, ref)
def test_calc_footprint_3():
""" Test calc_footprint with corner of the pixel."""
w = wcs.WCS()
w.wcs.ctype = ["GLON-CAR", "GLAT-CAR"]
w.wcs.crpix = [1.5, 5.5]
w.wcs.cdelt = [-0.1, 0.1]
axes = (2, 10)
ref = np.array([[0.1, -0.5],
[0.1, 0.5],
[359.9, 0.5],
[359.9, -0.5]])
footprint = w.calc_footprint(axes=axes, undistort=False, center=False)
assert_allclose(footprint, ref)
def test_sip():
# See #2107
header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary')
w = wcs.WCS(header)
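# sip_pix2foc applies the forward SIP polynomial (pixel -> focal plane);
# sip_foc2pix applies the inverse polynomial to map back again.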
x0, y0 = w.sip_pix2foc(200, 200, 0)
assert_allclose(72, x0, 1e-3)
assert_allclose(72, y0, 1e-3)
x1, y1 = w.sip_foc2pix(x0, y0, 0)
assert_allclose(200, x1, 1e-3)
assert_allclose(200, y1, 1e-3)
def test_sub_3d_with_sip():
# See #10527
header = get_pkg_data_contents('data/irac_sip.hdr', encoding='binary')
header = fits.Header.fromstring(header)
header['NAXIS'] = 3
header.set('NAXIS3', 64, after=header.index('NAXIS2'))
w = wcs.WCS(header, naxis=2)
assert w.naxis == 2
def test_printwcs(capsys):
"""
Just make sure that it runs
"""
h = get_pkg_data_contents(
'data/spectra/orion-freq-1.hdr', encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(h)
w.printwcs()
captured = capsys.readouterr()
assert 'WCS Keywords' in captured.out
h = get_pkg_data_contents('data/3d_cd.hdr', encoding='binary')
w = wcs.WCS(h)
w.printwcs()
captured = capsys.readouterr()
assert 'WCS Keywords' in captured.out
def test_invalid_spherical():
header = """
SIMPLE = T / conforms to FITS standard
BITPIX = 8 / array data type
WCSAXES = 2 / no comment
CTYPE1 = 'RA---TAN' / TAN (gnomic) projection
CTYPE2 = 'DEC--TAN' / TAN (gnomic) projection
EQUINOX = 2000.0 / Equatorial coordinates definition (yr)
LONPOLE = 180.0 / no comment
LATPOLE = 0.0 / no comment
CRVAL1 = 16.0531567459 / RA of reference point
CRVAL2 = 23.1148929108 / DEC of reference point
CRPIX1 = 2129 / X reference pixel
CRPIX2 = 1417 / Y reference pixel
CUNIT1 = 'deg ' / X pixel scale units
CUNIT2 = 'deg ' / Y pixel scale units
CD1_1 = -0.00912247310646 / Transformation matrix
CD1_2 = -0.00250608809647 / no comment
CD2_1 = 0.00250608809647 / no comment
CD2_2 = -0.00912247310646 / no comment
IMAGEW = 4256 / Image width, in pixels.
IMAGEH = 2832 / Image height, in pixels.
"""
f = io.StringIO(header)
header = fits.Header.fromtextfile(f)
w = wcs.WCS(header)
x, y = w.wcs_world2pix(211, -26, 0)
assert np.isnan(x) and np.isnan(y)
def test_no_iteration():
# Regression test for #3066
w = wcs.WCS(naxis=2)
with pytest.raises(TypeError) as exc:
iter(w)
assert exc.value.args[0] == "'WCS' object is not iterable"
class NewWCS(wcs.WCS):
pass
w = NewWCS(naxis=2)
with pytest.raises(TypeError) as exc:
iter(w)
assert exc.value.args[0] == "'NewWCS' object is not iterable"
@pytest.mark.skipif('_wcs.__version__[0] < "5"',
reason="TPV only works with wcslib 5.x or later")
def test_sip_tpv_agreement():
sip_header = get_pkg_data_contents(
os.path.join("data", "siponly.hdr"), encoding='binary')
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w_sip = wcs.WCS(sip_header)
w_tpv = wcs.WCS(tpv_header)
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv.all_pix2world([w_tpv.wcs.crpix], 1))
w_sip2 = wcs.WCS(w_sip.to_header())
w_tpv2 = wcs.WCS(w_tpv.to_header())
assert_array_almost_equal(
w_sip.all_pix2world([w_sip.wcs.crpix], 1),
w_sip2.all_pix2world([w_sip.wcs.crpix], 1))
assert_array_almost_equal(
w_tpv.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_sip.wcs.crpix], 1))
assert_array_almost_equal(
w_sip2.all_pix2world([w_sip.wcs.crpix], 1),
w_tpv2.all_pix2world([w_tpv.wcs.crpix], 1))
@pytest.mark.skipif('_wcs.__version__[0] < "5"',
reason="TPV only works with wcslib 5.x or later")
def test_tpv_copy():
# See #3904
tpv_header = get_pkg_data_contents(
os.path.join("data", "tpvonly.hdr"), encoding='binary')
with pytest.warns(wcs.FITSFixedWarning):
w_tpv = wcs.WCS(tpv_header)
ra, dec = w_tpv.wcs_pix2world([0, 100, 200], [0, -100, 200], 0)
assert ra[0] != ra[1] and ra[1] != ra[2]
assert dec[0] != dec[1] and dec[1] != dec[2]
def test_hst_wcs():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
with fits.open(path) as hdulist:
# wcslib will complain about the distortion parameters if they
# weren't correctly deleted from the header
w = wcs.WCS(hdulist[1].header, hdulist)
# Check pixel scale and area
assert_quantity_allclose(
w.proj_plane_pixel_scales(), [1.38484378e-05, 1.39758488e-05] * u.deg)
assert_quantity_allclose(
w.proj_plane_pixel_area(), 1.93085492e-10 * (u.deg * u.deg))
# Exercise the main transformation functions, mainly just for
# coverage
w.p4_pix2foc([0, 100, 200], [0, -100, 200], 0)
w.det2im([0, 100, 200], [0, -100, 200], 0)
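# Assigning each distortion attribute to itself exercises the property
# getters and setters.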
w.cpdis1 = w.cpdis1
w.cpdis2 = w.cpdis2
w.det2im1 = w.det2im1
w.det2im2 = w.det2im2
w.sip = w.sip
w.cpdis1.cdelt = w.cpdis1.cdelt
w.cpdis1.crpix = w.cpdis1.crpix
w.cpdis1.crval = w.cpdis1.crval
w.cpdis1.data = w.cpdis1.data
assert w.sip.a_order == 4
assert w.sip.b_order == 4
assert w.sip.ap_order == 0
assert w.sip.bp_order == 0
assert_array_equal(w.sip.crpix, [2048., 1024.])
wcs.WCS(hdulist[1].header, hdulist)
def test_cpdis_comments():
path = get_pkg_data_filename("data/dist_lookup.fits.gz")
f = fits.open(path)
w = wcs.WCS(f[1].header, f)
hdr = w.to_fits()[0].header
f.close()
wcscards = list(hdr['CPDIS*'].cards) + list(hdr['DP*'].cards)
wcsdict = {k: (v, c) for k, v, c in wcscards}
refcards = [
('CPDIS1', 'LOOKUP', 'Prior distortion function type'),
('DP1.EXTVER', 1.0, 'Version number of WCSDVARR extension'),
('DP1.NAXES', 2.0, 'Number of independent variables in CPDIS function'),
('DP1.AXIS.1', 1.0, 'Axis number of the 1st variable in a CPDIS function'),
('DP1.AXIS.2', 2.0, 'Axis number of the 2nd variable in a CPDIS function'),
('CPDIS2', 'LOOKUP', 'Prior distortion function type'),
('DP2.EXTVER', 2.0, 'Version number of WCSDVARR extension'),
('DP2.NAXES', 2.0, 'Number of independent variables in CPDIS function'),
('DP2.AXIS.1', 1.0, 'Axis number of the 1st variable in a CPDIS function'),
('DP2.AXIS.2', 2.0, 'Axis number of the 2nd variable in a CPDIS function'),
]
assert len(wcsdict) == len(refcards)
for k, v, c in refcards:
assert wcsdict[k] == (v, c)
def test_d2im_comments():
path = get_pkg_data_filename("data/ie6d07ujq_wcs.fits")
f = fits.open(path)
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header, f)
f.close()
wcscards = list(w.to_fits()[0].header['D2IM*'].cards)
wcsdict = {k: (v, c) for k, v, c in wcscards}
refcards = [
('D2IMDIS1', 'LOOKUP', 'Detector to image correction type'),
('D2IM1.EXTVER', 1.0, 'Version number of WCSDVARR extension'),
('D2IM1.NAXES', 2.0, 'Number of independent variables in D2IM function'),
('D2IM1.AXIS.1', 1.0, 'Axis number of the 1st variable in a D2IM function'),
('D2IM1.AXIS.2', 2.0, 'Axis number of the 2nd variable in a D2IM function'),
('D2IMDIS2', 'LOOKUP', 'Detector to image correction type'),
('D2IM2.EXTVER', 2.0, 'Version number of WCSDVARR extension'),
('D2IM2.NAXES', 2.0, 'Number of independent variables in D2IM function'),
('D2IM2.AXIS.1', 1.0, 'Axis number of the 1st variable in a D2IM function'),
('D2IM2.AXIS.2', 2.0, 'Axis number of the 2nd variable in a D2IM function'),
# ('D2IMERR1', 0.049, 'Maximum error of D2IM correction for axis 1'),
# ('D2IMERR2', 0.035, 'Maximum error of D2IM correction for axis 2'),
# ('D2IMEXT', 'iref$y7b1516hi_d2i.fits', ''),
]
assert len(wcsdict) == len(refcards)
for k, v, c in refcards:
assert wcsdict[k] == (v, c)
def test_sip_broken():
# This header caused wcslib to segfault because it has a SIP
# specification in a non-default keyword
hdr = get_pkg_data_contents("data/sip-broken.hdr")
wcs.WCS(hdr)
def test_no_truncate_crval():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header[f'CRVAL{ii + 1}'] == w.wcs.crval[ii]
assert header[f'CDELT{ii + 1}'] == w.wcs.cdelt[ii]
def test_no_truncate_crval_try2():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [50, 50, 2.12345678e11]
w.wcs.cdelt = [1e-5, 1e-5, 1e5]
w.wcs.ctype = ['RA---SIN', 'DEC--SIN', 'FREQ']
w.wcs.cunit = ['deg', 'deg', 'Hz']
w.wcs.crpix = [1, 1, 1]
w.wcs.restfrq = 2.34e11
w.wcs.set()
header = w.to_header()
for ii in range(3):
assert header[f'CRVAL{ii + 1}'] == w.wcs.crval[ii]
assert header[f'CDELT{ii + 1}'] == w.wcs.cdelt[ii]
def test_no_truncate_crval_p17():
"""
Regression test for https://github.com/astropy/astropy/issues/5162
"""
w = wcs.WCS(naxis=2)
w.wcs.crval = [50.1234567890123456, 50.1234567890123456]
w.wcs.cdelt = [1e-3, 1e-3]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.set()
header = w.to_header()
assert header['CRVAL1'] != w.wcs.crval[0]
assert header['CRVAL2'] != w.wcs.crval[1]
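# WCSHDO_P17 makes wcslib write floating-point keyvalues with 17
# significant digits, enough to round-trip a double exactly.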
header = w.to_header(relax=wcs.WCSHDO_P17)
assert header['CRVAL1'] == w.wcs.crval[0]
assert header['CRVAL2'] == w.wcs.crval[1]
def test_no_truncate_using_compare():
"""
Regression test for https://github.com/astropy/astropy/issues/4612
This one uses WCS.wcs.compare and some slightly different values
"""
w = wcs.WCS(naxis=3)
w.wcs.crval = [2.409303333333E+02, 50, 2.12345678e11]
w.wcs.cdelt = [1e-3, 1e-3, 1e8]
w.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
w.wcs.set()
w2 = wcs.WCS(w.to_header())
w.wcs.compare(w2.wcs)
def test_passing_ImageHDU():
"""
Passing ImageHDU or PrimaryHDU and comparing it with
wcs initialized from header. For #4493.
"""
path = get_pkg_data_filename('data/validate.fits')
with fits.open(path) as hdulist:
with pytest.warns(wcs.FITSFixedWarning):
wcs_hdu = wcs.WCS(hdulist[0])
wcs_header = wcs.WCS(hdulist[0].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
wcs_hdu = wcs.WCS(hdulist[1])
wcs_header = wcs.WCS(hdulist[1].header)
assert wcs_hdu.wcs.compare(wcs_header.wcs)
def test_inconsistent_sip():
"""
Test for #4814
"""
hdr = get_pkg_data_contents("data/sip-broken.hdr")
ctx = ctx_for_v71_dateref_warnings()
with ctx:
w = wcs.WCS(hdr)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(relax=None)
# CTYPE should not include "-SIP" if relax is None
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
newhdr = w.to_header(relax=False)
assert 'A_0_2' not in newhdr
# CTYPE should not include "-SIP" if relax is False
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(key="C")
assert 'A_0_2' not in newhdr
# Test writing header with a different key
with ctx:
wnew = wcs.WCS(newhdr, key='C')
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
with pytest.warns(AstropyWarning):
newhdr = w.to_header(key=" ")
# Test writing a primary WCS to header
with ctx:
wnew = wcs.WCS(newhdr)
assert all(not ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
# Test that "-SIP" is kept into CTYPE if relax=True and
# "-SIP" was in the original header
newhdr = w.to_header(relax=True)
with ctx:
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
assert 'A_0_2' in newhdr
# Test that SIP coefficients are also written out.
assert wnew.sip is not None
# ######### broken header ###########
# Test that "-SIP" is added to CTYPE if relax=True and
# "-SIP" was not in the original header but SIP coefficients
# are present.
with ctx:
w = wcs.WCS(hdr)
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
newhdr = w.to_header(relax=True)
with ctx:
wnew = wcs.WCS(newhdr)
assert all(ctyp.endswith('-SIP') for ctyp in wnew.wcs.ctype)
def test_bounds_check():
"""Test for #4957"""
w = wcs.WCS(naxis=2)
w.wcs.ctype = ["RA---CAR", "DEC--CAR"]
w.wcs.cdelt = [10, 10]
w.wcs.crval = [-90, 90]
w.wcs.crpix = [1, 1]
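# Disable wcslib's bounds checking for both pix2world and world2pix so
# that out-of-range coordinates are transformed instead of set to NaN.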
w.wcs.bounds_check(False, False)
ra, dec = w.wcs_pix2world(300, 0, 0)
assert_allclose(ra, -180)
assert_allclose(dec, -30)
def test_naxis():
w = wcs.WCS(naxis=2)
w.wcs.crval = [1, 1]
w.wcs.cdelt = [0.1, 0.1]
w.wcs.crpix = [1, 1]
w._naxis = [1000, 500]
assert w.pixel_shape == (1000, 500)
assert w.array_shape == (500, 1000)
w.pixel_shape = (99, 59)
assert w._naxis == [99, 59]
w.array_shape = (45, 23)
assert w._naxis == [23, 45]
assert w.pixel_shape == (23, 45)
w.pixel_shape = None
assert w.pixel_bounds is None
def test_sip_with_altkey():
"""
Test that when creating a WCS object using a key, CTYPE with
that key is looked at and not the primary CTYPE.
fix for #5443.
"""
with fits.open(get_pkg_data_filename('data/sip.fits')) as f:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header)
# create a header with two WCSs.
h1 = w.to_header(relax=True, key='A')
h2 = w.to_header(relax=False)
h1['CTYPE1A'] = "RA---SIN-SIP"
h1['CTYPE2A'] = "DEC--SIN-SIP"
h1.update(h2)
with ctx_for_v71_dateref_warnings():
w = wcs.WCS(h1, key='A')
assert (w.wcs.ctype == np.array(['RA---SIN-SIP', 'DEC--SIN-SIP'])).all()
def test_to_fits_1():
"""
Test to_fits() with LookupTable distortion.
"""
fits_name = get_pkg_data_filename('data/dist.fits')
with pytest.warns(AstropyDeprecationWarning):
w = wcs.WCS(fits_name)
wfits = w.to_fits()
assert isinstance(wfits, fits.HDUList)
assert isinstance(wfits[0], fits.PrimaryHDU)
assert isinstance(wfits[1], fits.ImageHDU)
def test_keyedsip():
"""
Test sip reading with extra key.
"""
hdr_name = get_pkg_data_filename('data/sip-broken.hdr')
header = fits.Header.fromfile(hdr_name)
del header["CRPIX1"]
del header["CRPIX2"]
w = wcs.WCS(header=header, key="A")
assert isinstance(w.sip, wcs.Sip)
assert w.sip.crpix[0] == 2048
assert w.sip.crpix[1] == 1026
def test_zero_size_input():
with fits.open(get_pkg_data_filename('data/sip.fits')) as f:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(f[0].header)
inp = np.zeros((0, 2))
assert_array_equal(inp, w.all_pix2world(inp, 0))
assert_array_equal(inp, w.all_world2pix(inp, 0))
inp = [], [1]
result = w.all_pix2world([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
result = w.all_world2pix([], [1], 0)
assert_array_equal(inp[0], result[0])
assert_array_equal(inp[1], result[1])
def test_scalar_inputs():
"""
Issue #7845
"""
wcsobj = wcs.WCS(naxis=1)
result = wcsobj.all_pix2world(2, 1)
assert_array_equal(result, [np.array(2.)])
assert result[0].shape == ()
result = wcsobj.all_pix2world([2], 1)
assert_array_equal(result, [np.array([2.])])
assert result[0].shape == (1,)
# Ignore RuntimeWarning raised on s390.
@pytest.mark.filterwarnings('ignore:.*invalid value encountered in.*')
def test_footprint_contains():
"""
Test WCS.footprint_contains(skycoord)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
""" # noqa
header = fits.Header.fromstring(header.strip(), '\n')
test_wcs = wcs.WCS(header)
hasCoord = test_wcs.footprint_contains(SkyCoord(254, 2, unit='deg'))
assert hasCoord
hasCoord = test_wcs.footprint_contains(SkyCoord(240, 2, unit='deg'))
assert not hasCoord
hasCoord = test_wcs.footprint_contains(SkyCoord(24, 2, unit='deg'))
assert not hasCoord
def test_cunit():
# Initializing WCS
w1 = wcs.WCS(naxis=2)
w2 = wcs.WCS(naxis=2)
w3 = wcs.WCS(naxis=2)
w4 = wcs.WCS(naxis=2)
# Initializing the values of cunit
w1.wcs.cunit = ['deg', 'm/s']
w2.wcs.cunit = ['km/h', 'km/h']
w3.wcs.cunit = ['deg', 'm/s']
w4.wcs.cunit = ['deg', 'deg']
# Equality checking a cunit with itself
assert w1.wcs.cunit == w1.wcs.cunit
assert not w1.wcs.cunit != w1.wcs.cunit
# Equality checking of two different cunit objects having the same values
assert w1.wcs.cunit == w3.wcs.cunit
assert not w1.wcs.cunit != w3.wcs.cunit
# Equality checking of two different cunit objects having the same first
# unit but a different second unit (see #9154)
assert not w1.wcs.cunit == w4.wcs.cunit
assert w1.wcs.cunit != w4.wcs.cunit
# Inequality checking of two different cunit objects having different values
assert not w1.wcs.cunit == w2.wcs.cunit
assert w1.wcs.cunit != w2.wcs.cunit
# Inequality checking of cunit with a list of literals
assert not w1.wcs.cunit == [1, 2, 3]
assert w1.wcs.cunit != [1, 2, 3]
# Inequality checking with some characters
assert not w1.wcs.cunit == ['a', 'b', 'c']
assert w1.wcs.cunit != ['a', 'b', 'c']
# Comparison is not implemented; a TypeError will be raised
with pytest.raises(TypeError):
w1.wcs.cunit < w2.wcs.cunit
class TestWcsWithTime:
def setup(self):
if _WCSLIB_VER >= Version('7.1'):
fname = get_pkg_data_filename('data/header_with_time_wcslib71.fits')
else:
fname = get_pkg_data_filename('data/header_with_time.fits')
self.header = fits.Header.fromfile(fname)
with pytest.warns(wcs.FITSFixedWarning):
self.w = wcs.WCS(self.header, key='A')
def test_keywords2wcsprm(self):
"""Make sure Wcsprm is populated correctly from the header."""
ctype = [self.header[val] for val in self.header["CTYPE*"]]
crval = [self.header[val] for val in self.header["CRVAL*"]]
crpix = [self.header[val] for val in self.header["CRPIX*"]]
cdelt = [self.header[val] for val in self.header["CDELT*"]]
cunit = [self.header[val] for val in self.header["CUNIT*"]]
assert list(self.w.wcs.ctype) == ctype
time_axis_code = 4000 if _WCSLIB_VER >= Version('7.9') else 0
assert list(self.w.wcs.axis_types) == [2200, 2201, 3300, time_axis_code]
assert_allclose(self.w.wcs.crval, crval)
assert_allclose(self.w.wcs.crpix, crpix)
assert_allclose(self.w.wcs.cdelt, cdelt)
assert list(self.w.wcs.cunit) == cunit
naxis = self.w.naxis
assert naxis == 4
pc = np.zeros((naxis, naxis), dtype=np.float64)
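# Reconstruct the expected PC matrix; PC{i}_{j}A cards missing from the
# header default to the corresponding identity-matrix elements.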
for i in range(1, 5):
for j in range(1, 5):
if i == j:
pc[i-1, j-1] = self.header.get(f'PC{i}_{j}A', 1)
else:
pc[i-1, j-1] = self.header.get(f'PC{i}_{j}A', 0)
assert_allclose(self.w.wcs.pc, pc)
char_keys = ['timesys', 'trefpos', 'trefdir', 'plephem', 'timeunit',
'dateref', 'dateobs', 'datebeg', 'dateavg', 'dateend']
for key in char_keys:
assert getattr(self.w.wcs, key) == self.header.get(key, "")
num_keys = ['mjdref', 'mjdobs', 'mjdbeg', 'mjdend',
'jepoch', 'bepoch', 'tstart', 'tstop', 'xposure',
'timsyer', 'timrder', 'timedel', 'timepixr',
'timeoffs', 'telapse', 'czphs', 'cperi']
for key in num_keys:
if key.upper() == 'MJDREF':
hdrv = [self.header.get('MJDREFIA', np.nan),
self.header.get('MJDREFFA', np.nan)]
else:
hdrv = self.header.get(key, np.nan)
assert_allclose(getattr(self.w.wcs, key), hdrv)
def test_transforms(self):
assert_allclose(self.w.all_pix2world(*self.w.wcs.crpix, 1),
self.w.wcs.crval)
def test_invalid_coordinate_masking():
# Regression test for an issue which caused all coordinates to be set to NaN
# after a transformation rather than just the invalid ones as reported by
# WCSLIB. A specific example of this is that when considering an all-sky
# spectral cube with a spectral axis that is not correlated with the sky
# axes, if transforming pixel coordinates that did not fall 'in' the sky,
# the spectral world value was also masked even though that coordinate
# was valid.
w = wcs.WCS(naxis=3)
w.wcs.ctype = 'VELO_LSR', 'GLON-CAR', 'GLAT-CAR'
w.wcs.crval = -20, 0, 0
w.wcs.crpix = 1, 1441, 241
w.wcs.cdelt = 1.3, -0.125, 0.125
px = [-10, -10, 20]
py = [-10, 10, 20]
pz = [-10, 10, 20]
wx, wy, wz = w.wcs_pix2world(px, py, pz, 0)
# Before fixing this, wx used to return np.nan for the first element
assert_allclose(wx, [-33, -33, 6])
assert_allclose(wy, [np.nan, 178.75, 177.5])
assert_allclose(wz, [np.nan, -28.75, -27.5])
def test_no_pixel_area():
w = wcs.WCS(naxis=3)
# Pixel area cannot be computed
with pytest.raises(ValueError, match='Pixel area is defined only for 2D pixels'):
w.proj_plane_pixel_area()
# Pixel scales still possible
assert_quantity_allclose(w.proj_plane_pixel_scales(), 1)
def test_distortion_header(tmpdir):
"""
Test that plate distortion model is correctly described by `wcs.to_header()`
and preserved when creating a Cutout2D from the image, writing it to FITS,
and reading it back from the file.
"""
path = get_pkg_data_filename("data/dss.14.29.56-62.41.05.fits.gz")
cen = np.array((50, 50))
siz = np.array((20, 20))
with fits.open(path) as hdulist:
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdulist[0].header)
cut = Cutout2D(hdulist[0].data, position=cen, size=siz, wcs=w)
# This converts the DSS plate solution model with AMD[XY]n coefficients into a
# Template Polynomial Distortion model (TPD.FWD.n coefficients);
# not testing explicitly for the header keywords here.
if _WCSLIB_VER < Version("7.4"):
with pytest.warns(AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"):
w0 = wcs.WCS(w.to_header_string())
with pytest.warns(AstropyWarning, match="WCS contains a TPD distortion model in CQDIS"):
w1 = wcs.WCS(cut.wcs.to_header_string())
if _WCSLIB_VER >= Version("7.1"):
pytest.xfail("TPD coefficients incomplete with WCSLIB >= 7.1 < 7.4")
else:
w0 = wcs.WCS(w.to_header_string())
w1 = wcs.WCS(cut.wcs.to_header_string())
assert w.pixel_to_world(0, 0).separation(w0.pixel_to_world(0, 0)) < 1.e-3 * u.mas
assert w.pixel_to_world(*cen).separation(w0.pixel_to_world(*cen)) < 1.e-3 * u.mas
assert w.pixel_to_world(*cen).separation(w1.pixel_to_world(*(siz / 2))) < 1.e-3 * u.mas
cutfile = str(tmpdir.join('cutout.fits'))
fits.writeto(cutfile, cut.data, cut.wcs.to_header())
with fits.open(cutfile) as hdulist:
w2 = wcs.WCS(hdulist[0].header)
assert w.pixel_to_world(*cen).separation(w2.pixel_to_world(*(siz / 2))) < 1.e-3 * u.mas
def test_pixlist_wcs_colsel():
"""
Test selection of a specific pixel list WCS using ``colsel``. See #11412.
"""
hdr_file = get_pkg_data_filename('data/chandra-pixlist-wcs.hdr')
hdr = fits.Header.fromtextfile(hdr_file)
with pytest.warns(wcs.FITSFixedWarning):
w = wcs.WCS(hdr, keysel=['image', 'pixel'], colsel=[11, 12])
assert w.naxis == 2
assert list(w.wcs.ctype) == ['RA---TAN', 'DEC--TAN']
assert np.allclose(w.wcs.crval, [229.38051931869, -58.81108068885])
assert np.allclose(w.wcs.pc, [[1, 0], [0, 1]])
assert np.allclose(w.wcs.cdelt, [-0.00013666666666666, 0.00013666666666666])
assert np.allclose(w.wcs.lonpole, 180.)
|
7bd9c249bf8b92ef4e165cce7084bbfa3359148808563b88a083ad742de93016 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
from astropy.coordinates.attributes import (TimeAttribute,
QuantityAttribute,
EarthLocationAttribute)
__all__ = ['HADec']
doc_components = """
ha : `~astropy.coordinates.Angle`, optional, keyword-only
The Hour Angle for this object (``dec`` must also be given and
``representation`` must be None).
dec : `~astropy.coordinates.Angle`, optional, keyword-only
The Declination for this object (``ha`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
pm_ha_cosdec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in hour angle (including the ``cos(dec)`` factor) for
this object (``pm_dec`` must also be given).
pm_dec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in declination for this object (``pm_ha_cosdec`` must
also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object."""
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position and orientation of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame.
pressure : `~astropy.units.Quantity` ['pressure']
The atmospheric pressure as an `~astropy.units.Quantity` with pressure
units. This is necessary for performing refraction corrections.
Setting this to 0 (the default) will disable refraction calculations
when transforming to/from this frame.
temperature : `~astropy.units.Quantity` ['temperature']
The ground-level temperature as an `~astropy.units.Quantity` in
deg C. This is necessary for performing refraction corrections.
    relative_humidity : `~astropy.units.Quantity` ['dimensionless'] or number
        The relative humidity as a dimensionless quantity between 0 and 1.
This is necessary for performing refraction corrections.
obswl : `~astropy.units.Quantity` ['length']
The average wavelength of observations as an `~astropy.units.Quantity`
with length units. This is necessary for performing refraction
corrections.
Notes
-----
The refraction model is based on that implemented in ERFA, which is fast
but becomes inaccurate for altitudes below about 5 degrees. Near and below
altitudes of 0, it can even give meaningless answers, and in this case
transforming to HADec and back to another frame can give highly discrepant
results. For much better numerical stability, leave the ``pressure`` at
``0`` (the default), thereby disabling the refraction correction and
yielding "topocentric" equatorial coordinates.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class HADec(BaseCoordinateFrame):
"""
A coordinate or frame in the Hour Angle-Declination system (Equatorial
coordinates) with respect to the WGS84 ellipsoid. Hour Angle is oriented
with respect to upper culmination such that the hour angle is negative to
the East and positive to the West.
This frame is assumed to *include* refraction effects if the ``pressure``
frame attribute is non-zero.
The frame attributes are listed under **Other Parameters**, which are
necessary for transforming from HADec to some other system.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'ha', u.hourangle),
RepresentationMapping('lat', 'dec')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
obstime = TimeAttribute(default=None)
location = EarthLocationAttribute(default=None)
pressure = QuantityAttribute(default=0, unit=u.hPa)
temperature = QuantityAttribute(default=0, unit=u.deg_C)
relative_humidity = QuantityAttribute(default=0, unit=u.dimensionless_unscaled)
obswl = QuantityAttribute(default=1*u.micron, unit=u.micron)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.has_data:
self._set_data_lon_wrap_angle(self.data)
@staticmethod
def _set_data_lon_wrap_angle(data):
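        # Wrap the hour angle at 180 deg so that it runs between -12h and
        # +12h, matching the convention that HA is negative to the East and
        # positive to the West (see the class docstring).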
if hasattr(data, 'lon'):
data.lon.wrap_angle = 180. * u.deg
return data
def represent_as(self, base, s='base', in_frame_units=False):
"""
        Ensure the wrap angle is set for any spherical
        representations.
"""
data = super().represent_as(base, s, in_frame_units=in_frame_units)
self._set_data_lon_wrap_angle(data)
return data
# self-transform defined in cirs_observed_transforms.py
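

# Illustrative sketch (not part of the original module): constructing HADec
# frames. Leaving ``pressure`` at its default of 0 keeps coordinates
# topocentric (no refraction); the site and weather values below are
# arbitrary assumptions for demonstration only.
def _example_hadec():
    from astropy.time import Time
    from astropy.coordinates import EarthLocation
    site = EarthLocation.from_geodetic(lon=-70.4 * u.deg, lat=-24.6 * u.deg,
                                       height=2635 * u.m)
    t = Time('2021-02-03T08:00')
    # Topocentric HA/Dec: refraction disabled because pressure defaults to 0.
    topocentric = HADec(ha=-1 * u.hourangle, dec=-30 * u.deg,
                        obstime=t, location=site)
    # Refracted HA/Dec: supply the full set of weather attributes.
    refracted = HADec(ha=-1 * u.hourangle, dec=-30 * u.deg,
                      obstime=t, location=site, pressure=750 * u.hPa,
                      temperature=10 * u.deg_C, relative_humidity=0.2,
                      obswl=0.55 * u.micron)
    return topocentric, refracted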
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.time import Time
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
RepresentationMapping,
frame_transform_graph, base_doc)
from astropy.coordinates.transformations import AffineTransform
from astropy.coordinates.attributes import DifferentialAttribute
from .baseradec import BaseRADecFrame, doc_components as doc_components_radec
from .icrs import ICRS
from .galactic import Galactic
# For speed
J2000 = Time('J2000')
v_bary_Schoenrich2010 = r.CartesianDifferential([11.1, 12.24, 7.25]*u.km/u.s)
__all__ = ['LSR', 'GalacticLSR', 'LSRK', 'LSRD']
doc_footer_lsr = """
Other parameters
----------------
v_bary : `~astropy.coordinates.representation.CartesianDifferential`
The velocity of the solar system barycenter with respect to the LSR, in
Galactic cartesian velocity components.
"""
@format_doc(base_doc, components=doc_components_radec, footer=doc_footer_lsr)
class LSR(BaseRADecFrame):
r"""A coordinate or frame in the Local Standard of Rest (LSR).
This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
a velocity offset relative to the solar system barycenter to remove the
    peculiar motion of the sun relative to the LSR. Roughly, the LSR is the mean
    velocity of the stars in the solar neighborhood, though its precise
    definition depends on the study. As defined in Schönrich et al. (2010):
"The LSR is the rest frame at the location of the Sun of a star that would
be on a circular orbit in the gravitational potential one would obtain by
azimuthally averaging away non-axisymmetric features in the actual Galactic
potential." No such orbit truly exists, but it is still a commonly used
velocity frame.
We use default values from Schönrich et al. (2010) for the barycentric
velocity relative to the LSR, which is defined in Galactic (right-handed)
cartesian velocity components
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
values are customizable via the ``v_bary`` argument which specifies the
velocity of the solar system barycenter with respect to the LSR.
The frame attributes are listed under **Other Parameters**.
"""
# frame attributes:
v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010,
allowed_classes=[r.CartesianDifferential])
@frame_transform_graph.transform(AffineTransform, ICRS, LSR)
def icrs_to_lsr(icrs_coord, lsr_frame):
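    # An ``AffineTransform`` function returns a ``(matrix, offset)`` pair;
    # ``None`` for the matrix means the axes are left unchanged, so only the
    # barycentric velocity offset is applied.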
v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_coord)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, LSR, ICRS)
def lsr_to_icrs(lsr_coord, icrs_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_bary_icrs = v_bary_gal.transform_to(icrs_frame)
v_offset = v_bary_icrs.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-v_offset)
return None, offset
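

# Illustrative sketch (not part of the original module): overriding the
# default solar motion when constructing an LSR frame. The velocity below is
# an arbitrary assumption for demonstration only.
def _example_custom_v_bary():
    custom_v_bary = r.CartesianDifferential([10.0, 12.0, 7.0] * u.km / u.s)
    return LSR(v_bary=custom_v_bary)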
# ------------------------------------------------------------------------------
doc_components_gal = """
l : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic longitude for this object (``b`` must also be given and
``representation`` must be None).
b : `~astropy.coordinates.Angle`, optional, keyword-only
The Galactic latitude for this object (``l`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
(``representation`` must be None).
pm_l_cosb : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic longitude (including the ``cos(b)`` term)
for this object (``pm_b`` must also be given).
pm_b : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in Galactic latitude for this object (``pm_l_cosb``
must also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object.
"""
@format_doc(base_doc, components=doc_components_gal, footer=doc_footer_lsr)
class GalacticLSR(BaseCoordinateFrame):
r"""A coordinate or frame in the Local Standard of Rest (LSR), axis-aligned
to the `Galactic` frame.
    This coordinate frame is axis-aligned and co-spatial with `Galactic`, but
    has a velocity offset relative to the solar system barycenter to remove the
    peculiar motion of the sun relative to the LSR. Roughly, the LSR is the mean
    velocity of the stars in the solar neighborhood, though its precise
    definition depends on the study. As defined in Schönrich et al. (2010):
"The LSR is the rest frame at the location of the Sun of a star that would
be on a circular orbit in the gravitational potential one would obtain by
azimuthally averaging away non-axisymmetric features in the actual Galactic
potential." No such orbit truly exists, but it is still a commonly used
velocity frame.
We use default values from Schönrich et al. (2010) for the barycentric
velocity relative to the LSR, which is defined in Galactic (right-handed)
cartesian velocity components
:math:`(U, V, W) = (11.1, 12.24, 7.25)~{{\rm km}}~{{\rm s}}^{{-1}}`. These
values are customizable via the ``v_bary`` argument which specifies the
velocity of the solar system barycenter with respect to the LSR.
The frame attributes are listed under **Other Parameters**.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'l'),
RepresentationMapping('lat', 'b')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
# frame attributes:
v_bary = DifferentialAttribute(default=v_bary_Schoenrich2010)
@frame_transform_graph.transform(AffineTransform, Galactic, GalacticLSR)
def galactic_to_galacticlsr(galactic_coord, lsr_frame):
v_bary_gal = Galactic(lsr_frame.v_bary.to_cartesian())
v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=v_offset)
return None, offset
@frame_transform_graph.transform(AffineTransform, GalacticLSR, Galactic)
def galacticlsr_to_galactic(lsr_coord, galactic_frame):
v_bary_gal = Galactic(lsr_coord.v_bary.to_cartesian())
v_offset = v_bary_gal.data.represent_as(r.CartesianDifferential)
offset = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-v_offset)
return None, offset
# ------------------------------------------------------------------------------
# The LSRK velocity frame, defined as having a velocity of 20 km/s towards
# RA=270 Dec=30 (B1900) relative to the solar system Barycenter. This is defined
# in:
#
# Gordon 1975, Methods of Experimental Physics: Volume 12:
# Astrophysics, Part C: Radio Observations - Section 6.1.5.
class LSRK(BaseRADecFrame):
r"""
    A coordinate or frame in the Kinematic Local Standard of Rest (LSRK).
This frame is defined as having a velocity of 20 km/s towards RA=270 Dec=30
(B1900) relative to the solar system Barycenter. This is defined in:
Gordon 1975, Methods of Experimental Physics: Volume 12:
Astrophysics, Part C: Radio Observations - Section 6.1.5.
This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
a velocity offset relative to the solar system barycenter to remove the
peculiar motion of the sun relative to the LSRK.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# GORDON1975_V_BARY = 20*u.km/u.s
# GORDON1975_DIRECTION = FK4(ra=270*u.deg, dec=30*u.deg, equinox='B1900')
# V_OFFSET_LSRK = ((GORDON1975_V_BARY * GORDON1975_DIRECTION.transform_to(ICRS()).data)
# .represent_as(r.CartesianDifferential))
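# (Reproducing the snippet above additionally requires
# ``from astropy.coordinates import FK4``.)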
V_OFFSET_LSRK = r.CartesianDifferential([0.28999706839034606,
-17.317264789717928,
10.00141199546947]*u.km/u.s)
ICRS_LSRK_OFFSET = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=V_OFFSET_LSRK)
LSRK_ICRS_OFFSET = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-V_OFFSET_LSRK)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRK)
def icrs_to_lsrk(icrs_coord, lsr_frame):
return None, ICRS_LSRK_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRK, ICRS)
def lsrk_to_icrs(lsr_coord, icrs_frame):
return None, LSRK_ICRS_OFFSET
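

# Illustrative sketch (not part of the original module): expressing an ICRS
# coordinate with velocity data in LSRK. The input values are arbitrary
# assumptions for demonstration only.
def _example_icrs_to_lsrk():
    icrs = ICRS(ra=10 * u.deg, dec=20 * u.deg, distance=100 * u.pc,
                pm_ra_cosdec=0 * u.mas / u.yr, pm_dec=0 * u.mas / u.yr,
                radial_velocity=0 * u.km / u.s)
    # The LSRK radial velocity differs from ICRS by the projected solar motion.
    return icrs.transform_to(LSRK())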
# ------------------------------------------------------------------------------
# The LSRD velocity frame, defined as a velocity of U=9 km/s, V=12 km/s,
# and W=7 km/s in Galactic coordinates or 16.552945 km/s
# towards l=53.13 b=25.02. This is defined in:
#
# Delhaye 1965, Solar Motion and Velocity Distribution of
# Common Stars.
class LSRD(BaseRADecFrame):
r"""
    A coordinate or frame in the Dynamical Local Standard of Rest (LSRD).
This frame is defined as a velocity of U=9 km/s, V=12 km/s,
and W=7 km/s in Galactic coordinates or 16.552945 km/s
towards l=53.13 b=25.02. This is defined in:
Delhaye 1965, Solar Motion and Velocity Distribution of
Common Stars.
This coordinate frame is axis-aligned and co-spatial with `ICRS`, but has
a velocity offset relative to the solar system barycenter to remove the
peculiar motion of the sun relative to the LSRD.
"""
# NOTE: To avoid a performance penalty at import time, we hard-code the ICRS
# offsets here. The code to generate the offsets is provided for reproducibility.
# V_BARY_DELHAYE1965 = r.CartesianDifferential([9, 12, 7] * u.km/u.s)
# V_OFFSET_LSRD = (Galactic(V_BARY_DELHAYE1965.to_cartesian()).transform_to(ICRS()).data
# .represent_as(r.CartesianDifferential))
V_OFFSET_LSRD = r.CartesianDifferential([-0.6382306360182073,
-14.585424483191094,
7.8011572411006815]*u.km/u.s)
ICRS_LSRD_OFFSET = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=V_OFFSET_LSRD)
LSRD_ICRS_OFFSET = r.CartesianRepresentation([0, 0, 0]*u.au, differentials=-V_OFFSET_LSRD)
@frame_transform_graph.transform(AffineTransform, ICRS, LSRD)
def icrs_to_lsrd(icrs_coord, lsr_frame):
return None, ICRS_LSRD_OFFSET
@frame_transform_graph.transform(AffineTransform, LSRD, ICRS)
def lsrd_to_icrs(lsr_coord, icrs_frame):
return None, LSRD_ICRS_OFFSET
# ------------------------------------------------------------------------------
# Create loopback transformations
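# These allow, e.g., LSR->LSR conversions between frames that differ only in
# their ``v_bary`` attribute, by routing through the parent frame.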
frame_transform_graph._add_merged_transform(LSR, ICRS, LSR)
frame_transform_graph._add_merged_transform(GalacticLSR, Galactic, GalacticLSR)
import os
import pytest
import numpy as np
from urllib.error import HTTPError, URLError
from astropy.time import Time
from astropy import units as u
from astropy.constants import c
from astropy.coordinates.builtin_frames import GCRS, TETE
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.coordinates.representation import CartesianRepresentation, UnitSphericalRepresentation
from astropy.coordinates.solar_system import (get_body, get_moon, BODY_NAME_TO_KERNEL_SPEC,
_get_apparent_body_position, solar_system_ephemeris,
get_body_barycentric, get_body_barycentric_posvel)
from astropy.coordinates.funcs import get_sun
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import allclose as quantity_allclose
from astropy.utils.data import download_file, get_pkg_data_filename
from astropy.utils.compat.optional_deps import (HAS_JPLEPHEM, # noqa
HAS_SKYFIELD)
if HAS_SKYFIELD:
from skyfield.api import Loader, Topos
de432s_separation_tolerance_planets = 5*u.arcsec
de432s_separation_tolerance_moon = 5*u.arcsec
de432s_distance_tolerance = 20*u.km
skyfield_angular_separation_tolerance = 1*u.arcsec
skyfield_separation_tolerance = 10*u.km
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_SKYFIELD')
def test_positions_skyfield(tmpdir):
"""
Test positions against those generated by skyfield.
"""
load = Loader(tmpdir)
t = Time('1980-03-25 00:00')
location = None
# skyfield ephemeris
try:
planets = load('de421.bsp')
ts = load.timescale()
except OSError as e:
if os.environ.get('CI', False) and 'timed out' in str(e):
pytest.xfail('Timed out in CI')
else:
raise
mercury, jupiter, moon = planets['mercury'], planets['jupiter barycenter'], planets['moon']
earth = planets['earth']
skyfield_t = ts.from_astropy(t)
if location is not None:
earth = earth+Topos(latitude_degrees=location.lat.to_value(u.deg),
longitude_degrees=location.lon.to_value(u.deg),
elevation_m=location.height.to_value(u.m))
skyfield_mercury = earth.at(skyfield_t).observe(mercury).apparent()
skyfield_jupiter = earth.at(skyfield_t).observe(jupiter).apparent()
skyfield_moon = earth.at(skyfield_t).observe(moon).apparent()
if location is not None:
frame = TETE(obstime=t, location=location)
else:
frame = TETE(obstime=t)
ra, dec, dist = skyfield_mercury.radec(epoch='date')
skyfield_mercury = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
ra, dec, dist = skyfield_jupiter.radec(epoch='date')
skyfield_jupiter = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
ra, dec, dist = skyfield_moon.radec(epoch='date')
skyfield_moon = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
# planet positions w.r.t true equator and equinox
moon_astropy = get_moon(t, location, ephemeris='de430').transform_to(frame)
mercury_astropy = get_body('mercury', t, location, ephemeris='de430').transform_to(frame)
jupiter_astropy = get_body('jupiter', t, location, ephemeris='de430').transform_to(frame)
assert (moon_astropy.separation(skyfield_moon) <
skyfield_angular_separation_tolerance)
assert (moon_astropy.separation_3d(skyfield_moon) < skyfield_separation_tolerance)
assert (jupiter_astropy.separation(skyfield_jupiter) <
skyfield_angular_separation_tolerance)
assert (jupiter_astropy.separation_3d(skyfield_jupiter) <
skyfield_separation_tolerance)
assert (mercury_astropy.separation(skyfield_mercury) <
skyfield_angular_separation_tolerance)
assert (mercury_astropy.separation_3d(skyfield_mercury) <
skyfield_separation_tolerance)
planets.close()
class TestPositionsGeocentric:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup(self):
self.t = Time('1980-03-25 00:00')
self.apparent_frame = TETE(obstime=self.t)
# Results returned by JPL Horizons web interface
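        # Distances are given as light-travel time in minutes multiplied by
        # the speed of light ``c``.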
self.horizons = {
'mercury': SkyCoord(ra='22h41m47.78s', dec='-08d29m32.0s',
distance=c*6.323037*u.min, frame=self.apparent_frame),
'moon': SkyCoord(ra='07h32m02.62s', dec='+18d34m05.0s',
distance=c*0.021921*u.min, frame=self.apparent_frame),
'jupiter': SkyCoord(ra='10h17m12.82s', dec='+12d02m57.0s',
distance=c*37.694557*u.min, frame=self.apparent_frame),
'sun': SkyCoord(ra='00h16m31.00s', dec='+01d47m16.9s',
distance=c*8.294858*u.min, frame=self.apparent_frame)}
@pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'),
(('mercury', 7.*u.arcsec, 1000*u.km),
('jupiter', 78.*u.arcsec, 76000*u.km),
('moon', 20.*u.arcsec, 80*u.km),
('sun', 5.*u.arcsec, 11.*u.km)))
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c, for Jupiter and
Mercury, and that quoted in Meeus "Astronomical Algorithms" (1998) for the Moon.
"""
astropy = get_body(body, self.t, ephemeris='builtin')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('body', ('mercury', 'jupiter', 'sun'))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris='de432s')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_planets)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_de432s_moon(self):
astropy = get_moon(self.t, ephemeris='de432s')
horizons = self.horizons['moon']
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_moon)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
class TestPositionKittPeak:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup(self):
kitt_peak = EarthLocation.from_geodetic(lon=-111.6*u.deg,
lat=31.963333333333342*u.deg,
height=2120*u.m)
self.t = Time('2014-09-25T00:00', location=kitt_peak)
self.apparent_frame = TETE(obstime=self.t, location=kitt_peak)
# Results returned by JPL Horizons web interface
self.horizons = {
'mercury': SkyCoord(ra='13h38m58.50s', dec='-13d34m42.6s',
distance=c*7.699020*u.min, frame=self.apparent_frame),
'moon': SkyCoord(ra='12h33m12.85s', dec='-05d17m54.4s',
distance=c*0.022054*u.min, frame=self.apparent_frame),
'jupiter': SkyCoord(ra='09h09m55.55s', dec='+16d51m57.8s',
distance=c*49.244937*u.min, frame=self.apparent_frame)}
@pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'),
(('mercury', 7.*u.arcsec, 500*u.km),
('jupiter', 78.*u.arcsec, 82000*u.km)))
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c.
"""
# Add uncertainty in position of Earth
dist_tol = dist_tol + 1300 * u.km
astropy = get_body(body, self.t, ephemeris='builtin')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('body', ('mercury', 'jupiter'))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris='de432s')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_planets)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_de432s_moon(self):
astropy = get_moon(self.t, ephemeris='de432s')
horizons = self.horizons['moon']
# convert to true equator and equinox
astropy = astropy.transform_to(self.apparent_frame)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_moon)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('bodyname', ('mercury', 'jupiter'))
def test_custom_kernel_spec_body(self, bodyname):
"""
Checks that giving a kernel specifier instead of a body name works
"""
coord_by_name = get_body(bodyname, self.t, ephemeris='de432s')
kspec = BODY_NAME_TO_KERNEL_SPEC[bodyname]
coord_by_kspec = get_body(kspec, self.t, ephemeris='de432s')
assert_quantity_allclose(coord_by_name.ra, coord_by_kspec.ra)
assert_quantity_allclose(coord_by_name.dec, coord_by_kspec.dec)
assert_quantity_allclose(coord_by_name.distance, coord_by_kspec.distance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_horizons_consistency_with_precision():
"""
A test to compare at high precision against output of JPL horizons.
Tests ephemerides, and conversions from ICRS to GCRS to TETE. We are aiming for
better than 2 milli-arcsecond precision.
    We use the Moon since it is nearby and moves fast in the sky, so we are
    testing for parallax and proper handling of light deflection and aberration.
"""
# JPL Horizon values for 2020_04_06 00:00 to 23:00 in 1 hour steps
# JPL Horizons has a known offset (frame bias) of 51.02 mas in RA. We correct that here
ra_apparent_horizons = [
170.167332531, 170.560688674, 170.923834838, 171.271663481, 171.620188972, 171.985340827,
172.381766539, 172.821772139, 173.314502650, 173.865422398, 174.476108551, 175.144332386,
175.864375310, 176.627519827, 177.422655853, 178.236955730, 179.056584831, 179.867427392,
180.655815385, 181.409252074, 182.117113814, 182.771311578, 183.366872837, 183.902395443
] * u.deg + 51.02376467 * u.mas
dec_apparent_horizons = [
10.269112037, 10.058820647, 9.837152044, 9.603724551, 9.358956528, 9.104012390, 8.840674927,
8.571162442, 8.297917326, 8.023394488, 7.749873882, 7.479312991, 7.213246666, 6.952732614,
6.698336823, 6.450150213, 6.207828142, 5.970645962, 5.737565957, 5.507313851, 5.278462034,
5.049521497, 4.819038911, 4.585696512
] * u.deg
with solar_system_ephemeris.set('de430'):
loc = EarthLocation.from_geodetic(-67.787260*u.deg, -22.959748*u.deg, 5186*u.m)
times = Time('2020-04-06 00:00') + np.arange(0, 24, 1)*u.hour
astropy = get_body('moon', times, loc)
apparent_frame = TETE(obstime=times, location=loc)
astropy = astropy.transform_to(apparent_frame)
usrepr = UnitSphericalRepresentation(ra_apparent_horizons, dec_apparent_horizons)
horizons = apparent_frame.realize_frame(usrepr)
assert_quantity_allclose(astropy.separation(horizons), 0*u.mas, atol=1.5*u.mas)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('time', (Time('1960-01-12 00:00'),
Time('1980-03-25 00:00'),
Time('2010-10-13 00:00')))
def test_get_sun_consistency(time):
"""
Test that the sun from JPL and the builtin get_sun match
"""
sun_jpl_gcrs = get_body('sun', time, ephemeris='de432s')
builtin_get_sun = get_sun(time)
sep = builtin_get_sun.separation(sun_jpl_gcrs)
assert sep < 0.1*u.arcsec
def test_get_moon_nonscalar_regression():
"""
Test that the builtin ephemeris works with non-scalar times.
See Issue #5069.
"""
times = Time(["2015-08-28 03:30", "2015-09-05 10:30"])
# the following line will raise an Exception if the bug recurs.
get_moon(times, ephemeris='builtin')
def test_barycentric_pos_posvel_same():
# Check that the two routines give identical results.
ep1 = get_body_barycentric('earth', Time('2016-03-20T12:30:00'))
ep2, _ = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00'))
assert np.all(ep1.xyz == ep2.xyz)
def test_earth_barycentric_velocity_rough():
# Check that a time near the equinox gives roughly the right result.
ep, ev = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00'))
assert_quantity_allclose(ep.xyz, [-1., 0., 0.]*u.AU, atol=0.01*u.AU)
expected = u.Quantity([0.*u.one,
np.cos(23.5*u.deg),
np.sin(23.5*u.deg)]) * -30. * u.km / u.s
assert_quantity_allclose(ev.xyz, expected, atol=1.*u.km/u.s)
def test_earth_barycentric_velocity_multi_d():
# Might as well test it with a multidimensional array too.
t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.
ep, ev = get_body_barycentric_posvel('earth', t)
# note: assert_quantity_allclose doesn't like the shape mismatch.
# this is a problem with np.testing.assert_allclose.
assert quantity_allclose(ep.get_xyz(xyz_axis=-1),
[[-1., 0., 0.], [+1., 0., 0.]]*u.AU,
atol=0.06*u.AU)
expected = u.Quantity([0.*u.one,
np.cos(23.5*u.deg),
np.sin(23.5*u.deg)]) * ([[-30.], [30.]] * u.km / u.s)
assert quantity_allclose(ev.get_xyz(xyz_axis=-1), expected,
atol=2.*u.km/u.s)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize(('body', 'pos_tol', 'vel_tol'),
(('mercury', 1000.*u.km, 1.*u.km/u.s),
('jupiter', 100000.*u.km, 2.*u.km/u.s),
('earth', 10*u.km, 10*u.mm/u.s),
('moon', 18*u.km, 50*u.mm/u.s)))
def test_barycentric_velocity_consistency(body, pos_tol, vel_tol):
# Tolerances are about 1.5 times the rms listed for plan94 and epv00,
# except for Mercury (which nominally is 334 km rms), and the Moon
# (which nominally is 6 km rms).
t = Time('2016-03-20T12:30:00')
ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')
dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
# Might as well test it with a multidimensional array too.
t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.
ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')
dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('time', (Time('1960-01-12 00:00'),
Time('1980-03-25 00:00'),
Time('2010-10-13 00:00')))
def test_url_or_file_ephemeris(time):
# URL for ephemeris de432s used for testing:
url = 'http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp'
# Pass the ephemeris directly as a URL.
coord_by_url = get_body('earth', time, ephemeris=url)
# Translate the URL to the cached location on the filesystem.
# Since we just used the url above, it should already have been downloaded.
filepath = download_file(url, cache=True)
# Get the coordinates using the file path directly:
coord_by_filepath = get_body('earth', time, ephemeris=filepath)
# Using the URL or filepath should give exactly the same results:
assert_quantity_allclose(coord_by_url.ra, coord_by_filepath.ra)
assert_quantity_allclose(coord_by_url.dec, coord_by_filepath.dec)
assert_quantity_allclose(coord_by_url.distance, coord_by_filepath.distance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_url_ephemeris_wrong_input():
# Try loading a non-existing URL:
time = Time('1960-01-12 00:00')
with pytest.raises((HTTPError, URLError)):
get_body('earth', time, ephemeris=get_pkg_data_filename('path/to/nonexisting/file.bsp'))
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_file_ephemeris_wrong_input():
time = Time('1960-01-12 00:00')
# Try loading a non-existing file:
with pytest.raises(ValueError):
get_body('earth', time, ephemeris='/path/to/nonexisting/file.bsp')
# NOTE: This test currently leaves the file open (ResourceWarning).
# To fix this issue, an upstream fix is required in jplephem
# package.
# Try loading a file that does exist, but is not an ephemeris file:
with pytest.warns(ResourceWarning), pytest.raises(ValueError):
get_body('earth', time, ephemeris=__file__)
def test_regression_10271():
t = Time(58973.534052125986, format='mjd')
# GCRS position of ALMA at this time
obs_p = CartesianRepresentation(5724535.74068625, -1311071.58985697, -2492738.93017009, u.m)
geocentre = CartesianRepresentation(0, 0, 0, u.m)
icrs_sun_from_alma = _get_apparent_body_position('sun', t, 'builtin', obs_p)
icrs_sun_from_geocentre = _get_apparent_body_position('sun', t, 'builtin', geocentre)
difference = (icrs_sun_from_alma - icrs_sun_from_geocentre).norm()
assert_quantity_allclose(difference, 0.13046941*u.m, atol=1*u.mm)
import pytest
import numpy as np
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import EarthLocation, SkyCoord, Angle, Distance
from astropy.coordinates.sites import get_builtin_sites
from astropy.utils.data import get_pkg_data_filename
from astropy.constants import c as speed_of_light
from astropy.table import Table
@pytest.mark.parametrize('kind', ['heliocentric', 'barycentric'])
def test_basic(kind):
t0 = Time('2015-1-1')
loc = get_builtin_sites()['example_site']
sc = SkyCoord(0, 0, unit=u.deg, obstime=t0, location=loc)
rvc0 = sc.radial_velocity_correction(kind)
assert rvc0.shape == ()
assert rvc0.unit.is_equivalent(u.km/u.s)
scs = SkyCoord(0, 0, unit=u.deg, obstime=t0 + np.arange(10)*u.day,
location=loc)
rvcs = scs.radial_velocity_correction(kind)
assert rvcs.shape == (10,)
assert rvcs.unit.is_equivalent(u.km/u.s)
test_input_time = Time(2457244.5, format='jd')
# test_input_loc = EarthLocation.of_site('Cerro Paranal')
# to avoid the network hit we just copy here what that yields
test_input_loc = EarthLocation.from_geodetic(lon=-70.403*u.deg,
lat=-24.6252*u.deg,
height=2635*u.m)
def test_helio_iraf():
"""
Compare the heliocentric correction to the IRAF rvcorrect.
    The `generate_IRAF_input` function is provided to show how the comparison
    data were produced.
"""
# this is based on running IRAF with the output of `generate_IRAF_input` below
rvcorr_result = """
# RVCORRECT: Observatory parameters for European Southern Observatory: Paranal
# latitude = -24:37.5
# longitude = 70:24.2
# altitude = 2635
## HJD VOBS VHELIO VLSR VDIURNAL VLUNAR VANNUAL VSOLAR
2457244.50120 0.00 -10.36 -20.35 -0.034 -0.001 -10.325 -9.993
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.50278 0.00 -2.29 -11.75 0.115 0.004 -2.413 -9.459
2457244.50025 0.00 -14.20 -23.86 -0.115 -0.004 -14.085 -9.656
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.50317 0.00 -17.19 -17.44 0.078 0.001 -17.269 -0.253
2457244.50348 0.00 2.35 -6.21 0.192 0.006 2.156 -8.560
2457244.49959 0.00 2.13 -15.06 -0.078 -0.000 2.211 -17.194
2457244.49929 0.00 -17.41 -26.30 -0.192 -0.006 -17.214 -8.888
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.50186 0.00 -24.47 -22.16 -0.038 -0.004 -24.433 2.313
2457244.50470 0.00 -11.11 -8.57 0.221 0.005 -11.332 2.534
2457244.50402 0.00 6.90 -0.38 0.259 0.008 6.629 -7.277
2457244.50051 0.00 11.53 -5.78 0.038 0.004 11.489 -17.311
2457244.49768 0.00 -1.84 -19.37 -0.221 -0.004 -1.612 -17.533
2457244.49835 0.00 -19.84 -27.56 -0.259 -0.008 -19.573 -7.721
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.50109 0.00 -27.69 -22.90 -0.096 -0.006 -27.584 4.785
2457244.50457 0.00 -17.00 -9.30 0.196 0.003 -17.201 7.704
2457244.50532 0.00 2.62 2.97 0.340 0.009 2.276 0.349
2457244.50277 0.00 16.42 4.67 0.228 0.009 16.178 -11.741
2457244.49884 0.00 13.98 -5.48 -0.056 0.002 14.039 -19.463
2457244.49649 0.00 -2.84 -19.84 -0.297 -0.007 -2.533 -17.000
2457244.49749 0.00 -21.38 -27.59 -0.315 -0.010 -21.056 -6.209
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.50025 0.00 -29.30 -22.47 -0.149 -0.008 -29.146 6.831
2457244.50398 0.00 -21.55 -9.88 0.146 0.001 -21.700 11.670
2457244.50577 0.00 -3.26 4.00 0.356 0.009 -3.623 7.263
2457244.50456 0.00 14.87 11.06 0.357 0.011 14.497 -3.808
2457244.50106 0.00 22.20 7.14 0.149 0.008 22.045 -15.058
2457244.49732 0.00 14.45 -5.44 -0.146 -0.001 14.600 -19.897
2457244.49554 0.00 -3.84 -19.33 -0.356 -0.008 -3.478 -15.491
2457244.49675 0.00 -21.97 -26.39 -0.357 -0.011 -21.598 -4.419
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49942 0.00 -29.36 -20.83 -0.193 -0.009 -29.157 8.527
2457244.50312 0.00 -24.26 -9.75 0.088 -0.001 -24.348 14.511
2457244.50552 0.00 -8.66 4.06 0.327 0.007 -8.996 12.721
2457244.50549 0.00 10.14 14.13 0.413 0.012 9.715 3.994
2457244.50305 0.00 23.35 15.76 0.306 0.011 23.031 -7.586
2457244.49933 0.00 24.78 8.18 0.056 0.006 24.721 -16.601
2457244.49609 0.00 13.77 -5.06 -0.221 -0.003 13.994 -18.832
2457244.49483 0.00 -4.53 -17.77 -0.394 -0.010 -4.131 -13.237
2457244.49615 0.00 -21.57 -24.00 -0.383 -0.012 -21.172 -2.432
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49907 0.00 -28.17 -17.30 -0.197 -0.009 -27.966 10.874
2457244.50285 0.00 -22.96 -5.96 0.090 -0.001 -23.048 16.995
2457244.50531 0.00 -7.00 8.16 0.335 0.007 -7.345 15.164
2457244.50528 0.00 12.23 18.47 0.423 0.012 11.795 6.238
2457244.50278 0.00 25.74 20.13 0.313 0.012 25.416 -5.607
2457244.49898 0.00 27.21 12.38 0.057 0.006 27.144 -14.829
2457244.49566 0.00 15.94 -1.17 -0.226 -0.003 16.172 -17.111
2457244.49437 0.00 -2.78 -14.17 -0.403 -0.010 -2.368 -11.387
2457244.49572 0.00 -20.20 -20.54 -0.392 -0.013 -19.799 -0.335
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49875 0.00 -25.73 -12.99 -0.193 -0.009 -25.525 12.734
2457244.50246 0.00 -20.63 -1.91 0.088 -0.001 -20.716 18.719
2457244.50485 0.00 -5.03 11.90 0.327 0.007 -5.365 16.928
2457244.50482 0.00 13.77 21.97 0.413 0.012 13.347 8.202
2457244.50238 0.00 26.98 23.60 0.306 0.011 26.663 -3.378
2457244.49867 0.00 28.41 16.02 0.056 0.005 28.353 -12.393
2457244.49542 0.00 17.40 2.78 -0.221 -0.003 17.625 -14.625
2457244.49416 0.00 -0.90 -9.93 -0.394 -0.010 -0.499 -9.029
2457244.49548 0.00 -17.94 -16.16 -0.383 -0.012 -17.541 1.776
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49894 0.00 -22.20 -7.14 -0.149 -0.008 -22.045 15.058
2457244.50268 0.00 -14.45 5.44 0.146 0.001 -14.600 19.897
2457244.50446 0.00 3.84 19.33 0.356 0.008 3.478 15.491
2457244.50325 0.00 21.97 26.39 0.357 0.011 21.598 4.419
2457244.49975 0.00 29.30 22.47 0.149 0.008 29.146 -6.831
2457244.49602 0.00 21.55 9.88 -0.146 -0.001 21.700 -11.670
2457244.49423 0.00 3.26 -4.00 -0.356 -0.009 3.623 -7.263
2457244.49544 0.00 -14.87 -11.06 -0.357 -0.011 -14.497 3.808
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49921 0.00 -17.43 -0.77 -0.096 -0.006 -17.333 16.664
2457244.50269 0.00 -6.75 12.83 0.196 0.003 -6.949 19.583
2457244.50344 0.00 12.88 25.10 0.340 0.009 12.527 12.227
2457244.50089 0.00 26.67 26.80 0.228 0.009 26.430 0.137
2457244.49696 0.00 24.24 16.65 -0.056 0.002 24.290 -7.584
2457244.49461 0.00 7.42 2.29 -0.297 -0.007 7.719 -5.122
2457244.49561 0.00 -11.13 -5.46 -0.315 -0.010 -10.805 5.670
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49949 0.00 -11.53 5.78 -0.038 -0.004 -11.489 17.311
2457244.50232 0.00 1.84 19.37 0.221 0.004 1.612 17.533
2457244.50165 0.00 19.84 27.56 0.259 0.008 19.573 7.721
2457244.49814 0.00 24.47 22.16 0.038 0.004 24.433 -2.313
2457244.49530 0.00 11.11 8.57 -0.221 -0.005 11.332 -2.534
2457244.49598 0.00 -6.90 0.38 -0.259 -0.008 -6.629 7.277
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.50041 0.00 -2.13 15.06 0.078 0.000 -2.211 17.194
2457244.50071 0.00 17.41 26.30 0.192 0.006 17.214 8.888
2457244.49683 0.00 17.19 17.44 -0.078 -0.001 17.269 0.253
2457244.49652 0.00 -2.35 6.21 -0.192 -0.006 -2.156 8.560
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49975 0.00 14.20 23.86 0.115 0.004 14.085 9.656
2457244.49722 0.00 2.29 11.75 -0.115 -0.004 2.413 9.459
2457244.49805 0.00 6.84 16.77 -0.034 -0.001 6.874 9.935
"""
vhs_iraf = []
for line in rvcorr_result.strip().split('\n'):
if not line.strip().startswith('#'):
vhs_iraf.append(float(line.split()[2]))
vhs_iraf = vhs_iraf*u.km/u.s
targets = SkyCoord(_get_test_input_radecs(), obstime=test_input_time,
location=test_input_loc)
vhs_astropy = targets.radial_velocity_correction('heliocentric')
assert_quantity_allclose(vhs_astropy, vhs_iraf, atol=150*u.m/u.s)
    return vhs_astropy, vhs_iraf  # for interactive examination
def generate_IRAF_input(writefn=None):
dt = test_input_time.utc.datetime
coos = _get_test_input_radecs()
lines = []
for ra, dec in zip(coos.ra, coos.dec):
rastr = Angle(ra).to_string(u.hour, sep=':')
decstr = Angle(dec).to_string(u.deg, sep=':')
msg = '{yr} {mo} {day} {uth}:{utmin} {ra} {dec}'
lines.append(msg.format(yr=dt.year, mo=dt.month, day=dt.day,
uth=dt.hour, utmin=dt.minute,
ra=rastr, dec=decstr))
if writefn:
with open(writefn, 'w') as f:
for l in lines:
f.write(l)
else:
for l in lines:
print(l)
print('Run IRAF as:\nastutil\nrvcorrect f=<filename> observatory=Paranal')
def _get_test_input_radecs():
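    # Build an approximately uniform all-sky grid: 15 declination rings, each
    # with a number of RA samples proportional to cos(dec), so the points
    # have roughly constant surface density.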
ras = []
decs = []
for dec in np.linspace(-85, 85, 15):
nra = int(np.round(10*np.cos(dec*u.deg)).value)
ras1 = np.linspace(-180, 180-1e-6, nra)
ras.extend(ras1)
decs.extend([dec]*len(ras1))
return SkyCoord(ra=ras, dec=decs, unit=u.deg)
def test_barycorr():
# this is the result of calling _get_barycorr_bvcs
barycorr_bvcs = u.Quantity([
-10335.93326096, -14198.47605491, -2237.60012494, -14198.47595363,
-17425.46512587, -17131.70901174, 2424.37095076, 2130.61519166,
-17425.46495779, -19872.50026998, -24442.37091097, -11017.08975893,
6978.0622355, 11547.93333743, -1877.34772637, -19872.50004258,
-21430.08240017, -27669.14280689, -16917.08506807, 2729.57222968,
16476.49569232, 13971.97171764, -2898.04250914, -21430.08212368,
-22028.51337105, -29301.92349394, -21481.13036199, -3147.44828909,
14959.50065514, 22232.91155425, 14412.11903105, -3921.56359768,
-22028.51305781, -21641.01479409, -29373.0512649, -24205.90521765,
-8557.34138828, 10250.50350732, 23417.2299926, 24781.98057941,
13706.17339044, -4627.70005932, -21641.01445812, -20284.92627505,
-28193.91696959, -22908.51624166, -6901.82132125, 12336.45758056,
25804.51614607, 27200.50029664, 15871.21385688, -2882.24738355,
-20284.9259314, -18020.92947805, -25752.96564978, -20585.81957567,
-4937.25573801, 13870.58916957, 27037.31568441, 28402.06636994,
17326.25977035, -1007.62209045, -18020.92914212, -14950.33284575,
-22223.74260839, -14402.94943965, 3930.73265119, 22037.68163353,
29311.09265126, 21490.30070307, 3156.62229843, -14950.33253252,
-11210.53846867, -17449.59867676, -6697.54090389, 12949.11642965,
26696.03999586, 24191.5164355, 7321.50355488, -11210.53819218,
-6968.89359681, -11538.76423011, 1886.51695238, 19881.66902396,
24451.54039956, 11026.26000765, -6968.89336945, -2415.20201758,
-2121.44599781, 17434.63406085, 17140.87871753, -2415.2018495,
2246.76923076, 14207.64513054, 2246.76933194, 6808.40787728],
u.m/u.s)
# this tries the *other* way of calling radial_velocity_correction relative
# to the IRAF tests
targets = _get_test_input_radecs()
bvcs_astropy = targets.radial_velocity_correction(obstime=test_input_time,
location=test_input_loc,
kind='barycentric')
assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=10*u.mm/u.s)
    return bvcs_astropy, barycorr_bvcs  # for interactive examination
def _get_barycorr_bvcs(coos, loc, injupyter=False):
"""
Gets the barycentric correction of the test data from the
http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site.
Requires the https://github.com/tronsgaard/barycorr python interface to that
site.
Provided to reproduce the test data above, but not required to actually run
the tests.
"""
import barycorr
from astropy.utils.console import ProgressBar
bvcs = []
for ra, dec in ProgressBar(list(zip(coos.ra.deg, coos.dec.deg)),
ipython_widget=injupyter):
res = barycorr.bvc(test_input_time.utc.jd, ra, dec,
lat=loc.geodetic[1].deg,
lon=loc.geodetic[0].deg,
elevation=loc.geodetic[2].to(u.m).value)
bvcs.append(res)
return bvcs*u.m/u.s
def test_rvcorr_multiple_obstimes_onskycoord():
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
arrtime = Time('2005-03-21 00:00:00') + np.linspace(-1, 1, 10)*u.day
sc = SkyCoord(1*u.deg, 2*u.deg, 100*u.kpc, obstime=arrtime, location=loc)
rvcbary_sc2 = sc.radial_velocity_correction(kind='barycentric')
assert len(rvcbary_sc2) == 10
    # check the mode with multiple obstimes and multiple coordinates
sc = SkyCoord(([1]*10)*u.deg, 2*u.deg, 100*u.kpc,
obstime=arrtime, location=loc)
rvcbary_sc3 = sc.radial_velocity_correction(kind='barycentric')
assert len(rvcbary_sc3) == 10
def test_invalid_argument_combos():
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time('2005-03-21 00:00:00')
timel = Time('2005-03-21 00:00:00', location=loc)
scwattrs = SkyCoord(1*u.deg, 2*u.deg, obstime=time, location=loc)
scwoattrs = SkyCoord(1*u.deg, 2*u.deg)
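    # With frame attributes already on the SkyCoord no arguments are needed,
    # but supplying them again (or an obstime that carries its own location)
    # is ambiguous; without attributes, both obstime and location are
    # required.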
scwattrs.radial_velocity_correction()
with pytest.raises(ValueError):
scwattrs.radial_velocity_correction(obstime=time, location=loc)
with pytest.raises(TypeError):
scwoattrs.radial_velocity_correction(obstime=time)
scwoattrs.radial_velocity_correction(obstime=time, location=loc)
with pytest.raises(TypeError):
scwoattrs.radial_velocity_correction()
with pytest.raises(ValueError):
scwattrs.radial_velocity_correction(timel)
def test_regression_9645():
sc = SkyCoord(10*u.deg, 20*u.deg, distance=5*u.pc, obstime=test_input_time,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr, radial_velocity=0*u.km/u.s)
sc_novel = SkyCoord(10*u.deg, 20*u.deg, distance=5*u.pc, obstime=test_input_time)
corr = sc.radial_velocity_correction(obstime=test_input_time, location=test_input_loc)
corr_novel = sc_novel.radial_velocity_correction(obstime=test_input_time, location=test_input_loc)
assert_quantity_allclose(corr, corr_novel)
def test_barycorr_withvels():
# this is the result of calling _get_barycorr_bvcs_withvels
barycorr_bvcs = u.Quantity(
[-10335.94926581, -14198.49117304, -2237.58656335,
-14198.49078575, -17425.47883864, -17131.72711182,
2424.38466675, 2130.62819093, -17425.47834604,
-19872.51254565, -24442.39064348, -11017.0964353,
6978.07515501, 11547.94831175, -1877.34560543,
-19872.51188308, -21430.0931411, -27669.15919972,
-16917.09482078, 2729.57757823, 16476.5087925,
13971.97955641, -2898.04451551, -21430.09220144,
-22028.52224227, -29301.93613248, -21481.14015151,
-3147.44852058, 14959.50849997, 22232.91906264,
14412.12044201, -3921.56783473, -22028.52088749,
-21641.02117064, -29373.05982792, -24205.91319258,
-8557.34473049, 10250.50560918, 23417.23357219,
24781.98113432, 13706.17025059, -4627.70468688,
-21641.01928189, -20284.92926795, -28193.92117514,
-22908.52127321, -6901.82512637, 12336.45557256,
25804.5137786, 27200.49576347, 15871.20847332,
-2882.25080211, -20284.92696256, -18020.92824383,
-25752.96528309, -20585.82211189, -4937.26088706,
13870.58217495, 27037.30698639, 28402.0571686,
17326.25314311, -1007.62313006, -18020.92552769,
-14950.32653444, -22223.73793506, -14402.95155047,
3930.72325162, 22037.66749783, 29311.07826101,
21490.29193529, 3156.62360741, -14950.32373745,
-11210.52665171, -17449.59068509, -6697.54579192,
12949.09948082, 26696.01956077, 24191.50403015,
7321.50684816, -11210.52389393, -6968.87610888,
-11538.7547047, 1886.50525065, 19881.64366561,
24451.52197666, 11026.26396455, -6968.87351156,
-2415.17899385, -2121.44598968, 17434.60465075,
17140.87204017, -2415.1771038, 2246.79688215,
14207.61339552, 2246.79790276, 6808.43888253], u.m/u.s)
coos = _get_test_input_radecvels()
bvcs_astropy = coos.radial_velocity_correction(obstime=test_input_time,
location=test_input_loc)
assert_quantity_allclose(bvcs_astropy, barycorr_bvcs, atol=10*u.mm/u.s)
    return bvcs_astropy, barycorr_bvcs  # for interactive examination
def _get_test_input_radecvels():
coos = _get_test_input_radecs()
ras = coos.ra
decs = coos.dec
pmra = np.linspace(-1000, 1000, coos.size)*u.mas/u.yr
pmdec = np.linspace(0, 1000, coos.size)*u.mas/u.yr
rvs = np.linspace(0, 100, coos.size)*u.km/u.s
distance = np.linspace(10, 100, coos.size)*u.pc
return SkyCoord(ras, decs, pm_ra_cosdec=pmra, pm_dec=pmdec,
radial_velocity=rvs, distance=distance,
obstime=test_input_time)
def _get_barycorr_bvcs_withvels(coos, loc, injupyter=False):
"""
Gets the barycentric correction of the test data from the
http://astroutils.astronomy.ohio-state.edu/exofast/barycorr.html web site.
Requires the https://github.com/tronsgaard/barycorr python interface to that
site.
Provided to reproduce the test data above, but not required to actually run
the tests.
"""
import barycorr
from astropy.utils.console import ProgressBar
bvcs = []
for coo in ProgressBar(coos, ipython_widget=injupyter):
res = barycorr.bvc(test_input_time.utc.jd,
coo.ra.deg, coo.dec.deg,
lat=loc.geodetic[1].deg,
lon=loc.geodetic[0].deg,
pmra=coo.pm_ra_cosdec.to_value(u.mas/u.yr),
pmdec=coo.pm_dec.to_value(u.mas/u.yr),
parallax=coo.distance.to_value(u.mas, equivalencies=u.parallax()),
rv=coo.radial_velocity.to_value(u.m/u.s),
epoch=test_input_time.utc.jd,
elevation=loc.geodetic[2].to(u.m).value)
bvcs.append(res)
return bvcs*u.m/u.s
def test_warning_no_obstime_on_skycoord():
c = SkyCoord(l=10*u.degree, b=45*u.degree,
pm_l_cosb=34*u.mas/u.yr, pm_b=-117*u.mas/u.yr,
distance=50*u.pc, frame='galactic')
with pytest.warns(Warning):
c.radial_velocity_correction('barycentric', test_input_time,
test_input_loc)
@pytest.mark.remote_data
def test_regression_10094():
"""
Make sure that when we include the proper motion and radial velocity of
a SkyCoord, our velocity corrections remain close to TEMPO2.
We check that tau Ceti is within 5mm/s
"""
# Wright & Eastman (2014) Table2
# Corrections for tau Ceti
wright_table = Table.read(
get_pkg_data_filename('coordinates/wright_eastmann_2014_tau_ceti.fits')
)
reduced_jds = wright_table['JD-2400000']
tempo2 = wright_table['TEMPO2']
barycorr = wright_table['BARYCORR']
# tau Ceti Hipparchos data
tauCet = SkyCoord('01 44 05.1275 -15 56 22.4006',
unit=(u.hour, u.deg),
pm_ra_cosdec=-1721.05*u.mas/u.yr,
pm_dec=854.16*u.mas/u.yr,
distance=Distance(parallax=273.96*u.mas),
radial_velocity=-16.597*u.km/u.s,
obstime=Time(48348.5625, format='mjd'))
# CTIO location as used in Wright & Eastmann
xyz = u.Quantity([1814985.3, -5213916.8, -3187738.1], u.m)
obs = EarthLocation(*xyz)
times = Time(2400000, reduced_jds, format='jd')
tempo2 = tempo2 * speed_of_light
barycorr = barycorr * speed_of_light
astropy = tauCet.radial_velocity_correction(location=obs, obstime=times)
assert_quantity_allclose(astropy, tempo2, atol=5*u.mm/u.s)
assert_quantity_allclose(astropy, barycorr, atol=5*u.mm/u.s)
"""Test helper functions for coordinates."""
import numpy as np
def skycoord_equal(sc1, sc2):
"""SkyCoord equality useful for testing
"""
if not sc1.is_equivalent_frame(sc2):
return False
if sc1.representation_type is not sc2.representation_type:
return False
if sc1.shape != sc2.shape:
return False # Maybe raise ValueError corresponding to future numpy behavior
eq = np.ones(shape=sc1.shape, dtype=bool)
for comp in sc1.data.components:
eq &= getattr(sc1.data, comp) == getattr(sc2.data, comp)
return np.all(eq)
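

# Minimal usage sketch (illustrative, not part of the original helper):
def _example_skycoord_equal():
    import astropy.units as u
    from astropy.coordinates import SkyCoord
    a = SkyCoord([1., 2.] * u.deg, [3., 4.] * u.deg)
    b = SkyCoord([1., 2.] * u.deg, [3., 4.] * u.deg)
    assert skycoord_equal(a, b)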
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy import units as u
from astropy.tests.helper import (assert_quantity_allclose as
assert_allclose_quantity)
from astropy.utils import isiterable
from astropy.utils.exceptions import DuplicateRepresentationWarning
from astropy.coordinates.angles import Longitude, Latitude, Angle
from astropy.coordinates.distances import Distance
from astropy.coordinates.matrix_utilities import rotation_matrix
from astropy.coordinates.representation import (
REPRESENTATION_CLASSES, DIFFERENTIAL_CLASSES, DUPLICATE_REPRESENTATIONS,
BaseRepresentation, SphericalRepresentation, UnitSphericalRepresentation,
SphericalCosLatDifferential, CartesianRepresentation, RadialRepresentation,
RadialDifferential, CylindricalRepresentation,
PhysicsSphericalRepresentation, CartesianDifferential,
SphericalDifferential, CylindricalDifferential,
PhysicsSphericalDifferential, UnitSphericalDifferential,
UnitSphericalCosLatDifferential)
# create matrices for use in testing ``.transform()`` methods
matrices = {
"rotation": rotation_matrix(-10, "z", u.deg),
"general": np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
}
# Preserve the original REPRESENTATION_CLASSES dict so that importing
# the test file doesn't add a persistent test subclass (LogDRepresentation)
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
def components_equal(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
for component in rep1.components:
result &= getattr(rep1, component) == getattr(rep2, component)
return result
def components_allclose(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
for component in rep1.components:
result &= u.allclose(getattr(rep1, component), getattr(rep2, component))
return result
def representation_equal(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
if getattr(rep1, '_differentials', False):
if rep1._differentials.keys() != rep2._differentials.keys():
return False
for key, diff1 in rep1._differentials.items():
result &= components_equal(diff1, rep2._differentials[key])
elif getattr(rep2, '_differentials', False):
return False
return result & components_equal(rep1, rep2)
def representation_equal_up_to_angular_type(rep1, rep2):
result = True
if type(rep1) is not type(rep2):
return False
if getattr(rep1, '_differentials', False):
if rep1._differentials.keys() != rep2._differentials.keys():
return False
for key, diff1 in rep1._differentials.items():
result &= components_allclose(diff1, rep2._differentials[key])
elif getattr(rep2, '_differentials', False):
return False
return result & components_allclose(rep1, rep2)
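

# Minimal usage sketch (illustrative, not part of the original tests):
# ``representation_equal`` demands exact component equality, while the
# ``_up_to_angular_type`` variant only requires ``u.allclose`` agreement.
def _example_representation_equal():
    a = CartesianRepresentation([1, 2, 3] * u.kpc)
    b = CartesianRepresentation([1, 2, 3] * u.kpc)
    assert representation_equal(a, b)
    assert representation_equal_up_to_angular_type(a, b)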
class TestRadialRepresentation:
def test_transform(self):
"""Test the ``transform`` method. Only multiplication matrices pass."""
rep = RadialRepresentation(distance=10 * u.kpc)
# a rotation matrix does not work
matrix = rotation_matrix(10 * u.deg)
with pytest.raises(ValueError, match="scaled identity matrix"):
rep.transform(matrix)
# only a scaled identity matrix
matrix = 3 * np.identity(3)
newrep = rep.transform(matrix)
assert newrep.distance == 30 * u.kpc
# let's also check with differentials
dif = RadialDifferential(d_distance=-3 * u.km / u.s)
rep = rep.with_differentials(dict(s=dif))
newrep = rep.transform(matrix)
assert newrep.distance == 30 * u.kpc
assert newrep.differentials["s"].d_distance == -9 * u.km / u.s
class TestSphericalRepresentation:
def test_name(self):
assert SphericalRepresentation.get_name() == 'spherical'
assert SphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
        with pytest.raises(TypeError):
            SphericalRepresentation()
def test_init_quantity(self):
s3 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
assert s3.lon == 8. * u.hourangle
assert s3.lat == 5. * u.deg
assert s3.distance == 10 * u.kpc
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
assert isinstance(s3.distance, Distance)
def test_init_no_mutate_input(self):
lon = -1 * u.hourangle
s = SphericalRepresentation(lon=lon, lat=-1 * u.deg, distance=1 * u.kpc, copy=True)
# The longitude component should be wrapped at 24 hours
assert_allclose_quantity(s.lon, 23 * u.hourangle)
# The input should not have been mutated by the constructor
assert_allclose_quantity(lon, -1 * u.hourangle)
def test_init_lonlat(self):
s2 = SphericalRepresentation(Longitude(8, u.hour),
Latitude(5, u.deg),
Distance(10, u.kpc))
assert s2.lon == 8. * u.hourangle
assert s2.lat == 5. * u.deg
assert s2.distance == 10. * u.kpc
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
assert isinstance(s2.distance, Distance)
# also test that wrap_angle is preserved
s3 = SphericalRepresentation(Longitude(-90, u.degree,
wrap_angle=180*u.degree),
Latitude(-45, u.degree),
Distance(1., u.Rsun))
assert s3.lon == -90. * u.degree
assert s3.lon.wrap_angle == 180 * u.degree
def test_init_subclass(self):
class Longitude180(Longitude):
_default_wrap_angle = 180*u.degree
s = SphericalRepresentation(Longitude180(-90, u.degree),
Latitude(-45, u.degree),
Distance(1., u.Rsun))
assert isinstance(s.lon, Longitude180)
assert s.lon == -90. * u.degree
assert s.lon.wrap_angle == 180 * u.degree
def test_init_array(self):
s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert_allclose(s1.distance.kpc, [1, 2])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
assert isinstance(s1.distance, Distance)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
distance = Distance([1, 2] * u.kpc)
s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
distance[:] = [8, 9] * u.Mpc
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
assert_allclose_quantity(distance, s1.distance)
def test_init_float32_array(self):
"""Regression test against #2983"""
lon = Longitude(np.float32([1., 2.]), u.degree)
lat = Latitude(np.float32([3., 4.]), u.degree)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
assert s1.lon.dtype == np.float32
assert s1.lat.dtype == np.float32
assert s1._values['lon'].dtype == np.float32
assert s1._values['lat'].dtype == np.float32
def test_reprobj(self):
s1 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
s2 = SphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8. * u.hourangle)
assert_allclose_quantity(s2.lat, 5. * u.deg)
assert_allclose_quantity(s2.distance, 10 * u.kpc)
s3 = SphericalRepresentation(s1)
assert representation_equal(s1, s3)
def test_broadcasting(self):
s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg,
distance=10 * u.kpc)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
assert_allclose_quantity(s1.distance, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = SphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc)
assert exc.value.args[0] == "Input parameters lon, lat, and distance cannot be broadcast"
def test_broadcasting_and_nocopy(self):
s1 = SphericalRepresentation(lon=[200] * u.deg,
lat=[0] * u.deg,
distance=[0] * u.kpc,
copy=False)
# With no copying, we should be able to modify the wrap angle of the longitude component
s1.lon.wrap_angle = 180 * u.deg
s2 = SphericalRepresentation(lon=[200] * u.deg,
lat=0 * u.deg,
distance=0 * u.kpc,
copy=False)
        # We should be able to modify the wrap angle of the longitude component
        # even if the other components need to be broadcast
s2.lon.wrap_angle = 180 * u.deg
def test_readonly(self):
s1 = SphericalRepresentation(lon=8 * u.hourangle,
lat=5 * u.deg,
distance=1. * u.kpc)
with pytest.raises(AttributeError):
s1.lon = 1. * u.deg
with pytest.raises(AttributeError):
s1.lat = 1. * u.deg
with pytest.raises(AttributeError):
s1.distance = 1. * u.kpc
def test_getitem_len_iterable(self):
s = SphericalRepresentation(lon=np.arange(10) * u.deg,
lat=-np.arange(10) * u.deg,
distance=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc)
assert len(s) == 10
assert isiterable(s)
def test_getitem_len_iterable_scalar(self):
s = SphericalRepresentation(lon=1 * u.deg,
lat=-2 * u.deg,
distance=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
with pytest.raises(TypeError):
len(s)
assert not isiterable(s)
def test_setitem(self):
s = SphericalRepresentation(lon=np.arange(5) * u.deg,
lat=-np.arange(5) * u.deg,
distance=1 * u.kpc)
s[:2] = SphericalRepresentation(lon=10.*u.deg, lat=2.*u.deg,
distance=5.*u.kpc)
assert_allclose_quantity(s.lon, [10, 10, 2, 3, 4] * u.deg)
assert_allclose_quantity(s.lat, [2, 2, -2, -3, -4] * u.deg)
assert_allclose_quantity(s.distance, [5, 5, 1, 1, 1] * u.kpc)
def test_negative_distance(self):
"""Only allowed if explicitly passed on."""
with pytest.raises(ValueError, match='allow_negative'):
SphericalRepresentation(10*u.deg, 20*u.deg, -10*u.m)
s1 = SphericalRepresentation(10*u.deg, 20*u.deg,
Distance(-10*u.m, allow_negative=True))
assert s1.distance == -10.*u.m
def test_nan_distance(self):
""" This is a regression test: calling represent_as() and passing in the
same class as the object shouldn't round-trip through cartesian.
"""
sph = SphericalRepresentation(1*u.deg, 2*u.deg, np.nan*u.kpc)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
dif = SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr,
3*u.km/u.s)
sph = sph.with_differentials(dif)
new_sph = sph.represent_as(SphericalRepresentation)
assert_allclose_quantity(new_sph.lon, sph.lon)
assert_allclose_quantity(new_sph.lat, sph.lat)
def test_raise_on_extra_arguments(self):
with pytest.raises(TypeError, match='got multiple values'):
SphericalRepresentation(1*u.deg, 2*u.deg, 1.*u.kpc, lat=10)
with pytest.raises(TypeError, match='unexpected keyword.*parrot'):
SphericalRepresentation(1*u.deg, 2*u.deg, 1.*u.kpc, parrot=10)
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = SphericalCosLatDifferential(4*u.mas/u.yr,5*u.mas/u.yr,6*u.km/u.s)
sph = SphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc,
differentials={'s': difs})
got = sph.represent_as(PhysicsSphericalRepresentation,
PhysicsSphericalDifferential)
assert np.may_share_memory(sph.lon, got.phi)
assert np.may_share_memory(sph.distance, got.r)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation, PhysicsSphericalDifferential)
# equal up to angular type
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation,
UnitSphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr, d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s)
s1 = SphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg,
distance=[5, 6] * u.kpc, differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
# check differentials. they shouldn't have changed.
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_distance, ds1.d_distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
# now with a non rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (s1.represent_as(CartesianRepresentation,
CartesianDifferential)
.transform(matrices["general"])
.represent_as(SphericalRepresentation,
SphericalDifferential))
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = SphericalDifferential(
d_lon=[1, 2] * u.mas / u.yr, d_lat=[3, 4] * u.mas / u.yr,
d_distance=[-5, 6] * u.km / u.s)
s1 = SphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg,
distance=[5, np.nan] * u.kpc,
differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = SphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
assert_allclose_quantity(s2.distance, s1.distance)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds2.d_distance, dexpected.d_distance)
# the 2nd component is NaN since the 2nd distance is NaN
# TODO! this will change when ``.transform`` skips Cartesian
assert_array_equal(np.isnan(ds2.d_lon), (False, True))
assert_array_equal(np.isnan(ds2.d_lat), (False, True))
assert_array_equal(np.isnan(ds2.d_distance), (False, True))
# now with a non rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (s1.represent_as(CartesianRepresentation,
CartesianDifferential)
.transform(matrices["general"])
.represent_as(SphericalRepresentation,
differential_class=SphericalDifferential))
dthruC = thruC.differentials["s"]
        # s3 should not propagate NaN into the angles (only into the distance).
assert_array_equal(np.isnan(s3.lon), (False, False))
assert_array_equal(np.isnan(s3.lat), (False, False))
assert_array_equal(np.isnan(s3.distance), (False, True))
        # ds3 does, because there are currently no shortcuts for differentials in the transform
assert_array_equal(np.isnan(ds3.d_lon), (False, True))
assert_array_equal(np.isnan(ds3.d_lat), (False, True))
assert_array_equal(np.isnan(ds3.d_distance), (False, True))
        # going through Cartesian should propagate NaN everywhere
assert_array_equal(np.isnan(thruC.lon), (False, True))
assert_array_equal(np.isnan(thruC.lat), (False, True))
assert_array_equal(np.isnan(thruC.distance), (False, True))
assert_array_equal(np.isnan(dthruC.d_lon), (False, True))
assert_array_equal(np.isnan(dthruC.d_lat), (False, True))
assert_array_equal(np.isnan(dthruC.d_distance), (False, True))
# test that they are close on the first value
assert_allclose_quantity(s3.lon[0], thruC.lon[0])
assert_allclose_quantity(s3.lat[0], thruC.lat[0])
assert_allclose_quantity(ds3.d_lon[0], dthruC.d_lon[0])
assert_allclose_quantity(ds3.d_lat[0], dthruC.d_lat[0])
class TestUnitSphericalRepresentation:
def test_name(self):
assert UnitSphericalRepresentation.get_name() == 'unitspherical'
assert UnitSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = UnitSphericalRepresentation()
def test_init_quantity(self):
s3 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
assert s3.lon == 8. * u.hourangle
assert s3.lat == 5. * u.deg
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
def test_init_lonlat(self):
s2 = UnitSphericalRepresentation(Longitude(8, u.hour),
Latitude(5, u.deg))
assert s2.lon == 8. * u.hourangle
assert s2.lat == 5. * u.deg
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
def test_init_array(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
def test_reprobj(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
s2 = UnitSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8. * u.hourangle)
assert_allclose_quantity(s2.lat, 5. * u.deg)
s3 = UnitSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = UnitSphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg)
assert exc.value.args[0] == "Input parameters lon and lat cannot be broadcast"
def test_readonly(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle,
lat=5 * u.deg)
with pytest.raises(AttributeError):
s1.lon = 1. * u.deg
with pytest.raises(AttributeError):
s1.lat = 1. * u.deg
def test_getitem(self):
s = UnitSphericalRepresentation(lon=np.arange(10) * u.deg,
lat=-np.arange(10) * u.deg)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
def test_getitem_scalar(self):
s = UnitSphericalRepresentation(lon=1 * u.deg,
lat=-2 * u.deg)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
# TODO! representation transformations with differentials cannot
# (currently) be implemented due to a mismatch between the UnitSpherical
# expected keys (e.g. "s") and that expected in the other class
# (here "s / m"). For more info, see PR #11467
# We leave the test code commented out for future use.
# diffs = UnitSphericalCosLatDifferential(4*u.mas/u.yr, 5*u.mas/u.yr,
# 6*u.km/u.s)
sph = UnitSphericalRepresentation(1*u.deg, 2*u.deg)
# , differentials={'s': diffs}
got = sph.represent_as(PhysicsSphericalRepresentation)
# , PhysicsSphericalDifferential)
assert np.may_share_memory(sph.lon, got.phi)
expected = BaseRepresentation.represent_as(
sph, PhysicsSphericalRepresentation) # PhysicsSphericalDifferential
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(SphericalRepresentation)
# , SphericalDifferential)
assert np.may_share_memory(sph.lon, got.lon)
assert np.may_share_memory(sph.lat, got.lat)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation) # , SphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
def test_transform(self):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = UnitSphericalDifferential(d_lon=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,)
s1 = UnitSphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg,
differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = UnitSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2)
assert_allclose_quantity(s2.lon, s1.lon + 10 * u.deg)
assert_allclose_quantity(s2.lat, s1.lat)
# compare differentials. they should be unchanged (ds1).
assert_allclose_quantity(ds2.d_lon, ds1.d_lon)
assert_allclose_quantity(ds2.d_lat, ds1.d_lat)
assert_allclose_quantity(ds2.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
assert not hasattr(ds2, "d_distance")
# now with a non rotation matrix
# note that the result will be a Spherical, not UnitSpherical
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (s1.represent_as(CartesianRepresentation,
CartesianDifferential)
.transform(matrices["general"])
.represent_as(SphericalRepresentation,
differential_class=SphericalDifferential))
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.lon, expected.lon)
assert_allclose_quantity(s3.lat, expected.lat)
assert_allclose_quantity(s3.distance, expected.distance)
assert_allclose_quantity(ds3.d_lon, dexpected.d_lon)
assert_allclose_quantity(ds3.d_lat, dexpected.d_lat)
assert_allclose_quantity(ds3.d_distance, dexpected.d_distance)
class TestPhysicsSphericalRepresentation:
def test_name(self):
assert PhysicsSphericalRepresentation.get_name() == 'physicsspherical'
assert PhysicsSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = PhysicsSphericalRepresentation()
def test_init_quantity(self):
s3 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc)
assert s3.phi == 8. * u.hourangle
assert s3.theta == 5. * u.deg
assert s3.r == 10 * u.kpc
assert isinstance(s3.phi, Angle)
assert isinstance(s3.theta, Angle)
assert isinstance(s3.r, Distance)
def test_init_phitheta(self):
s2 = PhysicsSphericalRepresentation(Angle(8, u.hour),
Angle(5, u.deg),
Distance(10, u.kpc))
assert s2.phi == 8. * u.hourangle
assert s2.theta == 5. * u.deg
assert s2.r == 10. * u.kpc
assert isinstance(s2.phi, Angle)
assert isinstance(s2.theta, Angle)
assert isinstance(s2.r, Distance)
def test_init_array(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=[1, 2] * u.kpc)
assert_allclose(s1.phi.degree, [120, 135])
assert_allclose(s1.theta.degree, [5, 6])
assert_allclose(s1.r.kpc, [1, 2])
assert isinstance(s1.phi, Angle)
assert isinstance(s1.theta, Angle)
assert isinstance(s1.r, Distance)
def test_init_array_nocopy(self):
phi = Angle([8, 9] * u.hourangle)
theta = Angle([5, 6] * u.deg)
r = Distance([1, 2] * u.kpc)
s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False)
phi[:] = [1, 2] * u.rad
theta[:] = [3, 4] * u.arcmin
r[:] = [8, 9] * u.Mpc
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(theta, s1.theta)
assert_allclose_quantity(r, s1.r)
def test_reprobj(self):
s1 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.phi, 8. * u.hourangle)
assert_allclose_quantity(s2.theta, 5. * u.deg)
assert_allclose_quantity(s2.r, 10 * u.kpc)
s3 = PhysicsSphericalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=10 * u.kpc)
assert_allclose_quantity(s1.phi, [120, 135] * u.degree)
assert_allclose_quantity(s1.theta, [5, 6] * u.degree)
assert_allclose_quantity(s1.r, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = PhysicsSphericalRepresentation(phi=[8, 9, 10] * u.hourangle,
theta=[5, 6] * u.deg,
r=[1, 2] * u.kpc)
assert exc.value.args[0] == "Input parameters phi, theta, and r cannot be broadcast"
def test_readonly(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=[10, 20] * u.kpc)
with pytest.raises(AttributeError):
s1.phi = 1. * u.deg
with pytest.raises(AttributeError):
s1.theta = 1. * u.deg
with pytest.raises(AttributeError):
s1.r = 1. * u.kpc
def test_getitem(self):
s = PhysicsSphericalRepresentation(phi=np.arange(10) * u.deg,
theta=np.arange(5, 15) * u.deg,
r=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg)
assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = PhysicsSphericalRepresentation(phi=1 * u.deg,
theta=2 * u.deg,
r=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_representation_shortcuts(self):
"""Test that shortcuts in ``represent_as`` don't fail."""
difs = PhysicsSphericalDifferential(4*u.mas/u.yr,5*u.mas/u.yr,6*u.km/u.s)
sph = PhysicsSphericalRepresentation(1*u.deg, 2*u.deg, 3*u.kpc,
differentials={'s': difs})
got = sph.represent_as(SphericalRepresentation,
SphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
assert np.may_share_memory(sph.r, got.distance)
expected = BaseRepresentation.represent_as(
sph, SphericalRepresentation, SphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
got = sph.represent_as(UnitSphericalRepresentation,
UnitSphericalDifferential)
assert np.may_share_memory(sph.phi, got.lon)
expected = BaseRepresentation.represent_as(
sph, UnitSphericalRepresentation, UnitSphericalDifferential)
assert representation_equal_up_to_angular_type(got, expected)
def test_initialize_with_nan(self):
# Regression test for gh-11558: initialization used to fail.
psr = PhysicsSphericalRepresentation([1., np.nan]*u.deg, [np.nan, 2.]*u.deg,
[3., np.nan]*u.m)
assert_array_equal(np.isnan(psr.phi), [False, True])
assert_array_equal(np.isnan(psr.theta), [True, False])
assert_array_equal(np.isnan(psr.r), [False, True])
def test_transform(self):
"""Test ``.transform()`` on rotation and general transform matrices."""
# set up representation
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr, d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg, theta=[3, 4] * u.deg, r=[5, 6] * u.kpc,
differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
# compare differentials. should be unchanged (ds1).
assert_allclose_quantity(ds2.d_phi, ds1.d_phi)
assert_allclose_quantity(ds2.d_theta, ds1.d_theta)
assert_allclose_quantity(ds2.d_r, ds1.d_r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
# now with a non rotation matrix
# transform representation & get comparison (thru CartesianRep)
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
expected = (s1.represent_as(CartesianRepresentation,
CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation,
PhysicsSphericalDifferential))
dexpected = expected.differentials["s"]
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.theta, expected.theta)
assert_allclose_quantity(s3.r, expected.r)
assert_allclose_quantity(ds3.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds3.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds3.d_r, dexpected.d_r)
def test_transform_with_NaN(self):
# all over again, but with a NaN in the distance
ds1 = PhysicsSphericalDifferential(
d_phi=[1, 2] * u.mas / u.yr, d_theta=[3, 4] * u.mas / u.yr,
d_r=[-5, 6] * u.km / u.s)
s1 = PhysicsSphericalRepresentation(
phi=[1, 2] * u.deg, theta=[3, 4] * u.deg, r=[5, np.nan] * u.kpc,
differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["rotation"])
ds2 = s2.differentials["s"]
dexpected = PhysicsSphericalDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["rotation"]), base=s2)
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.theta, s1.theta)
assert_allclose_quantity(s2.r, s1.r)
assert_allclose_quantity(ds2.d_phi, dexpected.d_phi)
assert_allclose_quantity(ds2.d_theta, dexpected.d_theta)
assert_allclose_quantity(ds2.d_r, dexpected.d_r)
# now with a non rotation matrix
s3 = s1.transform(matrices["general"])
ds3 = s3.differentials["s"]
thruC = (s1.represent_as(CartesianRepresentation,
CartesianDifferential)
.transform(matrices["general"])
.represent_as(PhysicsSphericalRepresentation,
PhysicsSphericalDifferential))
dthruC = thruC.differentials["s"]
        # s3 should not propagate NaN into the angles (only into r).
assert_array_equal(np.isnan(s3.phi), (False, False))
assert_array_equal(np.isnan(s3.theta), (False, False))
assert_array_equal(np.isnan(s3.r), (False, True))
        # ds3 does, because there are currently no shortcuts for differentials in the transform
assert_array_equal(np.isnan(ds3.d_phi), (False, True))
assert_array_equal(np.isnan(ds3.d_theta), (False, True))
assert_array_equal(np.isnan(ds3.d_r), (False, True))
        # going through Cartesian does propagate NaN everywhere
assert_array_equal(np.isnan(thruC.phi), (False, True))
assert_array_equal(np.isnan(thruC.theta), (False, True))
assert_array_equal(np.isnan(thruC.r), (False, True))
# so only test on the first value
assert_allclose_quantity(s3.phi[0], thruC.phi[0])
assert_allclose_quantity(s3.theta[0], thruC.theta[0])
assert_allclose_quantity(ds3.d_phi[0], dthruC.d_phi[0])
assert_allclose_quantity(ds3.d_theta[0], dthruC.d_theta[0])
class TestCartesianRepresentation:
def test_name(self):
assert CartesianRepresentation.get_name() == 'cartesian'
assert CartesianRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CartesianRepresentation()
def test_init_quantity(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_singleunit(self):
s1 = CartesianRepresentation(x=1, y=2, z=3, unit=u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc,
y=[2, 3, 4] * u.Mpc,
z=[3, 4, 5] * u.kpc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.Mpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, [1, 2, 3])
assert_allclose(s1.y.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_one_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.pc
assert s1.z.unit is u.pc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
r = np.arange(27.).reshape(3, 3, 3) * u.kpc
s2 = CartesianRepresentation(r, xyz_axis=0)
assert s2.shape == (3, 3)
assert s2.x.unit == u.kpc
assert np.all(s2.x == r[0])
assert np.all(s2.xyz == r)
assert np.all(s2.get_xyz(xyz_axis=0) == r)
s3 = CartesianRepresentation(r, xyz_axis=1)
assert s3.shape == (3, 3)
assert np.all(s3.x == r[:, 0])
assert np.all(s3.y == r[:, 1])
assert np.all(s3.z == r[:, 2])
assert np.all(s3.get_xyz(xyz_axis=1) == r)
s4 = CartesianRepresentation(r, xyz_axis=2)
assert s4.shape == (3, 3)
assert np.all(s4.x == r[:, :, 0])
assert np.all(s4.get_xyz(xyz_axis=2) == r)
s5 = CartesianRepresentation(r, unit=u.pc)
assert s5.x.unit == u.pc
assert np.all(s5.xyz == r)
s6 = CartesianRepresentation(r.value, unit=u.pc, xyz_axis=2)
assert s6.x.unit == u.pc
assert np.all(s6.get_xyz(xyz_axis=2).value == r.value)
def test_init_one_array_size_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc)
assert exc.value.args[0].startswith("too many values to unpack")
def test_init_xyz_but_more_than_one_array_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.pc,
z=[3, 4, 5] * u.pc, xyz_axis=0)
assert 'xyz_axis should only be set' in str(exc.value)
def test_init_one_array_yz_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc, y=[1, 2] * u.pc)
assert exc.value.args[0] == ("x, y, and z are required to instantiate "
"CartesianRepresentation")
def test_init_array_nocopy(self):
x = [8, 9, 10] * u.pc
y = [5, 6, 7] * u.Mpc
z = [2, 3, 4] * u.kpc
s1 = CartesianRepresentation(x=x, y=y, z=z, copy=False)
x[:] = [1, 2, 3] * u.kpc
y[:] = [9, 9, 8] * u.kpc
z[:] = [1, 2, 1] * u.kpc
assert_allclose_quantity(x, s1.x)
assert_allclose_quantity(y, s1.y)
assert_allclose_quantity(z, s1.z)
def test_xyz_is_view_if_possible(self):
xyz = np.arange(1., 10.).reshape(3, 3)
s1 = CartesianRepresentation(xyz, unit=u.kpc, copy=False)
s1_xyz = s1.xyz
assert s1_xyz.value[0, 0] == 1.
xyz[0, 0] = 0.
assert s1.x[0] == 0.
assert s1_xyz.value[0, 0] == 0.
        # A view is not possible here: we don't check whether separately
        # passed components come from the same underlying array
xyz = np.arange(1., 10.).reshape(3, 3)
s2 = CartesianRepresentation(*xyz, unit=u.kpc, copy=False)
s2_xyz = s2.xyz
assert s2_xyz.value[0, 0] == 1.
xyz[0, 0] = 0.
assert s2.x[0] == 0.
assert s2_xyz.value[0, 0] == 1.
def test_reprobj(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
s2 = CartesianRepresentation.from_representation(s1)
assert s2.x == 1 * u.kpc
assert s2.y == 2 * u.kpc
assert s2.z == 3 * u.kpc
s3 = CartesianRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=5 * u.kpc)
assert s1.x.unit == u.kpc
assert s1.y.unit == u.kpc
assert s1.z.unit == u.kpc
assert_allclose(s1.x.value, [1, 2])
assert_allclose(s1.y.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6, 7] * u.kpc)
assert exc.value.args[0] == "Input parameters x, y, and z cannot be broadcast"
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.x = 1. * u.kpc
with pytest.raises(AttributeError):
s1.y = 1. * u.kpc
with pytest.raises(AttributeError):
s1.z = 1. * u.kpc
def test_xyz(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert isinstance(s1.xyz, u.Quantity)
assert s1.xyz.unit is u.kpc
assert_allclose(s1.xyz.value, [1, 2, 3])
def test_unit_mismatch(self):
q_len = u.Quantity([1], u.km)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_nonlen, y=q_len, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_nonlen, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_len, z=q_nonlen)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
def test_unit_non_length(self):
s1 = CartesianRepresentation(x=1 * u.kg, y=2 * u.kg, z=3 * u.kg)
s2 = CartesianRepresentation(x=1 * u.km / u.s, y=2 * u.km / u.s, z=3 * u.km / u.s)
banana = u.def_unit('banana')
s3 = CartesianRepresentation(x=1 * banana, y=2 * banana, z=3 * banana)
def test_getitem(self):
s = CartesianRepresentation(x=np.arange(10) * u.m,
y=-np.arange(10) * u.m,
z=3 * u.km)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
def test_getitem_scalar(self):
s = CartesianRepresentation(x=1 * u.m,
y=-2 * u.m,
z=3 * u.km)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
ds1 = CartesianDifferential(d_x=[1, 2] * u.km / u.s,
d_y=[3, 4] * u.km / u.s,
d_z=[5, 6] * u.km / u.s)
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc,
z=[5, 6] * u.kpc, differentials=ds1)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrices["general"])
ds2 = s2.differentials["s"]
dexpected = CartesianDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrices["general"]), base=s2)
assert_allclose_quantity(ds2.d_x, dexpected.d_x)
assert_allclose_quantity(ds2.d_y, dexpected.d_y)
assert_allclose_quantity(ds2.d_z, dexpected.d_z)
        # also check against an explicit calculation, since we can:
        # each matrix row is dotted with (x, y, z)
assert_allclose(s2.x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(s2.y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(s2.z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert_allclose(ds2.d_x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(ds2.d_y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(ds2.d_z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert s2.x.unit is u.kpc
assert s2.y.unit is u.kpc
assert s2.z.unit is u.kpc
assert ds2.d_x.unit == u.km / u.s
assert ds2.d_y.unit == u.km / u.s
assert ds2.d_z.unit == u.km / u.s
class TestCylindricalRepresentation:
def test_name(self):
assert CylindricalRepresentation.get_name() == 'cylindrical'
assert CylindricalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CylindricalRepresentation()
def test_init_quantity(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
assert s1.rho.unit is u.kpc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, 1)
assert_allclose(s1.phi.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CylindricalRepresentation(rho=[1, 2, 3] * u.pc,
phi=[2, 3, 4] * u.deg,
z=[3, 4, 5] * u.kpc)
assert s1.rho.unit is u.pc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, [1, 2, 3])
assert_allclose(s1.phi.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_array_nocopy(self):
rho = [8, 9, 10] * u.pc
phi = [5, 6, 7] * u.deg
z = [2, 3, 4] * u.kpc
s1 = CylindricalRepresentation(rho=rho, phi=phi, z=z, copy=False)
rho[:] = [9, 2, 3] * u.kpc
phi[:] = [1, 2, 3] * u.arcmin
z[:] = [-2, 3, 8] * u.kpc
assert_allclose_quantity(rho, s1.rho)
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(z, s1.z)
def test_reprobj(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
s2 = CylindricalRepresentation.from_representation(s1)
assert s2.rho == 1 * u.kpc
assert s2.phi == 2 * u.deg
assert s2.z == 3 * u.kpc
s3 = CylindricalRepresentation(s1)
assert representation_equal(s3, s1)
def test_broadcasting(self):
s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=5 * u.kpc)
assert s1.rho.unit == u.kpc
assert s1.phi.unit == u.deg
assert s1.z.unit == u.kpc
assert_allclose(s1.rho.value, [1, 2])
assert_allclose(s1.phi.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=[5, 6, 7] * u.kpc)
assert exc.value.args[0] == "Input parameters rho, phi, and z cannot be broadcast"
def test_readonly(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc,
phi=20 * u.deg,
z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.rho = 1. * u.kpc
with pytest.raises(AttributeError):
s1.phi = 20 * u.deg
with pytest.raises(AttributeError):
s1.z = 1. * u.kpc
    def test_unit_mismatch(self):
q_len = u.Quantity([1], u.kpc)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_nonlen, phi=10 * u.deg, z=q_len)
assert exc.value.args[0] == "rho and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_len, phi=10 * u.deg, z=q_nonlen)
assert exc.value.args[0] == "rho and z should have matching physical types"
def test_getitem(self):
s = CylindricalRepresentation(rho=np.arange(10) * u.pc,
phi=-np.arange(10) * u.deg,
z=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.rho, [2, 4, 6] * u.pc)
assert_allclose_quantity(s_slc.phi, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.z, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = CylindricalRepresentation(rho=1 * u.pc,
phi=-2 * u.deg,
z=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
s1 = CylindricalRepresentation(phi=[1, 2] * u.deg, z=[3, 4] * u.pc,
rho=[5, 6] * u.kpc)
s2 = s1.transform(matrices["rotation"])
assert_allclose_quantity(s2.phi, s1.phi + 10 * u.deg)
assert_allclose_quantity(s2.z, s1.z)
assert_allclose_quantity(s2.rho, s1.rho)
assert s2.phi.unit is u.rad
assert s2.z.unit is u.kpc
assert s2.rho.unit is u.kpc
# now with a non rotation matrix
s3 = s1.transform(matrices["general"])
expected = (s1.to_cartesian().transform(matrices["general"])
).represent_as(CylindricalRepresentation)
assert_allclose_quantity(s3.phi, expected.phi)
assert_allclose_quantity(s3.z, expected.z)
assert_allclose_quantity(s3.rho, expected.rho)
class TestUnitSphericalCosLatDifferential:
@pytest.mark.parametrize("matrix", list(matrices.values()))
def test_transform(self, matrix):
"""Test ``.transform()`` on rotation and general matrices."""
# set up representation
ds1 = UnitSphericalCosLatDifferential(d_lon_coslat=[1, 2] * u.mas / u.yr,
d_lat=[3, 4] * u.mas / u.yr,)
s1 = UnitSphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg)
# transform representation & get comparison (thru CartesianRep)
s2 = s1.transform(matrix)
ds2 = ds1.transform(matrix, s1, s2)
dexpected = UnitSphericalCosLatDifferential.from_cartesian(
ds1.to_cartesian(base=s1).transform(matrix), base=s2)
assert_allclose_quantity(ds2.d_lon_coslat, dexpected.d_lon_coslat)
assert_allclose_quantity(ds2.d_lat, dexpected.d_lat)
def test_cartesian_spherical_roundtrip():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s2 = SphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = SphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.lon, s4.lon)
assert_allclose_quantity(s2.lat, s4.lat)
assert_allclose_quantity(s2.distance, s4.distance)
def test_cartesian_setting_with_other():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s1[0] = SphericalRepresentation(0.*u.deg, 0.*u.deg, 1*u.kpc)
assert_allclose_quantity(s1.x, [1., 2000.] * u.kpc)
assert_allclose_quantity(s1.y, [0., 4.] * u.pc)
assert_allclose_quantity(s1.z, [0., 6000.] * u.pc)
with pytest.raises(ValueError, match='loss of information'):
s1[1] = UnitSphericalRepresentation(0.*u.deg, 10.*u.deg)
def test_cartesian_physics_spherical_roundtrip():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
def test_spherical_physics_spherical_roundtrip():
s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s3.lon)
assert_allclose_quantity(s1.lat, s3.lat)
assert_allclose_quantity(s1.distance, s3.distance)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
assert_allclose_quantity(s1.lon, s4.phi)
assert_allclose_quantity(s1.lat, 90. * u.deg - s4.theta)
assert_allclose_quantity(s1.distance, s4.r)
def test_cartesian_cylindrical_roundtrip():
s1 = CartesianRepresentation(x=np.array([1., 2000.]) * u.kpc,
y=np.array([3000., 4.]) * u.pc,
z=np.array([5., 600.]) * u.cm)
s2 = CylindricalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = CylindricalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.rho, s4.rho)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.z, s4.z)
def test_unit_spherical_roundtrip():
s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg,
lat=[5., 6.] * u.arcmin)
s2 = CartesianRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = UnitSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s4.lon)
assert_allclose_quantity(s1.lat, s4.lat)
def test_no_unnecessary_copies():
s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg,
lat=[5., 6.] * u.arcmin)
s2 = s1.represent_as(UnitSphericalRepresentation)
assert s2 is s1
assert np.may_share_memory(s1.lon, s2.lon)
assert np.may_share_memory(s1.lat, s2.lat)
s3 = s1.represent_as(SphericalRepresentation)
assert np.may_share_memory(s1.lon, s3.lon)
assert np.may_share_memory(s1.lat, s3.lat)
s4 = s1.represent_as(CartesianRepresentation)
s5 = s4.represent_as(CylindricalRepresentation)
assert np.may_share_memory(s5.z, s4.z)
def test_representation_repr():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert repr(r1) == ('<SphericalRepresentation (lon, lat, distance) in (deg, deg, kpc)\n'
' (1., 2.5, 1.)>')
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert repr(r2) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' (1., 2., 3.)>')
r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc)
assert repr(r3) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' [(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)]>')
def test_representation_repr_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m')
assert repr(cr) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n'
' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n'
' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]]>')
# This was broken before.
assert repr(cr.T) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n'
' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n'
' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]]>')
def test_representation_str():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert str(r1) == '(1., 2.5, 1.) (deg, deg, kpc)'
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert str(r2) == '(1., 2., 3.) kpc'
r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc)
assert str(r3) == '[(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)] kpc'
def test_representation_str_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m')
assert str(cr) == (
'[[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n'
' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n'
' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]] m')
# This was broken before.
assert str(cr.T) == (
'[[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n'
' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n'
' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]] m')
def test_subclass_representation():
from astropy.coordinates.builtin_frames import ICRS
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super().__new__(cls, angle, unit=unit, wrap_angle=wrap_angle,
**kwargs)
return self
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = {'lon': Longitude180,
'lat': Latitude,
'distance': u.Quantity}
class ICRSWrap180(ICRS):
frame_specific_representation_info = ICRS._frame_specific_representation_info.copy()
frame_specific_representation_info[SphericalWrap180Representation] = \
frame_specific_representation_info[SphericalRepresentation]
default_representation = SphericalWrap180Representation
c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m)
assert c.ra.value == -1
assert c.ra.unit is u.deg
assert c.dec.value == -2
assert c.dec.unit is u.deg
def test_minimal_subclass():
    # Basically a check that what we document actually works;
    # see doc/coordinates/representations.rst
class LogDRepresentation(BaseRepresentation):
attr_classes = {'lon': Longitude,
'lat': Latitude,
'logd': u.Dex}
def to_cartesian(self):
d = self.logd.physical
x = d * np.cos(self.lat) * np.cos(self.lon)
y = d * np.cos(self.lat) * np.sin(self.lon)
z = d * np.sin(self.lat)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
lon = np.arctan2(cart.y, cart.x)
lat = np.arctan2(cart.z, s)
return cls(lon=lon, lat=lat, logd=u.Dex(r), copy=False)
ld1 = LogDRepresentation(90.*u.deg, 0.*u.deg, 1.*u.dex(u.kpc))
ld2 = LogDRepresentation(lon=90.*u.deg, lat=0.*u.deg, logd=1.*u.dex(u.kpc))
assert np.all(ld1.lon == ld2.lon)
assert np.all(ld1.lat == ld2.lat)
assert np.all(ld1.logd == ld2.logd)
c = ld1.to_cartesian()
assert_allclose_quantity(c.xyz, [0., 10., 0.] * u.kpc, atol=1.*u.npc)
ld3 = LogDRepresentation.from_cartesian(c)
assert np.all(ld3.lon == ld2.lon)
assert np.all(ld3.lat == ld2.lat)
assert np.all(ld3.logd == ld2.logd)
s = ld1.represent_as(SphericalRepresentation)
assert_allclose_quantity(s.lon, ld1.lon)
assert_allclose_quantity(s.distance, 10.*u.kpc)
assert_allclose_quantity(s.lat, ld1.lat)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), lon=1.*u.deg)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), True, False)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), foo='bar')
    # if we define it a second time, even the qualname is the same,
    # so registration raises instead of just warning
with pytest.raises(ValueError):
class LogDRepresentation(BaseRepresentation):
attr_classes = {'lon': Longitude,
'lat': Latitude,
'logr': u.Dex}
def test_duplicate_warning():
from astropy.coordinates.representation import DUPLICATE_REPRESENTATIONS
from astropy.coordinates.representation import REPRESENTATION_CLASSES
with pytest.warns(DuplicateRepresentationWarning):
class UnitSphericalRepresentation(BaseRepresentation):
attr_classes = {'lon': Longitude,
'lat': Latitude}
assert 'unitspherical' in DUPLICATE_REPRESENTATIONS
assert 'unitspherical' not in REPRESENTATION_CLASSES
assert 'astropy.coordinates.representation.UnitSphericalRepresentation' in REPRESENTATION_CLASSES
assert __name__ + '.test_duplicate_warning.<locals>.UnitSphericalRepresentation' in REPRESENTATION_CLASSES
class TestCartesianRepresentationWithDifferential:
def test_init_differential(self):
diff = CartesianDifferential(d_x=1 * u.km/u.s,
d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s)
# Check that a single differential gets turned into a 1-item dict.
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
# can also pass in an explicit dictionary
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'s': diff})
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
# using the wrong key will cause it to fail
with pytest.raises(ValueError):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'1 / s2': diff})
# make sure other kwargs are handled properly
s1 = CartesianRepresentation(x=1, y=2, z=3,
differentials=diff, copy=False, unit=u.kpc)
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
with pytest.raises(TypeError): # invalid type passed to differentials
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials='garmonbozia')
# And that one can add it to another representation.
s1 = CartesianRepresentation(
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc),
differentials=diff)
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
# make sure differentials can't accept differentials
with pytest.raises(TypeError):
CartesianDifferential(d_x=1 * u.km/u.s, d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s, differentials=diff)
def test_init_differential_compatible(self):
# TODO: more extensive checking of this
# should fail - representation and differential not compatible
diff = SphericalDifferential(d_lon=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
with pytest.raises(TypeError):
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg,
distance=1*u.pc,
differentials=diff)
def test_init_differential_multiple_equivalent_keys(self):
d1 = CartesianDifferential(*[1, 2, 3] * u.km/u.s)
d2 = CartesianDifferential(*[4, 5, 6] * u.km/u.s)
        # verify that the check against expected_unit rejects passing
        # two different but equivalent keys ('s' and 'yr' are both times)
with pytest.raises(ValueError):
r1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'s': d1, 'yr': d2})
def test_init_array_broadcasting(self):
arr1 = np.arange(8).reshape(4, 2) * u.km/u.s
diff = CartesianDifferential(d_x=arr1, d_y=arr1, d_z=arr1)
# shapes aren't compatible
arr2 = np.arange(27).reshape(3, 9) * u.kpc
with pytest.raises(ValueError):
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2,
differentials=diff)
arr2 = np.arange(8).reshape(4, 2) * u.kpc
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2,
differentials=diff)
assert rep.x.unit is u.kpc
assert rep.y.unit is u.kpc
assert rep.z.unit is u.kpc
assert len(rep.differentials) == 1
assert rep.differentials['s'] is diff
assert rep.xyz.shape == rep.differentials['s'].d_xyz.shape
def test_reprobj(self):
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg,
distance=1*u.pc,
differentials=diff)
r2 = CartesianRepresentation.from_representation(r1)
assert r2.get_name() == 'cartesian'
assert not r2.differentials
r3 = SphericalRepresentation(r1)
assert r3.differentials
assert representation_equal(r3, r1)
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError): # attribute is not settable
s1.differentials = 'thing'
def test_represent_as(self):
diff = CartesianDifferential(d_x=1 * u.km/u.s,
d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s)
rep1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
# Only change the representation, drop the differential
new_rep = rep1.represent_as(SphericalRepresentation)
assert new_rep.get_name() == 'spherical'
assert not new_rep.differentials # dropped
# Pass in separate classes for representation, differential
new_rep = rep1.represent_as(SphericalRepresentation,
SphericalCosLatDifferential)
assert new_rep.get_name() == 'spherical'
assert new_rep.differentials['s'].get_name() == 'sphericalcoslat'
# Pass in a dictionary for the differential classes
new_rep = rep1.represent_as(SphericalRepresentation,
{'s': SphericalCosLatDifferential})
assert new_rep.get_name() == 'spherical'
assert new_rep.differentials['s'].get_name() == 'sphericalcoslat'
# make sure represent_as() passes through the differentials
for name in REPRESENTATION_CLASSES:
if name == 'radial':
# TODO: Converting a CartesianDifferential to a
# RadialDifferential fails, even on `main`
continue
elif name.endswith("geodetic"):
# TODO: Geodetic representations do not have differentials yet
continue
new_rep = rep1.represent_as(REPRESENTATION_CLASSES[name],
DIFFERENTIAL_CLASSES[name])
assert new_rep.get_name() == name
assert len(new_rep.differentials) == 1
assert new_rep.differentials['s'].get_name() == name
with pytest.raises(ValueError) as excinfo:
rep1.represent_as('name')
assert 'use frame object' in str(excinfo.value)
@pytest.mark.parametrize('sph_diff,usph_diff', [
(SphericalDifferential, UnitSphericalDifferential),
(SphericalCosLatDifferential, UnitSphericalCosLatDifferential)])
def test_represent_as_unit_spherical_with_diff(self, sph_diff, usph_diff):
"""Test that differential angles are correctly reduced."""
diff = CartesianDifferential(d_x=1 * u.km/u.s,
d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s)
rep = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
sph = rep.represent_as(SphericalRepresentation, sph_diff)
usph = rep.represent_as(UnitSphericalRepresentation, usph_diff)
assert components_equal(usph, sph.represent_as(UnitSphericalRepresentation))
assert components_equal(usph.differentials['s'],
sph.differentials['s'].represent_as(usph_diff))
        # As a sanity check that components_equal and represent_as work as
        # advertised: d_lat is always defined and should be the same.
assert_array_equal(sph.differentials['s'].d_lat,
usph.differentials['s'].d_lat)
def test_getitem(self):
d = CartesianDifferential(d_x=np.arange(10) * u.m/u.s,
d_y=-np.arange(10) * u.m/u.s,
d_z=1. * u.m/u.s)
s = CartesianRepresentation(x=np.arange(10) * u.m,
y=-np.arange(10) * u.m,
z=3 * u.km,
differentials=d)
s_slc = s[2:8:2]
s_dif = s_slc.differentials['s']
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
assert_allclose_quantity(s_dif.d_x, [2, 4, 6] * u.m/u.s)
assert_allclose_quantity(s_dif.d_y, [-2, -4, -6] * u.m/u.s)
assert_allclose_quantity(s_dif.d_z, [1, 1, 1] * u.m/u.s)
def test_setitem(self):
d = CartesianDifferential(d_x=np.arange(5) * u.m/u.s,
d_y=-np.arange(5) * u.m/u.s,
d_z=1. * u.m/u.s)
s = CartesianRepresentation(x=np.arange(5) * u.m,
y=-np.arange(5) * u.m,
z=3 * u.km,
differentials=d)
s[:2] = s[2]
assert_array_equal(s.x, [2, 2, 2, 3, 4] * u.m)
assert_array_equal(s.y, [-2, -2, -2, -3, -4] * u.m)
assert_array_equal(s.z, [3, 3, 3, 3, 3] * u.km)
assert_array_equal(s.differentials['s'].d_x,
[2, 2, 2, 3, 4] * u.m/u.s)
assert_array_equal(s.differentials['s'].d_y,
[-2, -2, -2, -3, -4] * u.m/u.s)
assert_array_equal(s.differentials['s'].d_z,
[1, 1, 1, 1, 1] * u.m/u.s)
s2 = s.represent_as(SphericalRepresentation,
SphericalDifferential)
s[0] = s2[3]
assert_allclose_quantity(s.x, [3, 2, 2, 3, 4] * u.m)
assert_allclose_quantity(s.y, [-3, -2, -2, -3, -4] * u.m)
assert_allclose_quantity(s.z, [3, 3, 3, 3, 3] * u.km)
assert_allclose_quantity(s.differentials['s'].d_x,
[3, 2, 2, 3, 4] * u.m/u.s)
assert_allclose_quantity(s.differentials['s'].d_y,
[-3, -2, -2, -3, -4] * u.m/u.s)
assert_allclose_quantity(s.differentials['s'].d_z,
[1, 1, 1, 1, 1] * u.m/u.s)
s3 = CartesianRepresentation(s.xyz, differentials={
's': d,
's2': CartesianDifferential(np.ones((3, 5))*u.m/u.s**2)})
with pytest.raises(ValueError, match='same differentials'):
s[0] = s3[2]
s4 = SphericalRepresentation(0.*u.deg, 0.*u.deg, 1.*u.kpc,
differentials=RadialDifferential(
10*u.km/u.s))
with pytest.raises(ValueError, match='loss of information'):
s[0] = s4
def test_transform(self):
d1 = CartesianDifferential(d_x=[1, 2] * u.km/u.s,
d_y=[3, 4] * u.km/u.s,
d_z=[5, 6] * u.km/u.s)
r1 = CartesianRepresentation(x=[1, 2] * u.kpc,
y=[3, 4] * u.kpc,
z=[5, 6] * u.kpc,
differentials=d1)
r2 = r1.transform(matrices["general"])
d2 = r2.differentials['s']
assert_allclose_quantity(d2.d_x, [22., 28]*u.km/u.s)
assert_allclose_quantity(d2.d_y, [49, 64]*u.km/u.s)
assert_allclose_quantity(d2.d_z, [76, 100.]*u.km/u.s)
def test_with_differentials(self):
        # make sure with_differentials correctly creates a new copy with the
        # same differential
cr = CartesianRepresentation([1, 2, 3]*u.kpc)
diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
cr2 = cr.with_differentials(diff)
assert cr.differentials != cr2.differentials
assert cr2.differentials['s'] is diff
# make sure it works even if a differential is present already
diff2 = CartesianDifferential([.1, .2, .3]*u.m/u.s)
cr3 = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
cr4 = cr3.with_differentials(diff2)
assert cr4.differentials['s'] != cr3.differentials['s']
assert cr4.differentials['s'] == diff2
        # also ensure a *scalar* differential works
cr5 = cr.with_differentials(diff)
assert len(cr5.differentials) == 1
assert cr5.differentials['s'] == diff
# make sure we don't update the original representation's dict
d1 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s)
d2 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s**2)
r1 = CartesianRepresentation(*np.random.random((3, 5)), unit=u.pc,
differentials=d1)
r2 = r1.with_differentials(d2)
assert r1.differentials['s'] is r2.differentials['s']
assert 's2' not in r1.differentials
assert 's2' in r2.differentials
def test_repr_with_differentials():
diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
cr = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
assert "has differentials w.r.t.: 's'" in repr(cr)
def test_to_cartesian():
"""
Test that to_cartesian drops the differential.
"""
sd = SphericalDifferential(d_lat=1*u.deg, d_lon=2*u.deg, d_distance=10*u.m)
sr = SphericalRepresentation(lat=1*u.deg, lon=2*u.deg, distance=10*u.m,
differentials=sd)
cart = sr.to_cartesian()
assert cart.get_name() == 'cartesian'
assert not cart.differentials
@pytest.fixture
def unitphysics():
"""
This fixture is used
"""
had_unit = False
if hasattr(PhysicsSphericalRepresentation, '_unit_representation'):
orig = PhysicsSphericalRepresentation._unit_representation
had_unit = True
class UnitPhysicsSphericalRepresentation(BaseRepresentation):
attr_classes = {'phi': Angle,
'theta': Angle}
def __init__(self, *args, copy=True, **kwargs):
super().__init__(*args, copy=copy, **kwargs)
# Wrap/validate phi/theta
if copy:
self._phi = self._phi.wrap_at(360 * u.deg)
else:
# necessary because the above version of `wrap_at` has to be a copy
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
raise ValueError('Inclination angle(s) must be within '
'0 deg <= angle <= 180 deg, '
'got {}'.format(self._theta.to(u.degree)))
@property
def phi(self):
return self._phi
@property
def theta(self):
return self._theta
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return {
'phi': CartesianRepresentation(-sinphi, cosphi, 0., copy=False),
'theta': CartesianRepresentation(costheta*cosphi,
costheta*sinphi,
-sintheta, copy=False)}
def scale_factors(self):
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
            return {'phi': sintheta,
                    'theta': l}
def to_cartesian(self):
x = np.sin(self.theta) * np.cos(self.phi)
y = np.sin(self.theta) * np.sin(self.phi)
z = np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, copy=False)
def norm(self):
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled,
copy=False)
PhysicsSphericalRepresentation._unit_representation = UnitPhysicsSphericalRepresentation
yield UnitPhysicsSphericalRepresentation
if had_unit:
PhysicsSphericalRepresentation._unit_representation = orig
else:
del PhysicsSphericalRepresentation._unit_representation
# remove from the module-level representations, if present
REPRESENTATION_CLASSES.pop(UnitPhysicsSphericalRepresentation.get_name(), None)
def test_unitphysics(unitphysics):
obj = unitphysics(phi=0*u.deg, theta=10*u.deg)
objkw = unitphysics(phi=0*u.deg, theta=10*u.deg)
assert objkw.phi == obj.phi
assert objkw.theta == obj.theta
asphys = obj.represent_as(PhysicsSphericalRepresentation)
assert asphys.phi == obj.phi
assert_allclose(asphys.theta, obj.theta)
assert_allclose_quantity(asphys.r, 1*u.dimensionless_unscaled)
assph = obj.represent_as(SphericalRepresentation)
assert assph.lon == obj.phi
assert assph.lat == 80*u.deg
assert_allclose_quantity(assph.distance, 1*u.dimensionless_unscaled)
with pytest.raises(TypeError, match='got multiple values'):
unitphysics(1*u.deg, 2*u.deg, theta=10)
with pytest.raises(TypeError, match='unexpected keyword.*parrot'):
unitphysics(1*u.deg, 2*u.deg, parrot=10)
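def test_unitphysics_cartesian_roundtrip(unitphysics):
    # Companion check (a sketch, not part of the original suite): converting
    # out through to_cartesian and back through from_cartesian should
    # recover the input angles to within floating-point error.
    obj = unitphysics(phi=30*u.deg, theta=60*u.deg)
    back = unitphysics.from_cartesian(obj.to_cartesian())
    assert_allclose_quantity(back.phi, obj.phi)
    assert_allclose_quantity(back.theta, obj.theta)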
def test_distance_warning(recwarn):
SphericalRepresentation(1*u.deg, 2*u.deg, 1*u.kpc)
with pytest.raises(ValueError) as excinfo:
SphericalRepresentation(1*u.deg, 2*u.deg, -1*u.kpc)
assert 'Distance must be >= 0' in str(excinfo.value)
    # The first check matches the "originating" ValueError; the
    # representation-level error additionally includes the text below.
assert 'you must explicitly pass' in str(excinfo.value)
def test_dtype_preservation_in_indexing():
# Regression test for issue #8614 (fixed in #8876)
xyz = np.array([[1, 0, 0], [0.9, 0.1, 0]], dtype='f4')
cr = CartesianRepresentation(xyz, xyz_axis=-1, unit="km")
assert cr.xyz.dtype == xyz.dtype
cr0 = cr[0]
# This used to fail.
assert cr0.xyz.dtype == xyz.dtype
class TestInfo:
def setup_class(cls):
cls.rep = SphericalRepresentation([0, 1]*u.deg, [2, 3]*u.deg,
10*u.pc)
cls.diff = SphericalDifferential([10, 20]*u.mas/u.yr,
[30, 40]*u.mas/u.yr,
[50, 60]*u.km/u.s)
cls.rep_w_diff = SphericalRepresentation(cls.rep,
differentials=cls.diff)
def test_info_unit(self):
assert self.rep.info.unit == 'deg, deg, pc'
assert self.diff.info.unit == 'mas / yr, mas / yr, km / s'
assert self.rep_w_diff.info.unit == 'deg, deg, pc'
@pytest.mark.parametrize('item', ['rep', 'diff', 'rep_w_diff'])
def test_roundtrip(self, item):
rep_or_diff = getattr(self, item)
as_dict = rep_or_diff.info._represent_as_dict()
new = rep_or_diff.__class__.info._construct_from_dict(as_dict)
assert np.all(representation_equal(new, rep_or_diff))
@pytest.mark.parametrize('cls',
[SphericalDifferential,
SphericalCosLatDifferential,
CylindricalDifferential,
PhysicsSphericalDifferential,
UnitSphericalDifferential,
UnitSphericalCosLatDifferential])
def test_differential_norm_noncartesian(cls):
# The norm of a non-Cartesian differential without specifying `base` should error
    # Instantiate with the right number of zero components for each class
    # (the unit-spherical differentials take only two).
    rep = cls(*((0,) * len(cls.attr_classes)))
with pytest.raises(ValueError, match=r"`base` must be provided .* " + cls.__name__):
rep.norm()
def test_differential_norm_radial():
# Unlike most non-Cartesian differentials, the norm of a radial differential does not require `base`
rep = RadialDifferential(1*u.km/u.s)
assert_allclose_quantity(rep.norm(), 1*u.km/u.s)
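def test_differential_norm_cartesian():
    # Companion check (a sketch, not part of the original suite): a
    # Cartesian differential carries orthonormal components, so its norm
    # needs no `base`.
    d = CartesianDifferential([3, 0]*u.km/u.s, [4, 0]*u.km/u.s, [0, 0]*u.km/u.s)
    assert_allclose_quantity(d.norm(), [5, 0]*u.km/u.s)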
|
dec4d5148c4b719ec77821f62bfbe3f26b74d466d698c81ea3f9bfce243acf0d | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for miscellaneous functionality in the `funcs` module
"""
import pytest
import numpy as np
from numpy import testing as npt
from astropy import units as u
from astropy.time import Time
def test_sun():
"""
Test that `get_sun` works and it behaves roughly as it should (in GCRS)
"""
from astropy.coordinates.funcs import get_sun
northern_summer_solstice = Time('2010-6-21')
northern_winter_solstice = Time('2010-12-21')
equinox_1 = Time('2010-3-21')
equinox_2 = Time('2010-9-21')
gcrs1 = get_sun(equinox_1)
assert np.abs(gcrs1.dec.deg) < 1
gcrs2 = get_sun(Time([northern_summer_solstice, equinox_2, northern_winter_solstice]))
assert np.all(np.abs(gcrs2.dec - [23.5, 0, -23.5]*u.deg) < 1*u.deg)
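def test_sun_distance():
    """
    Companion check (a sketch, not part of the original suite): the Sun
    should sit roughly 1 AU away; allow a few percent for the annual
    eccentricity variation.
    """
    from astropy.coordinates.funcs import get_sun
    dist = get_sun(Time('2010-3-21')).distance
    assert abs(dist.to_value(u.au) - 1) < 0.05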
def test_constellations(recwarn):
from astropy.coordinates import ICRS, FK5, SkyCoord
from astropy.coordinates.funcs import get_constellation
inuma = ICRS(9*u.hour, 65*u.deg)
n_prewarn = len(recwarn)
res = get_constellation(inuma)
res_short = get_constellation(inuma, short_name=True)
    assert len(recwarn) == n_prewarn  # neither version should make warnings
assert res == 'Ursa Major'
assert res_short == 'UMa'
assert isinstance(res, str) or getattr(res, 'shape', None) == tuple()
# these are taken from the ReadMe for Roman 1987
ras = [9, 23.5, 5.12, 9.4555, 12.8888, 15.6687, 19, 6.2222]
decs = [65, -20, 9.12, -19.9, 22, -12.1234, -40, -81.1234]
shortnames = ['UMa', 'Aqr', 'Ori', 'Hya', 'Com', 'Lib', 'CrA', 'Men']
testcoos = FK5(ras*u.hour, decs*u.deg, equinox='B1950')
npt.assert_equal(get_constellation(testcoos, short_name=True), shortnames)
# test on a SkyCoord, *and* test Boötes, which is special in that it has a
# non-ASCII character
bootest = SkyCoord(15*u.hour, 30*u.deg, frame='icrs')
boores = get_constellation(bootest)
assert boores == 'Boötes'
assert isinstance(boores, str) or getattr(boores, 'shape', None) == tuple()
@pytest.mark.xfail
def test_constellation_edge_cases():
from astropy.coordinates import FK5
from astropy.coordinates.funcs import get_constellation
# Test edge cases close to borders, using B1875.0 coordinates
# Look for HMS / DMS roundoff-to-decimal issues from Roman (1987) data,
# and misuse of PrecessedGeocentric, as documented in
# https://github.com/astropy/astropy/issues/9855
# Define eight test points.
# The first four cross the boundary at 06h14m30 == 6.2416666666666... hours
# with Monoceros on the west side of Orion at Dec +3.0.
ras = [6.24100, 6.24160, 6.24166, 6.24171]
# aka ['6h14m27.6s' '6h14m29.76s' '6h14m29.976s' '6h14m30.156s']
decs = [3.0, 3.0, 3.0, 3.0]
# Correct constellations for given RA/Dec coordinates
shortnames = ['Ori', 'Ori', 'Ori', 'Mon']
# The second four sample northward along RA 22 hours, crossing the boundary
# at 86° 10' == 86.1666... degrees between Cepheus and Ursa Minor
decs += [86.16, 86.1666, 86.16668, 86.1668]
ras += [22.0, 22.0, 22.0, 22.0]
    shortnames += ['Cep', 'Cep', 'UMi', 'UMi']
testcoos = FK5(ras*u.hour, decs*u.deg, equinox='B1875')
npt.assert_equal(get_constellation(testcoos, short_name=True), shortnames,
"get_constellation() error: misusing Roman approximations, vs IAU boundaries from Delporte?")
# TODO: When that's fixed, add other tests with coords that are in different constellations
# depending on equinox
def test_concatenate():
from astropy.coordinates import FK5, SkyCoord, ICRS
from astropy.coordinates.funcs import concatenate
# Just positions
fk5 = FK5(1*u.deg, 2*u.deg)
sc = SkyCoord(3*u.deg, 4*u.deg, frame='fk5')
res = concatenate([fk5, sc])
np.testing.assert_allclose(res.ra, [1, 3]*u.deg)
np.testing.assert_allclose(res.dec, [2, 4]*u.deg)
with pytest.raises(TypeError):
concatenate(fk5)
with pytest.raises(TypeError):
concatenate(1*u.deg)
# positions and velocities
fr = ICRS(ra=10*u.deg, dec=11.*u.deg,
pm_ra_cosdec=12*u.mas/u.yr,
pm_dec=13*u.mas/u.yr)
sc = SkyCoord(ra=20*u.deg, dec=21.*u.deg,
pm_ra_cosdec=22*u.mas/u.yr,
pm_dec=23*u.mas/u.yr)
res = concatenate([fr, sc])
with pytest.raises(ValueError):
concatenate([fr, fk5])
fr2 = ICRS(ra=10*u.deg, dec=11.*u.deg)
with pytest.raises(ValueError):
concatenate([fr, fr2])
def test_concatenate_representations():
from astropy.coordinates.funcs import concatenate_representations
from astropy.coordinates import representation as r
reps = [r.CartesianRepresentation([1, 2, 3.]*u.kpc),
r.SphericalRepresentation(lon=1*u.deg, lat=2.*u.deg,
distance=10*u.pc),
r.UnitSphericalRepresentation(lon=1*u.deg, lat=2.*u.deg),
r.CartesianRepresentation(np.ones((3, 100)) * u.kpc),
r.CartesianRepresentation(np.ones((3, 16, 8)) * u.kpc)]
reps.append(reps[0].with_differentials(
r.CartesianDifferential([1, 2, 3.] * u.km/u.s)))
reps.append(reps[1].with_differentials(
r.SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s)))
reps.append(reps[2].with_differentials(
r.SphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr, 3*u.km/u.s)))
reps.append(reps[2].with_differentials(
r.UnitSphericalCosLatDifferential(1*u.mas/u.yr, 2*u.mas/u.yr)))
reps.append(reps[2].with_differentials(
{'s': r.RadialDifferential(1*u.km/u.s)}))
reps.append(reps[3].with_differentials(
r.CartesianDifferential(*np.ones((3, 100)) * u.km/u.s)))
reps.append(reps[4].with_differentials(
r.CartesianDifferential(*np.ones((3, 16, 8)) * u.km/u.s)))
# Test that combining all of the above with itself succeeds
for rep in reps:
if not rep.shape:
expected_shape = (2, )
else:
expected_shape = (2 * rep.shape[0], ) + rep.shape[1:]
tmp = concatenate_representations((rep, rep))
assert tmp.shape == expected_shape
if 's' in rep.differentials:
assert tmp.differentials['s'].shape == expected_shape
# Try combining 4, just for something different
for rep in reps:
if not rep.shape:
expected_shape = (4, )
else:
expected_shape = (4 * rep.shape[0], ) + rep.shape[1:]
tmp = concatenate_representations((rep, rep, rep, rep))
assert tmp.shape == expected_shape
if 's' in rep.differentials:
assert tmp.differentials['s'].shape == expected_shape
    # Test that combining incompatible pairs fails
with pytest.raises(TypeError):
concatenate_representations((reps[0], reps[1]))
with pytest.raises(ValueError):
concatenate_representations((reps[0], reps[5]))
# Check that passing in a single object fails
with pytest.raises(TypeError):
concatenate_representations(reps[0])
def test_concatenate_representations_different_units():
from astropy.coordinates.funcs import concatenate_representations
from astropy.coordinates import representation as r
reps = [r.CartesianRepresentation([1, 2, 3.]*u.pc),
r.CartesianRepresentation([1, 2, 3.]*u.kpc)]
concat = concatenate_representations(reps)
assert concat.shape == (2,)
assert np.all(concat.xyz ==
([[1., 2., 3.], [1000., 2000., 3000.]] * u.pc).T)
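def test_concatenate_frame_preserved():
    # Companion check (a sketch, not part of the original suite), assuming
    # `concatenate` keeps the (equivalent) frame of its inputs rather than
    # converting to ICRS.
    from astropy.coordinates import FK5
    from astropy.coordinates.funcs import concatenate
    fk5 = FK5([1, 2]*u.deg, [3, 4]*u.deg)
    res = concatenate([fk5, fk5])
    assert res.frame.name == 'fk5'
    np.testing.assert_allclose(res.ra.deg, [1, 2, 1, 2])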
|
c65d092e1da6cd34871c6a6fa4a26756e0c8f7ef056f19ba8a9c72a05af78548 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the SkyCoord class. Note that there are also SkyCoord tests in
test_api_ape5.py
"""
import copy
from copy import deepcopy
import pytest
import numpy as np
import numpy.testing as npt
from erfa import ErfaWarning
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.coordinates.representation import REPRESENTATION_CLASSES, DUPLICATE_REPRESENTATIONS
from astropy.coordinates import (ICRS, FK4, FK5, Galactic, GCRS, SkyCoord, Angle,
SphericalRepresentation, CartesianRepresentation,
UnitSphericalRepresentation, AltAz,
BaseCoordinateFrame, Attribute,
frame_transform_graph, RepresentationMapping)
from astropy.coordinates import Latitude, EarthLocation
from astropy.coordinates.transformations import FunctionTransform
from astropy.time import Time
from astropy.utils import minversion, isiterable
from astropy.units import allclose as quantity_allclose
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
RA = 1.0 * u.deg
DEC = 2.0 * u.deg
C_ICRS = ICRS(RA, DEC)
C_FK5 = C_ICRS.transform_to(FK5())
J2001 = Time('J2001')
def allclose(a, b, rtol=0.0, atol=None):
if atol is None:
atol = 1.e-8 * getattr(a, 'unit', 1.)
return quantity_allclose(a, b, rtol, atol)
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
func.DUPLICATE_REPRESENTATIONS_ORIG = deepcopy(DUPLICATE_REPRESENTATIONS)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
DUPLICATE_REPRESENTATIONS.clear()
DUPLICATE_REPRESENTATIONS.update(func.DUPLICATE_REPRESENTATIONS_ORIG)
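# The tests below define custom representation classes, which register
# themselves in the module-level REPRESENTATION_CLASSES and
# DUPLICATE_REPRESENTATIONS dicts on creation; snapshotting and restoring
# those dicts around each test keeps such registrations from leaking into
# unrelated tests.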
def test_is_transformable_to_str_input():
"""Test method ``is_transformable_to`` with string input.
The only difference from the frame method of the same name is that
    strings are allowed. As the frame tests cover ``is_transformable_to``, here
we only test the added string option.
"""
# make example SkyCoord
c = SkyCoord(90*u.deg, -11*u.deg)
# iterate through some frames, checking consistency
names = frame_transform_graph.get_names()
for name in names:
frame = frame_transform_graph.lookup_name(name)()
assert c.is_transformable_to(name) == c.is_transformable_to(frame)
def test_transform_to():
for frame in (FK5(), FK5(equinox=Time('J1975.0')),
FK4(), FK4(equinox=Time('J1975.0')),
SkyCoord(RA, DEC, frame='fk4', equinox='J1980')):
c_frame = C_ICRS.transform_to(frame)
s_icrs = SkyCoord(RA, DEC, frame='icrs')
s_frame = s_icrs.transform_to(frame)
assert allclose(c_frame.ra, s_frame.ra)
assert allclose(c_frame.dec, s_frame.dec)
assert allclose(c_frame.distance, s_frame.distance)
# set up for parametrized test
rt_sets = []
rt_frames = [ICRS, FK4, FK5, Galactic]
for rt_frame0 in rt_frames:
for rt_frame1 in rt_frames:
for equinox0 in (None, 'J1975.0'):
for obstime0 in (None, 'J1980.0'):
for equinox1 in (None, 'J1975.0'):
for obstime1 in (None, 'J1980.0'):
rt_sets.append((rt_frame0, rt_frame1,
equinox0, equinox1,
obstime0, obstime1))
rt_args = ('frame0', 'frame1', 'equinox0', 'equinox1', 'obstime0', 'obstime1')
@pytest.mark.parametrize(rt_args, rt_sets)
def test_round_tripping(frame0, frame1, equinox0, equinox1, obstime0, obstime1):
"""
Test round tripping out and back using transform_to in every combination.
"""
attrs0 = {'equinox': equinox0, 'obstime': obstime0}
attrs1 = {'equinox': equinox1, 'obstime': obstime1}
# Remove None values
attrs0 = dict((k, v) for k, v in attrs0.items() if v is not None)
attrs1 = dict((k, v) for k, v in attrs1.items() if v is not None)
# Go out and back
sc = SkyCoord(RA, DEC, frame=frame0, **attrs0)
# Keep only frame attributes for frame1
attrs1 = dict((attr, val) for attr, val in attrs1.items()
if attr in frame1.get_frame_attr_names())
sc2 = sc.transform_to(frame1(**attrs1))
# When coming back only keep frame0 attributes for transform_to
attrs0 = dict((attr, val) for attr, val in attrs0.items()
if attr in frame0.get_frame_attr_names())
# also, if any are None, fill in with defaults
for attrnm in frame0.get_frame_attr_names():
if attrs0.get(attrnm, None) is None:
if attrnm == 'obstime' and frame0.get_frame_attr_names()[attrnm] is None:
if 'equinox' in attrs0:
attrs0[attrnm] = attrs0['equinox']
else:
attrs0[attrnm] = frame0.get_frame_attr_names()[attrnm]
sc_rt = sc2.transform_to(frame0(**attrs0))
if frame0 is Galactic:
assert allclose(sc.l, sc_rt.l)
assert allclose(sc.b, sc_rt.b)
else:
assert allclose(sc.ra, sc_rt.ra)
assert allclose(sc.dec, sc_rt.dec)
if equinox0:
assert type(sc.equinox) is Time and sc.equinox == sc_rt.equinox
if obstime0:
assert type(sc.obstime) is Time and sc.obstime == sc_rt.obstime
def test_coord_init_string():
"""
Spherical or Cartesian representation input coordinates.
"""
sc = SkyCoord('1d 2d')
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord('1d', '2d')
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord('1°2′3″', '2°3′4″')
assert allclose(sc.ra, Angle('1°2′3″'))
assert allclose(sc.dec, Angle('2°3′4″'))
sc = SkyCoord('1°2′3″ 2°3′4″')
assert allclose(sc.ra, Angle('1°2′3″'))
assert allclose(sc.dec, Angle('2°3′4″'))
with pytest.raises(ValueError) as err:
SkyCoord('1d 2d 3d')
assert "Cannot parse first argument data" in str(err.value)
sc1 = SkyCoord('8 00 00 +5 00 00.0', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc1, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc11 = SkyCoord('8h00m00s+5d00m00.0s', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc11, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc2 = SkyCoord('8 00 -5 00 00.0', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc2, SkyCoord)
assert allclose(sc2.ra, Angle(120 * u.deg))
assert allclose(sc2.dec, Angle(-5 * u.deg))
sc3 = SkyCoord('8 00 -5 00.6', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc3, SkyCoord)
assert allclose(sc3.ra, Angle(120 * u.deg))
assert allclose(sc3.dec, Angle(-5.01 * u.deg))
sc4 = SkyCoord('J080000.00-050036.00', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc4, SkyCoord)
assert allclose(sc4.ra, Angle(120 * u.deg))
assert allclose(sc4.dec, Angle(-5.01 * u.deg))
sc41 = SkyCoord('J080000+050036', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc41, SkyCoord)
assert allclose(sc41.ra, Angle(120 * u.deg))
assert allclose(sc41.dec, Angle(+5.01 * u.deg))
sc5 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc5, SkyCoord)
assert allclose(sc5.ra, Angle(120.15 * u.deg))
assert allclose(sc5.dec, Angle(-5.01 * u.deg))
sc6 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc6, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord('8h00.6m-5d00.6m', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc61, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord('8h00.6-5d00.6', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc61, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc7 = SkyCoord("J1874221.60+122421.6", unit=u.deg)
assert isinstance(sc7, SkyCoord)
assert allclose(sc7.ra, Angle(187.706 * u.deg))
assert allclose(sc7.dec, Angle(12.406 * u.deg))
with pytest.raises(ValueError):
SkyCoord('8 00 -5 00.6', unit=(u.deg, u.deg), frame='galactic')
def test_coord_init_unit():
"""
Test variations of the unit keyword.
"""
for unit in ('deg', 'deg,deg', ' deg , deg ', u.deg, (u.deg, u.deg),
np.array(['deg', 'deg'])):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(1 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ('hourangle', 'hourangle,hourangle', ' hourangle , hourangle ',
u.hourangle, [u.hourangle, u.hourangle]):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(30 * u.deg))
for unit in ('hourangle,deg', (u.hourangle, u.deg)):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ('deg,deg,deg,deg', [u.deg, u.deg, u.deg, u.deg], None):
with pytest.raises(ValueError) as err:
SkyCoord(1, 2, unit=unit)
assert 'Unit keyword must have one to three unit values' in str(err.value)
for unit in ('m', (u.m, u.deg), ''):
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, unit=unit)
def test_coord_init_list():
"""
Spherical or Cartesian representation input coordinates.
"""
sc = SkyCoord([('1d', '2d'),
(1 * u.deg, 2 * u.deg),
'1d 2d',
('1°', '2°'),
'1° 2°'], unit='deg')
assert allclose(sc.ra, Angle('1d'))
assert allclose(sc.dec, Angle('2d'))
with pytest.raises(ValueError) as err:
SkyCoord(['1d 2d 3d'])
assert "Cannot parse first argument data" in str(err.value)
with pytest.raises(ValueError) as err:
SkyCoord([('1d', '2d', '3d')])
assert "Cannot parse first argument data" in str(err.value)
sc = SkyCoord([1 * u.deg, 1 * u.deg], [2 * u.deg, 2 * u.deg])
assert allclose(sc.ra, Angle('1d'))
assert allclose(sc.dec, Angle('2d'))
with pytest.raises(ValueError) as err:
SkyCoord([1 * u.deg, 2 * u.deg]) # this list is taken as RA w/ missing dec
assert "One or more elements of input sequence does not have a length" in str(err.value)
def test_coord_init_array():
"""
Input in the form of a list array or numpy array
"""
for a in (['1 2', '3 4'],
[['1', '2'], ['3', '4']],
[[1, 2], [3, 4]]):
sc = SkyCoord(a, unit='deg')
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
sc = SkyCoord(np.array(a), unit='deg')
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
def test_coord_init_representation():
"""
Spherical or Cartesian representation input coordinates.
"""
coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc)
sc = SkyCoord(coord, frame='icrs')
assert allclose(sc.ra, coord.lon)
assert allclose(sc.dec, coord.lat)
assert allclose(sc.distance, coord.distance)
with pytest.raises(ValueError) as err:
SkyCoord(coord, frame='icrs', ra='1d')
assert "conflicts with keyword argument 'ra'" in str(err.value)
coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one)
sc = SkyCoord(coord, frame='icrs')
sc_cart = sc.represent_as(CartesianRepresentation)
assert allclose(sc_cart.x, 1.0)
assert allclose(sc_cart.y, 2.0)
assert allclose(sc_cart.z, 3.0)
def test_frame_init():
"""
Different ways of providing the frame.
"""
sc = SkyCoord(RA, DEC, frame='icrs')
assert sc.frame.name == 'icrs'
sc = SkyCoord(RA, DEC, frame=ICRS)
assert sc.frame.name == 'icrs'
sc = SkyCoord(sc)
assert sc.frame.name == 'icrs'
sc = SkyCoord(C_ICRS)
assert sc.frame.name == 'icrs'
    sc = SkyCoord(C_ICRS, frame='icrs')
    assert sc.frame.name == 'icrs'
with pytest.raises(ValueError) as err:
SkyCoord(C_ICRS, frame='galactic')
assert 'Cannot override frame=' in str(err.value)
def test_equal():
obstime = 'B1955'
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, obstime=obstime)
sc2 = SkyCoord([1, 20]*u.deg, [3, 4]*u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert (sc1[0] == sc2[0]) == True # noqa (numpy True not Python True)
assert (sc1[0] != sc2[0]) == False # noqa
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 2]*u.km/u.s)
sc2 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 20]*u.km/u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert (sc1[0] == sc2[0]) == True # noqa
assert (sc1[0] != sc2[0]) == False # noqa
def test_equal_different_type():
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, obstime='B1955')
# Test equals and not equals operators against different types
assert sc1 != 'a string'
assert not (sc1 == 'a string')
def test_equal_exceptions():
sc1 = SkyCoord(1*u.deg, 2*u.deg, obstime='B1955')
sc2 = SkyCoord(1*u.deg, 2*u.deg)
with pytest.raises(ValueError, match=r"cannot compare: extra frame "
r"attribute 'obstime' is not equivalent \(perhaps compare the "
r"frames directly to avoid this exception\)"):
sc1 == sc2
# Note that this exception is the only one raised directly in SkyCoord.
# All others come from lower-level classes and are tested in test_frames.py.
def test_attr_inheritance():
"""
When initializing from an existing coord the representation attrs like
equinox should be inherited to the SkyCoord. If there is a conflict
then raise an exception.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # Doesn't have equinox there so we get FK4 defaults
assert sc2.equinox != sc.equinox
assert sc2.obstime != sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # sc.frame has equinox, obstime
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
@pytest.mark.parametrize('frame', ['fk4', 'fk5', 'icrs'])
def test_setitem_no_velocity(frame):
"""Test different flavors of item setting for a SkyCoord without a velocity
for different frames. Include a frame attribute that is sometimes an
actual frame attribute and sometimes an extra frame attribute.
"""
sc0 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, obstime='B1955', frame=frame)
sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg, obstime='B1955', frame=frame)
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert sc1.obstime == Time('B1955')
assert sc1.frame.name == frame
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
def test_setitem_initially_broadcast():
sc = SkyCoord(np.ones((2, 1))*u.deg, np.ones((1, 3))*u.deg)
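    # The coord was built from broadcast (strided, shared) inputs, so item
    # assignment must first materialize unshared data; only the single
    # targeted element may change.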
sc[1, 1] = SkyCoord(0*u.deg, 0*u.deg)
expected = np.ones((2, 3))*u.deg
expected[1, 1] = 0.
assert np.all(sc.ra == expected)
assert np.all(sc.dec == expected)
def test_setitem_velocities():
"""Test different flavors of item setting for a SkyCoord with a velocity.
"""
sc0 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 2]*u.km/u.s,
obstime='B1950', frame='fk4')
sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg, radial_velocity=[10, 20]*u.km/u.s,
obstime='B1950', frame='fk4')
sc1 = sc0.copy()
sc1[1] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [1, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [3, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [1, 10])
assert sc1.obstime == Time('B1950')
assert sc1.frame.name == 'fk4'
sc1 = sc0.copy()
sc1[:] = sc2[0]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 10])
sc1 = sc0.copy()
sc1[:] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [10, 20])
assert np.allclose(sc1.dec.to_value(u.deg), [30, 40])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [10, 20])
sc1 = sc0.copy()
sc1[[1, 0]] = sc2[:]
assert np.allclose(sc1.ra.to_value(u.deg), [20, 10])
assert np.allclose(sc1.dec.to_value(u.deg), [40, 30])
assert np.allclose(sc1.radial_velocity.to_value(u.km / u.s), [20, 10])
def test_setitem_exceptions():
class SkyCoordSub(SkyCoord):
pass
obstime = 'B1955'
sc0 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, frame='fk4')
sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg, frame='fk4', obstime=obstime)
sc1 = SkyCoordSub(sc0)
    with pytest.raises(TypeError, match='can only set from object of same class: '
                                        'SkyCoordSub vs. SkyCoord'):
sc1[0] = sc2[0]
sc1 = SkyCoord(sc0.ra, sc0.dec, frame='fk4', obstime='B2001')
with pytest.raises(ValueError, match='can only set frame item from an equivalent frame'):
sc1.frame[0] = sc2.frame[0]
sc1 = SkyCoord(sc0.ra[0], sc0.dec[0], frame='fk4', obstime=obstime)
with pytest.raises(TypeError, match="scalar 'FK4' frame object does not support "
'item assignment'):
sc1[0] = sc2[0]
# Different differentials
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg,
pm_ra_cosdec=[1, 2]*u.mas/u.yr, pm_dec=[3, 4]*u.mas/u.yr)
sc2 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg, radial_velocity=[10, 20]*u.km/u.s)
with pytest.raises(TypeError, match='can only set from object of same class: '
'UnitSphericalCosLatDifferential vs. RadialDifferential'):
sc1[0] = sc2[0]
def test_insert():
sc0 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
sc1 = SkyCoord(5*u.deg, 6*u.deg)
sc3 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]]*u.deg,
[[5, 6], [7, 8]]*u.deg)
sc5 = SkyCoord([[10, 2], [30, 4]]*u.deg,
[[50, 6], [70, 8]]*u.deg)
# Insert a scalar
sc = sc0.insert(1, sc1)
assert skycoord_equal(sc, SkyCoord([1, 5, 2]*u.deg, [3, 6, 4]*u.deg))
# Insert length=2 array at start of array
sc = sc0.insert(0, sc3)
assert skycoord_equal(sc, SkyCoord([10, 20, 1, 2]*u.deg, [30, 40, 3, 4]*u.deg))
# Insert length=2 array at end of array
sc = sc0.insert(2, sc3)
assert skycoord_equal(sc, SkyCoord([1, 2, 10, 20]*u.deg, [3, 4, 30, 40]*u.deg))
# Multidimensional
sc = sc4.insert(1, sc5)
assert skycoord_equal(sc, SkyCoord([[1, 2], [10, 2], [30, 4], [3, 4]]*u.deg,
[[5, 6], [50, 6], [70, 8], [7, 8]]*u.deg))
def test_insert_exceptions():
sc0 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
sc1 = SkyCoord(5*u.deg, 6*u.deg)
# sc3 = SkyCoord([10, 20]*u.deg, [30, 40]*u.deg)
sc4 = SkyCoord([[1, 2], [3, 4]]*u.deg,
[[5, 6], [7, 8]]*u.deg)
with pytest.raises(TypeError, match='cannot insert into scalar'):
sc1.insert(0, sc0)
with pytest.raises(ValueError, match='axis must be 0'):
sc0.insert(0, sc1, axis=1)
with pytest.raises(TypeError, match='obj arg must be an integer'):
sc0.insert(slice(None), sc0)
with pytest.raises(IndexError, match='index -100 is out of bounds for axis 0 '
'with size 2'):
sc0.insert(-100, sc0)
# Bad shape
with pytest.raises(ValueError, match='could not broadcast input array from '
r'shape \(2,2\) into shape \(2,?\)'):
sc0.insert(0, sc4)
def test_attr_conflicts():
"""
Check conflicts resolution between coordinate attributes and init kwargs.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox='J1999', obstime='J2001')
# OK because sc.frame doesn't have obstime
SkyCoord(sc.frame, equinox='J1999', obstime='J2100')
# Not OK if attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox='J1999', obstime='J2002')
assert "Coordinate attribute 'obstime'=" in str(err.value)
# Same game but with fk4 which has equinox and obstime frame attrs
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox='J1999', obstime='J2001')
# Not OK if SkyCoord attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox='J1999', obstime='J2002')
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
# Not OK because sc.frame has different attrs
with pytest.raises(ValueError) as err:
SkyCoord(sc.frame, equinox='J1999', obstime='J2002')
assert "Frame attribute 'obstime' has conflicting" in str(err.value)
def test_frame_attr_getattr():
"""
When accessing frame attributes like equinox, the value should come
from self.frame when that object has the relevant attribute, otherwise
from self.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
assert sc.equinox == 'J1999' # Just the raw value (not validated)
assert sc.obstime == 'J2001'
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
assert sc.equinox == Time('J1999') # Coming from the self.frame object
assert sc.obstime == Time('J2001')
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999')
assert sc.equinox == Time('J1999')
assert sc.obstime == Time('J1999')
def test_to_string():
"""
Basic testing of converting SkyCoord to strings. This just tests
    for a single input coordinate and a 1-element list. It does not
test the underlying `Angle.to_string` method itself.
"""
coord = '1h2m3s 1d2m3s'
for wrap in (lambda x: x, lambda x: [x]):
sc = SkyCoord(wrap(coord))
assert sc.to_string() == wrap('15.5125 1.03417')
assert sc.to_string('dms') == wrap('15d30m45s 1d02m03s')
assert sc.to_string('hmsdms') == wrap('01h02m03s +01d02m03s')
with_kwargs = sc.to_string('hmsdms', precision=3, pad=True, alwayssign=True)
assert with_kwargs == wrap('+01h02m03.000s +01d02m03.000s')
@pytest.mark.parametrize('cls_other', [SkyCoord, ICRS])
def test_seps(cls_other):
sc1 = SkyCoord(0 * u.deg, 1 * u.deg)
sc2 = cls_other(0 * u.deg, 2 * u.deg)
sep = sc1.separation(sc2)
    assert np.abs((sep - 1 * u.deg) / u.deg) < 1e-10
with pytest.raises(ValueError):
sc1.separation_3d(sc2)
sc3 = SkyCoord(1 * u.deg, 1 * u.deg, distance=1 * u.kpc)
sc4 = cls_other(1 * u.deg, 1 * u.deg, distance=2 * u.kpc)
sep3d = sc3.separation_3d(sc4)
assert sep3d == 1 * u.kpc
def test_repr():
sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs')
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc)
assert repr(sc1) == ('<SkyCoord (ICRS): (ra, dec) in deg\n'
' (0., 1.)>')
assert repr(sc2) == ('<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)\n'
' (1., 1., 1.)>')
sc3 = SkyCoord(0.25 * u.deg, [1, 2.5] * u.deg, frame='icrs')
assert repr(sc3).startswith('<SkyCoord (ICRS): (ra, dec) in deg\n')
sc_default = SkyCoord(0 * u.deg, 1 * u.deg)
assert repr(sc_default) == ('<SkyCoord (ICRS): (ra, dec) in deg\n'
' (0., 1.)>')
def test_repr_altaz():
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc)
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time('2005-03-21 00:00:00')
sc4 = sc2.transform_to(AltAz(location=loc, obstime=time))
assert repr(sc4).startswith("<SkyCoord (AltAz: obstime=2005-03-21 00:00:00.000, "
"location=(-2309223., -3695529., "
"-4641767.) m, pressure=0.0 hPa, "
"temperature=0.0 deg_C, relative_humidity=0.0, "
"obswl=1.0 micron): (az, alt, distance) in "
"(deg, deg, kpc)\n")
def test_ops():
"""
Tests miscellaneous operations like `len`
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs')
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg, frame='icrs')
sc_empty = SkyCoord([] * u.deg, [] * u.deg, frame='icrs')
assert sc.isscalar
assert not sc_arr.isscalar
assert not sc_empty.isscalar
with pytest.raises(TypeError):
len(sc)
assert len(sc_arr) == 2
assert len(sc_empty) == 0
assert bool(sc)
assert bool(sc_arr)
assert not bool(sc_empty)
assert sc_arr[0].isscalar
assert len(sc_arr[:1]) == 1
# A scalar shouldn't be indexable
with pytest.raises(TypeError):
sc[0:]
# but it should be possible to just get an item
sc_item = sc[()]
assert sc_item.shape == ()
# and to turn it into an array
sc_1d = sc[np.newaxis]
assert sc_1d.shape == (1,)
with pytest.raises(TypeError):
iter(sc)
assert not isiterable(sc)
assert isiterable(sc_arr)
assert isiterable(sc_empty)
it = iter(sc_arr)
assert next(it).dec == sc_arr[0].dec
assert next(it).dec == sc_arr[1].dec
with pytest.raises(StopIteration):
next(it)
def test_none_transform():
"""
Ensure that transforming from a SkyCoord with no frame provided works like
ICRS
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg)
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg)
sc2 = sc.transform_to(ICRS)
assert sc.ra == sc2.ra and sc.dec == sc2.dec
sc5 = sc.transform_to('fk5')
assert sc5.ra == sc2.transform_to('fk5').ra
sc_arr2 = sc_arr.transform_to(ICRS)
sc_arr5 = sc_arr.transform_to('fk5')
npt.assert_array_equal(sc_arr5.ra, sc_arr2.transform_to('fk5').ra)
def test_position_angle():
c1 = SkyCoord(0*u.deg, 0*u.deg)
c2 = SkyCoord(1*u.deg, 0*u.deg)
assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0*u.deg)
c3 = SkyCoord(1*u.deg, 0.1*u.deg)
assert c1.position_angle(c3) < 90*u.deg
c4 = SkyCoord(0*u.deg, 1*u.deg)
assert_allclose(c1.position_angle(c4), 0*u.deg)
carr1 = SkyCoord(0*u.deg, [0, 1, 2]*u.deg)
carr2 = SkyCoord([-1, -2, -3]*u.deg, [0.1, 1.1, 2.1]*u.deg)
res = carr1.position_angle(carr2)
assert res.shape == (3,)
assert np.all(res < 360*u.degree)
assert np.all(res > 270*u.degree)
cicrs = SkyCoord(0*u.deg, 0*u.deg, frame='icrs')
cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5')
# because of the frame transform, it's just a *bit* more than 90 degrees
assert cicrs.position_angle(cfk5) > 90.0 * u.deg
assert cicrs.position_angle(cfk5) < 91.0 * u.deg
def test_position_angle_directly():
"""Regression check for #3800: position_angle should accept floats."""
from astropy.coordinates.angle_utilities import position_angle
result = position_angle(10., 20., 10., 20.)
assert result.unit is u.radian
assert result.value == 0.
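def test_position_angle_directly_north():
    # Companion check (a sketch, not part of the original suite): a purely
    # northward offset should give a position angle of exactly zero.
    from astropy.coordinates.angle_utilities import position_angle
    result = position_angle(0., 0., 0., 0.1)
    assert result.unit is u.radian
    assert result.value == 0.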
def test_sep_pa_equivalence():
"""Regression check for bug in #5702.
PA and separation from object 1 to 2 should be consistent with those
from 2 to 1
"""
cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5')
cfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950')
# test with both default and explicit equinox #5722 and #3106
sep_forward = cfk5.separation(cfk5B1950)
sep_backward = cfk5B1950.separation(cfk5)
assert sep_forward != 0 and sep_backward != 0
assert_allclose(sep_forward, sep_backward)
posang_forward = cfk5.position_angle(cfk5B1950)
posang_backward = cfk5B1950.position_angle(cfk5)
assert posang_forward != 0 and posang_backward != 0
assert 179 < (posang_forward - posang_backward).wrap_at(360*u.deg).degree < 181
dcfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', distance=1*u.pc)
dcfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950',
distance=1.*u.pc)
sep3d_forward = dcfk5.separation_3d(dcfk5B1950)
sep3d_backward = dcfk5B1950.separation_3d(dcfk5)
assert sep3d_forward != 0 and sep3d_backward != 0
assert_allclose(sep3d_forward, sep3d_backward)
def test_directional_offset_by():
# Round-trip tests: where is sc2 from sc1?
# Use those offsets from sc1 and verify you get to sc2.
npoints = 7 # How many points when doing vectors of SkyCoords
    for sc1 in [SkyCoord(0*u.deg, -90*u.deg),  # South pole
                SkyCoord(0*u.deg, 90*u.deg),  # North pole
                SkyCoord(1*u.deg, 2*u.deg),
                SkyCoord(np.linspace(0, 359, npoints), np.linspace(-90, 90, npoints),
                         unit=u.deg, frame='fk4'),
                SkyCoord(np.linspace(359, 0, npoints), np.linspace(-90, 90, npoints),
                         unit=u.deg, frame='icrs'),
                SkyCoord(np.linspace(-3, 3, npoints), np.linspace(-90, 90, npoints),
                         unit=(u.rad, u.deg), frame='barycentricmeanecliptic')]:
        for sc2 in [SkyCoord(5*u.deg, 10*u.deg),
                    SkyCoord(np.linspace(0, 359, npoints), np.linspace(-90, 90, npoints),
                             unit=u.deg, frame='galactic')]:
# Find the displacement from sc1 to sc2,
posang = sc1.position_angle(sc2)
sep = sc1.separation(sc2)
# then do the offset from sc1 and verify that you are at sc2
sc2a = sc1.directional_offset_by(position_angle=posang, separation=sep)
assert np.max(np.abs(sc2.separation(sc2a).arcsec)) < 1e-3
# Specific test cases
# Go over the North pole a little way, and
# over the South pole a long way, to get to same spot
sc1 = SkyCoord(0*u.deg, 89*u.deg)
    for posang, sep in [(0*u.deg, 2*u.deg), (180*u.deg, 358*u.deg)]:
sc2 = sc1.directional_offset_by(posang, sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 89])
# Go twice as far to ensure that dec is actually changing
# and that >360deg is supported
sc2 = sc1.directional_offset_by(posang, 2*sep)
assert allclose([sc2.ra.degree, sc2.dec.degree], [180, 87])
# Verify that a separation of 180 deg in any direction gets to the antipode
# and 360 deg returns to start
sc1 = SkyCoord(10*u.deg, 47*u.deg)
for posang in np.linspace(0, 377, npoints):
sc2 = sc1.directional_offset_by(posang, 180*u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [190, -47])
sc2 = sc1.directional_offset_by(posang, 360*u.deg)
assert allclose([sc2.ra.degree, sc2.dec.degree], [10, 47])
    # Verify that a 90 degree posang (due East) increases RA by roughly
    # separation/cos(dec) (here 1 deg / cos(60 deg), about 2 deg) and
    # converges slightly toward the equator
sc1 = SkyCoord(10*u.deg, 60*u.deg)
sc2 = sc1.directional_offset_by(90*u.deg, 1.0*u.deg)
assert 11.9 < sc2.ra.degree < 12.0
assert 59.9 < sc2.dec.degree < 60.0
def test_table_to_coord():
"""
Checks "end-to-end" use of `Table` with `SkyCoord` - the `Quantity`
    initializer is the intermediary that translates the table columns into
    something coordinates understands.
    (Regression test for #1762)
"""
from astropy.table import Table, Column
t = Table()
t.add_column(Column(data=[1, 2, 3], name='ra', unit=u.deg))
t.add_column(Column(data=[4, 5, 6], name='dec', unit=u.deg))
c = SkyCoord(t['ra'], t['dec'])
assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg)
assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg)
def assert_quantities_allclose(coord, q1s, attrs):
"""
Compare two tuples of quantities. This assumes that the values in q1 are of
    order unity and uses atol=1e-13, rtol=0. It also asserts that the units of the
two quantities are the *same*, in order to check that the representation
output has the expected units.
"""
q2s = [getattr(coord, attr) for attr in attrs]
assert len(q1s) == len(q2s)
for q1, q2 in zip(q1s, q2s):
assert q1.shape == q2.shape
assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit)
# Sets of inputs corresponding to Galactic frame
base_unit_attr_sets = [
('spherical', u.karcsec, u.karcsec, u.kpc, Latitude, 'l', 'b', 'distance'),
('unitspherical', u.karcsec, u.karcsec, None, Latitude, 'l', 'b', None),
('physicsspherical', u.karcsec, u.karcsec, u.kpc, Angle, 'phi', 'theta', 'r'),
('cartesian', u.km, u.km, u.km, u.Quantity, 'u', 'v', 'w'),
('cylindrical', u.km, u.karcsec, u.km, Angle, 'rho', 'phi', 'z')
]
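# Expand the base sets into the full parameter grid: each representation is
# exercised both by name and by class, with scalar and length-1 list
# components, in plain Python form and converted to numpy arrays.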
units_attr_sets = []
for base_unit_attr_set in base_unit_attr_sets:
repr_name = base_unit_attr_set[0]
for representation in (repr_name, REPRESENTATION_CLASSES[repr_name]):
for c1, c2, c3 in ((1, 2, 3), ([1], [2], [3])):
            # plain values first, then arrayified versions (converting
            # before appending would shadow the plain case)
            for arrayify in (False, True):
if arrayify:
c1 = np.array(c1)
c2 = np.array(c2)
c3 = np.array(c3)
units_attr_sets.append(base_unit_attr_set + (representation, c1, c2, c3))
units_attr_args = ('repr_name', 'unit1', 'unit2', 'unit3', 'cls2',
                   'attr1', 'attr2', 'attr3', 'representation',
                   'c1', 'c2', 'c3')
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets if x[0] != 'unitspherical'])
def test_skycoord_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3,
representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(c1, c2, c3, unit=(unit1, unit2, unit3),
representation_type=representation,
frame=Galactic)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
1000*c3*u.Unit(unit3/1000), frame=Galactic,
unit=(unit1, unit2, unit3), representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr3: c3}
sc = SkyCoord(c1, c2, unit=(unit1, unit2, unit3),
frame=Galactic,
representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr1: c1, attr2: c2, attr3: c3}
sc = SkyCoord(frame=Galactic, unit=(unit1, unit2, unit3),
representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets
if x[0] in ('spherical', 'unitspherical')])
def test_skycoord_spherical_two_components(repr_name, unit1, unit2, unit3, cls2,
attr1, attr2, attr3, representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(c1, c2, unit=(unit1, unit2), frame=Galactic,
representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
frame=Galactic,
unit=(unit1, unit2, unit3), representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
kwargs = {attr1: c1, attr2: c2}
sc = SkyCoord(frame=Galactic, unit=(unit1, unit2),
representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets if x[0] != 'unitspherical'])
def test_galactic_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3,
representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
1000*c3*u.Unit(unit3/1000), representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr3: c3*unit3}
sc = Galactic(c1*unit1, c2*unit2,
representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr1: c1*unit1, attr2: c2*unit2, attr3: c3*unit3}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets
if x[0] in ('spherical', 'unitspherical')])
def test_galactic_spherical_two_components(repr_name, unit1, unit2, unit3, cls2,
attr1, attr2, attr3, representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
sc = Galactic(c1*unit1, c2*unit2, representation_type=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
kwargs = {attr1: c1*unit1, attr2: c2*unit2}
sc = Galactic(representation_type=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
@pytest.mark.parametrize(('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3'),
[x for x in base_unit_attr_sets if x[0] != 'unitspherical'])
def test_skycoord_coordinate_input(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3):
c1, c2, c3 = 1, 2, 3
sc = SkyCoord([(c1, c2, c3)], unit=(unit1, unit2, unit3), representation_type=repr_name,
frame='galactic')
assert_quantities_allclose(sc, ([c1]*unit1, [c2]*unit2, [c3]*unit3), (attr1, attr2, attr3))
c1, c2, c3 = 1*unit1, 2*unit2, 3*unit3
sc = SkyCoord([(c1, c2, c3)], representation_type=repr_name, frame='galactic')
assert_quantities_allclose(sc, ([1]*unit1, [2]*unit2, [3]*unit3), (attr1, attr2, attr3))
def test_skycoord_string_coordinate_input():
sc = SkyCoord('01 02 03 +02 03 04', unit='deg', representation_type='unitspherical')
assert_quantities_allclose(sc, (Angle('01:02:03', unit='deg'),
Angle('02:03:04', unit='deg')),
('ra', 'dec'))
sc = SkyCoord(['01 02 03 +02 03 04'], unit='deg', representation_type='unitspherical')
assert_quantities_allclose(sc, (Angle(['01:02:03'], unit='deg'),
Angle(['02:03:04'], unit='deg')),
('ra', 'dec'))
def test_units():
sc = SkyCoord(1, 2, 3, unit='m', representation_type='cartesian') # All get meters
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2*u.km, 3, unit='m', representation_type='cartesian') # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit=u.m, representation_type='cartesian') # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit='m, km, pc', representation_type='cartesian')
assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z'))
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, 3, unit=(u.m, u.m), representation_type='cartesian')
assert 'should have matching physical types' in str(err.value)
    sc = SkyCoord(1, 2, 3, unit=(u.m, u.km, u.pc), representation_type='cartesian')
    assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z'))
@pytest.mark.xfail
def test_units_known_fail():
# should fail but doesn't => corner case oddity
with pytest.raises(u.UnitsError):
SkyCoord(1, 2, 3, unit=u.deg, representation_type='spherical')
def test_nodata_failure():
with pytest.raises(ValueError):
SkyCoord()
@pytest.mark.parametrize(('mode', 'origin'), [('wcs', 0),
('all', 0),
('all', 1)])
def test_wcs_methods(mode, origin):
from astropy.wcs import WCS
from astropy.utils.data import get_pkg_data_contents
from astropy.wcs.utils import pixel_to_skycoord
header = get_pkg_data_contents('../../wcs/tests/data/maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp, yp = ref.to_pixel(wcs, mode=mode, origin=origin)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# also try to round-trip with `from_pixel`
scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs')
assert_allclose(scnew.ra.degree, ref.ra.degree)
assert_allclose(scnew.dec.degree, ref.dec.degree)
# Also make sure the right type comes out
class SkyCoord2(SkyCoord):
pass
scnew2 = SkyCoord2.from_pixel(xp, yp, wcs, mode=mode, origin=origin)
assert scnew.__class__ is SkyCoord
assert scnew2.__class__ is SkyCoord2
def test_frame_attr_transform_inherit():
"""
Test that frame attributes get inherited as expected during transform.
Driven by #3106.
"""
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK5)
c2 = c.transform_to(FK4)
assert c2.equinox.value == 'B1950.000'
assert c2.obstime.value == 'B1950.000'
c2 = c.transform_to(FK4(equinox='J1975', obstime='J1980'))
assert c2.equinox.value == 'J1975.000'
assert c2.obstime.value == 'J1980.000'
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4)
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J2000.000'
assert c2.obstime is None
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, obstime='J1980')
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J2000.000'
assert c2.obstime.value == 'J1980.000'
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, equinox='J1975', obstime='J1980')
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J1975.000'
assert c2.obstime.value == 'J1980.000'
c2 = c.transform_to(FK5(equinox='J1990'))
assert c2.equinox.value == 'J1990.000'
assert c2.obstime.value == 'J1980.000'
# The work-around for #5722
c = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5')
c1 = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5', equinox='B1950.000')
c2 = c1.transform_to(c)
assert not c2.is_equivalent_frame(c) # counterintuitive, but documented
assert c2.equinox.value == 'B1950.000'
c3 = c1.transform_to(c, merge_attributes=False)
assert c3.equinox.value == 'J2000.000'
assert c3.is_equivalent_frame(c)
def test_deepcopy():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
c2 = copy.copy(c1)
c3 = copy.deepcopy(c1)
c4 = SkyCoord([1, 2] * u.m, [2, 3] * u.m, [3, 4] * u.m, representation_type='cartesian', frame='fk5',
obstime='J1999.9', equinox='J1988.8')
c5 = copy.deepcopy(c4)
assert np.all(c5.x == c4.x) # and y and z
assert c5.frame.name == c4.frame.name
assert c5.obstime == c4.obstime
assert c5.equinox == c4.equinox
assert c5.representation_type == c4.representation_type
def test_no_copy():
c1 = SkyCoord(np.arange(10.) * u.hourangle, np.arange(20., 30.) * u.deg)
c2 = SkyCoord(c1, copy=False)
# Note: c1.ra and c2.ra will *not* share memory, as these are recalculated
# to be in "preferred" units. See discussion in #4883.
assert np.may_share_memory(c1.data.lon, c2.data.lon)
c3 = SkyCoord(c1, copy=True)
assert not np.may_share_memory(c1.data.lon, c3.data.lon)
def test_immutable():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(AttributeError):
c1.ra = 3.0
c1.foo = 42
assert c1.foo == 42
@pytest.mark.skipif('not HAS_SCIPY')
def test_search_around():
"""
Test the search_around_* methods
Here we don't actually test the values are right, just that the methods of
SkyCoord work. The accuracy tests are in ``test_matching.py``
"""
from astropy.utils import NumpyRNGContext
with NumpyRNGContext(987654321):
sc1 = SkyCoord(np.random.rand(20) * 360.*u.degree,
(np.random.rand(20) * 180. - 90.)*u.degree)
sc2 = SkyCoord(np.random.rand(100) * 360. * u.degree,
(np.random.rand(100) * 180. - 90.)*u.degree)
sc1ds = SkyCoord(ra=sc1.ra, dec=sc1.dec, distance=np.random.rand(20)*u.kpc)
sc2ds = SkyCoord(ra=sc2.ra, dec=sc2.dec, distance=np.random.rand(100)*u.kpc)
idx1_sky, idx2_sky, d2d_sky, d3d_sky = sc1.search_around_sky(sc2, 10*u.deg)
idx1_3d, idx2_3d, d2d_3d, d3d_3d = sc1ds.search_around_3d(sc2ds, 250*u.pc)
def test_init_with_frame_instance_keyword():
# Frame instance
c1 = SkyCoord(3 * u.deg, 4 * u.deg,
frame=FK5(equinox='J2010'))
assert c1.equinox == Time('J2010')
# Frame instance with data (data gets ignored)
c2 = SkyCoord(3 * u.deg, 4 * u.deg,
frame=FK5(1. * u.deg, 2 * u.deg,
equinox='J2010'))
assert c2.equinox == Time('J2010')
assert allclose(c2.ra.degree, 3)
assert allclose(c2.dec.degree, 4)
# SkyCoord instance
c3 = SkyCoord(3 * u.deg, 4 * u.deg, frame=c1)
assert c3.equinox == Time('J2010')
# Check duplicate arguments
with pytest.raises(ValueError) as err:
c = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox='J2010'), equinox='J2001')
assert "Cannot specify frame attribute 'equinox'" in str(err.value)
def test_guess_from_table():
from astropy.table import Table, Column
from astropy.utils import NumpyRNGContext
tab = Table()
with NumpyRNGContext(987654321):
tab.add_column(Column(data=np.random.rand(10), unit='deg', name='RA[J2000]'))
tab.add_column(Column(data=np.random.rand(10), unit='deg', name='DEC[J2000]'))
sc = SkyCoord.guess_from_table(tab)
npt.assert_array_equal(sc.ra.deg, tab['RA[J2000]'])
npt.assert_array_equal(sc.dec.deg, tab['DEC[J2000]'])
# try without units in the table
tab['RA[J2000]'].unit = None
tab['DEC[J2000]'].unit = None
# should fail if not given explicitly
with pytest.raises(u.UnitsError):
sc2 = SkyCoord.guess_from_table(tab)
# but should work if provided
sc2 = SkyCoord.guess_from_table(tab, unit=u.deg)
npt.assert_array_equal(sc2.ra.deg, tab['RA[J2000]'])
npt.assert_array_equal(sc2.dec.deg, tab['DEC[J2000]'])
# should fail if two options are available - ambiguity bad!
tab.add_column(Column(data=np.random.rand(10), name='RA_J1900'))
with pytest.raises(ValueError) as excinfo:
SkyCoord.guess_from_table(tab, unit=u.deg)
assert 'J1900' in excinfo.value.args[0] and 'J2000' in excinfo.value.args[0]
tab.remove_column('RA_J1900')
tab['RA[J2000]'].unit = u.deg
tab['DEC[J2000]'].unit = u.deg
# but should succeed if the ambiguity can be broken b/c one of the matches
# is the name of a different component
tab.add_column(Column(data=np.random.rand(10)*u.mas/u.yr,
name='pm_ra_cosdec'))
tab.add_column(Column(data=np.random.rand(10)*u.mas/u.yr,
name='pm_dec'))
sc3 = SkyCoord.guess_from_table(tab)
assert u.allclose(sc3.ra, tab['RA[J2000]'])
assert u.allclose(sc3.dec, tab['DEC[J2000]'])
assert u.allclose(sc3.pm_ra_cosdec, tab['pm_ra_cosdec'])
assert u.allclose(sc3.pm_dec, tab['pm_dec'])
# should fail if stuff doesn't have proper units
tab['RA[J2000]'].unit = None
tab['DEC[J2000]'].unit = None
with pytest.raises(u.UnitTypeError, match="no unit was given."):
SkyCoord.guess_from_table(tab)
tab.remove_column('pm_ra_cosdec')
tab.remove_column('pm_dec')
# should also fail if user specifies something already in the table, but
# should succeed even if the user has to give one of the components
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tab, ra=tab['RA[J2000]'], unit=u.deg)
oldra = tab['RA[J2000]']
tab.remove_column('RA[J2000]')
sc3 = SkyCoord.guess_from_table(tab, ra=oldra, unit=u.deg)
npt.assert_array_equal(sc3.ra.deg, oldra)
npt.assert_array_equal(sc3.dec.deg, tab['DEC[J2000]'])
# check a few non-ICRS/spherical systems
x, y, z = np.arange(3).reshape(3, 1) * u.pc
l, b = np.arange(2).reshape(2, 1) * u.deg
tabcart = Table([x, y, z], names=('x', 'y', 'z'))
tabgal = Table([b, l], names=('b', 'l'))
sc_cart = SkyCoord.guess_from_table(tabcart, representation_type='cartesian')
npt.assert_array_equal(sc_cart.x, x)
npt.assert_array_equal(sc_cart.y, y)
npt.assert_array_equal(sc_cart.z, z)
sc_gal = SkyCoord.guess_from_table(tabgal, frame='galactic')
npt.assert_array_equal(sc_gal.l, l)
npt.assert_array_equal(sc_gal.b, b)
# also try some column names that *end* with the attribute name
tabgal['b'].name = 'gal_b'
tabgal['l'].name = 'gal_l'
SkyCoord.guess_from_table(tabgal, frame='galactic')
tabgal['gal_b'].name = 'blob'
tabgal['gal_l'].name = 'central'
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tabgal, frame='galactic')
def test_skycoord_list_creation():
"""
Test that SkyCoord can be created in a reasonable way with lists of SkyCoords
(regression for #2702)
"""
sc = SkyCoord(ra=[1, 2, 3]*u.deg, dec=[4, 5, 6]*u.deg)
sc0 = sc[0]
sc2 = sc[2]
scnew = SkyCoord([sc0, sc2])
assert np.all(scnew.ra == [1, 3]*u.deg)
assert np.all(scnew.dec == [4, 6]*u.deg)
# also check ranges
sc01 = sc[:2]
scnew2 = SkyCoord([sc01, sc2])
assert np.all(scnew2.ra == sc.ra)
assert np.all(scnew2.dec == sc.dec)
# now try with a mix of skycoord, frame, and repr objects
frobj = ICRS(2*u.deg, 5*u.deg)
reprobj = UnitSphericalRepresentation(3*u.deg, 6*u.deg)
scnew3 = SkyCoord([sc0, frobj, reprobj])
assert np.all(scnew3.ra == sc.ra)
assert np.all(scnew3.dec == sc.dec)
# should *fail* if different frame attributes or types are passed in
scfk5_j2000 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5')
with pytest.raises(ValueError):
SkyCoord([sc0, scfk5_j2000])
scfk5_j2010 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5', equinox='J2010')
with pytest.raises(ValueError):
SkyCoord([scfk5_j2000, scfk5_j2010])
# but they should inherit if they're all consistent
scfk5_2_j2010 = SkyCoord(2*u.deg, 5*u.deg, frame='fk5', equinox='J2010')
scfk5_3_j2010 = SkyCoord(3*u.deg, 6*u.deg, frame='fk5', equinox='J2010')
scnew4 = SkyCoord([scfk5_j2010, scfk5_2_j2010, scfk5_3_j2010])
assert np.all(scnew4.ra == sc.ra)
assert np.all(scnew4.dec == sc.dec)
assert scnew4.equinox == Time('J2010')
def test_nd_skycoord_to_string():
c = SkyCoord(np.ones((2, 2)), 1, unit=('deg', 'deg'))
ts = c.to_string()
assert np.all(ts.shape == c.shape)
assert np.all(ts == '1 1')
def test_equiv_skycoord():
sci1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
sci2 = SkyCoord(1*u.deg, 3*u.deg, frame='icrs')
assert sci1.is_equivalent_frame(sci1)
assert sci1.is_equivalent_frame(sci2)
assert sci1.is_equivalent_frame(ICRS())
assert not sci1.is_equivalent_frame(FK5())
with pytest.raises(TypeError):
sci1.is_equivalent_frame(10)
scf1 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5')
scf2 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', equinox='J2005')
    # obstime is *not* an FK5 attribute, but we still want scf1 and scf3 to
    # come out different because they're part of SkyCoord
scf3 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', obstime='J2005')
assert scf1.is_equivalent_frame(scf1)
assert not scf1.is_equivalent_frame(sci1)
assert scf1.is_equivalent_frame(FK5())
assert not scf1.is_equivalent_frame(scf2)
assert scf2.is_equivalent_frame(FK5(equinox='J2005'))
assert not scf3.is_equivalent_frame(scf1)
assert not scf3.is_equivalent_frame(FK5(equinox='J2005'))
def test_equiv_skycoord_with_extra_attrs():
"""Regression test for #10658."""
# GCRS has a CartesianRepresentationAttribute called obsgeoloc
gcrs = GCRS(1*u.deg, 2*u.deg, obsgeoloc=CartesianRepresentation([1, 2, 3], unit=u.m))
# Create a SkyCoord where obsgeoloc tags along as an extra attribute
sc1 = SkyCoord(gcrs).transform_to(ICRS)
# Now create a SkyCoord with an equivalent frame but without the extra attribute
sc2 = SkyCoord(sc1.frame)
# The SkyCoords are therefore not equivalent, but check both directions
assert not sc1.is_equivalent_frame(sc2)
# This way around raised a TypeError which is fixed by #10658
assert not sc2.is_equivalent_frame(sc1)
def test_constellations():
# the actual test for accuracy is in test_funcs - this is just meant to make
# sure we get sensible answers
sc = SkyCoord(135*u.deg, 65*u.deg)
assert sc.get_constellation() == 'Ursa Major'
assert sc.get_constellation(short_name=True) == 'UMa'
scs = SkyCoord([135]*2*u.deg, [65]*2*u.deg)
npt.assert_equal(scs.get_constellation(), ['Ursa Major']*2)
npt.assert_equal(scs.get_constellation(short_name=True), ['UMa']*2)
@pytest.mark.remote_data
def test_constellations_with_nameresolve():
assert SkyCoord.from_name('And I').get_constellation(short_name=True) == 'And'
# you'd think "And ..." should be in Andromeda. But you'd be wrong.
assert SkyCoord.from_name('And VI').get_constellation() == 'Pegasus'
# maybe it's because And VI isn't really a galaxy?
assert SkyCoord.from_name('And XXII').get_constellation() == 'Pisces'
assert SkyCoord.from_name('And XXX').get_constellation() == 'Cassiopeia'
# ok maybe not
# ok, but at least some of the others do make sense...
assert SkyCoord.from_name('Coma Cluster').get_constellation(short_name=True) == 'Com'
assert SkyCoord.from_name('Orion Nebula').get_constellation() == 'Orion'
assert SkyCoord.from_name('Triangulum Galaxy').get_constellation() == 'Triangulum'
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
sc = SkyCoord([1, 1] * u.deg, [2, 2] * u.deg)
sc.representation_type = 'cartesian'
assert sc[0].representation_type is CartesianRepresentation
def test_spherical_offsets_to_api():
i00 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='icrs')
fk5 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='fk5')
with pytest.raises(ValueError):
# different frames should fail
i00.spherical_offsets_to(fk5)
i1deg = ICRS(1*u.deg, 1*u.deg)
dra, ddec = i00.spherical_offsets_to(i1deg)
assert_allclose(dra, 1*u.deg)
assert_allclose(ddec, 1*u.deg)
# make sure an abbreviated array-based version of the above also works
i00s = SkyCoord([0]*4*u.arcmin, [0]*4*u.arcmin, frame='icrs')
i01s = SkyCoord([0]*4*u.arcmin, np.arange(4)*u.arcmin, frame='icrs')
dra, ddec = i00s.spherical_offsets_to(i01s)
assert_allclose(dra, 0*u.arcmin)
assert_allclose(ddec, np.arange(4)*u.arcmin)
@pytest.mark.parametrize('frame', ['icrs', 'galactic'])
@pytest.mark.parametrize('comparison_data', [(0*u.arcmin, 1*u.arcmin),
(1*u.arcmin, 0*u.arcmin),
(1*u.arcmin, 1*u.arcmin)])
def test_spherical_offsets_roundtrip(frame, comparison_data):
i00 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame=frame)
comparison = SkyCoord(*comparison_data, frame=frame)
dlon, dlat = i00.spherical_offsets_to(comparison)
assert_allclose(dlon, comparison.data.lon)
assert_allclose(dlat, comparison.data.lat)
i00_back = comparison.spherical_offsets_by(-dlon, -dlat)
# This reaches machine precision when only one component is changed, but for
# the third parametrized case (both lon and lat change), the transformation
# will have finite accuracy:
assert_allclose(i00_back.data.lon, i00.data.lon, atol=1e-10*u.rad)
assert_allclose(i00_back.data.lat, i00.data.lat, atol=1e-10*u.rad)
# Test roundtripping the other direction:
init_c = SkyCoord(40.*u.deg, 40.*u.deg, frame=frame)
new_c = init_c.spherical_offsets_by(3.534*u.deg, 2.2134*u.deg)
dlon, dlat = new_c.spherical_offsets_to(init_c)
back_c = new_c.spherical_offsets_by(dlon, dlat)
assert init_c.separation(back_c) < 1e-10*u.deg
def test_frame_attr_changes():
"""
This tests the case where a frame is added with a new frame attribute after
a SkyCoord has been created. This is necessary because SkyCoords get the
attributes set at creation time, but the set of attributes can change as
frames are added or removed from the transform graph. This makes sure that
everything continues to work consistently.
"""
sc_before = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
assert 'fakeattr' not in dir(sc_before)
class FakeFrame(BaseCoordinateFrame):
fakeattr = Attribute()
# doesn't matter what this does as long as it just puts the frame in the
# transform graph
transset = (ICRS, FakeFrame, lambda c, f: c)
frame_transform_graph.add_transform(*transset)
try:
assert 'fakeattr' in dir(sc_before)
assert sc_before.fakeattr is None
sc_after1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
assert 'fakeattr' in dir(sc_after1)
assert sc_after1.fakeattr is None
sc_after2 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs', fakeattr=1)
assert sc_after2.fakeattr == 1
finally:
frame_transform_graph.remove_transform(*transset)
assert 'fakeattr' not in dir(sc_before)
assert 'fakeattr' not in dir(sc_after1)
assert 'fakeattr' not in dir(sc_after2)
def test_cache_clear_sc():
from astropy.coordinates import SkyCoord
i = SkyCoord(1*u.deg, 2*u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
assert len(i.cache['representation']) == 2
i.cache.clear()
assert len(i.cache['representation']) == 0
def test_set_attribute_exceptions():
"""Ensure no attrbute for any frame can be set directly.
Though it is fine if the current frame does not have it."""
sc = SkyCoord(1.*u.deg, 2.*u.deg, frame='fk5')
assert hasattr(sc.frame, 'equinox')
with pytest.raises(AttributeError):
sc.equinox = 'B1950'
assert sc.relative_humidity is None
sc.relative_humidity = 0.5
assert sc.relative_humidity == 0.5
assert not hasattr(sc.frame, 'relative_humidity')
def test_extra_attributes():
"""Ensure any extra attributes are dealt with correctly.
Regression test against #5743.
"""
obstime_string = ['2017-01-01T00:00', '2017-01-01T00:10']
obstime = Time(obstime_string)
sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=obstime_string)
assert not hasattr(sc.frame, 'obstime')
assert type(sc.obstime) is Time
assert sc.obstime.shape == (2,)
assert np.all(sc.obstime == obstime)
# ensure equivalency still works for more than one obstime.
assert sc.is_equivalent_frame(sc)
sc_1 = sc[1]
assert sc_1.obstime == obstime[1]
# Transforming to FK4 should use sc.obstime.
sc_fk4 = sc.transform_to('fk4')
assert np.all(sc_fk4.frame.obstime == obstime)
    # And transforming back should not lose it.
sc2 = sc_fk4.transform_to('icrs')
assert not hasattr(sc2.frame, 'obstime')
assert np.all(sc2.obstime == obstime)
# Ensure obstime get taken from the SkyCoord if passed in directly.
# (regression test for #5749).
sc3 = SkyCoord([0., 1.], [2., 3.], unit='deg', frame=sc)
assert np.all(sc3.obstime == obstime)
# Finally, check that we can delete such attributes.
del sc3.obstime
assert sc3.obstime is None
def test_apply_space_motion():
# use this 12 year period because it's a multiple of 4 to avoid the quirks
# of leap years while having 2 leap seconds in it
t1 = Time('2000-01-01T00:00')
t2 = Time('2012-01-01T00:00')
# Check a very simple case first:
frame = ICRS(ra=10.*u.deg, dec=0*u.deg,
distance=10.*u.pc,
pm_ra_cosdec=0.1*u.deg/u.yr,
pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
# Cases that should work (just testing input for now):
c1 = SkyCoord(frame, obstime=t1, pressure=101*u.kPa)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied1 = c1.apply_space_motion(new_obstime=t2)
applied2 = c1.apply_space_motion(dt=12*u.year)
assert isinstance(applied1.frame, c1.frame.__class__)
assert isinstance(applied2.frame, c1.frame.__class__)
assert_allclose(applied1.ra, applied2.ra)
assert_allclose(applied1.pm_ra_cosdec, applied2.pm_ra_cosdec)
assert_allclose(applied1.dec, applied2.dec)
assert_allclose(applied1.distance, applied2.distance)
# ensure any frame attributes that were there before get passed through
assert applied1.pressure == c1.pressure
    # there were 2 leap seconds between 2000 and 2012, so the difference in
# the two forms of time evolution should be ~2 sec
adt = np.abs(applied2.obstime - applied1.obstime)
assert 1.9*u.second < adt.to(u.second) < 2.1*u.second
c2 = SkyCoord(frame)
with pytest.warns(ErfaWarning, match='ERFA function "pmsafe" yielded .*'):
# warning raised due to high PM chosen above
applied3 = c2.apply_space_motion(dt=6*u.year)
assert isinstance(applied3.frame, c1.frame.__class__)
assert applied3.obstime is None
# this should *not* be .6 deg due to space-motion on a sphere, but it
# should be fairly close
assert 0.5*u.deg < applied3.ra-c1.ra < .7*u.deg
# the two cases should only match somewhat due to it being space motion, but
# they should be at least this close
assert quantity_allclose(applied1.ra-c1.ra, (applied3.ra-c1.ra)*2, atol=1e-3*u.deg)
# but *not* this close
assert not quantity_allclose(applied1.ra-c1.ra, (applied3.ra-c1.ra)*2, atol=1e-4*u.deg)
with pytest.raises(ValueError):
c2.apply_space_motion(new_obstime=t2)
def test_custom_frame_skycoord():
# also regression check for the case from #7069
class BlahBleeBlopFrame(BaseCoordinateFrame):
default_representation = SphericalRepresentation
# without a differential, SkyCoord creation fails
# default_differential = SphericalDifferential
_frame_specific_representation_info = {
'spherical': [
RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')
]
}
SkyCoord(lat=1*u.deg, lon=2*u.deg, frame=BlahBleeBlopFrame)
def test_user_friendly_pm_error():
"""
This checks that a more user-friendly error message is raised for the user
if they pass, e.g., pm_ra instead of pm_ra_cosdec
"""
with pytest.raises(ValueError) as e:
SkyCoord(ra=150*u.deg, dec=-11*u.deg,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr)
assert 'pm_ra_cosdec' in str(e.value)
with pytest.raises(ValueError) as e:
SkyCoord(l=150*u.deg, b=-11*u.deg,
pm_l=100*u.mas/u.yr, pm_b=10*u.mas/u.yr,
frame='galactic')
assert 'pm_l_cosb' in str(e.value)
# The special error should not turn on here:
with pytest.raises(ValueError) as e:
SkyCoord(x=1*u.pc, y=2*u.pc, z=3*u.pc,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr,
representation_type='cartesian')
assert 'pm_ra_cosdec' not in str(e.value)
def test_contained_by():
"""
    Test SkyCoord.contained_by(wcs, image)
"""
header = """
WCSAXES = 2 / Number of coordinate axes
CRPIX1 = 1045.0 / Pixel coordinate of reference point
CRPIX2 = 1001.0 / Pixel coordinate of reference point
PC1_1 = -0.00556448550786 / Coordinate transformation matrix element
PC1_2 = -0.001042120133257 / Coordinate transformation matrix element
PC2_1 = 0.001181477028705 / Coordinate transformation matrix element
PC2_2 = -0.005590809742987 / Coordinate transformation matrix element
CDELT1 = 1.0 / [deg] Coordinate increment at reference point
CDELT2 = 1.0 / [deg] Coordinate increment at reference point
CUNIT1 = 'deg' / Units of coordinate increment and value
CUNIT2 = 'deg' / Units of coordinate increment and value
CTYPE1 = 'RA---TAN' / TAN (gnomonic) projection + SIP distortions
CTYPE2 = 'DEC--TAN' / TAN (gnomonic) projection + SIP distortions
CRVAL1 = 250.34971683647 / [deg] Coordinate value at reference point
CRVAL2 = 2.2808772582495 / [deg] Coordinate value at reference point
LONPOLE = 180.0 / [deg] Native longitude of celestial pole
LATPOLE = 2.2808772582495 / [deg] Native latitude of celestial pole
RADESYS = 'ICRS' / Equatorial coordinate system
MJD-OBS = 58612.339199259 / [d] MJD of observation matching DATE-OBS
DATE-OBS= '2019-05-09T08:08:26.816Z' / ISO-8601 observation date matching MJD-OB
NAXIS = 2 / NAXIS
NAXIS1 = 2136 / length of first array dimension
NAXIS2 = 2078 / length of second array dimension
"""
    header = fits.Header.fromstring(header.strip(), '\n')
    test_wcs = WCS(header)

    coord = SkyCoord(254, 2, unit='deg')
    assert coord.contained_by(test_wcs)
    coord = SkyCoord(240, 2, unit='deg')
    assert not coord.contained_by(test_wcs)

    img = np.zeros((2136, 2078))
    coord = SkyCoord(250, 2, unit='deg')
    assert coord.contained_by(test_wcs, img)
    coord = SkyCoord(240, 2, unit='deg')
    assert not coord.contained_by(test_wcs, img)
ra = np.array([254.2, 254.1])
dec = np.array([2, 12.1])
coords = SkyCoord(ra, dec, unit='deg')
assert np.all(test_wcs.footprint_contains(coords) == np.array([True, False]))
def test_none_differential_type():
"""
This is a regression test for #8021
"""
from astropy.coordinates import BaseCoordinateFrame
class MockHeliographicStonyhurst(BaseCoordinateFrame):
default_representation = SphericalRepresentation
frame_specific_representation_info = {
SphericalRepresentation: [RepresentationMapping(reprname='lon',
framename='lon',
defaultunit=u.deg),
RepresentationMapping(reprname='lat',
framename='lat',
defaultunit=u.deg),
RepresentationMapping(reprname='distance',
framename='radius',
defaultunit=None)]
}
fr = MockHeliographicStonyhurst(lon=1*u.deg, lat=2*u.deg, radius=10*u.au)
SkyCoord(0*u.deg, fr.lat, fr.radius, frame=fr) # this was the failure
def test_multiple_aliases():
# Define a frame with multiple aliases
class MultipleAliasesFrame(BaseCoordinateFrame):
name = ['alias_1', 'alias_2']
default_representation = SphericalRepresentation
# Register a transform, which adds the aliases to the transform graph
tfun = lambda c, f: f.__class__(lon=c.lon, lat=c.lat)
ftrans = FunctionTransform(tfun, MultipleAliasesFrame, MultipleAliasesFrame,
register_graph=frame_transform_graph)
coord = SkyCoord(lon=1*u.deg, lat=2*u.deg, frame=MultipleAliasesFrame)
# Test attribute-style access returns self (not a copy)
assert coord.alias_1 is coord
assert coord.alias_2 is coord
# Test for aliases in __dir__()
assert 'alias_1' in coord.__dir__()
assert 'alias_2' in coord.__dir__()
# Test transform_to() calls
assert isinstance(coord.transform_to('alias_1').frame, MultipleAliasesFrame)
assert isinstance(coord.transform_to('alias_2').frame, MultipleAliasesFrame)
ftrans.unregister(frame_transform_graph)
@pytest.mark.parametrize("kwargs, error_message", [
(
{"ra": 1, "dec": 1, "distance": 1 * u.pc, "unit": "deg"},
r"Unit 'deg' \(angle\) could not be applied to 'distance'. ",
),
(
{"rho": 1 * u.m, "phi": 1, "z": 1 * u.m, "unit": "deg", "representation_type": "cylindrical"},
r"Unit 'deg' \(angle\) could not be applied to 'rho'. ",
),
])
def test_passing_inconsistent_coordinates_and_units_raises_helpful_error(kwargs, error_message):
# https://github.com/astropy/astropy/issues/10725
with pytest.raises(ValueError, match=error_message):
SkyCoord(**kwargs)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy.")
def test_match_to_catalog_3d_and_sky():
# Test for issue #5857. See PR #11449
cfk5_default = SkyCoord([1, 2, 3, 4] * u.degree, [0, 0, 0, 0] * u.degree, distance=[1, 1, 1.5, 1] * u.kpc,
frame='fk5')
cfk5_J1950 = cfk5_default.transform_to(FK5(equinox='J1950'))
idx, angle, quantity = cfk5_J1950.match_to_catalog_3d(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0*u.deg, atol=2e-15*u.deg, rtol=0)
assert_allclose(quantity, 0*u.kpc, atol=1e-15*u.kpc, rtol=0)
idx, angle, distance = cfk5_J1950.match_to_catalog_sky(cfk5_default)
npt.assert_array_equal(idx, [0, 1, 2, 3])
assert_allclose(angle, 0 * u.deg, atol=2e-15*u.deg, rtol=0)
assert_allclose(distance, 0*u.kpc, atol=2e-15*u.kpc, rtol=0)
|
c498fb982dd2486c89874eb1a51156c0c4e4e277436d2e44875983784530a729 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import operator
import re
import sys
import warnings
import weakref
import numbers
from functools import reduce
from collections import OrderedDict
from contextlib import suppress
import numpy as np
from numpy import char as chararray
from .card import Card, CARD_LENGTH
from .util import (pairwise, _is_int, _convert_array, encode_ascii, cmp,
NotifierMixin)
from .verify import VerifyError, VerifyWarning
from astropy.utils import lazyproperty, isiterable, indent
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['Column', 'ColDefs', 'Delayed']
# mapping from TFORM data type to numpy data type (code)
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
FITS2NUMPY = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8', 'E': 'f4',
'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'}
# the inverse dictionary of the above
NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}
# Normally booleans are represented as ints in Astropy, but if passed in a numpy
# boolean array, that should be supported
NUMPY2FITS['b1'] = 'L'
# Add unsigned types, which will be stored as signed ints with a TZERO card.
NUMPY2FITS['u2'] = 'I'
NUMPY2FITS['u4'] = 'J'
NUMPY2FITS['u8'] = 'K'
# Add half precision floating point numbers which will be up-converted to
# single precision.
NUMPY2FITS['f2'] = 'E'
# This is the order in which values are converted to FITS types
# Note that only double precision floating point/complex are supported
FORMATORDER = ['L', 'B', 'I', 'J', 'K', 'D', 'M', 'A']
# Convert single precision floating point/complex to double precision.
FITSUPCONVERTERS = {'E': 'D', 'C': 'M'}
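# Illustrative sketch (not part of the module API): the two mappings above
# are inverses for the unambiguous codes, while the pseudo-types registered
# afterwards ('b1', 'u2', 'u4', 'u8', 'f2') map only in the numpy -> FITS
# direction.
def _demo_tform_numpy_mappings():
    assert FITS2NUMPY['J'] == 'i4'             # 32-bit integer
    assert NUMPY2FITS[FITS2NUMPY['J']] == 'J'  # round-trips cleanly
    assert NUMPY2FITS['u2'] == 'I'             # stored as signed 16-bit + TZERO
    assert NUMPY2FITS['f2'] == 'E'             # half precision is up-converted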
# mapping from ASCII table TFORM data type to numpy data type
# A: Character
# I: Integer (32-bit)
# J: Integer (64-bit; non-standard)
# F: Float (64-bit; fixed decimal notation)
# E: Float (64-bit; exponential notation)
# D: Float (64-bit; exponential notation, always 64-bit by convention)
ASCII2NUMPY = {'A': 'a', 'I': 'i4', 'J': 'i8', 'F': 'f8', 'E': 'f8', 'D': 'f8'}
# Maps FITS ASCII column format codes to the appropriate Python string
# formatting codes for that type.
ASCII2STR = {'A': '', 'I': 'd', 'J': 'd', 'F': 'f', 'E': 'E', 'D': 'E'}
# For each ASCII table format code, provides a default width (and decimal
# precision) for when one isn't given explicitly in the column format
ASCII_DEFAULT_WIDTHS = {'A': (1, 0), 'I': (10, 0), 'J': (15, 0),
'E': (15, 7), 'F': (16, 7), 'D': (25, 17)}
# TDISPn for both ASCII and Binary tables
TDISP_RE_DICT = {}
TDISP_RE_DICT['F'] = re.compile(r'(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\.{1}'
r'(?P<precision>[0-9])+)+)|')
TDISP_RE_DICT['A'] = TDISP_RE_DICT['L'] = \
re.compile(r'(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|')
TDISP_RE_DICT['I'] = TDISP_RE_DICT['B'] = \
TDISP_RE_DICT['O'] = TDISP_RE_DICT['Z'] = \
re.compile(r'(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)'
r'(?:\.{0,1}(?P<precision>[0-9]+))?))|')
TDISP_RE_DICT['E'] = TDISP_RE_DICT['G'] = \
TDISP_RE_DICT['D'] = \
re.compile(r'(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\.'
r'(?P<precision>[0-9]+))+)'
r'(?:E{0,1}(?P<exponential>[0-9]+)?)|')
TDISP_RE_DICT['EN'] = TDISP_RE_DICT['ES'] = \
re.compile(r'(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\.{1}'
r'(?P<precision>[0-9])+)+)')
# mapping from TDISP format to python format
# A: Character
# L: Logical (Boolean)
# I: 16-bit Integer
# Can't predefine zero padding and space padding beforehand without
# knowing the value being formatted, so grabbing precision and using that
# to zero pad, ignoring width. Same with B, O, and Z
# B: Binary Integer
# O: Octal Integer
# Z: Hexadecimal Integer
# F: Float (64-bit; fixed decimal notation)
# EN: Float (engineering fortran format, exponent a multiple of three)
# ES: Float (scientific, same as EN but non-zero leading digit)
# E: Float, exponential notation
# Can't get exponential restriction to work without knowing value
# beforehand, so just using width and precision, same with D, G, EN, and
# ES formats
# D: Double-precision Floating Point with exponential
# (E but for double precision)
# G: Double-precision Floating Point, may or may not show exponent
TDISP_FMT_DICT = {
'I': '{{:{width}d}}',
'B': '{{:{width}b}}',
'O': '{{:{width}o}}',
'Z': '{{:{width}x}}',
'F': '{{:{width}.{precision}f}}',
'G': '{{:{width}.{precision}g}}'
}
TDISP_FMT_DICT['A'] = TDISP_FMT_DICT['L'] = '{{:>{width}}}'
TDISP_FMT_DICT['E'] = TDISP_FMT_DICT['D'] = \
TDISP_FMT_DICT['EN'] = TDISP_FMT_DICT['ES'] = '{{:{width}.{precision}e}}'
# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = ('TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO',
'TDISP', 'TBCOL', 'TDIM', 'TCTYP', 'TCUNI', 'TCRPX',
'TCRVL', 'TCDLT', 'TRPOS')
KEYWORD_ATTRIBUTES = ('name', 'format', 'unit', 'null', 'bscale', 'bzero',
'disp', 'start', 'dim', 'coord_type', 'coord_unit',
'coord_ref_point', 'coord_ref_value', 'coord_inc',
'time_ref_pos')
"""This is a list of the attributes that can be set on `Column` objects."""
KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))
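# Illustrative sketch (not part of the module API): the paired mappings allow
# translation in either direction between FITS keywords and Column attributes.
def _demo_keyword_attribute_maps():
    assert KEYWORD_TO_ATTRIBUTE['TTYPE'] == 'name'
    assert ATTRIBUTE_TO_KEYWORD['name'] == 'TTYPE'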
# TODO: Define a list of default comments to associate with each table keyword
# TFORMn regular expression
TFORMAT_RE = re.compile(r'(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])'
r'(?P<option>[!-~]*)', re.I)
# TFORMn for ASCII tables; two different versions depending on whether
# the format is floating-point or not; allows empty values for width
# in which case defaults are used
TFORMAT_ASCII_RE = re.compile(r'(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|'
r'(?:(?P<formatf>[FED])'
r'(?:(?P<widthf>[0-9]+)\.'
r'(?P<precision>[0-9]+))?)')
TTYPE_RE = re.compile(r'[0-9a-zA-Z_]+')
"""
Regular expression for valid table column names. See FITS Standard v3.0 section
7.2.2.
"""
# table definition keyword regular expression
TDEF_RE = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)')
# table dimension keyword regular expression (fairly flexible with whitespace)
TDIM_RE = re.compile(r'\(\s*(?P<dims>(?:\d+\s*)(?:,\s*\d+\s*)*\s*)\)\s*')
# value for ASCII table cell with value = TNULL
# this can be reset by the user.
ASCIITNULL = 0
# The default placeholder to use for NULL values in ASCII tables when
# converting from binary to ASCII tables
DEFAULT_ASCII_TNULL = '---'
class Delayed:
"""Delayed file-reading data."""
def __init__(self, hdu=None, field=None):
self.hdu = weakref.proxy(hdu)
self.field = field
def __getitem__(self, key):
# This forces the data for the HDU to be read, which will replace
# the corresponding Delayed objects in the Tables Columns to be
# transformed into ndarrays. It will also return the value of the
# requested data element.
return self.hdu.data[key][self.field]
class _BaseColumnFormat(str):
"""
Base class for binary table column formats (just called _ColumnFormat)
and ASCII table column formats (_AsciiColumnFormat).
"""
def __eq__(self, other):
if not other:
return False
if isinstance(other, str):
if not isinstance(other, self.__class__):
try:
other = self.__class__(other)
except ValueError:
return False
else:
return False
return self.canonical == other.canonical
def __hash__(self):
return hash(self.canonical)
@lazyproperty
def dtype(self):
"""
The Numpy dtype object created from the format's associated recformat.
"""
return np.dtype(self.recformat)
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
regardless of their type.
That is, this can convert a _ColumnFormat to an _AsciiColumnFormat
or vice versa at least in cases where a direct translation is possible.
"""
return cls.from_recformat(format.recformat)
class _ColumnFormat(_BaseColumnFormat):
"""
Represents a FITS binary table column format.
This is an enhancement over using a normal string for the format, since the
repeat count, format code, and option are available as separate attributes,
and smart comparison is used. For example 1J == J.
"""
def __new__(cls, format):
self = super().__new__(cls, format)
self.repeat, self.format, self.option = _parse_tformat(format)
self.format = self.format.upper()
if self.format in ('P', 'Q'):
# TODO: There should be a generic factory that returns either
# _FormatP or _FormatQ as appropriate for a given TFORMn
if self.format == 'P':
recformat = _FormatP.from_tform(format)
else:
recformat = _FormatQ.from_tform(format)
# Format of variable length arrays
self.p_format = recformat.format
else:
self.p_format = None
return self
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of rTa where T is the single character data
type code, a is the optional part, and r is the repeat. If repeat == 1
(the default) it is left out of this representation.
"""
if self.repeat == 1:
repeat = ''
else:
repeat = str(self.repeat)
return f'{repeat}{self.format}{self.option}'
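# Illustrative sketch (not part of the module API): the parsed attributes and
# the 'smart' comparison described in the class docstring; '1J' equals 'J'
# because both canonicalize to 'J'.
def _demo_column_format_comparison():
    fmt = _ColumnFormat('1J')
    assert (fmt.repeat, fmt.format) == (1, 'J')
    assert fmt.canonical == 'J'
    assert fmt == _ColumnFormat('J')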
class _AsciiColumnFormat(_BaseColumnFormat):
"""Similar to _ColumnFormat but specifically for columns in ASCII tables.
The formats of ASCII table columns and binary table columns are inherently
incompatible in FITS. They don't support the same ranges and types of
values, and even reuse format codes in subtly different ways. For example
the format code 'Iw' in ASCII columns refers to any integer whose string
representation is at most w characters wide, so 'I' can represent
    effectively any integer that will fit in a FITS column, whereas for
binary tables 'I' very explicitly refers to a 16-bit signed integer.
Conversions between the two column formats can be performed using the
``to/from_binary`` methods on this class, or the ``to/from_ascii``
methods on the `_ColumnFormat` class. But again, not all conversions are
possible and may result in a `ValueError`.
"""
def __new__(cls, format, strict=False):
self = super().__new__(cls, format)
self.format, self.width, self.precision = \
_parse_ascii_tformat(format, strict)
# If no width has been specified, set the dtype here to default as well
if format == self.format:
self.recformat = ASCII2NUMPY[format]
# This is to support handling logical (boolean) data from binary tables
# in an ASCII table
self._pseudo_logical = False
return self
@classmethod
def from_column_format(cls, format):
inst = cls.from_recformat(format.recformat)
# Hack
if format.format == 'L':
inst._pseudo_logical = True
return inst
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_ascii_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_ascii_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of Tw.d where T is the single character data
type code, w is the width in characters for this field, and d is the
number of digits after the decimal place (for format codes 'E', 'F',
and 'D' only).
"""
if self.format in ('E', 'F', 'D'):
return f'{self.format}{self.width}.{self.precision}'
return f'{self.format}{self.width}'
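# Illustrative sketch (not part of the module API): ASCII column formats carry
# an explicit character width, which appears in the canonical form.
def _demo_ascii_column_format():
    fmt = _AsciiColumnFormat('I10')
    assert fmt.format == 'I'
    assert fmt.canonical == 'I10'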
class _FormatX(str):
"""For X format in binary tables."""
def __new__(cls, repeat=1):
nbytes = ((repeat - 1) // 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
obj = super().__new__(cls, repr((nbytes,)) + 'u1')
obj.repeat = repeat
return obj
def __getnewargs__(self):
return (self.repeat,)
@property
def tform(self):
return f'{self.repeat}X'
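# Illustrative sketch (not part of the module API): an X (bit) format packs
# its bits into whole bytes, so 12 bits occupy ((12 - 1) // 8) + 1 == 2 bytes.
def _demo_format_x_packing():
    xfmt = _FormatX(12)
    assert str(xfmt) == '(2,)u1'  # two unsigned bytes in the record dtype
    assert xfmt.tform == '12X'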
# TODO: Table column formats need to be verified upon first reading the file;
# as it is, an invalid P format will raise a VerifyError from some deep,
# unexpected place
class _FormatP(str):
"""For P format in variable length table."""
# As far as I can tell from my reading of the FITS standard, a type code is
# *required* for P and Q formats; there is no default
_format_re_template = (r'(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])'
r'(?:\((?P<max>\d*)\))?')
_format_code = 'P'
_format_re = re.compile(_format_re_template.format(_format_code))
_descriptor_format = '2i4'
def __new__(cls, dtype, repeat=None, max=None):
obj = super().__new__(cls, cls._descriptor_format)
obj.format = NUMPY2FITS[dtype]
obj.dtype = dtype
obj.repeat = repeat
obj.max = max
return obj
def __getnewargs__(self):
return (self.dtype, self.repeat, self.max)
@classmethod
def from_tform(cls, format):
m = cls._format_re.match(format)
if not m or m.group('dtype') not in FITS2NUMPY:
raise VerifyError(f'Invalid column format: {format}')
repeat = m.group('repeat')
array_dtype = m.group('dtype')
max = m.group('max')
if not max:
max = None
return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)
@property
def tform(self):
repeat = '' if self.repeat is None else self.repeat
max = '' if self.max is None else self.max
return f'{repeat}{self._format_code}{self.format}({max})'
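# Illustrative sketch (not part of the module API): parsing a P-format TFORMn
# value and round-tripping it back through the ``tform`` property. The string
# value itself is the two-int array descriptor stored in the main table.
def _demo_format_p_roundtrip():
    pfmt = _FormatP.from_tform('PJ(100)')
    assert (pfmt.format, pfmt.dtype, pfmt.max) == ('J', 'i4', '100')
    assert pfmt.tform == 'PJ(100)'
    assert str(pfmt) == '2i4'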
class _FormatQ(_FormatP):
"""Carries type description of the Q format for variable length arrays.
The Q format is like the P format but uses 64-bit integers in the array
descriptors, allowing for heaps stored beyond 2GB into a file.
"""
_format_code = 'Q'
_format_re = re.compile(_FormatP._format_re_template.format(_format_code))
_descriptor_format = '2i8'
class ColumnAttribute:
"""
Descriptor for attributes of `Column` that are associated with keywords
in the FITS header and describe properties of the column as specified in
the FITS standard.
Each `ColumnAttribute` may have a ``validator`` method defined on it.
This validates values set on this attribute to ensure that they meet the
FITS standard. Invalid values will raise a warning and will not be used in
formatting the column. The validator should take two arguments--the
`Column` it is being assigned to, and the new value for the attribute, and
it must raise an `AssertionError` if the value is invalid.
The `ColumnAttribute` itself is a decorator that can be used to define the
``validator`` for each column attribute. For example::
@ColumnAttribute('TTYPE')
def name(col, name):
if not isinstance(name, str):
raise AssertionError
The actual object returned by this decorator is the `ColumnAttribute`
instance though, not the ``name`` function. As such ``name`` is not a
method of the class it is defined in.
The setter for `ColumnAttribute` also updates the header of any table
HDU this column is attached to in order to reflect the change. The
``validator`` should ensure that the value is valid for inclusion in a FITS
header.
"""
def __init__(self, keyword):
self._keyword = keyword
self._validator = None
# The name of the attribute associated with this keyword is currently
# determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be
        # made more flexible in the future, for example, to support custom
# column attributes.
self._attr = '_' + KEYWORD_TO_ATTRIBUTE[self._keyword]
def __get__(self, obj, objtype=None):
if obj is None:
return self
else:
return getattr(obj, self._attr)
def __set__(self, obj, value):
if self._validator is not None:
self._validator(obj, value)
old_value = getattr(obj, self._attr, None)
setattr(obj, self._attr, value)
obj._notify('column_attribute_changed', obj, self._attr[1:], old_value,
value)
def __call__(self, func):
"""
Set the validator for this column attribute.
Returns ``self`` so that this can be used as a decorator, as described
in the docs for this class.
"""
self._validator = func
return self
def __repr__(self):
return f"{self.__class__.__name__}('{self._keyword}')"
class Column(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
"""
def __init__(self, name=None, format=None, unit=None, null=None,
bscale=None, bzero=None, disp=None, start=None, dim=None,
array=None, ascii=None, coord_type=None, coord_unit=None,
coord_ref_point=None, coord_ref_value=None, coord_inc=None,
time_ref_pos=None):
"""
        Construct a `Column` by specifying attributes. All attributes
        except ``format`` are optional; see :ref:`astropy:column_creation`
        and :ref:`astropy:creating_ascii_table` for more information
        regarding the ``TFORM`` keyword.
Parameters
----------
name : str, optional
column name, corresponding to ``TTYPE`` keyword
format : str
column format, corresponding to ``TFORM`` keyword
unit : str, optional
column unit, corresponding to ``TUNIT`` keyword
null : str, optional
null value, corresponding to ``TNULL`` keyword
bscale : int-like, optional
bscale value, corresponding to ``TSCAL`` keyword
bzero : int-like, optional
bzero value, corresponding to ``TZERO`` keyword
disp : str, optional
display format, corresponding to ``TDISP`` keyword
start : int, optional
column starting position (ASCII table only), corresponding
to ``TBCOL`` keyword
dim : str, optional
column dimension corresponding to ``TDIM`` keyword
array : iterable, optional
a `list`, `numpy.ndarray` (or other iterable that can be used to
initialize an ndarray) providing initial data for this column.
The array will be automatically converted, if possible, to the data
            format of the column. In the case where non-trivial ``bscale``
and/or ``bzero`` arguments are given, the values in the array must
be the *physical* values--that is, the values of column as if the
scaling has already been applied (the array stored on the column
object will then be converted back to its storage values).
ascii : bool, optional
set `True` if this describes a column for an ASCII table; this
may be required to disambiguate the column format
coord_type : str, optional
coordinate/axis type corresponding to ``TCTYP`` keyword
coord_unit : str, optional
coordinate/axis unit corresponding to ``TCUNI`` keyword
coord_ref_point : int-like, optional
pixel coordinate of the reference point corresponding to ``TCRPX``
keyword
coord_ref_value : int-like, optional
coordinate value at reference point corresponding to ``TCRVL``
keyword
coord_inc : int-like, optional
coordinate increment at reference point corresponding to ``TCDLT``
keyword
time_ref_pos : str, optional
reference position for a time coordinate column corresponding to
``TRPOS`` keyword
"""
if format is None:
raise ValueError('Must specify format to construct Column.')
        # any of the input arguments (except array) can be a Card or just
# a number/string
kwargs = {'ascii': ascii}
for attr in KEYWORD_ATTRIBUTES:
value = locals()[attr] # get the argument's value
if isinstance(value, Card):
value = value.value
kwargs[attr] = value
valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)
if invalid_kwargs:
msg = ['The following keyword arguments to Column were invalid:']
for val in invalid_kwargs.values():
msg.append(indent(val[1]))
raise VerifyError('\n'.join(msg))
for attr in KEYWORD_ATTRIBUTES:
setattr(self, attr, valid_kwargs.get(attr))
# TODO: Try to eliminate the following two special cases
# for recformat and dim:
# This is not actually stored as an attribute on columns for some
# reason
recformat = valid_kwargs['recformat']
# The 'dim' keyword's original value is stored in self.dim, while
# *only* the tuple form is stored in self._dims.
self._dims = self.dim
self.dim = dim
# Awful hack to use for now to keep track of whether the column holds
# pseudo-unsigned int data
self._pseudo_unsigned_ints = False
        # If the column data is not an ndarray, convert it to one; input
        # arrays may be a plain list or tuple rather than an ndarray.
        # Object arrays are excluded because there is no guarantee that
        # the elements of an object array are consistent.
if not isinstance(array,
(np.ndarray, chararray.chararray, Delayed)):
try: # try to convert to a ndarray first
if array is not None:
array = np.array(array)
except Exception:
try: # then try to convert it to a strings array
itemsize = int(recformat[1:])
array = chararray.array(array, itemsize=itemsize)
except ValueError:
# then try variable length array
# Note: This includes _FormatQ by inheritance
if isinstance(recformat, _FormatP):
array = _VLF(array, dtype=recformat.dtype)
else:
raise ValueError('Data is inconsistent with the '
'format `{}`.'.format(format))
array = self._convert_to_valid_data_type(array)
# We have required (through documentation) that arrays passed in to
# this constructor are already in their physical values, so we make
# note of that here
if isinstance(array, np.ndarray):
self._physical_values = True
else:
self._physical_values = False
self._parent_fits_rec = None
self.array = array
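    # A minimal construction sketch (illustrative values):
    #
    #     col = Column(name='flux', format='E', unit='Jy', array=[1.0, 2.0])
    #
    # creates a single-precision ('E') binary table column whose ``array``
    # holds the data converted to the column's data type.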
def __repr__(self):
text = ''
for attr in KEYWORD_ATTRIBUTES:
value = getattr(self, attr)
if value is not None:
text += attr + ' = ' + repr(value) + '; '
return text[:-2]
def __eq__(self, other):
"""
Two columns are equal if their name and format are the same. Other
attributes aren't taken into account at this time.
"""
# According to the FITS standard column names must be case-insensitive
a = (self.name.lower(), self.format)
b = (other.name.lower(), other.format)
return a == b
def __hash__(self):
"""
Like __eq__, the hash of a column should be based on the unique column
name and format, and be case-insensitive with respect to the column
name.
"""
return hash((self.name.lower(), self.format))
@property
def array(self):
"""
The Numpy `~numpy.ndarray` associated with this `Column`.
If the column was instantiated with an array passed to the ``array``
argument, this will return that array. However, if the column is
later added to a table, such as via `BinTableHDU.from_columns` as
is typically the case, this attribute will be updated to reference
the associated field in the table, which may no longer be the same
array.
"""
# Ideally the .array attribute never would have existed in the first
# place, or would have been internal-only. This is a legacy of the
# older design from Astropy that needs to have continued support, for
# now.
# One of the main problems with this design was that it created a
# reference cycle. When the .array attribute was updated after
# creating a FITS_rec from the column (as explained in the docstring) a
# reference cycle was created. This is because the code in BinTableHDU
# (and a few other places) does essentially the following:
#
# data._coldefs = columns # The ColDefs object holding this Column
# for col in columns:
# col.array = data.field(col.name)
#
# This way each columns .array attribute now points to the field in the
# table data. It's actually a pretty confusing interface (since it
# replaces the array originally pointed to by .array), but it's the way
# things have been for a long, long time.
#
# However, this results, in *many* cases, in a reference cycle.
# Because the array returned by data.field(col.name), while sometimes
# an array that owns its own data, is usually like a slice of the
# original data. It has the original FITS_rec as the array .base.
# This results in the following reference cycle (for the n-th column):
#
# data -> data._coldefs -> data._coldefs[n] ->
# data._coldefs[n].array -> data._coldefs[n].array.base -> data
#
        # Because ndarray objects are not handled by Python's garbage collector
# the reference cycle cannot be broken. Therefore the FITS_rec's
# refcount never goes to zero, its __del__ is never called, and its
# memory is never freed. This didn't occur in *all* cases, but it did
# occur in many cases.
#
# To get around this, Column.array is no longer a simple attribute
# like it was previously. Now each Column has a ._parent_fits_rec
# attribute which is a weakref to a FITS_rec object. Code that
# previously assigned each col.array to field in a FITS_rec (as in
# the example a few paragraphs above) is still used, however now
# array.setter checks if a reference cycle will be created. And if
# so, instead of saving directly to the Column's __dict__, it creates
        # the ._parent_fits_rec weakref, and all lookups of the column's .array
# go through that instead.
#
# This alone does not fully solve the problem. Because
# _parent_fits_rec is a weakref, if the user ever holds a reference to
# the Column, but deletes all references to the underlying FITS_rec,
# the .array attribute would suddenly start returning None instead of
# the array data. This problem is resolved on FITS_rec's end. See the
# note in the FITS_rec._coldefs property for the rest of the story.
        # If the Column's array is not a reference to an existing FITS_rec,
        # then it is just stored in self.__dict__; otherwise check the
        # _parent_fits_rec reference if it's still available.
if 'array' in self.__dict__:
return self.__dict__['array']
elif self._parent_fits_rec is not None:
parent = self._parent_fits_rec()
if parent is not None:
return parent[self.name]
else:
return None
@array.setter
def array(self, array):
# The following looks over the bases of the given array to check if it
# has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs
# contains this Column itself, and would create a reference cycle if we
# stored the array directly in self.__dict__.
# In this case it instead sets up the _parent_fits_rec weakref to the
# underlying FITS_rec, so that array.getter can return arrays through
# self._parent_fits_rec().field(self.name), rather than storing a
# hard reference to the field like it used to.
base = array
while True:
if (hasattr(base, '_coldefs') and
isinstance(base._coldefs, ColDefs)):
for col in base._coldefs:
if col is self and self._parent_fits_rec is None:
self._parent_fits_rec = weakref.ref(base)
# Just in case the user already set .array to their own
# array.
if 'array' in self.__dict__:
del self.__dict__['array']
return
if getattr(base, 'base', None) is not None:
base = base.base
else:
break
self.__dict__['array'] = array
@array.deleter
def array(self):
try:
del self.__dict__['array']
except KeyError:
pass
self._parent_fits_rec = None
@ColumnAttribute('TTYPE')
def name(col, name):
if name is None:
# Allow None to indicate deleting the name, or to just indicate an
# unspecified name (when creating a new Column).
return
# Check that the name meets the recommended standard--other column
# names are *allowed*, but will be discouraged
if isinstance(name, str) and not TTYPE_RE.match(name):
warnings.warn(
'It is strongly recommended that column names contain only '
'upper and lower-case ASCII letters, digits, or underscores '
'for maximum compatibility with other software '
'(got {!r}).'.format(name), VerifyWarning)
# This ensures that the new name can fit into a single FITS card
# without any special extension like CONTINUE cards or the like.
if (not isinstance(name, str)
or len(str(Card('TTYPE', name))) != CARD_LENGTH):
raise AssertionError(
'Column name must be a string able to fit in a single '
'FITS card--typically this means a maximum of 68 '
'characters, though it may be fewer if the string '
'contains special characters like quotes.')
@ColumnAttribute('TCTYP')
def coord_type(col, coord_type):
if coord_type is None:
return
if (not isinstance(coord_type, str)
or len(coord_type) > 8):
raise AssertionError(
                'Coordinate/axis type must be a string of at most 8 '
'characters.')
@ColumnAttribute('TCUNI')
def coord_unit(col, coord_unit):
if (coord_unit is not None
and not isinstance(coord_unit, str)):
raise AssertionError(
'Coordinate/axis unit must be a string.')
@ColumnAttribute('TCRPX')
def coord_ref_point(col, coord_ref_point):
if (coord_ref_point is not None
and not isinstance(coord_ref_point, numbers.Real)):
raise AssertionError(
'Pixel coordinate of the reference point must be '
'real floating type.')
@ColumnAttribute('TCRVL')
def coord_ref_value(col, coord_ref_value):
if (coord_ref_value is not None
and not isinstance(coord_ref_value, numbers.Real)):
raise AssertionError(
'Coordinate value at reference point must be real '
'floating type.')
@ColumnAttribute('TCDLT')
def coord_inc(col, coord_inc):
if (coord_inc is not None
and not isinstance(coord_inc, numbers.Real)):
raise AssertionError(
'Coordinate increment must be real floating type.')
@ColumnAttribute('TRPOS')
def time_ref_pos(col, time_ref_pos):
if (time_ref_pos is not None
and not isinstance(time_ref_pos, str)):
raise AssertionError(
'Time reference position must be a string.')
format = ColumnAttribute('TFORM')
unit = ColumnAttribute('TUNIT')
null = ColumnAttribute('TNULL')
bscale = ColumnAttribute('TSCAL')
bzero = ColumnAttribute('TZERO')
disp = ColumnAttribute('TDISP')
start = ColumnAttribute('TBCOL')
dim = ColumnAttribute('TDIM')
@lazyproperty
def ascii(self):
"""Whether this `Column` represents a column in an ASCII table."""
return isinstance(self.format, _AsciiColumnFormat)
@lazyproperty
def dtype(self):
return self.format.dtype
def copy(self):
"""
Return a copy of this `Column`.
"""
tmp = Column(format='I') # just use a throw-away format
tmp.__dict__ = self.__dict__.copy()
return tmp
@staticmethod
def _convert_format(format, cls):
"""The format argument to this class's initializer may come in many
forms. This uses the given column format class ``cls`` to convert
to a format of that type.
TODO: There should be an abc base class for column format classes
"""
# Short circuit in case we're already a _BaseColumnFormat--there is at
# least one case in which this can happen
if isinstance(format, _BaseColumnFormat):
return format, format.recformat
if format in NUMPY2FITS:
with suppress(VerifyError):
# legit recarray format?
recformat = format
format = cls.from_recformat(format)
try:
# legit FITS format?
format = cls(format)
recformat = format.recformat
except VerifyError:
raise VerifyError(f'Illegal format `{format}`.')
return format, recformat
@classmethod
def _verify_keywords(cls, name=None, format=None, unit=None, null=None,
bscale=None, bzero=None, disp=None, start=None,
dim=None, ascii=None, coord_type=None, coord_unit=None,
coord_ref_point=None, coord_ref_value=None,
coord_inc=None, time_ref_pos=None):
"""
Given the keyword arguments used to initialize a Column, specifically
        those that are typically read from a FITS header (so excluding array),
verify that each keyword has a valid value.
Returns a 2-tuple of dicts. The first maps valid keywords to their
values. The second maps invalid keywords to a 2-tuple of their value,
and a message explaining why they were found invalid.
"""
valid = {}
invalid = {}
try:
format, recformat = cls._determine_formats(format, start, dim, ascii)
valid.update(format=format, recformat=recformat)
except (ValueError, VerifyError) as err:
msg = (
f'Column format option (TFORMn) failed verification: {err!s} '
'The invalid value will be ignored for the purpose of '
'formatting the data in this column.')
invalid['format'] = (format, msg)
except AttributeError as err:
msg = (
f'Column format option (TFORMn) must be a string with a valid '
f'FITS table format (got {format!s}: {err!s}). '
'The invalid value will be ignored for the purpose of '
'formatting the data in this column.')
invalid['format'] = (format, msg)
# Currently we don't have any validation for name, unit, bscale, or
# bzero so include those by default
# TODO: Add validation for these keywords, obviously
for k, v in [('name', name), ('unit', unit), ('bscale', bscale),
('bzero', bzero)]:
if v is not None and v != '':
valid[k] = v
# Validate null option
# Note: Enough code exists that thinks empty strings are sensible
# inputs for these options that we need to treat '' as None
if null is not None and null != '':
msg = None
if isinstance(format, _AsciiColumnFormat):
null = str(null)
if len(null) > format.width:
msg = (
"ASCII table null option (TNULLn) is longer than "
"the column's character width and will be truncated "
"(got {!r}).".format(null))
else:
tnull_formats = ('B', 'I', 'J', 'K')
if not _is_int(null):
# Make this an exception instead of a warning, since any
# non-int value is meaningless
msg = (
'Column null option (TNULLn) must be an integer for '
'binary table columns (got {!r}). The invalid value '
'will be ignored for the purpose of formatting '
'the data in this column.'.format(null))
elif not (format.format in tnull_formats or
(format.format in ('P', 'Q') and
format.p_format in tnull_formats)):
# TODO: We should also check that TNULLn's integer value
# is in the range allowed by the column's format
msg = (
'Column null option (TNULLn) is invalid for binary '
'table columns of type {!r} (got {!r}). The invalid '
'value will be ignored for the purpose of formatting '
'the data in this column.'.format(format, null))
if msg is None:
valid['null'] = null
else:
invalid['null'] = (null, msg)
# Validate the disp option
# TODO: Add full parsing and validation of TDISPn keywords
if disp is not None and disp != '':
msg = None
if not isinstance(disp, str):
msg = (
f'Column disp option (TDISPn) must be a string (got '
f'{disp!r}). The invalid value will be ignored for the '
'purpose of formatting the data in this column.')
elif (isinstance(format, _AsciiColumnFormat) and
disp[0].upper() == 'L'):
# disp is at least one character long and has the 'L' format
# which is not recognized for ASCII tables
msg = (
"Column disp option (TDISPn) may not use the 'L' format "
"with ASCII table columns. The invalid value will be "
"ignored for the purpose of formatting the data in this "
"column.")
if msg is None:
try:
_parse_tdisp_format(disp)
valid['disp'] = disp
except VerifyError as err:
msg = (
f'Column disp option (TDISPn) failed verification: '
f'{err!s} The invalid value will be ignored for the '
'purpose of formatting the data in this column.')
invalid['disp'] = (disp, msg)
else:
invalid['disp'] = (disp, msg)
# Validate the start option
if start is not None and start != '':
msg = None
if not isinstance(format, _AsciiColumnFormat):
# The 'start' option only applies to ASCII columns
msg = (
'Column start option (TBCOLn) is not allowed for binary '
'table columns (got {!r}). The invalid keyword will be '
'ignored for the purpose of formatting the data in this '
'column.'.format(start))
else:
try:
start = int(start)
except (TypeError, ValueError):
pass
if not _is_int(start) or start < 1:
msg = (
'Column start option (TBCOLn) must be a positive integer '
'(got {!r}). The invalid value will be ignored for the '
'purpose of formatting the data in this column.'.format(start))
if msg is None:
valid['start'] = start
else:
invalid['start'] = (start, msg)
# Process TDIMn options
# ASCII table columns can't have a TDIMn keyword associated with it;
# for now we just issue a warning and ignore it.
# TODO: This should be checked by the FITS verification code
if dim is not None and dim != '':
msg = None
dims_tuple = tuple()
            # NOTE: If valid, the dim keyword's value in the valid dict is
# a tuple, not the original string; if invalid just the original
# string is returned
if isinstance(format, _AsciiColumnFormat):
msg = (
'Column dim option (TDIMn) is not allowed for ASCII table '
'columns (got {!r}). The invalid keyword will be ignored '
'for the purpose of formatting this column.'.format(dim))
elif isinstance(dim, str):
dims_tuple = _parse_tdim(dim)
elif isinstance(dim, tuple):
dims_tuple = dim
else:
msg = (
"`dim` argument must be a string containing a valid value "
"for the TDIMn header keyword associated with this column, "
"or a tuple containing the C-order dimensions for the "
"column. The invalid value will be ignored for the purpose "
"of formatting this column.")
if dims_tuple:
if reduce(operator.mul, dims_tuple) > format.repeat:
msg = (
"The repeat count of the column format {!r} for column {!r} "
"is fewer than the number of elements per the TDIM "
"argument {!r}. The invalid TDIMn value will be ignored "
"for the purpose of formatting this column.".format(
                            format, name, dim))
if msg is None:
valid['dim'] = dims_tuple
else:
invalid['dim'] = (dim, msg)
if coord_type is not None and coord_type != '':
msg = None
if not isinstance(coord_type, str):
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_type))
elif len(coord_type) > 8:
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"of atmost 8 characters (got {!r}). The invalid keyword "
"will be ignored for the purpose of formatting this "
"column.".format(coord_type))
if msg is None:
valid['coord_type'] = coord_type
else:
invalid['coord_type'] = (coord_type, msg)
if coord_unit is not None and coord_unit != '':
msg = None
if not isinstance(coord_unit, str):
msg = (
"Coordinate/axis unit option (TCUNIn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_unit))
if msg is None:
valid['coord_unit'] = coord_unit
else:
invalid['coord_unit'] = (coord_unit, msg)
for k, v in [('coord_ref_point', coord_ref_point),
('coord_ref_value', coord_ref_value),
('coord_inc', coord_inc)]:
if v is not None and v != '':
msg = None
if not isinstance(v, numbers.Real):
msg = (
"Column {} option ({}n) must be a real floating type (got {!r}). "
"The invalid value will be ignored for the purpose of formatting "
"the data in this column.".format(k, ATTRIBUTE_TO_KEYWORD[k], v))
if msg is None:
valid[k] = v
else:
invalid[k] = (v, msg)
if time_ref_pos is not None and time_ref_pos != '':
msg = None
if not isinstance(time_ref_pos, str):
msg = (
"Time coordinate reference position option (TRPOSn) must be "
"a string (got {!r}). The invalid keyword will be ignored for "
"the purpose of formatting this column.".format(time_ref_pos))
if msg is None:
valid['time_ref_pos'] = time_ref_pos
else:
invalid['time_ref_pos'] = (time_ref_pos, msg)
return valid, invalid
@classmethod
def _determine_formats(cls, format, start, dim, ascii):
"""
Given a format string and whether or not the Column is for an
ASCII table (ascii=None means unspecified, but lean toward binary table
where ambiguous) create an appropriate _BaseColumnFormat instance for
the column's format, and determine the appropriate recarray format.
The values of the start and dim keyword arguments are also useful, as
the former is only valid for ASCII tables and the latter only for
BINARY tables.
"""
# If the given format string is unambiguously a Numpy dtype or one of
# the Numpy record format type specifiers supported by Astropy then that
# should take priority--otherwise assume it is a FITS format
if isinstance(format, np.dtype):
format, _, _ = _dtype_to_recformat(format)
# check format
if ascii is None and not isinstance(format, _BaseColumnFormat):
            # We're just given a string which could be either a Numpy format
# code, or a format for a binary column array *or* a format for an
# ASCII column array--there may be many ambiguities here. Try our
# best to guess what the user intended.
format, recformat = cls._guess_format(format, start, dim)
elif not ascii and not isinstance(format, _BaseColumnFormat):
format, recformat = cls._convert_format(format, _ColumnFormat)
elif ascii and not isinstance(format, _AsciiColumnFormat):
format, recformat = cls._convert_format(format,
_AsciiColumnFormat)
else:
# The format is already acceptable and unambiguous
recformat = format.recformat
return format, recformat
@classmethod
def _guess_format(cls, format, start, dim):
if start and dim:
# This is impossible; this can't be a valid FITS column
raise ValueError(
                'Columns cannot have both a start (TBCOLn) and dim '
                '(TDIMn) option, since the former only applies to '
                'ASCII tables, and the latter is only valid for binary '
'tables.')
elif start:
# Only ASCII table columns can have a 'start' option
guess_format = _AsciiColumnFormat
elif dim:
# Only binary tables can have a dim option
guess_format = _ColumnFormat
else:
# If the format is *technically* a valid binary column format
# (i.e. it has a valid format code followed by arbitrary
# "optional" codes), but it is also strictly a valid ASCII
# table format, then assume an ASCII table column was being
# requested (the more likely case, after all).
with suppress(VerifyError):
format = _AsciiColumnFormat(format, strict=True)
# A safe guess which reflects the existing behavior of previous
# Astropy versions
guess_format = _ColumnFormat
try:
format, recformat = cls._convert_format(format, guess_format)
except VerifyError:
            # For whatever reason our guess was wrong (for example if we got
            # just 'F' that's not a valid binary format, but it is an ASCII
            # format code, albeit with the width/precision omitted)
guess_format = (_AsciiColumnFormat
if guess_format is _ColumnFormat
else _ColumnFormat)
# If this fails too we're out of options--it is truly an invalid
# format, or at least not supported
format, recformat = cls._convert_format(format, guess_format)
return format, recformat
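    # Illustrative sketch of the guessing rules above (hypothetical private
    # calls, not executed here): a 'start' value makes the ASCII
    # interpretation the first guess, a 'dim' value makes the binary
    # interpretation the first guess, and supplying both is rejected:
    #
    #     Column._guess_format('I6', start=1, dim=None)        # ASCII guess
    #     Column._guess_format('2J', start=None, dim='(2,1)')  # binary guess
    #     Column._guess_format('I6', start=1, dim='(2,1)')     # ValueError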
def _convert_to_valid_data_type(self, array):
# Convert the format to a type we understand
if isinstance(array, Delayed):
return array
elif array is None:
return array
else:
format = self.format
dims = self._dims
if dims:
shape = dims[:-1] if 'A' in format else dims
shape = (len(array),) + shape
array = array.reshape(shape)
if 'P' in format or 'Q' in format:
return array
elif 'A' in format:
if array.dtype.char in 'SU':
if dims:
# The 'last' dimension (first in the order given
# in the TDIMn keyword itself) is the number of
# characters in each string
fsize = dims[-1]
else:
fsize = np.dtype(format.recformat).itemsize
return chararray.array(array, itemsize=fsize, copy=False)
else:
return _convert_array(array, np.dtype(format.recformat))
elif 'L' in format:
# boolean needs to be scaled back to storage values ('T', 'F')
if array.dtype == np.dtype('bool'):
return np.where(array == np.False_, ord('F'), ord('T'))
else:
return np.where(array == 0, ord('F'), ord('T'))
elif 'X' in format:
return _convert_array(array, np.dtype('uint8'))
else:
# Preserve byte order of the original array for now; see #77
numpy_format = array.dtype.byteorder + format.recformat
# Handle arrays passed in as unsigned ints as pseudo-unsigned
# int arrays; blatantly tacked in here for now--we need columns
                # to have explicit knowledge of whether they are treated as
# pseudo-unsigned
bzeros = {2: np.uint16(2**15), 4: np.uint32(2**31),
8: np.uint64(2**63)}
if (array.dtype.kind == 'u' and
array.dtype.itemsize in bzeros and
self.bscale in (1, None, '') and
self.bzero == bzeros[array.dtype.itemsize]):
                    # The array is uint, the scale is 1.0, and the bzero is
                    # the appropriate value for a pseudo-unsigned integer of
                    # the input dtype, so go ahead and treat the column as
                    # pseudo-unsigned
numpy_format = numpy_format.replace('i', 'u')
self._pseudo_unsigned_ints = True
# The .base here means we're dropping the shape information,
# which is only used to format recarray fields, and is not
# useful for converting input arrays to the correct data type
dtype = np.dtype(numpy_format).base
return _convert_array(array, dtype)
class ColDefs(NotifierMixin):
"""
Column definitions class.
It has attributes corresponding to the `Column` attributes
(e.g. `ColDefs` has the attribute ``names`` while `Column`
has ``name``). Each attribute in `ColDefs` is a list of
corresponding attribute values from all `Column` objects.
"""
_padding_byte = '\x00'
_col_format_cls = _ColumnFormat
def __new__(cls, input, ascii=False):
klass = cls
if (hasattr(input, '_columns_type') and
issubclass(input._columns_type, ColDefs)):
klass = input._columns_type
elif (hasattr(input, '_col_format_cls') and
issubclass(input._col_format_cls, _AsciiColumnFormat)):
klass = _AsciiColDefs
if ascii: # force ASCII if this has been explicitly requested
klass = _AsciiColDefs
return object.__new__(klass)
def __getnewargs__(self):
return (self._arrays,)
def __init__(self, input, ascii=False):
"""
Parameters
----------
input : sequence of `Column` or `ColDefs` or ndarray or `~numpy.recarray`
An existing table HDU, an existing `ColDefs`, or any multi-field
Numpy array or `numpy.recarray`.
ascii : bool
Use True to ensure that ASCII table columns are used.
"""
from .hdu.table import _TableBaseHDU
from .fitsrec import FITS_rec
if isinstance(input, ColDefs):
self._init_from_coldefs(input)
elif (isinstance(input, FITS_rec) and hasattr(input, '_coldefs') and
input._coldefs):
# If given a FITS_rec object we can directly copy its columns, but
# only if its columns have already been defined, otherwise this
# will loop back in on itself and blow up
self._init_from_coldefs(input._coldefs)
elif isinstance(input, np.ndarray) and input.dtype.fields is not None:
# Construct columns from the fields of a record array
self._init_from_array(input)
elif isiterable(input):
# if the input is a list of Columns
self._init_from_sequence(input)
elif isinstance(input, _TableBaseHDU):
# Construct columns from fields in an HDU header
self._init_from_table(input)
else:
raise TypeError('Input to ColDefs must be a table HDU, a list '
'of Columns, or a record/field array.')
# Listen for changes on all columns
for col in self.columns:
col._add_listener(self)
def _init_from_coldefs(self, coldefs):
"""Initialize from an existing ColDefs object (just copy the
columns and convert their formats if necessary).
"""
self.columns = [self._copy_column(col) for col in coldefs]
def _init_from_sequence(self, columns):
for idx, col in enumerate(columns):
if not isinstance(col, Column):
raise TypeError(f'Element {idx} in the ColDefs input is not a Column.')
self._init_from_coldefs(columns)
def _init_from_array(self, array):
self.columns = []
for idx in range(len(array.dtype)):
cname = array.dtype.names[idx]
ftype = array.dtype.fields[cname][0]
format = self._col_format_cls.from_recformat(ftype)
# Determine the appropriate dimensions for items in the column
dim = array.dtype[idx].shape[::-1]
if dim and (len(dim) > 0 or 'A' in format):
if 'A' in format:
# should take into account multidimensional items in the column
dimel = int(re.findall('[0-9]+', str(ftype.subdtype[0]))[0])
# n x m string arrays must include the max string
# length in their dimensions (e.g. l x n x m)
dim = (dimel,) + dim
dim = '(' + ','.join(str(d) for d in dim) + ')'
else:
dim = None
# Check for unsigned ints.
bzero = None
if ftype.base.kind == 'u':
if 'I' in format:
bzero = np.uint16(2**15)
elif 'J' in format:
bzero = np.uint32(2**31)
elif 'K' in format:
bzero = np.uint64(2**63)
c = Column(name=cname, format=format,
array=array.view(np.ndarray)[cname], bzero=bzero,
dim=dim)
self.columns.append(c)
def _init_from_table(self, table):
hdr = table._header
nfields = hdr['TFIELDS']
# go through header keywords to pick out column definition keywords
# definition dictionaries for each field
col_keywords = [{} for i in range(nfields)]
for keyword in hdr:
key = TDEF_RE.match(keyword)
try:
label = key.group('label')
except Exception:
continue # skip if there is no match
if label in KEYWORD_NAMES:
col = int(key.group('num'))
if 0 < col <= nfields:
attr = KEYWORD_TO_ATTRIBUTE[label]
value = hdr[keyword]
if attr == 'format':
# Go ahead and convert the format value to the
# appropriate ColumnFormat container now
value = self._col_format_cls(value)
col_keywords[col - 1][attr] = value
# Verify the column keywords and display any warnings if necessary;
# we only want to pass on the valid keywords
for idx, kwargs in enumerate(col_keywords):
valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs)
for val in invalid_kwargs.values():
warnings.warn(
f'Invalid keyword for column {idx + 1}: {val[1]}',
VerifyWarning)
# Special cases for recformat and dim
# TODO: Try to eliminate the need for these special cases
del valid_kwargs['recformat']
if 'dim' in valid_kwargs:
valid_kwargs['dim'] = kwargs['dim']
col_keywords[idx] = valid_kwargs
# data reading will be delayed
for col in range(nfields):
col_keywords[col]['array'] = Delayed(table, col)
# now build the columns
self.columns = [Column(**attrs) for attrs in col_keywords]
        # Add the table HDU as a listener to changes to the columns
# (either changes to individual columns, or changes to the set of
# columns (add/remove/etc.))
self._add_listener(table)
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo):
return self.__class__([copy.deepcopy(c, memo) for c in self.columns])
def _copy_column(self, column):
"""Utility function used currently only by _init_from_coldefs
to help convert columns from binary format to ASCII format or vice
versa if necessary (otherwise performs a straight copy).
"""
if isinstance(column.format, self._col_format_cls):
# This column has a FITS format compatible with this column
# definitions class (that is ascii or binary)
return column.copy()
new_column = column.copy()
# Try to use the Numpy recformat as the equivalency between the
# two formats; if that conversion can't be made then these
# columns can't be transferred
# TODO: Catch exceptions here and raise an explicit error about
# column format conversion
new_column.format = self._col_format_cls.from_column_format(column.format)
# Handle a few special cases of column format options that are not
        # compatible between ASCII and binary tables
# TODO: This is sort of hacked in right now; we really need
# separate classes for ASCII and Binary table Columns, and they
# should handle formatting issues like these
if not isinstance(new_column.format, _AsciiColumnFormat):
# the column is a binary table column...
new_column.start = None
if new_column.null is not None:
# We can't just "guess" a value to represent null
# values in the new column, so just disable this for
# now; users may modify it later
new_column.null = None
else:
# the column is an ASCII table column...
if new_column.null is not None:
new_column.null = DEFAULT_ASCII_TNULL
if (new_column.disp is not None and
new_column.disp.upper().startswith('L')):
# ASCII columns may not use the logical data display format;
# for now just drop the TDISPn option for this column as we
# don't have a systematic conversion of boolean data to ASCII
# tables yet
new_column.disp = None
return new_column
def __getattr__(self, name):
"""
Automatically returns the values for the given keyword attribute for
all `Column`s in this list.
Implements for example self.units, self.formats, etc.
"""
cname = name[:-1]
if cname in KEYWORD_ATTRIBUTES and name[-1] == 's':
attr = []
for col in self.columns:
val = getattr(col, cname)
attr.append(val if val is not None else '')
return attr
raise AttributeError(name)
@lazyproperty
def dtype(self):
# Note: This previously returned a dtype that just used the raw field
# widths based on the format's repeat count, and did not incorporate
# field *shapes* as provided by TDIMn keywords.
# Now this incorporates TDIMn from the start, which makes *this* method
# a little more complicated, but simplifies code elsewhere (for example
# fields will have the correct shapes even in the raw recarray).
formats = []
offsets = [0]
for format_, dim in zip(self.formats, self._dims):
dt = format_.dtype
if len(offsets) < len(self.formats):
# Note: the size of the *original* format_ may be greater than
# one would expect from the number of elements determined by
# dim. The FITS format allows this--the rest of the field is
# filled with undefined values.
offsets.append(offsets[-1] + dt.itemsize)
if dim:
if format_.format == 'A':
dt = np.dtype((dt.char + str(dim[-1]), dim[:-1]))
else:
dt = np.dtype((dt.base, dim))
formats.append(dt)
return np.dtype({'names': self.names,
'formats': formats,
'offsets': offsets})
@lazyproperty
def names(self):
return [col.name for col in self.columns]
@lazyproperty
def formats(self):
return [col.format for col in self.columns]
@lazyproperty
def _arrays(self):
return [col.array for col in self.columns]
@lazyproperty
def _recformats(self):
return [fmt.recformat for fmt in self.formats]
@lazyproperty
def _dims(self):
"""Returns the values of the TDIMn keywords parsed into tuples."""
return [col._dims for col in self.columns]
def __getitem__(self, key):
if isinstance(key, str):
key = _get_index(self.names, key)
x = self.columns[key]
if _is_int(key):
return x
else:
return ColDefs(x)
def __len__(self):
return len(self.columns)
def __repr__(self):
rep = 'ColDefs('
if hasattr(self, 'columns') and self.columns:
# The hasattr check is mostly just useful in debugging sessions
# where self.columns may not be defined yet
rep += '\n '
rep += '\n '.join([repr(c) for c in self.columns])
rep += '\n'
rep += ')'
return rep
def __add__(self, other, option='left'):
if isinstance(other, Column):
b = [other]
elif isinstance(other, ColDefs):
b = list(other.columns)
else:
raise TypeError('Wrong type of input.')
if option == 'left':
tmp = list(self.columns) + b
else:
tmp = b + list(self.columns)
return ColDefs(tmp)
def __radd__(self, other):
return self.__add__(other, 'right')
def __sub__(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
_other = [_get_index(self.names, key) for key in other]
indx = list(range(len(self)))
for x in _other:
indx.remove(x)
tmp = [self[i] for i in indx]
return ColDefs(tmp)
def _update_column_attribute_changed(self, column, attr, old_value,
new_value):
"""
Handle column attribute changed notifications from columns that are
members of this `ColDefs`.
`ColDefs` itself does not currently do anything with this, and just
bubbles the notification up to any listening table HDUs that may need
to update their headers, etc. However, this also informs the table of
the numerical index of the column that changed.
"""
idx = 0
for idx, col in enumerate(self.columns):
if col is column:
break
if attr == 'name':
del self.names
elif attr == 'format':
del self.formats
self._notify('column_attribute_changed', column, idx, attr, old_value,
new_value)
def add_col(self, column):
"""
Append one `Column` to the column definition.
"""
if not isinstance(column, Column):
raise AssertionError
# Ask the HDU object to load the data before we modify our columns
self._notify('load_data')
self._arrays.append(column.array)
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
self.columns.append(column)
# Listen for changes on the new column
column._add_listener(self)
# If this ColDefs is being tracked by a Table, inform the
# table that its data is now invalid.
self._notify('column_added', self, column)
return self
def del_col(self, col_name):
"""
Delete (the definition of) one `Column`.
        Parameters
        ----------
        col_name : str or int
            The column's name or index
"""
# Ask the HDU object to load the data before we modify our columns
self._notify('load_data')
indx = _get_index(self.names, col_name)
col = self.columns[indx]
del self._arrays[indx]
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
del self.columns[indx]
col._remove_listener(self)
# If this ColDefs is being tracked by a table HDU, inform the HDU (or
# any other listeners) that the column has been removed
# Just send a reference to self, and the index of the column that was
# removed
self._notify('column_removed', self, indx)
return self
def change_attrib(self, col_name, attrib, new_value):
"""
Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`.
Parameters
----------
col_name : str or int
The column name or index to change
attrib : str
The attribute name
new_value : object
The new value for the attribute
"""
setattr(self[col_name], attrib, new_value)
def change_name(self, col_name, new_name):
"""
Change a `Column`'s name.
Parameters
----------
col_name : str
The current name of the column
new_name : str
The new name of the column
"""
if new_name != col_name and new_name in self.names:
raise ValueError(f'New name {new_name} already exists.')
else:
self.change_attrib(col_name, 'name', new_name)
def change_unit(self, col_name, new_unit):
"""
Change a `Column`'s unit.
Parameters
----------
col_name : str or int
The column name or index
new_unit : str
The new unit for the column
"""
self.change_attrib(col_name, 'unit', new_unit)
def info(self, attrib='all', output=None):
"""
Get attribute(s) information of the column definition.
Parameters
----------
attrib : str
Can be one or more of the attributes listed in
``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is
``"all"`` which will print out all attributes. It forgives plurals
and blanks. If there are two or more attribute names, they must be
separated by comma(s).
output : file-like, optional
File-like object to output to. Outputs to stdout by default.
If `False`, returns the attributes as a `dict` instead.
Notes
-----
This function doesn't return anything by default; it just prints to
stdout.
"""
if output is None:
output = sys.stdout
if attrib.strip().lower() in ['all', '']:
lst = KEYWORD_ATTRIBUTES
else:
lst = attrib.split(',')
for idx in range(len(lst)):
lst[idx] = lst[idx].strip().lower()
if lst[idx][-1] == 's':
                    lst[idx] = lst[idx][:-1]
ret = {}
for attr in lst:
if output:
if attr not in KEYWORD_ATTRIBUTES:
output.write("'{}' is not an attribute of the column "
"definitions.\n".format(attr))
continue
output.write(f"{attr}:\n")
output.write(f" {getattr(self, attr + 's')}\n")
else:
ret[attr] = getattr(self, attr + 's')
if not output:
return ret
class _AsciiColDefs(ColDefs):
"""ColDefs implementation for ASCII tables."""
_padding_byte = ' '
_col_format_cls = _AsciiColumnFormat
def __init__(self, input, ascii=True):
super().__init__(input)
# if the format of an ASCII column has no width, add one
if not isinstance(input, _AsciiColDefs):
self._update_field_metrics()
else:
for idx, s in enumerate(input.starts):
self.columns[idx].start = s
self._spans = input.spans
self._width = input._width
@lazyproperty
def dtype(self):
dtype = {}
for j in range(len(self)):
data_type = 'S' + str(self.spans[j])
dtype[self.names[j]] = (data_type, self.starts[j] - 1)
return np.dtype(dtype)
@property
def spans(self):
"""A list of the widths of each field in the table."""
return self._spans
@lazyproperty
def _recformats(self):
if len(self) == 1:
widths = []
else:
widths = [y - x for x, y in pairwise(self.starts)]
# Widths is the width of each field *including* any space between
# fields; this is so that we can map the fields to string records in a
# Numpy recarray
widths.append(self._width - self.starts[-1] + 1)
return ['a' + str(w) for w in widths]
def add_col(self, column):
super().add_col(column)
self._update_field_metrics()
def del_col(self, col_name):
super().del_col(col_name)
self._update_field_metrics()
def _update_field_metrics(self):
"""
Updates the list of the start columns, the list of the widths of each
field, and the total width of each record in the table.
"""
spans = [0] * len(self.columns)
end_col = 0 # Refers to the ASCII text column, not the table col
for idx, col in enumerate(self.columns):
width = col.format.width
# Update the start columns and column span widths taking into
# account the case that the starting column of a field may not
# be the column immediately after the previous field
if not col.start:
col.start = end_col + 1
end_col = col.start + width - 1
spans[idx] = width
self._spans = spans
self._width = end_col
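    # A minimal sketch of the metrics computed above (assumed formats): for
    # two ASCII columns with formats 'A5' and 'I4' and no explicit TBCOLn
    # values, the starts become [1, 6], the spans [5, 4], and the total
    # record width 9, since each field begins right after the previous one.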
# Utilities
class _VLF(np.ndarray):
"""Variable length field object."""
def __new__(cls, input, dtype='a'):
"""
Parameters
----------
input
a sequence of variable-sized elements.
"""
if dtype == 'a':
try:
# this handles ['abc'] and [['a','b','c']]
# equally, beautiful!
input = [chararray.array(x, itemsize=1) for x in input]
except Exception:
raise ValueError(
f'Inconsistent input data array: {input}')
a = np.array(input, dtype=object)
self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a,
dtype=object)
self.max = 0
self.element_dtype = dtype
return self
def __array_finalize__(self, obj):
if obj is None:
return
self.max = obj.max
self.element_dtype = obj.element_dtype
def __setitem__(self, key, value):
"""
        Make sure the new item has a consistent data type, to avoid
        misalignment.
"""
if isinstance(value, np.ndarray) and value.dtype == self.dtype:
pass
elif isinstance(value, chararray.chararray) and value.itemsize == 1:
pass
elif self.element_dtype == 'a':
value = chararray.array(value, itemsize=1)
else:
value = np.array(value, dtype=self.element_dtype)
np.ndarray.__setitem__(self, key, value)
self.max = max(self.max, len(value))
def tolist(self):
return [list(item) for item in super().tolist()]
def _get_index(names, key):
"""
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field.
"""
if _is_int(key):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = names.index(key.rstrip())
except ValueError:
            # try to match case-insensitively
_key = key.lower().rstrip()
names = [n.lower().rstrip() for n in names]
count = names.count(_key) # occurrence of _key in names
if count == 1:
indx = names.index(_key)
elif count == 0:
raise KeyError(f"Key '{key}' does not exist.")
else: # multiple match
raise KeyError(f"Ambiguous key name '{key}'.")
else:
raise KeyError(f"Illegal key '{key!r}'.")
return indx
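# Illustrative examples of the matching rules above (assumed name list):
#
#     names = ['ABC', 'abc', 'XYZ']
#     _get_index(names, 'abc')   # -> 1 (exact match wins)
#     _get_index(names, 'xyz')   # -> 2 (unique case-insensitive match)
#     _get_index(names, 'Abc')   # raises KeyError (ambiguous)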
def _unwrapx(input, output, repeat):
"""
Unwrap the X format column into a Boolean array.
Parameters
----------
input
        input ``uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits
"""
pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype='uint8')
nbytes = ((repeat - 1) // 8) + 1
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8])
def _wrapx(input, output, repeat):
"""
    Wrap the X format column Boolean array into a ``uint8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
        output ``uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits
"""
output[...] = 0 # reset the output
nbytes = ((repeat - 1) // 8) + 1
unused = nbytes * 8 - repeat
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
if j != _min:
np.left_shift(output[..., i], 1, output[..., i])
np.add(output[..., i], input[..., j], output[..., i])
# shift the unused bits
np.left_shift(output[..., i], unused, output[..., i])
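# Round-trip sketch for the two helpers above (assumed 11-bit example):
#
#     bits = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1]], dtype=bool)
#     packed = np.zeros((1, 2), dtype='uint8')
#     _wrapx(bits, packed, 11)            # packed == [[215, 32]]
#     out = np.zeros((1, 11), dtype=bool)
#     _unwrapx(packed, out, 11)           # recovers the original bits
#
# The nbytes * 8 - repeat = 5 unused bits end up as left-shifted padding in
# the last byte.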
def _makep(array, descr_output, format, nrows=None):
"""
Construct the P (or Q) format column array, both the data descriptors and
the data. It returns the output "data" array of data type `dtype`.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array
"""
# TODO: A great deal of this is redundant with FITS_rec._convert_p; see if
# we can merge the two somehow.
_offset = 0
if not nrows:
nrows = len(array)
data_output = _VLF([None] * nrows, dtype=format.dtype)
if format.dtype == 'a':
_nbytes = 1
else:
_nbytes = np.array([], dtype=format.dtype).itemsize
for idx in range(nrows):
if idx < len(array):
rowval = array[idx]
else:
if format.dtype == 'a':
rowval = ' ' * data_output.max
else:
rowval = [0] * data_output.max
if format.dtype == 'a':
data_output[idx] = chararray.array(encode_ascii(rowval),
itemsize=1)
else:
data_output[idx] = np.array(rowval, dtype=format.dtype)
descr_output[idx, 0] = len(data_output[idx])
descr_output[idx, 1] = _offset
_offset += len(data_output[idx]) * _nbytes
return data_output
def _parse_tformat(tform):
"""Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple.
"""
try:
(repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()
except Exception:
        # TODO: Maybe catch this error and use a default type (bytes, maybe?) for
# unrecognized column types. As long as we can determine the correct
# byte width somehow..
raise VerifyError(f'Format {tform!r} is not recognized.')
if repeat == '':
repeat = 1
else:
repeat = int(repeat)
return (repeat, format.upper(), option)
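# For example (illustrative): _parse_tformat('11X') returns (11, 'X', '') and
# _parse_tformat('E') returns (1, 'E', ''), since an omitted repeat count
# defaults to 1.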
def _parse_ascii_tformat(tform, strict=False):
"""
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D').
"""
match = TFORMAT_ASCII_RE.match(tform.strip())
if not match:
raise VerifyError(f'Format {tform!r} is not recognized.')
# Be flexible on case
format = match.group('format')
if format is None:
# Floating point format
format = match.group('formatf').upper()
width = match.group('widthf')
precision = match.group('precision')
if width is None or precision is None:
if strict:
                raise VerifyError('Format {!r} is not unambiguously an ASCII '
                                  'table format.'.format(tform))
else:
width = 0 if width is None else width
precision = 1 if precision is None else precision
else:
format = format.upper()
width = match.group('width')
if width is None:
if strict:
                raise VerifyError('Format {!r} is not unambiguously an ASCII '
                                  'table format.'.format(tform))
else:
# Just use a default width of 0 if unspecified
width = 0
precision = 0
def convert_int(val):
msg = ('Format {!r} is not valid--field width and decimal precision '
'must be integers.')
try:
val = int(val)
except (ValueError, TypeError):
raise VerifyError(msg.format(tform))
return val
if width and precision:
# This should only be the case for floating-point formats
width, precision = convert_int(width), convert_int(precision)
elif width:
# Just for integer/string formats; ignore precision
width = convert_int(width)
else:
# For any format, if width was unspecified use the set defaults
width, precision = ASCII_DEFAULT_WIDTHS[format]
if width <= 0:
raise VerifyError("Format {!r} not valid--field width must be a "
"positive integeter.".format(tform))
if precision >= width:
raise VerifyError("Format {!r} not valid--the number of decimal digits "
"must be less than the format's total "
"width {}.".format(tform, width))
return format, width, precision
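# For example (illustrative): _parse_ascii_tformat('F8.3') returns
# ('F', 8, 3), while _parse_ascii_tformat('I10') returns ('I', 10, 0),
# since the precision only applies to the floating-point formats.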
def _parse_tdim(tdim):
"""Parse the ``TDIM`` value into a tuple (may return an empty tuple if
the value ``TDIM`` value is empty or invalid).
"""
m = tdim and TDIM_RE.match(tdim)
if m:
dims = m.group('dims')
return tuple(int(d.strip()) for d in dims.split(','))[::-1]
# Ignore any dim values that don't specify a multidimensional column
return tuple()
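# For example (illustrative): _parse_tdim('(2,3)') returns (3, 2); the
# Fortran-order dims from the FITS keyword are reversed into the C order
# used by Numpy. A value that does not match TDIM_RE yields an empty tuple.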
def _scalar_to_format(value):
"""
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER.
"""
# First, if value is a string, try to convert to the appropriate scalar
# value
for type_ in (int, float, complex):
try:
value = type_(value)
break
except ValueError:
continue
numpy_dtype_str = np.min_scalar_type(value).str
numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness
try:
fits_format = NUMPY2FITS[numpy_dtype_str]
return FITSUPCONVERTERS.get(fits_format, fits_format)
except KeyError:
return "A" + str(len(value))
def _cmp_recformats(f1, f2):
"""
Compares two numpy recformats using the ordering given by FORMATORDER.
"""
if f1[0] == 'a' and f2[0] == 'a':
return cmp(int(f1[1:]), int(f2[1:]))
else:
f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]
return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2))
def _convert_fits2record(format):
"""
Convert FITS format spec to record format spec.
"""
repeat, dtype, option = _parse_tformat(format)
if dtype in FITS2NUMPY:
if dtype == 'A':
output_format = FITS2NUMPY[dtype] + str(repeat)
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
# Technically the FITS standard does not allow this but it's a very
# common mistake
if format.lstrip()[0] == 'A' and option != '':
# make sure option is integer
output_format = FITS2NUMPY[dtype] + str(int(option))
else:
repeat_str = ''
if repeat != 1:
repeat_str = str(repeat)
output_format = repeat_str + FITS2NUMPY[dtype]
elif dtype == 'X':
output_format = _FormatX(repeat)
elif dtype == 'P':
output_format = _FormatP.from_tform(format)
elif dtype == 'Q':
output_format = _FormatQ.from_tform(format)
elif dtype == 'F':
output_format = 'f8'
else:
raise ValueError(f'Illegal format `{format}`.')
return output_format
def _convert_record2fits(format):
"""
Convert record format spec to FITS format spec.
"""
recformat, kind, dtype = _dtype_to_recformat(format)
shape = dtype.shape
itemsize = dtype.base.itemsize
if dtype.char == 'U' or (dtype.subdtype is not None
and dtype.subdtype[0].char == 'U'):
# Unicode dtype--itemsize is 4 times actual ASCII character length,
        # which is what matters for FITS column formats
        # Use dtype.base and dtype.subdtype to handle multi-dimensional items
itemsize = itemsize // 4
option = str(itemsize)
ndims = len(shape)
repeat = 1
if ndims > 0:
nel = np.array(shape, dtype='i8').prod()
if nel > 1:
repeat = nel
if kind == 'a':
# This is a kludge that will place string arrays into a
# single field, so at least we won't lose data. Need to
# use a TDIM keyword to fix this, declaring as (slength,
# dim1, dim2, ...) as mwrfits does
ntot = int(repeat) * int(option)
output_format = str(ntot) + 'A'
elif recformat in NUMPY2FITS: # record format
if repeat != 1:
repeat = str(repeat)
else:
repeat = ''
output_format = repeat + NUMPY2FITS[recformat]
else:
raise ValueError(f'Illegal format `{format}`.')
return output_format
def _dtype_to_recformat(dtype):
"""
Utility function for converting a dtype object or string that instantiates
a dtype (e.g. 'float32') into one of the two character Numpy format codes
that have been traditionally used by Astropy.
In particular, use of 'a' to refer to character data is long since
deprecated in Numpy, but Astropy remains heavily invested in its use
(something to try to get away from sooner rather than later).
"""
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
kind = dtype.base.kind
if kind in ('U', 'S'):
recformat = kind = 'a'
else:
itemsize = dtype.base.itemsize
recformat = kind + str(itemsize)
return recformat, kind, dtype
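# For example (illustrative): _dtype_to_recformat('float32') returns
# ('f4', 'f', dtype('float32')), while _dtype_to_recformat('U5') returns
# ('a', 'a', dtype('<U5')), both string kinds collapsing to the legacy 'a'
# code described above.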
def _convert_format(format, reverse=False):
"""
Convert FITS format spec to record format spec. Do the opposite if
reverse=True.
"""
if reverse:
return _convert_record2fits(format)
else:
return _convert_fits2record(format)
def _convert_ascii_format(format, reverse=False):
"""Convert ASCII table format spec to record format spec."""
if reverse:
recformat, kind, dtype = _dtype_to_recformat(format)
itemsize = dtype.itemsize
if kind == 'a':
return 'A' + str(itemsize)
elif NUMPY2FITS.get(recformat) == 'L':
# Special case for logical/boolean types--for ASCII tables we
# represent these as single character columns containing 'T' or 'F'
# (a la the storage format for Logical columns in binary tables)
return 'A1'
elif kind == 'i':
# Use for the width the maximum required to represent integers
# of that byte size plus 1 for signs, but use a minimum of the
# default width (to keep with existing behavior)
width = 1 + len(str(2 ** (itemsize * 8)))
width = max(width, ASCII_DEFAULT_WIDTHS['I'][0])
return 'I' + str(width)
elif kind == 'f':
# This is tricky, but go ahead and use D if float-64, and E
# if float-32 with their default widths
if itemsize >= 8:
format = 'D'
else:
format = 'E'
width = '.'.join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])
return format + width
# TODO: There may be reasonable ways to represent other Numpy types so
# let's see what other possibilities there are besides just 'a', 'i',
# and 'f'. If it doesn't have a reasonable ASCII representation then
# raise an exception
else:
format, width, precision = _parse_ascii_tformat(format)
# This gives a sensible "default" dtype for a given ASCII
# format code
recformat = ASCII2NUMPY[format]
# The following logic is taken from CFITSIO:
# For integers, if the width <= 4 we can safely use 16-bit ints for all
        # values; if width >= 10 we may need to accommodate 64-bit values
        # [for the non-standard J format code just always force 64-bit].
if format == 'I':
if width <= 4:
recformat = 'i2'
elif width > 9:
recformat = 'i8'
elif format == 'A':
recformat += str(width)
return recformat
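# For example (illustrative): _convert_ascii_format('I4') returns 'i2' and
# _convert_ascii_format('I10') returns 'i8', following the CFITSIO width
# rules above. In the reverse direction, _convert_ascii_format('i4',
# reverse=True) returns an 'I' format wide enough for any 32-bit integer.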
def _parse_tdisp_format(tdisp):
"""
Parse the ``TDISPn`` keywords for ASCII and binary tables into a
``(format, width, precision, exponential)`` tuple (the TDISP values
    for ASCII and binary are identical except for 'Lw',
    which is only present in BINTABLE extensions).
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
formatc: str
The format characters from TDISPn
width: str
The width int value from TDISPn
precision: str
The precision int value from TDISPn
exponential: str
The exponential int value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
fmt_key = tdisp[0] if tdisp[0] != 'E' or (
len(tdisp) > 1 and tdisp[1] not in 'NS') else tdisp[:2]
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError(f'Format {tdisp} is not recognized.')
match = tdisp_re.match(tdisp.strip())
if not match or match.group('formatc') is None:
raise VerifyError(f'Format {tdisp} is not recognized.')
formatc = match.group('formatc')
width = match.group('width')
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ('I', 'B', 'O', 'Z', 'F', 'E', 'G', 'D'):
precision = match.group('precision')
if precision is None:
precision = 1
if tdisp[0] in ('E', 'D', 'G') and tdisp[1] not in ('N', 'S'):
exponential = match.group('exponential')
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential
def _fortran_to_python_format(tdisp):
"""
Turn the TDISPn fortran format pieces into a final Python format string.
    See the format_type definitions above the TDISP_FMT_DICT. If the codes are
    changed to take advantage of the exponential specification, it will need
    to be added as another input parameter.
Parameters
----------
tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
format_string: str
The TDISPn keyword string translated into a Python format string.
"""
format_type, width, precision, exponential = _parse_tdisp_format(tdisp)
try:
fmt = TDISP_FMT_DICT[format_type]
return fmt.format(width=width, precision=precision)
except KeyError:
raise VerifyError(f'Format {format_type} is not recognized.')
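# Illustrative sketch (the exact output depends on TDISP_FMT_DICT): a
# fixed-point TDISP value such as 'F6.2' is expected to translate to a
# Python format string along the lines of '{:6.2f}', so that
#
#     _fortran_to_python_format('F6.2').format(3.14159)
#
# would render the number with a width of 6 and 2 decimal places.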
def python_to_tdisp(format_string, logical_dtype=False):
"""
    Turn a Python format string into a TDISP FITS-compliant format string. Not
    all formats convert; these will cause a Warning and return None.
Parameters
----------
format_string : str
        The Python format string to translate into a TDISPn keyword value.
    logical_dtype : bool
        True if this format type should be a logical type, 'L'. Needs special
handling.
Returns
-------
    tdisp_string: str
        The Python format string translated into a TDISPn keyword value.
"""
fmt_to_tdisp = {'a': 'A', 's': 'A', 'd': 'I', 'b': 'B', 'o': 'O', 'x': 'Z',
'X': 'Z', 'f': 'F', 'F': 'F', 'g': 'G', 'G': 'G', 'e': 'E',
'E': 'E'}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == '{' and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip('}')
elif format_string[0] == '%':
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = '', ''
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == '>' and fmt_str[1] != '0':
ftype = fmt_to_tdisp['a']
width = fmt_str[1:]
elif fmt_str[-1] == 's' and fmt_str != 's':
ftype = fmt_to_tdisp['a']
width = fmt_str[:-1].lstrip('0')
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != '0':
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if '.' in fmt_str:
width, precision = fmt_str.split('.')
sep = '.'
if width == "":
ascii_key = ftype if ftype != 'G' else 'F'
width = str(int(precision) + (ASCII_DEFAULT_WIDTHS[ascii_key][0] -
ASCII_DEFAULT_WIDTHS[ascii_key][1]))
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn('Format {} cannot be mapped to the accepted '
'TDISPn keyword values. Format will not be '
'moved into TDISPn keyword.'.format(format_string),
AstropyUserWarning)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = 'L'
return ftype + width + sep + precision
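# For example (illustrative): python_to_tdisp('{:6.2f}') returns 'F6.2',
# while an untranslatable format such as '{:^10}' issues the warning above
# and returns None.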
|
24245b2060e465c2bd045544b90565507dc58cc65dcd28dd6cb30a06591b47e1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Define the Enhanced Character-Separated-Values (ECSV) which allows for reading and
writing all the meta data associated with an astropy Table object.
"""
import re
from collections import OrderedDict
import warnings
import json
import numpy as np
from . import core, basic
from astropy.table import meta, serialize
from astropy.utils.data_info import serialize_context_as
from astropy.utils.exceptions import AstropyUserWarning
from astropy.io.ascii.core import convert_numpy
ECSV_VERSION = '1.0'
DELIMITERS = (' ', ',')
ECSV_DATATYPES = (
'bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16',
'uint32', 'uint64', 'float16', 'float32', 'float64',
'float128', 'string') # Raise warning if not one of these standard dtypes
class InvalidEcsvDatatypeWarning(AstropyUserWarning):
"""
ECSV specific Astropy warning class.
"""
class EcsvHeader(basic.BasicHeader):
"""Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""Return only non-blank lines that start with the comment regexp. For these
lines strip out the matching characters and leading/trailing whitespace."""
re_comment = re.compile(self.comment)
for line in lines:
line = line.strip()
if not line:
continue
match = re_comment.match(line)
if match:
out = line[match.end():]
if out:
yield out
else:
# Stop iterating on first failed match for a non-blank line
return
def write(self, lines):
"""
Write header information in the ECSV ASCII format.
This function is called at the point when preprocessing has been done to
convert the input table columns to `self.cols` which is a list of
`astropy.io.ascii.core.Column` objects. In particular `col.str_vals`
is available for each column with the string representation of each
column item for output.
This format starts with a delimiter separated list of the column names
in order to make this format readable by humans and simple csv-type
readers. It then encodes the full table meta and column attributes and
meta as YAML and pretty-prints this in the header. Finally the
delimited column names are repeated again, for humans and readers that
look for the *last* comment line as defining the column names.
"""
if self.splitter.delimiter not in DELIMITERS:
raise ValueError('only space and comma are allowed for delimiter in ECSV format')
# Now assemble the header dict that will be serialized by the YAML dumper
header = {'cols': self.cols, 'schema': 'astropy-2.0'}
if self.table_meta:
header['meta'] = self.table_meta
# Set the delimiter only for the non-default option(s)
if self.splitter.delimiter != ' ':
header['delimiter'] = self.splitter.delimiter
header_yaml_lines = ([f'%ECSV {ECSV_VERSION}',
'---']
+ meta.get_yaml_from_header(header))
lines.extend([self.write_comment + line for line in header_yaml_lines])
lines.append(self.splitter.join([x.info.name for x in self.cols]))
def write_comments(self, lines, meta):
"""
WRITE: Override the default write_comments to do nothing since this is handled
in the custom write method.
"""
pass
def update_meta(self, lines, meta):
"""
READ: Override the default update_meta to do nothing. This process is done
in get_cols() for this reader.
"""
pass
def get_cols(self, lines):
"""
READ: Initialize the header Column objects from the table ``lines``.
Parameters
----------
lines : list
List of table lines
"""
# Cache a copy of the original input lines before processing below
raw_lines = lines
# Extract non-blank comment (header) lines with comment character stripped
lines = list(self.process_lines(lines))
        # Validate that this is an ECSV file
ecsv_header_re = r"""%ECSV [ ]
(?P<major> \d+)
\. (?P<minor> \d+)
\.? (?P<bugfix> \d+)? $"""
no_header_msg = ('ECSV header line like "# %ECSV <version>" not found as first line.'
                         ' This is required for an ECSV file.')
if not lines:
raise core.InconsistentTableError(no_header_msg)
match = re.match(ecsv_header_re, lines[0].strip(), re.VERBOSE)
if not match:
raise core.InconsistentTableError(no_header_msg)
try:
header = meta.get_header_from_yaml(lines)
except meta.YamlParseError:
raise core.InconsistentTableError('unable to parse yaml in meta header')
if 'meta' in header:
self.table_meta = header['meta']
if 'delimiter' in header:
delimiter = header['delimiter']
if delimiter not in DELIMITERS:
raise ValueError('only space and comma are allowed for delimiter in ECSV format')
self.splitter.delimiter = delimiter
self.data.splitter.delimiter = delimiter
# Create the list of io.ascii column objects from `header`
header_cols = OrderedDict((x['name'], x) for x in header['datatype'])
self.names = [x['name'] for x in header['datatype']]
# Read the first non-commented line of table and split to get the CSV
# header column names. This is essentially what the Basic reader does.
header_line = next(super().process_lines(raw_lines))
header_names = next(self.splitter([header_line]))
# Check for consistency of the ECSV vs. CSV header column names
if header_names != self.names:
raise core.InconsistentTableError('column names from ECSV header {} do not '
'match names from header line of CSV data {}'
.format(self.names, header_names))
# BaseHeader method to create self.cols, which is a list of
# io.ascii.core.Column objects (*not* Table Column objects).
self._set_cols_from_names()
# Transfer attributes from the column descriptor stored in the input
# header YAML metadata to the new columns to create this table.
for col in self.cols:
for attr in ('description', 'format', 'unit', 'meta', 'subtype'):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
col.dtype = header_cols[col.name]['datatype']
# Warn if col dtype is not a valid ECSV datatype, but allow reading for
# back-compatibility with existing older files that have numpy datatypes
# like datetime64 or object or python str, which are not in the ECSV standard.
if col.dtype not in ECSV_DATATYPES:
msg = (f'unexpected datatype {col.dtype!r} of column {col.name!r} '
f'is not in allowed ECSV datatypes {ECSV_DATATYPES}. '
'Using anyway as a numpy dtype but beware since unexpected '
'results are possible.')
warnings.warn(msg, category=InvalidEcsvDatatypeWarning)
# Subtype is written like "int64[2,null]" and we want to split this
# out to "int64" and [2, None].
subtype = col.subtype
if subtype and '[' in subtype:
idx = subtype.index('[')
col.subtype = subtype[:idx]
col.shape = json.loads(subtype[idx:])
# Convert ECSV "string" to numpy "str"
for attr in ('dtype', 'subtype'):
if getattr(col, attr) == 'string':
setattr(col, attr, 'str')
# ECSV subtype of 'json' maps to numpy 'object' dtype
if col.subtype == 'json':
col.subtype = 'object'
def _check_dtype_is_str(col):
if col.dtype != 'str':
raise ValueError(f'datatype of column {col.name!r} must be "string"')
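# A minimal sketch of the ECSV layout handled above (illustrative content):
#
#     # %ECSV 1.0
#     # ---
#     # datatype:
#     # - {name: a, datatype: int64}
#     # - {name: b, datatype: float64}
#     a b
#     1 2.0
#
# The commented YAML block carries the table and column metadata, and the
# plain 'a b' line repeats the column names for simple CSV readers.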
class EcsvOutputter(core.TableOutputter):
"""
After reading the input lines and processing, convert the Reader columns
and metadata to an astropy.table.Table object. This overrides the default
converters to be an empty list because there is no "guessing" of the
conversion function.
"""
default_converters = []
def __call__(self, cols, meta):
# Convert to a Table with all plain Column subclass columns
out = super().__call__(cols, meta)
# If mixin columns exist (based on the special '__mixin_columns__'
# key in the table ``meta``), then use that information to construct
# appropriate mixin columns and remove the original data columns.
# If no __mixin_columns__ exists then this function just passes back
# the input table.
out = serialize._construct_mixins_from_columns(out)
return out
def _convert_vals(self, cols):
"""READ: Convert str_vals in `cols` to final arrays with correct dtypes.
This is adapted from ``BaseOutputter._convert_vals``. In the case of ECSV
there is no guessing and all types are known in advance. A big change
is handling the possibility of JSON-encoded values, both unstructured
object data and structured values that may contain masked data.
"""
for col in cols:
try:
# 1-d or N-d object columns are serialized as JSON.
if col.subtype == 'object':
_check_dtype_is_str(col)
col_vals = [json.loads(val) for val in col.str_vals]
col.data = np.empty([len(col_vals)] + col.shape, dtype=object)
col.data[...] = col_vals
# Variable length arrays with shape (n, m, ..., *) for fixed
# n, m, .. and variable in last axis. Masked values here are
# not currently supported.
elif col.shape and col.shape[-1] is None:
_check_dtype_is_str(col)
# Empty (blank) values in original ECSV are changed to "0"
# in str_vals with corresponding col.mask being created and
# set accordingly. Instead use an empty list here.
if hasattr(col, 'mask'):
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = '[]'
# Remake as a 1-d object column of numpy ndarrays or
# MaskedArray using the datatype specified in the ECSV file.
col_vals = []
for str_val in col.str_vals:
obj_val = json.loads(str_val) # list or nested lists
try:
arr_val = np.array(obj_val, dtype=col.subtype)
except TypeError:
# obj_val has entries that are inconsistent with
# dtype. For a valid ECSV file the only possibility
# is None values (indicating missing values).
data = np.array(obj_val, dtype=object)
# Replace all the None with an appropriate fill value
mask = (data == None) # noqa: E711
kind = np.dtype(col.subtype).kind
data[mask] = {'U': '', 'S': b''}.get(kind, 0)
arr_val = np.ma.array(data.astype(col.subtype), mask=mask)
col_vals.append(arr_val)
col.shape = ()
col.dtype = np.dtype(object)
                    # np.array(col_vals, dtype=object) can fail here, so use
                    # this workaround:
col.data = np.empty(len(col_vals), dtype=object)
col.data[:] = col_vals
# Multidim columns with consistent shape (n, m, ...). These
# might be masked.
elif col.shape:
_check_dtype_is_str(col)
# Change empty (blank) values in original ECSV to something
# like "[[null, null],[null,null]]" so subsequent JSON
# decoding works. Delete `col.mask` so that later code in
# core TableOutputter.__call__() that deals with col.mask
# does not run (since handling is done here already).
if hasattr(col, 'mask'):
all_none_arr = np.full(shape=col.shape, fill_value=None, dtype=object)
all_none_json = json.dumps(all_none_arr.tolist())
for idx in np.nonzero(col.mask)[0]:
col.str_vals[idx] = all_none_json
del col.mask
col_vals = [json.loads(val) for val in col.str_vals]
# Make a numpy object array of col_vals to look for None
# (masked values)
data = np.array(col_vals, dtype=object)
mask = (data == None) # noqa: E711
if not np.any(mask):
# No None's, just convert to required dtype
col.data = data.astype(col.subtype)
else:
# Replace all the None with an appropriate fill value
kind = np.dtype(col.subtype).kind
data[mask] = {'U': '', 'S': b''}.get(kind, 0)
# Finally make a MaskedArray with the filled data + mask
col.data = np.ma.array(data.astype(col.subtype), mask=mask)
# Regular scalar value column
else:
if col.subtype:
warnings.warn(f'unexpected subtype {col.subtype!r} set for column '
f'{col.name!r}, using dtype={col.dtype!r} instead.',
category=InvalidEcsvDatatypeWarning)
converter_func, _ = convert_numpy(col.dtype)
col.data = converter_func(col.str_vals)
if col.data.shape[1:] != tuple(col.shape):
raise ValueError('shape mismatch between value and column specifier')
except json.JSONDecodeError:
raise ValueError(f'column {col.name!r} failed to convert: '
'column value is not valid JSON')
except Exception as exc:
raise ValueError(f'column {col.name!r} failed to convert: {exc}')
class EcsvData(basic.BasicData):
def _set_fill_values(self, cols):
"""READ: Set the fill values of the individual cols based on fill_values of BaseData
For ECSV handle the corner case of data that has been serialized using
the serialize_method='data_mask' option, which writes the full data and
mask directly, AND where that table includes a string column with zero-length
string entries ("") which are valid data.
Normally the super() method will set col.fill_value=('', '0') to replace
blanks with a '0'. But for that corner case subset, instead do not do
any filling.
"""
super()._set_fill_values(cols)
# Get the serialized columns spec. It might not exist and there might
# not even be any table meta, so punt in those cases.
try:
scs = self.header.table_meta['__serialized_columns__']
except (AttributeError, KeyError):
return
# Got some serialized columns, so check for string type and serialized
# as a MaskedColumn. Without 'data_mask', MaskedColumn objects are
# stored to ECSV as normal columns.
for col in cols:
if (col.dtype == 'str' and col.name in scs
and scs[col.name]['__class__'] == 'astropy.table.column.MaskedColumn'):
col.fill_values = {} # No data value replacement
def str_vals(self):
"""WRITE: convert all values in table to a list of lists of strings
This version considerably simplifies the base method:
- No need to set fill values and column formats
- No per-item formatting, just use repr()
- Use JSON for object-type or multidim values
- Only Column or MaskedColumn can end up as cols here.
- Only replace masked values with "", not the generalized filling
"""
for col in self.cols:
if len(col.shape) > 1 or col.info.dtype.kind == 'O':
def format_col_item(idx):
obj = col[idx]
try:
obj = obj.tolist()
except AttributeError:
pass
return json.dumps(obj, separators=(',', ':'))
else:
def format_col_item(idx):
return str(col[idx])
try:
col.str_vals = [format_col_item(idx) for idx in range(len(col))]
except TypeError as exc:
raise TypeError(f'could not convert column {col.info.name!r}'
f' to string: {exc}') from exc
# Replace every masked value in a 1-d column with an empty string.
# For multi-dim columns this gets done by JSON via "null".
if hasattr(col, 'mask') and col.ndim == 1:
for idx in col.mask.nonzero()[0]:
col.str_vals[idx] = ""
out = [col.str_vals for col in self.cols]
return out
class Ecsv(basic.Basic):
"""ECSV (Enhanced Character Separated Values) format table.
    The ECSV format allows for specification of key table and column meta-data, in
particular the data type and unit.
See: https://github.com/astropy/astropy-APEs/blob/main/APE6.rst
Examples
--------
>>> from astropy.table import Table
>>> ecsv_content = '''# %ECSV 0.9
... # ---
... # datatype:
... # - {name: a, unit: m / s, datatype: int64, format: '%03d'}
... # - {name: b, unit: km, datatype: int64, description: This is column b}
... a b
... 001 2
... 004 3
... '''
>>> Table.read(ecsv_content, format='ascii.ecsv')
<Table length=2>
a b
m / s km
int64 int64
----- -----
001 2
004 3
"""
_format_name = 'ecsv'
_description = 'Enhanced CSV'
_io_registry_suffix = '.ecsv'
header_class = EcsvHeader
data_class = EcsvData
outputter_class = EcsvOutputter
max_ndim = None # No limit on column dimensionality
def update_table_data(self, table):
"""
Update table columns in place if mixin columns are present.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
with serialize_context_as('ecsv'):
out = serialize.represent_mixins_as_columns(table)
return out
|
e469ac95e14bdd44c39df878a3541f115ac94c8ce2d393094c9642648eef64d5 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import copy
import gc
import pickle
import re
import sys
import warnings
import pytest
import numpy as np
from numpy import char as chararray
try:
import objgraph
HAVE_OBJGRAPH = True
except ImportError:
HAVE_OBJGRAPH = False
from astropy.io import fits
from astropy.table import Table
from astropy.units import UnitsWarning, Unit, UnrecognizedUnit
from astropy.utils.compat import NUMPY_LT_1_22, NUMPY_LT_1_22_1
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning
from astropy.io.fits.column import ColumnAttribute, Delayed, NUMPY2FITS
from astropy.io.fits.util import decode_ascii
from astropy.io.fits.verify import VerifyError
from . import FitsTestCase
def comparefloats(a, b):
"""
Compare two float scalars or arrays and see if they are consistent
Consistency is determined ensuring the difference is less than the
expected amount. Return True if consistent, False if any differences.
"""
aa = a
bb = b
# compute expected precision
if aa.dtype.name == 'float32' or bb.dtype.name == 'float32':
precision = 0.000001
else:
precision = 0.0000000000000001
precision = 0.00001 # until precision problem is fixed in astropy.io.fits
diff = np.absolute(aa - bb)
mask0 = aa == 0
masknz = aa != 0.
if np.any(mask0):
if diff[mask0].max() != 0.:
return False
if np.any(masknz):
if (diff[masknz] / np.absolute(aa[masknz])).max() > precision:
return False
return True
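# For example (illustrative): under the relaxed 1e-5 tolerance above,
# comparefloats(np.array([1.0]), np.array([1.0 + 1e-7])) is True, while a
# one-percent relative difference would make it return False.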
def comparerecords(a, b):
"""
Compare two record arrays
Does this field by field, using approximation testing for float columns
(Complex not yet handled.)
Column names not compared, but column types and sizes are.
"""
nfieldsa = len(a.dtype.names)
nfieldsb = len(b.dtype.names)
if nfieldsa != nfieldsb:
print("number of fields don't match")
return False
for i in range(nfieldsa):
fielda = a.field(i)
fieldb = b.field(i)
if fielda.dtype.char == 'S':
fielda = decode_ascii(fielda)
if fieldb.dtype.char == 'S':
fieldb = decode_ascii(fieldb)
if (not isinstance(fielda, type(fieldb)) and not
isinstance(fieldb, type(fielda))):
print("type(fielda): ", type(fielda), " fielda: ", fielda)
print("type(fieldb): ", type(fieldb), " fieldb: ", fieldb)
print(f'field {i} type differs')
return False
if len(fielda) and isinstance(fielda[0], np.floating):
if not comparefloats(fielda, fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f'field {i} differs')
return False
elif (isinstance(fielda, fits.column._VLF) or
isinstance(fieldb, fits.column._VLF)):
            for row in range(len(fielda)):
                if np.any(fielda[row] != fieldb[row]):
                    print(f'fielda[{row}]: {fielda[row]}')
                    print(f'fieldb[{row}]: {fieldb[row]}')
                    print(f'field {i} differs in row {row}')
                    # A mismatch in a variable-length field means the
                    # records differ, so report failure, not just print it.
                    return False
else:
if np.any(fielda != fieldb):
print("fielda: ", fielda)
print("fieldb: ", fieldb)
print(f'field {i} differs')
return False
return True
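# Behavior sketch for comparerecords (illustrative, not part of any test):
# column names may differ; only types, sizes and values are compared.
#
#     r1 = np.rec.array([(1, 2.0)], names='a, b')
#     r2 = np.rec.array([(1, 2.0)], names='x, y')
#     assert comparerecords(r1, r2)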
def _assert_attr_col(new_tbhdu, tbhdu):
"""
Helper function to compare column attributes
"""
# Double check that the headers are equivalent
assert tbhdu.columns.names == new_tbhdu.columns.names
attrs = [k for k, v in fits.Column.__dict__.items()
if isinstance(v, ColumnAttribute)]
for name in tbhdu.columns.names:
col = tbhdu.columns[name]
new_col = new_tbhdu.columns[name]
for attr in attrs:
if getattr(col, attr) and getattr(new_col, attr):
assert getattr(col, attr) == getattr(new_col, attr)
class TestTableFunctions(FitsTestCase):
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
This is like the test of the same name in test_image, but tests this
for tables as well.
"""
ifd = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU()])
thdr = ifd[1].header
thdr['FILENAME'] = 'labq01i3q_rawtag.fits'
thdu = fits.BinTableHDU(header=thdr)
ofd = fits.HDUList(thdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert thdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# open some existing FITS files:
tt = fits.open(self.data('tb.fits'))
fd = fits.open(self.data('test0.fits'))
# create some local arrays
a1 = chararray.array(['abc', 'def', 'xx'])
r1 = np.array([11., 12., 13.], dtype=np.float32)
# create a table from scratch, using a mixture of columns from existing
# tables and locally created arrays:
# first, create individual column definitions
c1 = fits.Column(name='abc', format='3A', array=a1)
c2 = fits.Column(name='def', format='E', array=r1)
a3 = np.array([3, 4, 5], dtype='i2')
c3 = fits.Column(name='xyz', format='I', array=a3)
a4 = np.array([1, 2, 3], dtype='i2')
c4 = fits.Column(name='t1', format='I', array=a4)
a5 = np.array([3 + 3j, 4 + 4j, 5 + 5j], dtype='c8')
c5 = fits.Column(name='t2', format='C', array=a5)
        # Note that the X format requires a two-dimensional array
a6 = np.array([[0], [1], [0]], dtype=np.uint8)
c6 = fits.Column(name='t3', format='X', array=a6)
a7 = np.array([101, 102, 103], dtype='i4')
c7 = fits.Column(name='t4', format='J', array=a7)
a8 = np.array([[1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1]], dtype=np.uint8)
c8 = fits.Column(name='t5', format='11X', array=a8)
# second, create a column-definitions object for all columns in a table
x = fits.ColDefs([c1, c2, c3, c4, c5, c6, c7, c8])
tbhdu = fits.BinTableHDU.from_columns(x)
        # Another way to create a table is by using an existing table's
        # information:
x2 = fits.ColDefs(tt[1])
t2 = fits.BinTableHDU.from_columns(x2, nrows=2)
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t2.data, ra)
# the table HDU's data is a subclass of a record array, so we can
# access one row like this:
assert tbhdu.data[1][0] == a1[1]
assert tbhdu.data[1][1] == r1[1]
assert tbhdu.data[1][2] == a3[1]
assert tbhdu.data[1][3] == a4[1]
assert tbhdu.data[1][4] == a5[1]
assert (tbhdu.data[1][5] == a6[1].view('bool')).all()
assert tbhdu.data[1][6] == a7[1]
assert (tbhdu.data[1][7] == a8[1]).all()
# and a column like this:
assert str(tbhdu.data.field('abc')) == "['abc' 'def' 'xx']"
# An alternative way to create a column-definitions object is from an
# existing table.
_ = fits.ColDefs(tt[1])
# now we write out the newly created table HDU to a FITS file:
fout = fits.HDUList(fits.PrimaryHDU())
fout.append(tbhdu)
fout.writeto(self.temp('tableout1.fits'), overwrite=True)
with fits.open(self.temp('tableout1.fits')) as f2:
temp = f2[1].data.field(7)
assert (temp[0] == [True, True, False, True, False, True,
True, True, False, False, True]).all()
# An alternative way to create an output table FITS file:
fout2 = fits.open(self.temp('tableout2.fits'), 'append')
fout2.append(fd[0])
fout2.append(tbhdu)
fout2.close()
tt.close()
fd.close()
def test_binary_table(self):
# binary table:
t = fits.open(self.data('tb.fits'))
assert t[1].header['tform1'] == '1J'
info = {'name': ['c1', 'c2', 'c3', 'c4'],
'format': ['1J', '3A', '1E', '1L'],
'unit': ['', '', '', ''],
'null': [-2147483647, '', '', ''],
'bscale': ['', '', 3, ''],
'bzero': ['', '', 0.4, ''],
'disp': ['I11', 'A3', 'G15.7', 'L6'],
'start': ['', '', '', ''],
'dim': ['', '', '', ''],
'coord_inc': ['', '', '', ''],
'coord_type': ['', '', '', ''],
'coord_unit': ['', '', '', ''],
'coord_ref_point': ['', '', '', ''],
'coord_ref_value': ['', '', '', ''],
'time_ref_pos': ['', '', '', '']}
assert t[1].columns.info(output=False) == info
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
(2, 'xy ', 6.6999998092651367, 1)], names='c1, c2, c3, c4')
assert comparerecords(t[1].data, ra[:2])
# Change scaled field and scale back to the original array
t[1].data.field('c4')[0] = 1
t[1].data._scale_back()
assert str(np.rec.recarray.field(t[1].data, 'c4')) == '[84 84]'
# look at data column-wise
assert (t[1].data.field(0) == np.array([1, 2])).all()
# When there are scaled columns, the raw data are in data._parent
t.close()
def test_ascii_table(self):
# ASCII table
a = fits.open(self.data('ascii.fits'))
ra1 = np.rec.array([
(10.123000144958496, 37),
(5.1999998092651367, 23),
(15.609999656677246, 17),
(0.0, 0),
(345.0, 345)], names='c1, c2')
assert comparerecords(a[1].data, ra1)
# Test slicing
a2 = a[1].data[2:][2:]
ra2 = np.rec.array([(345.0, 345)], names='c1, c2')
assert comparerecords(a2, ra2)
assert (a2.field(1) == np.array([345])).all()
ra3 = np.rec.array([
(10.123000144958496, 37),
(15.609999656677246, 17),
(345.0, 345)
], names='c1, c2')
assert comparerecords(a[1].data[::2], ra3)
# Test Start Column
a1 = chararray.array(['abcd', 'def'])
r1 = np.array([11., 12.])
c1 = fits.Column(name='abc', format='A3', start=19, array=a1)
c2 = fits.Column(name='def', format='E', start=3, array=r1)
c3 = fits.Column(name='t1', format='I', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c2, c1, c3])
assert (dict(hdu.data.dtype.fields) ==
{'abc': (np.dtype('|S3'), 18),
'def': (np.dtype('|S15'), 2),
't1': (np.dtype('|S10'), 21)})
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
# Test Scaling
r1 = np.array([11., 12.])
c2 = fits.Column(name='def', format='D', array=r1, bscale=2.3,
bzero=0.6)
hdu = fits.TableHDU.from_columns([c2])
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with open(self.temp('toto.fits')) as f:
assert '4.95652173913043548D+00' in f.read()
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
# Test Integer precision according to width
c1 = fits.Column(name='t2', format='I2', array=[91, 92, 93])
c2 = fits.Column(name='t4', format='I5', array=[91, 92, 93])
c3 = fits.Column(name='t8', format='I10', array=[91, 92, 93])
hdu = fits.TableHDU.from_columns([c1, c2, c3])
assert c1.array.dtype == np.int16
assert c2.array.dtype == np.int32
assert c3.array.dtype == np.int64
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
a.close()
def test_endianness(self):
x = np.ndarray((1,), dtype=object)
channelsIn = np.array([3], dtype='uint8')
x[0] = channelsIn
col = fits.Column(name="Channels", format="PB()", array=x)
cols = fits.ColDefs([col])
tbhdu = fits.BinTableHDU.from_columns(cols)
tbhdu.name = "RFI"
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
hduL = fits.open(self.temp('testendian.fits'))
rfiHDU = hduL['RFI']
data = rfiHDU.data
channelsOut = data.field('Channels')[0]
assert (channelsIn == channelsOut).all()
hduL.close()
def test_column_endianness(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/77
(Astropy doesn't preserve byte order of non-native order column arrays)
"""
a = [1., 2., 3., 4.]
a1 = np.array(a, dtype='<f8')
a2 = np.array(a, dtype='>f8')
col1 = fits.Column(name='a', format='D', array=a1)
col2 = fits.Column(name='b', format='D', array=a2)
cols = fits.ColDefs([col1, col2])
tbhdu = fits.BinTableHDU.from_columns(cols)
assert (tbhdu.data['a'] == a1).all()
assert (tbhdu.data['b'] == a2).all()
# Double check that the array is converted to the correct byte-order
# for FITS (big-endian).
tbhdu.writeto(self.temp('testendian.fits'), overwrite=True)
with fits.open(self.temp('testendian.fits')) as hdul:
assert (hdul[1].data['a'] == a2).all()
assert (hdul[1].data['b'] == a2).all()
def test_recarray_to_bintablehdu(self):
bright = np.rec.array(
[(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float32,a10',
names='order,name,mag,Sp')
hdu = fits.BinTableHDU(bright)
assert comparerecords(hdu.data, bright)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
assert comparerecords(bright, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'S20', 'float32', 'S10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_numpy_ndarray_to_bintablehdu_with_unicode(self):
desc = np.dtype({'names': ['order', 'name', 'mag', 'Sp'],
'formats': ['int', 'U20', 'float32', 'U10']})
a = np.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')], dtype=desc)
hdu = fits.BinTableHDU(a)
assert comparerecords(hdu.data, a.view(fits.FITS_rec))
hdu.writeto(self.temp('toto.fits'), overwrite=True)
hdul = fits.open(self.temp('toto.fits'))
assert comparerecords(hdu.data, hdul[1].data)
hdul.close()
def test_new_table_from_recarray(self):
bright = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib'),
(3, 'Rigil Kent', -0.1, 'G2V')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
hdu = fits.TableHDU.from_columns(bright, nrows=2)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
        # Ensure that changing the value of one data element affects all
        # of the other references to the same data.
hdu.data[0][0] = 213
assert hdu.data[0][0] == 213
assert hdu.data._coldefs._arrays[0][0] == 213
assert hdu.data._coldefs.columns[0].array[0] == 213
assert hdu.columns._arrays[0][0] == 213
assert hdu.columns.columns[0].array[0] == 213
hdu.data._coldefs._arrays[0][0] = 100
assert hdu.data[0][0] == 100
assert hdu.data._coldefs._arrays[0][0] == 100
assert hdu.data._coldefs.columns[0].array[0] == 100
assert hdu.columns._arrays[0][0] == 100
assert hdu.columns.columns[0].array[0] == 100
hdu.data._coldefs.columns[0].array[0] = 500
assert hdu.data[0][0] == 500
assert hdu.data._coldefs._arrays[0][0] == 500
assert hdu.data._coldefs.columns[0].array[0] == 500
assert hdu.columns._arrays[0][0] == 500
assert hdu.columns.columns[0].array[0] == 500
hdu.columns._arrays[0][0] = 600
assert hdu.data[0][0] == 600
assert hdu.data._coldefs._arrays[0][0] == 600
assert hdu.data._coldefs.columns[0].array[0] == 600
assert hdu.columns._arrays[0][0] == 600
assert hdu.columns.columns[0].array[0] == 600
hdu.columns.columns[0].array[0] = 800
assert hdu.data[0][0] == 800
assert hdu.data._coldefs._arrays[0][0] == 800
assert hdu.data._coldefs.columns[0].array[0] == 800
assert hdu.columns._arrays[0][0] == 800
assert hdu.columns.columns[0].array[0] == 800
assert (hdu.data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdu.data[0][1] == 'Serius'
assert hdu.data[1][1] == 'Canopys'
assert (hdu.data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdu.data[0][3] == 'A1V'
assert hdu.data[1][3] == 'F0Ib'
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert (hdul[1].data.field(0) ==
np.array([800, 2], dtype=np.int16)).all()
assert hdul[1].data[0][1] == 'Serius'
assert hdul[1].data[1][1] == 'Canopys'
assert (hdul[1].data.field(2) ==
np.array([-1.45, -0.73], dtype=np.float64)).all()
assert hdul[1].data[0][3] == 'A1V'
assert hdul[1].data[1][3] == 'F0Ib'
del hdul
hdu = fits.BinTableHDU.from_columns(bright, nrows=2)
tmp = np.rec.array([(1, 'Serius', -1.45, 'A1V'),
(2, 'Canopys', -0.73, 'F0Ib')],
formats='int16,a20,float64,a10',
names='order,name,mag,Sp')
assert comparerecords(hdu.data, tmp)
hdu.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as hdul:
assert comparerecords(hdu.data, hdul[1].data)
def test_new_fitsrec(self):
"""
Tests creating a new FITS_rec object from a multi-field ndarray.
"""
with fits.open(self.data('tb.fits')) as h:
data = h[1].data
new_data = np.array([(3, 'qwe', 4.5, False)], dtype=data.dtype)
appended = np.append(data, new_data).view(fits.FITS_rec)
assert repr(appended).startswith('FITS_rec(')
# This test used to check the entire string representation of FITS_rec,
# but that has problems between different numpy versions. Instead just
# check that the FITS_rec was created, and we'll let subsequent tests
# worry about checking values and such
def test_appending_a_column(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Append the rows of table 2 after the rows of table 1
# The column definitions are assumed to be the same
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
# Get the number of rows in the table from the first file
nrows1 = t1[1].data.shape[0]
# Get the total number of rows in the resulting appended table
nrows = t1[1].data.shape[0] + t2[1].data.shape[0]
assert (t1[1].columns._arrays[1] is t1[1].columns.columns[1].array)
# Create a new table that consists of the data from the first table
# but has enough space in the ndarray to hold the data from both tables
hdu = fits.BinTableHDU.from_columns(t1[1].columns, nrows=nrows)
# For each column in the tables append the data from table 2 after the
# data from table 1.
for i in range(len(t1[1].columns)):
hdu.data.field(i)[nrows1:] = t2[1].data.field(i)
hdu.writeto(self.temp('newtable.fits'))
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 19, '8R x 5C', '[10A, J, 10A, 5E, L]',
'')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True),
('NGC5', 412, '', z, False),
('NGC6', 434, '', z, True),
('NGC7', 408, '', z, False),
('NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Verify that all of the references to the data point to the same
        # ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
# Same verification from the file
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_adding_a_column(self):
# Tests adding a column to a table.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
coldefs1 = coldefs + c5
tbhdu1 = fits.BinTableHDU.from_columns(coldefs1)
assert tbhdu1.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu1.data, array)
def test_adding_a_column_inplace(self):
        # Tests adding a column to a table in place.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
tbhdu.columns.add_col(c5)
assert tbhdu.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True),
('NGC2', 334, '', z, False),
('NGC3', 308, '', z, True),
('NCG4', 317, '', z, True)],
formats='a10,u4,a10,5f4,l')
assert comparerecords(tbhdu.data, array)
def test_adding_a_column_to_file(self):
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
col = fits.Column(name='a', array=np.array([1, 2]), format='K')
tbhdu.columns.add_col(col)
assert tbhdu.columns.names == ['target', 'V_mag', 'a']
array = np.rec.array(
[('NGC1001', 11.1, 1),
('NGC1002', 12.3, 2),
('NGC1003', 15.2, 0)],
formats='a20,f4,i8')
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_removing_a_column_inplace(self):
        # Tests removing columns from a table in place.
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
assert tbhdu.columns.names == ['target', 'counts', 'notes',
'spectrum', 'flag']
tbhdu.columns.del_col('flag')
assert tbhdu.columns.names == ['target', 'counts', 'notes', 'spectrum']
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z),
('NGC2', 334, '', z),
('NGC3', 308, '', z),
('NCG4', 317, '', z)],
formats='a10,u4,a10,5f4')
assert comparerecords(tbhdu.data, array)
tbhdu.columns.del_col('counts')
tbhdu.columns.del_col('notes')
assert tbhdu.columns.names == ['target', 'spectrum']
array = np.rec.array(
[('NGC1', z),
('NGC2', z),
('NGC3', z),
('NCG4', z)],
formats='a10,5f4')
assert comparerecords(tbhdu.data, array)
def test_removing_a_column_from_file(self):
hdul = fits.open(self.data('table.fits'))
tbhdu = hdul[1]
tbhdu.columns.del_col('V_mag')
assert tbhdu.columns.names == ['target']
array = np.rec.array(
[('NGC1001', ),
('NGC1002', ),
('NGC1003', )],
formats='a20')
assert comparerecords(tbhdu.data, array)
hdul.close()
def test_merge_tables(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
counts = np.array([412, 434, 408, 417])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target1', format='10A', array=names)
c2 = fits.Column(name='counts1', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes1', format='A10')
c4 = fits.Column(name='spectrum1', format='5E')
c5 = fits.Column(name='flag1', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table2.fits'))
# Merge the columns of table 2 after the columns of table 1
# The column names are assumed to be different
# Open the two files we want to append
t1 = fits.open(self.temp('table1.fits'))
t2 = fits.open(self.temp('table2.fits'))
hdu = fits.BinTableHDU.from_columns(t1[1].columns + t2[1].columns)
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
hdu.writeto(self.temp('newtable.fits'))
# Verify that all of the references to the data point to the same
        # ndarray
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 30, '4R x 10C',
'[10A, J, 10A, 5E, L, 10A, J, 10A, 5E, L]', '')]
assert fits.info(self.temp('newtable.fits'), output=False) == info
hdul = fits.open(self.temp('newtable.fits'))
hdu = hdul[1]
assert (hdu.columns.names ==
['target', 'counts', 'notes', 'spectrum', 'flag', 'target1',
'counts1', 'notes1', 'spectrum1', 'flag1'])
z = np.array([0., 0., 0., 0., 0.], dtype=np.float32)
array = np.rec.array(
[('NGC1', 312, '', z, True, 'NGC5', 412, '', z, False),
('NGC2', 334, '', z, False, 'NGC6', 434, '', z, True),
('NGC3', 308, '', z, True, 'NGC7', 408, '', z, False),
('NCG4', 317, '', z, True, 'NCG8', 417, '', z, False)],
formats='a10,u4,a10,5f4,l,a10,u4,a10,5f4,l')
assert comparerecords(hdu.data, array)
# Same verification from the file
hdu.data[0][1] = 300
assert hdu.data._coldefs._arrays[1][0] == 300
assert hdu.data._coldefs.columns[1].array[0] == 300
assert hdu.columns._arrays[1][0] == 300
assert hdu.columns.columns[1].array[0] == 300
assert hdu.data[0][1] == 300
hdu.data._coldefs._arrays[1][0] = 200
assert hdu.data._coldefs._arrays[1][0] == 200
assert hdu.data._coldefs.columns[1].array[0] == 200
assert hdu.columns._arrays[1][0] == 200
assert hdu.columns.columns[1].array[0] == 200
assert hdu.data[0][1] == 200
hdu.data._coldefs.columns[1].array[0] = 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert hdu.data[0][1] == 100
hdu.columns._arrays[1][0] = 90
assert hdu.data._coldefs._arrays[1][0] == 90
assert hdu.data._coldefs.columns[1].array[0] == 90
assert hdu.columns._arrays[1][0] == 90
assert hdu.columns.columns[1].array[0] == 90
assert hdu.data[0][1] == 90
hdu.columns.columns[1].array[0] = 80
assert hdu.data._coldefs._arrays[1][0] == 80
assert hdu.data._coldefs.columns[1].array[0] == 80
assert hdu.columns._arrays[1][0] == 80
assert hdu.columns.columns[1].array[0] == 80
assert hdu.data[0][1] == 80
t1.close()
t2.close()
hdul.close()
def test_modify_column_attributes(self):
"""Regression test for https://github.com/astropy/astropy/issues/996
This just tests one particular use case, but it should apply pretty
well to other similar cases.
"""
NULLS = {'a': 2, 'b': 'b', 'c': 2.3}
data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[('a', int), ('b', 'S1'), ('c', float)])
b = fits.BinTableHDU(data=data)
for col in b.columns:
col.null = NULLS[col.name]
b.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
header = hdul[1].header
assert header['TNULL1'] == 2
assert header['TNULL2'] == 'b'
assert header['TNULL3'] == 2.3
def test_multidimension_table_from_numpy_rec_columns(self):
"""Regression test for https://github.com/astropy/astropy/issues/5280
and https://github.com/astropy/astropy/issues/5287
        Multidimensional tables can now be written with the correct TDIM.
Author: Stephen Bailey.
"""
dtype = [
            ('x', (str, 5)),  # 1D column of 5-character strings
            ('y', (str, 3), (4,)),  # 2D column; each row is four 3-char strings
]
data = np.zeros(2, dtype=dtype)
data['x'] = ['abcde', 'xyz']
data['y'][0] = ['A', 'BC', 'DEF', '123']
data['y'][1] = ['X', 'YZ', 'PQR', '999']
table = Table(data)
# Test convenience functions io.fits.writeto / getdata
fits.writeto(self.temp('test.fits'), data)
dx = fits.getdata(self.temp('test.fits'))
assert data['x'].dtype == dx['x'].dtype
assert data['y'].dtype == dx['y'].dtype
assert np.all(data['x'] == dx['x']), 'x: {} != {}'.format(data['x'], dx['x'])
assert np.all(data['y'] == dx['y']), 'y: {} != {}'.format(data['y'], dx['y'])
# Test fits.BinTableHDU(data) and avoid convenience functions
hdu0 = fits.PrimaryHDU()
hdu1 = fits.BinTableHDU(data)
hx = fits.HDUList([hdu0, hdu1])
hx.writeto(self.temp('test2.fits'))
fx = fits.open(self.temp('test2.fits'))
dx = fx[1].data
fx.close()
assert data['x'].dtype == dx['x'].dtype
assert data['y'].dtype == dx['y'].dtype
assert np.all(data['x'] == dx['x']), 'x: {} != {}'.format(data['x'], dx['x'])
assert np.all(data['y'] == dx['y']), 'y: {} != {}'.format(data['y'], dx['y'])
# Test Table write and read
table.write(self.temp('test3.fits'))
tx = Table.read(self.temp('test3.fits'), character_as_bytes=False)
assert table['x'].dtype == tx['x'].dtype
assert table['y'].dtype == tx['y'].dtype
assert np.all(table['x'] == tx['x']), 'x: {} != {}'.format(table['x'], tx['x'])
assert np.all(table['y'] == tx['y']), 'y: {} != {}'.format(table['y'], tx['y'])
def test_mask_array(self):
t = fits.open(self.data('table.fits'))
tbdata = t[1].data
mask = tbdata.field('V_mag') > 12
newtbdata = tbdata[mask]
hdu = fits.BinTableHDU(newtbdata)
hdu.writeto(self.temp('newtable.fits'))
hdul = fits.open(self.temp('newtable.fits'))
        # Match against a regex rather than a specific string.
expect = r"\[\('NGC1002',\s+12.3[0-9]*\) \(\'NGC1003\',\s+15.[0-9]+\)\]"
assert re.match(expect, str(hdu.data))
assert re.match(expect, str(hdul[1].data))
t.close()
hdul.close()
def test_slice_a_row(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
row = t1[1].data[2]
assert row['counts'] == 308
a, b, c = row[1:4]
assert a == counts[2]
assert b == ''
assert (c == np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
row['counts'] = 310
assert row['counts'] == 310
row[1] = 315
assert row['counts'] == 315
assert row[1:4]['counts'] == 315
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
assert row['counts'] == 300
row[1:4][0] = 400
assert row[1:4]['counts'] == 400
row[1:4]['counts'] = 300
assert row[1:4]['counts'] == 300
# Test stepping for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/59
row[1:4][::-1][-1] = 500
assert row[1:4]['counts'] == 500
row[1:4:2][0] = 300
assert row[1:4]['counts'] == 300
pytest.raises(KeyError, lambda r: r[1:4]['flag'], row)
assert row[1:4].field(0) == 300
assert row[1:4].field('counts') == 300
pytest.raises(KeyError, row[1:4].field, 'flag')
row[1:4].setfield('counts', 500)
assert row[1:4].field(0) == 500
pytest.raises(KeyError, row[1:4].setfield, 'flag', False)
assert t1[1].data._coldefs._arrays[1][2] == 500
assert t1[1].data._coldefs.columns[1].array[2] == 500
assert t1[1].columns._arrays[1][2] == 500
assert t1[1].columns.columns[1].array[2] == 500
assert t1[1].data[2][1] == 500
t1.close()
def test_fits_record_len(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
assert len(t1[1].data[0]) == 5
assert len(t1[1].data[0][0:4]) == 4
assert len(t1[1].data[0][0:5]) == 5
assert len(t1[1].data[0][0:6]) == 5
assert len(t1[1].data[0][0:7]) == 5
assert len(t1[1].data[0][1:4]) == 3
assert len(t1[1].data[0][1:5]) == 4
assert len(t1[1].data[0][1:6]) == 4
assert len(t1[1].data[0][1:7]) == 4
t1.close()
def test_add_data_by_rows(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
c1 = fits.Column(name='target', format='10A')
c2 = fits.Column(name='counts', format='J', unit='DN')
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L')
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs, nrows=5)
        # Test assigning data to a table's row using a FITS_record
tbhdu.data[0] = tbhdu1.data[0]
tbhdu.data[4] = tbhdu1.data[3]
        # Test assigning data to a table's row using a tuple
tbhdu.data[2] = ('NGC1', 312, 'A Note',
np.array([1.1, 2.2, 3.3, 4.4, 5.5], dtype=np.float32),
True)
        # Test assigning data to a table's row using a list
tbhdu.data[3] = ['JIM1', '33', 'A Note',
np.array([1., 2., 3., 4., 5.], dtype=np.float32),
True]
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
assert tbhdu.data[0][1] == 312
assert tbhdu.data._coldefs._arrays[1][0] == 312
assert tbhdu.data._coldefs.columns[1].array[0] == 312
assert tbhdu.columns._arrays[1][0] == 312
assert tbhdu.columns.columns[1].array[0] == 312
assert tbhdu.columns.columns[0].array[0] == 'NGC1'
assert tbhdu.columns.columns[2].array[0] == ''
assert (tbhdu.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[0] == True # noqa
assert tbhdu.data[3][1] == 33
assert tbhdu.data._coldefs._arrays[1][3] == 33
assert tbhdu.data._coldefs.columns[1].array[3] == 33
assert tbhdu.columns._arrays[1][3] == 33
assert tbhdu.columns.columns[1].array[3] == 33
assert tbhdu.columns.columns[0].array[3] == 'JIM1'
assert tbhdu.columns.columns[2].array[3] == 'A Note'
assert (tbhdu.columns.columns[3].array[3] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu.columns.columns[4].array[3] == True # noqa
def test_assign_multiple_rows_to_table(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
counts = np.array([112, 134, 108, 117])
names = np.array(['NGC5', 'NGC6', 'NGC7', 'NCG8'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[0, 1, 0, 0])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][3] = np.array([1., 2., 3., 4., 5.], dtype=np.float32)
tbhdu2 = fits.BinTableHDU.from_columns(tbhdu1.data, nrows=9)
        # Assign the 4 rows from the second table to rows 5 through 8 of the
# new table. Note that the last row of the new table will still be
# initialized to the default values.
tbhdu2.data[4:] = tbhdu.data
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.data._coldefs._arrays[0]))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns.columns[0].array))
assert (id(tbhdu2.data._coldefs.columns[0].array) ==
id(tbhdu2.columns._arrays[0]))
assert tbhdu2.data[0][1] == 312
assert tbhdu2.data._coldefs._arrays[1][0] == 312
assert tbhdu2.data._coldefs.columns[1].array[0] == 312
assert tbhdu2.columns._arrays[1][0] == 312
assert tbhdu2.columns.columns[1].array[0] == 312
assert tbhdu2.columns.columns[0].array[0] == 'NGC1'
assert tbhdu2.columns.columns[2].array[0] == ''
assert (tbhdu2.columns.columns[3].array[0] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[0] == True # noqa
assert tbhdu2.data[4][1] == 112
assert tbhdu2.data._coldefs._arrays[1][4] == 112
assert tbhdu2.data._coldefs.columns[1].array[4] == 112
assert tbhdu2.columns._arrays[1][4] == 112
assert tbhdu2.columns.columns[1].array[4] == 112
assert tbhdu2.columns.columns[0].array[4] == 'NGC5'
assert tbhdu2.columns.columns[2].array[4] == ''
assert (tbhdu2.columns.columns[3].array[4] ==
np.array([1., 2., 3., 4., 5.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[4] == False # noqa
assert tbhdu2.columns.columns[1].array[8] == 0
assert tbhdu2.columns.columns[0].array[8] == ''
assert tbhdu2.columns.columns[2].array[8] == ''
assert (tbhdu2.columns.columns[3].array[8] ==
np.array([0., 0., 0., 0., 0.], dtype=np.float32)).all()
assert tbhdu2.columns.columns[4].array[8] == False # noqa
def test_verify_data_references(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
# Verify that original ColDefs object has independent Column
# objects.
assert id(coldefs.columns[0]) != id(c1)
# Verify that original ColDefs object has independent ndarray
# objects.
assert id(coldefs.columns[0].array) != id(names)
# Verify that original ColDefs object references the same data
# object as the original Column object.
assert id(coldefs.columns[0].array) == id(c1.array)
assert id(coldefs.columns[0].array) == id(coldefs._arrays[0])
# Verify new HDU has an independent ColDefs object.
assert id(coldefs) != id(tbhdu.columns)
# Verify new HDU has independent Column objects.
assert id(coldefs.columns[0]) != id(tbhdu.columns.columns[0])
# Verify new HDU has independent ndarray objects.
assert (id(coldefs.columns[0].array) !=
id(tbhdu.columns.columns[0].array))
# Verify that both ColDefs objects in the HDU reference the same
# Coldefs object.
assert id(tbhdu.columns) == id(tbhdu.data._coldefs)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.data._coldefs._arrays[0]))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns.columns[0].array))
assert (id(tbhdu.data._coldefs.columns[0].array) ==
id(tbhdu.columns._arrays[0]))
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_ndarray(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu1 = fits.BinTableHDU.from_columns(tbhdu.data.view(np.ndarray))
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.data._coldefs._arrays[0]))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns.columns[0].array))
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(tbhdu1.columns._arrays[0]))
        # Ensure that changing the value of one data element affects all
        # of the other references to the same data.
tbhdu1.data[0][1] = 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
tbhdu1.data._coldefs.columns[1].array[0] = 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
tbhdu1.columns._arrays[1][0] = 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
tbhdu1.columns.columns[1].array[0] = 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
tbhdu1.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 213
assert t1[1].data[0][1] == 213
assert t1[1].data._coldefs._arrays[1][0] == 213
assert t1[1].data._coldefs.columns[1].array[0] == 213
assert t1[1].columns._arrays[1][0] == 213
assert t1[1].columns.columns[1].array[0] == 213
t1[1].data._coldefs._arrays[1][0] = 100
assert t1[1].data[0][1] == 100
assert t1[1].data._coldefs._arrays[1][0] == 100
assert t1[1].data._coldefs.columns[1].array[0] == 100
assert t1[1].columns._arrays[1][0] == 100
assert t1[1].columns.columns[1].array[0] == 100
t1[1].data._coldefs.columns[1].array[0] = 500
assert t1[1].data[0][1] == 500
assert t1[1].data._coldefs._arrays[1][0] == 500
assert t1[1].data._coldefs.columns[1].array[0] == 500
assert t1[1].columns._arrays[1][0] == 500
assert t1[1].columns.columns[1].array[0] == 500
t1[1].columns._arrays[1][0] = 600
assert t1[1].data[0][1] == 600
assert t1[1].data._coldefs._arrays[1][0] == 600
assert t1[1].data._coldefs.columns[1].array[0] == 600
assert t1[1].columns._arrays[1][0] == 600
assert t1[1].columns.columns[1].array[0] == 600
t1[1].columns.columns[1].array[0] = 800
assert t1[1].data[0][1] == 800
assert t1[1].data._coldefs._arrays[1][0] == 800
assert t1[1].data._coldefs.columns[1].array[0] == 800
assert t1[1].columns._arrays[1][0] == 800
assert t1[1].columns.columns[1].array[0] == 800
t1.close()
def test_new_table_with_fits_rec(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu = fits.BinTableHDU.from_columns(coldefs)
tbhdu.data[0][1] = 213
assert tbhdu.data[0][1] == 213
assert tbhdu.data._coldefs._arrays[1][0] == 213
assert tbhdu.data._coldefs.columns[1].array[0] == 213
assert tbhdu.columns._arrays[1][0] == 213
assert tbhdu.columns.columns[1].array[0] == 213
tbhdu.data._coldefs._arrays[1][0] = 100
assert tbhdu.data[0][1] == 100
assert tbhdu.data._coldefs._arrays[1][0] == 100
assert tbhdu.data._coldefs.columns[1].array[0] == 100
assert tbhdu.columns._arrays[1][0] == 100
assert tbhdu.columns.columns[1].array[0] == 100
tbhdu.data._coldefs.columns[1].array[0] = 500
assert tbhdu.data[0][1] == 500
assert tbhdu.data._coldefs._arrays[1][0] == 500
assert tbhdu.data._coldefs.columns[1].array[0] == 500
assert tbhdu.columns._arrays[1][0] == 500
assert tbhdu.columns.columns[1].array[0] == 500
tbhdu.columns._arrays[1][0] = 600
assert tbhdu.data[0][1] == 600
assert tbhdu.data._coldefs._arrays[1][0] == 600
assert tbhdu.data._coldefs.columns[1].array[0] == 600
assert tbhdu.columns._arrays[1][0] == 600
assert tbhdu.columns.columns[1].array[0] == 600
tbhdu.columns.columns[1].array[0] = 800
assert tbhdu.data[0][1] == 800
assert tbhdu.data._coldefs._arrays[1][0] == 800
assert tbhdu.data._coldefs.columns[1].array[0] == 800
assert tbhdu.columns._arrays[1][0] == 800
assert tbhdu.columns.columns[1].array[0] == 800
tbhdu.columns.columns[1].array[0] = 312
tbhdu.writeto(self.temp('table1.fits'))
t1 = fits.open(self.temp('table1.fits'))
t1[1].data[0][1] = 1
fr = t1[1].data
assert t1[1].data[0][1] == 1
assert t1[1].data._coldefs._arrays[1][0] == 1
assert t1[1].data._coldefs.columns[1].array[0] == 1
assert t1[1].columns._arrays[1][0] == 1
assert t1[1].columns.columns[1].array[0] == 1
assert fr[0][1] == 1
assert fr._coldefs._arrays[1][0] == 1
assert fr._coldefs.columns[1].array[0] == 1
fr._coldefs.columns[1].array[0] = 312
tbhdu1 = fits.BinTableHDU.from_columns(fr)
i = 0
for row in tbhdu1.data:
for j in range(len(row)):
if isinstance(row[j], np.ndarray):
assert (row[j] == tbhdu.data[i][j]).all()
else:
assert row[j] == tbhdu.data[i][j]
i = i + 1
tbhdu1.data[0][1] = 213
assert t1[1].data[0][1] == 312
assert t1[1].data._coldefs._arrays[1][0] == 312
assert t1[1].data._coldefs.columns[1].array[0] == 312
assert t1[1].columns._arrays[1][0] == 312
assert t1[1].columns.columns[1].array[0] == 312
assert fr[0][1] == 312
assert fr._coldefs._arrays[1][0] == 312
assert fr._coldefs.columns[1].array[0] == 312
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
t1[1].data[0][1] = 10
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
tbhdu1.data._coldefs._arrays[1][0] = 666
assert t1[1].data[0][1] == 10
assert t1[1].data._coldefs._arrays[1][0] == 10
assert t1[1].data._coldefs.columns[1].array[0] == 10
assert t1[1].columns._arrays[1][0] == 10
assert t1[1].columns.columns[1].array[0] == 10
assert fr[0][1] == 10
assert fr._coldefs._arrays[1][0] == 10
assert fr._coldefs.columns[1].array[0] == 10
assert tbhdu1.data[0][1] == 666
assert tbhdu1.data._coldefs._arrays[1][0] == 666
assert tbhdu1.data._coldefs.columns[1].array[0] == 666
assert tbhdu1.columns._arrays[1][0] == 666
assert tbhdu1.columns.columns[1].array[0] == 666
t1.close()
def test_bin_table_hdu_constructor(self):
counts = np.array([312, 334, 308, 317])
names = np.array(['NGC1', 'NGC2', 'NGC3', 'NCG4'])
c1 = fits.Column(name='target', format='10A', array=names)
c2 = fits.Column(name='counts', format='J', unit='DN', array=counts)
c3 = fits.Column(name='notes', format='A10')
c4 = fits.Column(name='spectrum', format='5E')
c5 = fits.Column(name='flag', format='L', array=[1, 0, 1, 1])
coldefs = fits.ColDefs([c1, c2, c3, c4, c5])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
hdu = fits.BinTableHDU(tbhdu1.data)
# Verify that all ndarray objects within the HDU reference the
# same ndarray.
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns.columns[0].array))
assert (id(hdu.data._coldefs.columns[0].array) ==
id(hdu.columns._arrays[0]))
# Verify that the references in the original HDU are the same as the
# references in the new HDU.
assert (id(tbhdu1.data._coldefs.columns[0].array) ==
id(hdu.data._coldefs._arrays[0]))
# Verify that a change in the new HDU is reflected in both the new
# and original HDU.
hdu.data[0][1] = 213
assert hdu.data[0][1] == 213
assert hdu.data._coldefs._arrays[1][0] == 213
assert hdu.data._coldefs.columns[1].array[0] == 213
assert hdu.columns._arrays[1][0] == 213
assert hdu.columns.columns[1].array[0] == 213
assert tbhdu1.data[0][1] == 213
assert tbhdu1.data._coldefs._arrays[1][0] == 213
assert tbhdu1.data._coldefs.columns[1].array[0] == 213
assert tbhdu1.columns._arrays[1][0] == 213
assert tbhdu1.columns.columns[1].array[0] == 213
hdu.data._coldefs._arrays[1][0] = 100
assert hdu.data[0][1] == 100
assert hdu.data._coldefs._arrays[1][0] == 100
assert hdu.data._coldefs.columns[1].array[0] == 100
assert hdu.columns._arrays[1][0] == 100
assert hdu.columns.columns[1].array[0] == 100
assert tbhdu1.data[0][1] == 100
assert tbhdu1.data._coldefs._arrays[1][0] == 100
assert tbhdu1.data._coldefs.columns[1].array[0] == 100
assert tbhdu1.columns._arrays[1][0] == 100
assert tbhdu1.columns.columns[1].array[0] == 100
hdu.data._coldefs.columns[1].array[0] = 500
assert hdu.data[0][1] == 500
assert hdu.data._coldefs._arrays[1][0] == 500
assert hdu.data._coldefs.columns[1].array[0] == 500
assert hdu.columns._arrays[1][0] == 500
assert hdu.columns.columns[1].array[0] == 500
assert tbhdu1.data[0][1] == 500
assert tbhdu1.data._coldefs._arrays[1][0] == 500
assert tbhdu1.data._coldefs.columns[1].array[0] == 500
assert tbhdu1.columns._arrays[1][0] == 500
assert tbhdu1.columns.columns[1].array[0] == 500
hdu.columns._arrays[1][0] = 600
assert hdu.data[0][1] == 600
assert hdu.data._coldefs._arrays[1][0] == 600
assert hdu.data._coldefs.columns[1].array[0] == 600
assert hdu.columns._arrays[1][0] == 600
assert hdu.columns.columns[1].array[0] == 600
assert tbhdu1.data[0][1] == 600
assert tbhdu1.data._coldefs._arrays[1][0] == 600
assert tbhdu1.data._coldefs.columns[1].array[0] == 600
assert tbhdu1.columns._arrays[1][0] == 600
assert tbhdu1.columns.columns[1].array[0] == 600
hdu.columns.columns[1].array[0] = 800
assert hdu.data[0][1] == 800
assert hdu.data._coldefs._arrays[1][0] == 800
assert hdu.data._coldefs.columns[1].array[0] == 800
assert hdu.columns._arrays[1][0] == 800
assert hdu.columns.columns[1].array[0] == 800
assert tbhdu1.data[0][1] == 800
assert tbhdu1.data._coldefs._arrays[1][0] == 800
assert tbhdu1.data._coldefs.columns[1].array[0] == 800
assert tbhdu1.columns._arrays[1][0] == 800
assert tbhdu1.columns.columns[1].array[0] == 800
def test_constructor_name_arg(self):
"""testConstructorNameArg
Passing name='...' to the BinTableHDU and TableHDU constructors
should set the .name attribute and 'EXTNAME' header keyword, and
override any name in an existing 'EXTNAME' value.
"""
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = hducls(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = hducls(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
for hducls in [fits.BinTableHDU, fits.TableHDU]:
# First test some default assumptions
hdu = hducls()
assert hdu.ver == 1
assert 'EXTVER' not in hdu.header
hdu.ver = 2
assert hdu.ver == 2
assert hdu.header['EXTVER'] == 2
            # Passing ver to constructor
hdu = hducls(ver=3)
assert hdu.ver == 3
assert hdu.header['EXTVER'] == 3
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 4
hdu = hducls(header=hdr, ver=5)
assert hdu.ver == 5
assert hdu.header['EXTVER'] == 5
def test_unicode_colname(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5204
"Handle unicode FITS BinTable column names on Python 2"
"""
col = fits.Column(name='spam', format='E', array=[42.])
# This used to raise a TypeError, now it works
fits.BinTableHDU.from_columns([col])
def test_bin_table_with_logical_array(self):
c1 = fits.Column(name='flag', format='2L',
array=[[True, False], [False, True]])
coldefs = fits.ColDefs([c1])
tbhdu1 = fits.BinTableHDU.from_columns(coldefs)
assert (tbhdu1.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu1.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
tbhdu = fits.BinTableHDU.from_columns(tbhdu1.data)
assert (tbhdu.data.field('flag')[0] ==
np.array([True, False], dtype=bool)).all()
assert (tbhdu.data.field('flag')[1] ==
np.array([False, True], dtype=bool)).all()
def test_fits_rec_column_access(self):
tbdata = fits.getdata(self.data('table.fits'))
assert (tbdata.V_mag == tbdata.field('V_mag')).all()
assert (tbdata.V_mag == tbdata['V_mag']).all()
# Table with scaling (c3) and tnull (c1)
tbdata = fits.getdata(self.data('tb.fits'))
for col in ('c1', 'c2', 'c3', 'c4'):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# ascii table
tbdata = fits.getdata(self.data('ascii.fits'))
for col in ('a', 'b'):
data = getattr(tbdata, col)
assert (data == tbdata.field(col)).all()
assert (data == tbdata[col]).all()
# with VLA column
col1 = fits.Column(name='x', format='PI()',
array=np.array([[45, 56], [11, 12, 13]],
dtype=np.object_))
hdu = fits.BinTableHDU.from_columns([col1])
assert type(hdu.data['x']) == type(hdu.data.x) # noqa
assert (hdu.data['x'][0] == hdu.data.x[0]).all()
assert (hdu.data['x'][1] == hdu.data.x[1]).all()
def test_table_with_zero_width_column(self):
hdul = fits.open(self.data('zerowidth.fits'))
tbhdu = hdul[2] # This HDU contains a zero-width column 'ORBPARM'
assert 'ORBPARM' in tbhdu.columns.names
# The ORBPARM column should not be in the data, though the data should
# be readable
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
# Verify that some of the data columns are still correctly accessible
# by name
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.writeto(self.temp('newtable.fits'))
hdul.close()
hdul = fits.open(self.temp('newtable.fits'))
tbhdu = hdul[2]
# Verify that the previous tests still hold after writing
assert 'ORBPARM' in tbhdu.columns.names
assert 'ORBPARM' in tbhdu.data.names
assert 'ORBPARM' in tbhdu.data.dtype.names
assert tbhdu.data[0]['ANNAME'] == 'VLA:_W16'
assert comparefloats(
tbhdu.data[0]['STABXYZ'],
np.array([499.85566663, -1317.99231554, -735.18866164],
dtype=np.float64))
assert tbhdu.data[0]['NOSTA'] == 1
assert tbhdu.data[0]['MNTSTA'] == 0
assert tbhdu.data[-1]['ANNAME'] == 'VPT:_OUT'
assert comparefloats(
tbhdu.data[-1]['STABXYZ'],
np.array([0.0, 0.0, 0.0], dtype=np.float64))
assert tbhdu.data[-1]['NOSTA'] == 29
assert tbhdu.data[-1]['MNTSTA'] == 0
hdul.close()
def test_string_column_padding(self):
a = ['img1', 'img2', 'img3a', 'p']
s = 'img1\x00\x00\x00\x00\x00\x00' \
'img2\x00\x00\x00\x00\x00\x00' \
'img3a\x00\x00\x00\x00\x00' \
'p\x00\x00\x00\x00\x00\x00\x00\x00\x00'
acol = fits.Column(name='MEMNAME', format='A10',
array=chararray.array(a))
ahdu = fits.BinTableHDU.from_columns([acol])
assert ahdu.data.tobytes().decode('raw-unicode-escape') == s
ahdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
del hdul
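        # Unlike binary tables, ASCII tables pad string fields with spaces
        # rather than NUL bytes, so the round-tripped bytes should match ``s``
        # with every '\x00' replaced by ' ' (checked below).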
ahdu = fits.TableHDU.from_columns([acol])
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert (hdul[1].data.tobytes().decode('raw-unicode-escape') ==
s.replace('\x00', ' '))
assert (hdul[1].data['MEMNAME'] == a).all()
ahdu = fits.BinTableHDU.from_columns(hdul[1].data.copy())
del hdul
# Now serialize once more as a binary table; padding bytes should
# revert to zeroes
ahdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].data.tobytes().decode('raw-unicode-escape') == s
assert (hdul[1].data['MEMNAME'] == a).all()
def test_multi_dimensional_columns(self):
"""
Tests the multidimensional column implementation with both numeric
arrays and string arrays.
"""
data = np.rec.array(
[([0, 1, 2, 3, 4, 5], 'row1' * 2),
([6, 7, 8, 9, 0, 1], 'row2' * 2),
([2, 3, 4, 5, 6, 7], 'row3' * 2)], formats='6i4,a8')
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp('newtable.fits'))
with fits.open(self.temp('newtable.fits'), mode='update') as hdul:
# Modify the TDIM fields to my own specification
hdul[1].header['TDIM1'] = '(2,3)'
hdul[1].header['TDIM2'] = '(4,2)'
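        # TDIM axes are given in FITS (Fortran) order, the reverse of numpy's:
        # '(2,3)' reshapes each 6-element row to (3, 2), and '(4,2)' turns each
        # 8-character string into two 4-character strings, as checked below.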
with fits.open(self.temp('newtable.fits')) as hdul:
thdu = hdul[1]
c1 = thdu.data.field(0)
c2 = thdu.data.field(1)
assert c1.shape == (3, 3, 2)
assert c2.shape == (3, 2)
assert (c1 == np.array([[[0, 1], [2, 3], [4, 5]],
[[6, 7], [8, 9], [0, 1]],
[[2, 3], [4, 5], [6, 7]]])).all()
assert (c2 == np.array([['row1', 'row1'],
['row2', 'row2'],
['row3', 'row3']])).all()
del c1
del c2
del thdu
del hdul
# Test setting the TDIMn header based on the column data
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', 4)])
data['x'] = 1, 2, 3
data['s'] = 'ok'
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4)
# Like the previous test, but with an extra dimension (a bit more
# complicated)
data = np.zeros(3, dtype=[('x', 'f4'), ('s', 'S5', (4, 3))])
data['x'] = 1, 2, 3
data['s'] = 'ok'
del t
fits.writeto(self.temp('newtable.fits'), data, overwrite=True)
t = fits.getdata(self.temp('newtable.fits'))
assert t.field(1).dtype.str[-1] == '5'
assert t.field(1).shape == (3, 4, 3)
def test_oned_array_single_element(self):
# a table with rows that are 1d arrays of a single value
data = np.array([(1, ), (2, )], dtype=([('x', 'i4', (1, ))]))
thdu = fits.BinTableHDU.from_columns(data)
thdu.writeto(self.temp('onedtable.fits'))
with fits.open(self.temp('onedtable.fits')) as hdul:
thdu = hdul[1]
c = thdu.data.field(0)
assert c.shape == (2, 1)
assert thdu.header['TDIM1'] == '(1)'
def test_bin_table_init_from_string_array_column(self):
"""
Tests two ways of creating a new `BinTableHDU` from a column of
string arrays.
This tests for a couple different regressions, and ensures that
both BinTableHDU(data=arr) and BinTableHDU.from_columns(arr) work
equivalently.
Some of this is redundant with the following test, but checks some
subtly different cases.
"""
data = [[b'abcd', b'efgh'],
[b'ijkl', b'mnop'],
[b'qrst', b'uvwx']]
arr = np.array([(data,), (data,), (data,), (data,), (data,)],
dtype=[('S', '(3, 2)S4')])
tbhdu1 = fits.BinTableHDU(data=arr)
def test_dims_and_roundtrip(tbhdu):
assert tbhdu.data['S'].shape == (5, 3, 2)
assert tbhdu.data['S'].dtype.str.endswith('U4')
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(4,2,3)'
assert tbhdu2.data['S'].shape == (5, 3, 2)
                assert tbhdu2.data['S'].dtype.str.endswith('U4')
assert np.all(tbhdu2.data['S'] == tbhdu.data['S'])
test_dims_and_roundtrip(tbhdu1)
tbhdu2 = fits.BinTableHDU.from_columns(arr)
test_dims_and_roundtrip(tbhdu2)
def test_columns_with_truncating_tdim(self):
"""
According to the FITS standard (section 7.3.2):
If the number of elements in the array implied by the TDIMn is less
        than the allocated size of the array in the FITS file, then the
unused trailing elements should be interpreted as containing
undefined fill values.
*deep sigh* What this means is if a column has a repeat count larger
than the number of elements indicated by its TDIM (ex: TDIM1 = '(2,2)',
but TFORM1 = 6I), then instead of this being an outright error we are
to take the first 4 elements as implied by the TDIM and ignore the
additional two trailing elements.
"""
# It's hard to even successfully create a table like this. I think
# it *should* be difficult, but once created it should at least be
# possible to read.
arr1 = [[b'ab', b'cd'], [b'ef', b'gh'], [b'ij', b'kl']]
arr2 = [1, 2, 3, 4, 5]
arr = np.array([(arr1, arr2), (arr1, arr2)],
dtype=[('a', '(3, 2)S2'), ('b', '5i8')])
tbhdu = fits.BinTableHDU(data=arr)
tbhdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
raw_bytes = f.read()
# Artificially truncate TDIM in the header; this seems to be the
# easiest way to do this while getting around Astropy's insistence on the
# data and header matching perfectly; again, we have no interest in
# making it possible to write files in this format, only read them
with open(self.temp('test.fits'), 'wb') as f:
f.write(raw_bytes.replace(b'(2,2,3)', b'(2,2,2)'))
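        # The truncated TDIM implies only 2*2*2 = 8 characters per row for
        # column 'a', fewer than the 12 allocated by TFORM1 = '12A'; the
        # trailing 4 bytes are undefined fill and should simply be ignored.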
with fits.open(self.temp('test.fits')) as hdul:
tbhdu2 = hdul[1]
assert tbhdu2.header['TDIM1'] == '(2,2,2)'
assert tbhdu2.header['TFORM1'] == '12A'
for row in tbhdu2.data:
assert np.all(row['a'] == [['ab', 'cd'], ['ef', 'gh']])
assert np.all(row['b'] == [1, 2, 3, 4, 5])
def test_string_array_round_trip(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201"""
data = [['abc', 'def', 'ghi'],
['jkl', 'mno', 'pqr'],
['stu', 'vwx', 'yz ']]
recarr = np.rec.array([(data,), (data,)], formats=['(3,3)S3'])
t = fits.BinTableHDU(data=recarr)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
with fits.open(self.temp('test.fits')) as h:
# Access the data; I think this is necessary to exhibit the bug
# reported in https://aeon.stsci.edu/ssb/trac/pyfits/ticket/201
h[1].data[:]
h.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as h:
assert 'TDIM1' in h[1].header
assert h[1].header['TDIM1'] == '(3,3,3)'
assert len(h[1].data) == 2
assert len(h[1].data[0]) == 1
assert (h[1].data.field(0)[0] ==
np.char.decode(recarr.field(0)[0], 'ascii')).all()
def test_new_table_with_nd_column(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/3
"""
arra = np.array(['a', 'b'], dtype='|S1')
arrb = np.array([['a', 'bc'], ['cd', 'e']], dtype='|S2')
arrc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
cols = [
fits.Column(name='str', format='1A', array=arra),
fits.Column(name='strarray', format='4A', dim='(2,2)',
array=arrb),
fits.Column(name='intarray', format='4I', dim='(2, 2)',
array=arrc)
]
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
# Need to force string arrays to byte arrays in order to compare
# correctly on Python 3
assert (h[1].data['str'].encode('ascii') == arra).all()
assert (h[1].data['strarray'].encode('ascii') == arrb).all()
assert (h[1].data['intarray'] == arrc).all()
def test_mismatched_tform_and_tdim(self):
"""Normally the product of the dimensions listed in a TDIMn keyword
must be less than or equal to the repeat count in the TFORMn keyword.
This tests that this works if less than (treating the trailing bytes
as unspecified fill values per the FITS standard) and fails if the
dimensions specified by TDIMn are greater than the repeat count.
"""
arra = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
arrb = np.array([[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
cols = [fits.Column(name='a', format='20I', dim='(2,2)',
array=arra),
fits.Column(name='b', format='4I', dim='(2,2)',
array=arrb)]
# The first column has the mismatched repeat count
hdu = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM1'] == '20I'
assert h[1].header['TFORM2'] == '4I'
assert h[1].header['TDIM1'] == h[1].header['TDIM2'] == '(2,2)'
assert (h[1].data['a'] == arra).all()
assert (h[1].data['b'] == arrb).all()
assert h[1].data.itemsize == 48 # 16-bits times 24
# If dims is more than the repeat count in the format specifier raise
# an error
pytest.raises(VerifyError, fits.Column, name='a', format='2I',
dim='(2,2)', array=arra)
def test_tdim_of_size_one(self):
"""Regression test for https://github.com/astropy/astropy/pull/3580"""
with fits.open(self.data('tdim.fits')) as hdulist:
assert hdulist[1].data['V_mag'].shape == (3, 1, 1)
def test_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/52"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
targets = data.field('target')
s = data[:]
assert (s.field('target') == targets).all()
for n in range(len(targets) + 2):
s = data[:n]
assert (s.field('target') == targets[:n]).all()
s = data[n:]
assert (s.field('target') == targets[n:]).all()
s = data[::2]
assert (s.field('target') == targets[::2]).all()
s = data[::-1]
assert (s.field('target') == targets[::-1]).all()
def test_array_slicing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/55"""
with fits.open(self.data('table.fits')) as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
def test_array_broadcasting(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/pull/48
"""
with fits.open(self.data('table.fits')) as hdu:
data = hdu[1].data
data['V_mag'] = 0
assert np.all(data['V_mag'] == 0)
data['V_mag'] = 1
assert np.all(data['V_mag'] == 1)
for container in (list, tuple, np.array):
data['V_mag'] = container([1, 2, 3])
assert np.array_equal(data['V_mag'], np.array([1, 2, 3]))
def test_array_slicing_readonly(self):
"""
Like test_array_slicing but with the file opened in 'readonly' mode.
Regression test for a crash when slicing readonly memmap'd tables.
"""
with fits.open(self.data('table.fits'), mode='readonly') as f:
data = f[1].data
s1 = data[data['target'] == 'NGC1001']
s2 = data[np.where(data['target'] == 'NGC1001')]
s3 = data[[0]]
s4 = data[:1]
for s in [s1, s2, s3, s4]:
assert isinstance(s, fits.FITS_rec)
assert comparerecords(s1, s2)
assert comparerecords(s2, s3)
assert comparerecords(s3, s4)
@pytest.mark.parametrize('tablename', ['table.fits', 'tb.fits'])
def test_dump_load_round_trip(self, tablename):
"""
A simple test of the dump/load methods; dump the data, column, and
header files and try to reload the table from them.
"""
with fits.open(self.data(tablename)) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
_assert_attr_col(new_tbhdu, hdul[1])
    def test_dump_load_array_columns(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/22
Ensures that a table containing a multi-value array column can be
dumped and loaded successfully.
"""
data = np.rec.array([('a', [1, 2, 3, 4], 0.1),
('b', [5, 6, 7, 8], 0.2)],
formats='a1,4i4,f8')
tbhdu = fits.BinTableHDU.from_columns(data)
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
new_tbhdu = fits.BinTableHDU.load(datafile, cdfile, hfile)
assert comparerecords(tbhdu.data, new_tbhdu.data)
assert str(tbhdu.header) == str(new_tbhdu.header)
def test_load_guess_format(self):
"""
Tests loading a table dump with no supplied coldefs or header, so that
the table format has to be guessed at. There is of course no exact
        science to this; the table that is produced simply uses sensible
        guesses for each column's format.  Ideally this should never have to be used.
"""
# Create a table containing a variety of data types.
a0 = np.array([False, True, False], dtype=bool)
c0 = fits.Column(name='c0', format='L', array=a0)
        # Format X is currently not supported by the dump format
# a1 = np.array([[0], [1], [0]], dtype=np.uint8)
# c1 = fits.Column(name='c1', format='X', array=a1)
a2 = np.array([1, 128, 255], dtype=np.uint8)
c2 = fits.Column(name='c2', format='B', array=a2)
a3 = np.array([-30000, 1, 256], dtype=np.int16)
c3 = fits.Column(name='c3', format='I', array=a3)
a4 = np.array([-123123123, 1234, 123123123], dtype=np.int32)
c4 = fits.Column(name='c4', format='J', array=a4)
a5 = np.array(['a', 'abc', 'ab'])
c5 = fits.Column(name='c5', format='A3', array=a5)
a6 = np.array([1.1, 2.2, 3.3], dtype=np.float64)
c6 = fits.Column(name='c6', format='D', array=a6)
a7 = np.array([1.1 + 2.2j, 3.3 + 4.4j, 5.5 + 6.6j],
dtype=np.complex128)
c7 = fits.Column(name='c7', format='M', array=a7)
a8 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
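        # 'PJ()' declares a variable-length array column of 32-bit integers;
        # the maximum element count is filled in automatically from the data.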
c8 = fits.Column(name='c8', format='PJ()', array=a8)
tbhdu = fits.BinTableHDU.from_columns([c0, c2, c3, c4, c5, c6, c7, c8])
datafile = self.temp('data.txt')
tbhdu.dump(datafile)
new_tbhdu = fits.BinTableHDU.load(datafile)
# In this particular case the record data at least should be equivalent
assert comparerecords(tbhdu.data, new_tbhdu.data)
def test_attribute_field_shadowing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/86
Numpy recarray objects have a poorly-considered feature of allowing
field access by attribute lookup. However, if a field name coincides
with an existing attribute/method of the array, the existing name takes
        precedence (making the attribute-based field lookup completely unreliable
in general cases).
This ensures that any FITS_rec attributes still work correctly even
when there is a field with the same name as that attribute.
"""
c1 = fits.Column(name='names', format='I', array=[1])
c2 = fits.Column(name='formats', format='I', array=[2])
c3 = fits.Column(name='other', format='I', array=[3])
t = fits.BinTableHDU.from_columns([c1, c2, c3])
assert t.data.names == ['names', 'formats', 'other']
assert t.data.formats == ['I'] * 3
assert (t.data['names'] == [1]).all()
assert (t.data['formats'] == [2]).all()
assert (t.data.other == [3]).all()
def test_table_from_bool_fields(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/113
Tests creating a table from a recarray containing numpy.bool columns.
"""
array = np.rec.array([(True, False), (False, True)], formats='|b1,|b1')
thdu = fits.BinTableHDU.from_columns(array)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(thdu.data, array)
# Test round trip
thdu.writeto(self.temp('table.fits'))
data = fits.getdata(self.temp('table.fits'), ext=1)
assert thdu.columns.formats == ['L', 'L']
assert comparerecords(data, array)
def test_table_from_bool_fields2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/215
Tests the case where a multi-field ndarray (not a recarray) containing
a bool field is used to initialize a `BinTableHDU`.
"""
arr = np.array([(False,), (True,), (False,)], dtype=[('a', '?')])
hdu = fits.BinTableHDU(data=arr)
assert (hdu.data['a'] == arr['a']).all()
def test_bool_column_update(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/139"""
c1 = fits.Column('F1', 'L', array=[True, False])
c2 = fits.Column('F2', 'L', array=[False, True])
thdu = fits.BinTableHDU.from_columns(fits.ColDefs([c1, c2]))
thdu.writeto(self.temp('table.fits'))
with fits.open(self.temp('table.fits'), mode='update') as hdul:
hdul[1].data['F1'][1] = True
hdul[1].data['F2'][0] = True
with fits.open(self.temp('table.fits')) as hdul:
assert (hdul[1].data['F1'] == [True, True]).all()
assert (hdul[1].data['F2'] == [True, True]).all()
def test_missing_tnull(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/197"""
c = fits.Column('F1', 'A3', null='---',
array=np.array(['1.0', '2.0', '---', '3.0']),
ascii=True)
table = fits.TableHDU.from_columns([c])
table.writeto(self.temp('test.fits'))
# Now let's delete the TNULL1 keyword, making this essentially
# unreadable
with fits.open(self.temp('test.fits'), mode='update') as h:
h[1].header['TFORM1'] = 'E3'
del h[1].header['TNULL1']
with fits.open(self.temp('test.fits')) as h:
pytest.raises(ValueError, lambda: h[1].data['F1'])
try:
with fits.open(self.temp('test.fits')) as h:
h[1].data['F1']
except ValueError as e:
assert str(e).endswith(
"the header may be missing the necessary TNULL1 "
"keyword or the table contains invalid data")
def test_blank_field_zero(self):
"""Regression test for https://github.com/astropy/astropy/issues/5134
Blank values in numerical columns of ASCII tables should be replaced
with zeros, so they can be loaded into numpy arrays.
When a TNULL value is set and there are blank fields not equal to that
value, they should be replaced with zeros.
"""
# Test an integer column with blank string as null
nullval1 = ' '
c1 = fits.Column('F1', format='I8', null=nullval1,
array=np.array([0, 1, 2, 3, 4]),
ascii=True)
table = fits.TableHDU.from_columns([c1])
table.writeto(self.temp('ascii_null.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null.fits'), mode='r+') as h:
nulled = h.read().replace('2 ', ' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null.fits'), memmap=True) as f:
assert f[1].data[2][0] == 0
# Test a float column with a null value set and blank fields.
nullval2 = 'NaN'
c2 = fits.Column('F1', format='F12.8', null=nullval2,
array=np.array([1.0, 2.0, 3.0, 4.0]),
ascii=True)
table = fits.TableHDU.from_columns([c2])
table.writeto(self.temp('ascii_null2.fits'))
# Replace the 1st col, 3rd row, with a null field.
with open(self.temp('ascii_null2.fits'), mode='r+') as h:
nulled = h.read().replace('3.00000000', ' ')
h.seek(0)
h.write(nulled)
with fits.open(self.temp('ascii_null2.fits'), memmap=True) as f:
# (Currently it should evaluate to 0.0, but if a TODO in fitsrec is
# completed, then it should evaluate to NaN.)
assert f[1].data[2][0] == 0.0 or np.isnan(f[1].data[2][0])
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
def test_table_none(self):
"""Regression test
for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('tb.fits')) as h:
h[1].data
h[1].data = None
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['NAXIS'] == 2
assert h[1].header['NAXIS1'] == 12
assert h[1].header['NAXIS2'] == 0
assert isinstance(h[1].data, fits.FITS_rec)
assert len(h[1].data) == 0
    def test_unnecessary_table_load(self):
"""Test unnecessary parsing and processing of FITS tables when writing
directly from one FITS file to a new file without first reading the
data for user manipulation.
In other words, it should be possible to do a direct copy of the raw
data without unnecessary processing of the data.
"""
with fits.open(self.data('table.fits')) as h:
h[1].writeto(self.temp('test.fits'))
# Since this was a direct copy the h[1].data attribute should not have
# even been accessed (since this means the data was read and parsed)
assert 'data' not in h[1].__dict__
with fits.open(self.data('table.fits')) as h1:
with fits.open(self.temp('test.fits')) as h2:
assert str(h1[1].header) == str(h2[1].header)
assert comparerecords(h1[1].data, h2[1].data)
def test_table_from_columns_of_other_table(self):
"""Tests a rare corner case where the columns of an existing table
are used to create a new table with the new_table function. In this
specific case, however, the existing table's data has not been read
yet, so new_table has to get at it through the Delayed proxy.
Note: Although this previously tested new_table it now uses
BinTableHDU.from_columns directly, around which new_table is a mere
wrapper.
"""
hdul = fits.open(self.data('table.fits'))
# Make sure the column array is in fact delayed...
assert isinstance(hdul[1].columns._arrays[0], Delayed)
# Create a new table...
t = fits.BinTableHDU.from_columns(hdul[1].columns)
# The original columns should no longer be delayed...
assert not isinstance(hdul[1].columns._arrays[0], Delayed)
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul2:
assert comparerecords(hdul[1].data, hdul2[1].data)
hdul.close()
def test_bintable_to_asciitable(self):
"""Tests initializing a TableHDU with the data from a BinTableHDU."""
with fits.open(self.data('tb.fits')) as hdul:
tbdata = hdul[1].data
tbhdu = fits.TableHDU(data=tbdata)
tbhdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
tbdata2 = hdul2[1].data
assert np.all(tbdata['c1'] == tbdata2['c1'])
assert np.all(tbdata['c2'] == tbdata2['c2'])
# c3 gets converted from float32 to float64 when writing
# test.fits, so cast to float32 before testing that the correct
# value is retrieved
assert np.all(tbdata['c3'].astype(np.float32) ==
tbdata2['c3'].astype(np.float32))
# c4 is a boolean column in the original table; we want ASCII
# columns to convert these to columns of 'T'/'F' strings
assert np.all(np.where(tbdata['c4'], 'T', 'F') ==
tbdata2['c4'])
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
            # Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match='Field 2 has a repeat count of 0'):
assert comparerecords(zwc_pl, zwc[2].data)
def test_zero_length_table(self):
array = np.array([], dtype=[
('a', 'i8'),
('b', 'S64'),
('c', ('i4', (3, 2)))])
hdu = fits.BinTableHDU(array)
assert hdu.header['NAXIS1'] == 96
assert hdu.header['NAXIS2'] == 0
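        # Note the axis reversal: the numpy field shape (3, 2) is written as
        # TDIM3 = '(2,3)' in FITS (Fortran) order.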
assert hdu.header['TDIM3'] == '(2,3)'
field = hdu.data.field(1)
assert field.shape == (0,)
def test_dim_column_byte_order_mismatch(self):
"""
When creating a table column with non-trivial TDIMn, and
big-endian array data read from an existing FITS file, the data
should not be unnecessarily byteswapped.
Regression test for https://github.com/astropy/astropy/issues/3561
"""
data = fits.getdata(self.data('random_groups.fits'))['DATA']
col = fits.Column(name='TEST', array=data, dim='(3,1,128,1,1)',
format='1152E')
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[1].data['TEST'] == data)
def test_fits_rec_from_existing(self):
"""
Tests creating a `FITS_rec` object with `FITS_rec.from_columns`
from an existing `FITS_rec` object read from a FITS file.
This ensures that the per-column arrays are updated properly.
Regression test for https://github.com/spacetelescope/PyFITS/issues/99
"""
# The use case that revealed this problem was trying to create a new
# table from an existing table, but with additional rows so that we can
# append data from a second table (with the same column structure)
data1 = fits.getdata(self.data('tb.fits'))
data2 = fits.getdata(self.data('tb.fits'))
nrows = len(data1) + len(data2)
merged = fits.FITS_rec.from_columns(data1, nrows=nrows)
merged[len(data1):] = data2
mask = merged['c1'] > 1
masked = merged[mask]
# The test table only has two rows, only the second of which is > 1 for
# the 'c1' column
assert comparerecords(data1[1:], masked[:1])
assert comparerecords(data1[1:], masked[1:])
# Double check that the original data1 table hasn't been affected by
# its use in creating the "merged" table
assert comparerecords(data1, fits.getdata(self.data('tb.fits')))
def test_update_string_column_inplace(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4452
Ensure that changes to values in a string column are saved when
a file is opened in ``mode='update'``.
"""
data = np.array([('abc',)], dtype=[('a', 'S3')])
fits.writeto(self.temp('test.fits'), data)
with fits.open(self.temp('test.fits'), mode='update') as hdul:
hdul[1].data['a'][0] = 'XYZ'
assert hdul[1].data['a'][0] == 'XYZ'
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].data['a'][0] == 'XYZ'
# Test update but with a non-trivial TDIMn
data = np.array([([['abc', 'def', 'geh'],
['ijk', 'lmn', 'opq']],)],
dtype=[('a', ('S3', (2, 3)))])
fits.writeto(self.temp('test2.fits'), data)
expected = [['abc', 'def', 'geh'],
['ijk', 'XYZ', 'opq']]
with fits.open(self.temp('test2.fits'), mode='update') as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
# Note: Previously I wrote data['a'][0][1, 1] to address
# the single row. However, this is broken for chararray because
# data['a'][0] does *not* return a view of the original array--this
# is a bug in chararray though and not a bug in any FITS-specific
# code so we'll roll with it for now...
# (by the way the bug in question is fixed in newer Numpy versions)
hdul[1].data['a'][0, 1, 1] = 'XYZ'
assert np.all(hdul[1].data['a'][0] == expected)
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[1].header['TDIM1'] == '(3,3,2)'
assert np.all(hdul[1].data['a'][0] == expected)
@pytest.mark.skipif('not HAVE_OBJGRAPH')
def test_reference_leak(self):
"""Regression test for https://github.com/astropy/astropy/pull/520"""
def readfile(filename):
with fits.open(filename) as hdul:
data = hdul[1].data.copy()
for colname in data.dtype.names:
data[colname]
with _refcounting('FITS_rec'):
readfile(self.data('memtest.fits'))
@pytest.mark.skipif('not HAVE_OBJGRAPH')
def test_reference_leak2(self, tmpdir):
"""
Regression test for https://github.com/astropy/astropy/pull/4539
This actually re-runs a small set of tests that I found, during
careful testing, exhibited the reference leaks fixed by #4539, but
now with reference counting around each test to ensure that the
leaks are fixed.
"""
from .test_core import TestCore
from .test_connect import TestMultipleHDU
t1 = TestCore()
t1.setup()
try:
with _refcounting('FITS_rec'):
t1.test_add_del_columns2()
finally:
t1.teardown()
del t1
t2 = self.__class__()
for test_name in ['test_recarray_to_bintablehdu',
'test_numpy_ndarray_to_bintablehdu',
'test_new_table_from_recarray',
'test_new_fitsrec']:
t2.setup()
try:
with _refcounting('FITS_rec'):
getattr(t2, test_name)()
finally:
t2.teardown()
del t2
t3 = TestMultipleHDU()
t3.setup_class()
try:
with _refcounting('FITS_rec'):
t3.test_read(tmpdir)
finally:
t3.teardown_class()
del t3
def test_dump_overwrite(self):
with fits.open(self.data('table.fits')) as hdul:
tbhdu = hdul[1]
datafile = self.temp('data.txt')
cdfile = self.temp('coldefs.txt')
hfile = self.temp('header.txt')
tbhdu.dump(datafile, cdfile, hfile)
msg = (r"File .* already exists\. File .* already exists\. File "
r".* already exists\. If you mean to replace the "
r"file\(s\) then use the argument 'overwrite=True'\.")
with pytest.raises(OSError, match=msg):
tbhdu.dump(datafile, cdfile, hfile)
tbhdu.dump(datafile, cdfile, hfile, overwrite=True)
def test_pseudo_unsigned_ints(self):
"""
Tests updating a table column containing pseudo-unsigned ints.
"""
data = np.array([1, 2, 3], dtype=np.uint32)
col = fits.Column(name='A', format='1J', bzero=2**31, array=data)
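        # bzero = 2**31 is how FITS represents unsigned 32-bit integers on top
        # of the signed 'J' storage type (so-called pseudo-unsigned integers).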
thdu = fits.BinTableHDU.from_columns([col])
thdu.writeto(self.temp('test.fits'))
# Test that the file wrote out correctly
with fits.open(self.temp('test.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == data)
# Test updating the unsigned int data
hdu.data['A'][0] = 99
hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits'), uint=True) as hdul:
hdu = hdul[1]
assert 'TZERO1' in hdu.header
assert hdu.header['TZERO1'] == 2**31
assert hdu.data['A'].dtype == np.dtype('uint32')
assert np.all(hdu.data['A'] == [99, 2, 3])
def test_column_with_scaling(self):
"""Check that a scaled column if correctly saved once it is modified.
Regression test for https://github.com/astropy/astropy/issues/6887
"""
c1 = fits.Column(name='c1', array=np.array([1], dtype='>i2'),
format='1I', bscale=1, bzero=32768)
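        # bzero=32768 similarly makes this a scaled column: values are stored
        # as signed 16-bit integers on disk and offset when read back.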
S = fits.HDUList([fits.PrimaryHDU(),
fits.BinTableHDU.from_columns([c1])])
# Change value in memory
S[1].data['c1'][0] = 2
S.writeto(self.temp("a.fits"))
assert S[1].data['c1'] == 2
# Read and change value in memory
with fits.open(self.temp("a.fits")) as X:
X[1].data['c1'][0] = 10
assert X[1].data['c1'][0] == 10
# Write back to file
X.writeto(self.temp("b.fits"))
# Now check the file
with fits.open(self.temp("b.fits")) as hdul:
assert hdul[1].data['c1'][0] == 10
def test_ascii_inttypes(self):
"""
Test correct integer dtypes according to ASCII table field widths.
Regression for https://github.com/astropy/astropy/issues/9899
"""
i08 = np.array([2**3, 2**23, -2**22, 10, 2**23], dtype='i4')
i10 = np.array([2**8, 2**31-1, -2**29, 30, 2**31-1], dtype='i8')
i20 = np.array([2**16, 2**63-1, -2**63, 40, 2**63-1], dtype='i8')
i02 = np.array([2**8, 2**13, -2**9, 50, 2**13], dtype='i2')
t0 = Table([i08, i08*2, i10, i20, i02])
t1 = Table.read(self.data('ascii_i4-i20.fits'))
assert t1.dtype == t0.dtype
assert comparerecords(t1, t0)
@contextlib.contextmanager
def _refcounting(type_):
"""
Perform the body of a with statement with reference counting for the
given type (given by class name)--raises an assertion error if there
are more unfreed objects of the given type than when we entered the
with statement.
"""
gc.collect()
refcount = len(objgraph.by_type(type_))
yield refcount
gc.collect()
    assert len(objgraph.by_type(type_)) <= refcount, \
        f"More {type_!r} objects still in memory than before."
class TestVLATables(FitsTestCase):
"""Tests specific to tables containing variable-length arrays."""
def test_variable_length_columns(self):
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[[0] * 1571] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
with fits.open(self.temp('toto.fits')) as toto:
q = toto[1].data.field('QUAL_SPE')
assert (q[0][4:8] ==
np.array([0, 0, 0, 0], dtype=np.uint8)).all()
assert toto[1].columns[0].format.endswith('J(1571)')
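        # Exercise both descriptor types: 'P' uses 32-bit array descriptors
        # and 'Q' uses 64-bit ones; the behavior should be identical here.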
for code in ('PJ()', 'QJ()'):
test(code)
def test_extend_variable_length_array(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/54"""
def test(format_code):
arr = [[1] * 10] * 10
col1 = fits.Column(name='TESTVLF', format=format_code, array=arr)
col2 = fits.Column(name='TESTSCA', format='J', array=[1] * 10)
tb_hdu = fits.BinTableHDU.from_columns([col1, col2], nrows=15)
# This asserts that the normal 'scalar' column's length was extended
assert len(tb_hdu.data['TESTSCA']) == 15
# And this asserts that the VLF column was extended in the same manner
assert len(tb_hdu.data['TESTVLF']) == 15
# We can't compare the whole array since the _VLF is an array of
# objects, but comparing just the edge case rows should suffice
assert (tb_hdu.data['TESTVLF'][0] == arr[0]).all()
assert (tb_hdu.data['TESTVLF'][9] == arr[9]).all()
assert (tb_hdu.data['TESTVLF'][10] == ([0] * 10)).all()
assert (tb_hdu.data['TESTVLF'][-1] == ([0] * 10)).all()
for code in ('PJ()', 'QJ()'):
test(code)
def test_variable_length_table_format_pd_from_object_array(self):
def test(format_code):
a = np.array([np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pd_from_list(self):
def test(format_code):
a = [np.array([7.2e-20, 7.3e-20]), np.array([0.0]),
np.array([0.0])]
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as tbhdu1:
assert tbhdu1[1].columns[0].format.endswith('D(2)')
for j in range(3):
for i in range(len(a[j])):
assert tbhdu1[1].data.field(0)[j][i] == a[j][i]
for code in ('PD()', 'QD()'):
test(code)
def test_variable_length_table_format_pa_from_object_array(self):
def test(format_code):
a = np.array([np.array(['a', 'b', 'c']), np.array(['d', 'e']),
np.array(['f'])], 'O')
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_variable_length_table_format_pa_from_list(self):
def test(format_code):
a = ['a', 'ab', 'abc']
acol = fits.Column(name='testa', format=format_code, array=a)
tbhdu = fits.BinTableHDU.from_columns([acol])
tbhdu.writeto(self.temp('newtable.fits'), overwrite=True)
with fits.open(self.temp('newtable.fits')) as hdul:
assert hdul[1].columns[0].format.endswith('A(3)')
for j in range(3):
for i in range(len(a[j])):
assert hdul[1].data.field(0)[j][i] == a[j][i]
for code in ('PA()', 'QA()'):
test(code)
def test_getdata_vla(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/200"""
def test(format_code):
col = fits.Column(name='QUAL_SPE', format=format_code,
array=[np.arange(1572)] * 225)
tb_hdu = fits.BinTableHDU.from_columns([col])
pri_hdu = fits.PrimaryHDU()
hdu_list = fits.HDUList([pri_hdu, tb_hdu])
hdu_list.writeto(self.temp('toto.fits'), overwrite=True)
data = fits.getdata(self.temp('toto.fits'))
# Need to compare to the original data row by row since the FITS_rec
# returns an array of _VLA objects
for row_a, row_b in zip(data['QUAL_SPE'], col.array):
assert (row_a == row_b).all()
for code in ('PJ()', 'QJ()'):
test(code)
@pytest.mark.skipif(not NUMPY_LT_1_22 and NUMPY_LT_1_22_1 and sys.platform == 'win32',
reason='https://github.com/numpy/numpy/issues/20699')
def test_copy_vla(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/47
"""
# Make a file containing a couple of VLA tables
arr1 = [np.arange(n + 1) for n in range(255)]
arr2 = [np.arange(255, 256 + n) for n in range(255)]
# A dummy non-VLA column needed to reproduce issue #47
c = fits.Column('test', format='J', array=np.arange(255))
c1 = fits.Column('A', format='PJ', array=arr1)
c2 = fits.Column('B', format='PJ', array=arr2)
t1 = fits.BinTableHDU.from_columns([c, c1])
t2 = fits.BinTableHDU.from_columns([c, c2])
hdul = fits.HDUList([fits.PrimaryHDU(), t1, t2])
hdul.writeto(self.temp('test.fits'), overwrite=True)
# Just test that the test file wrote out correctly
with fits.open(self.temp('test.fits')) as h:
assert h[1].header['TFORM2'] == 'PJ(255)'
assert h[2].header['TFORM2'] == 'PJ(255)'
assert comparerecords(h[1].data, t1.data)
assert comparerecords(h[2].data, t2.data)
# Try copying the second VLA and writing to a new file
with fits.open(self.temp('test.fits')) as h:
new_hdu = fits.BinTableHDU(data=h[2].data, header=h[2].header)
new_hdu.writeto(self.temp('test3.fits'))
with fits.open(self.temp('test3.fits')) as h2:
assert comparerecords(h2[1].data, t2.data)
new_hdul = fits.HDUList([fits.PrimaryHDU()])
new_hdul.writeto(self.temp('test2.fits'))
# Open several copies of the test file and append copies of the second
# VLA table
with fits.open(self.temp('test2.fits'), mode='append') as new_hdul:
for _ in range(2):
with fits.open(self.temp('test.fits')) as h:
new_hdul.append(h[2])
new_hdul.flush()
# Test that all the VLA copies wrote correctly
with fits.open(self.temp('test2.fits')) as new_hdul:
for idx in range(1, 3):
assert comparerecords(new_hdul[idx].data, t2.data)
def test_vla_with_gap(self):
hdul = fits.open(self.data('theap-gap.fits'))
data = hdul[1].data
assert data.shape == (500,)
assert data['i'][497] == 497
assert np.array_equal(data['arr'][497], [0, 1, 2, 3, 4])
hdul.close()
def test_tolist(self):
col = fits.Column(
name='var', format='PI()',
array=np.array([[1, 2, 3], [11, 12]], dtype=np.object_))
hdu = fits.BinTableHDU.from_columns([col])
assert hdu.data.tolist() == [[[1, 2, 3]], [[11, 12]]]
assert hdu.data['var'].tolist() == [[1, 2, 3], [11, 12]]
def test_tolist_from_file(self):
filename = self.data('variable_length_table.fits')
with fits.open(filename) as hdul:
hdu = hdul[1]
assert hdu.data.tolist() == [[[45, 56], [11, 3]], [[11, 12, 13], [12, 4]]]
assert hdu.data['var'].tolist() == [[45, 56], [11, 12, 13]]
# These are tests that solely test the Column and ColDefs interfaces and
# related functionality without directly involving full tables; currently there
# are few of these but I expect there to be more as I improve the test coverage
class TestColumnFunctions(FitsTestCase):
def test_column_format_interpretation(self):
"""
Test to ensure that when Numpy-style record formats are passed in to
the Column constructor for the format argument, they are recognized so
        long as they are unambiguous (where "unambiguous" is a loose criterion,
        since Numpy is case insensitive when parsing the format codes, but
        their "proper" case is lower-case, so we accept that).  In practice,
        any key in the NUMPY2FITS dict should be accepted.
"""
for recformat, fitsformat in NUMPY2FITS.items():
            c = fits.Column('TEST', np.dtype(recformat))
            assert c.format == fitsformat
            c = fits.Column('TEST', recformat)
            assert c.format == fitsformat
            c = fits.Column('TEST', fitsformat)
            assert c.format == fitsformat
# Test a few cases that are ambiguous in that they *are* valid binary
# table formats though not ones that are likely to be used, but are
# also valid common ASCII table formats
c = fits.Column('TEST', 'I4')
assert c.format == 'I4'
assert c.format.format == 'I'
assert c.format.width == 4
c = fits.Column('TEST', 'F15.8')
assert c.format == 'F15.8'
assert c.format.format == 'F'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'E15.8')
assert c.format.format == 'E'
assert c.format.width == 15
assert c.format.precision == 8
c = fits.Column('TEST', 'D15.8')
assert c.format.format == 'D'
assert c.format.width == 15
assert c.format.precision == 8
# zero-precision should be allowed as well, for float types
# https://github.com/astropy/astropy/issues/3422
c = fits.Column('TEST', 'F10.0')
assert c.format.format == 'F'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'E10.0')
assert c.format.format == 'E'
assert c.format.width == 10
assert c.format.precision == 0
c = fits.Column('TEST', 'D10.0')
assert c.format.format == 'D'
assert c.format.width == 10
assert c.format.precision == 0
# These are a couple cases where the format code is a valid binary
# table format, and is not strictly a valid ASCII table format but
# could be *interpreted* as one by appending a default width. This
# will only happen either when creating an ASCII table or when
# explicitly specifying ascii=True when the column is created
c = fits.Column('TEST', 'I')
assert c.format == 'I'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I', ascii=True)
assert c.format == 'I10'
assert c.format.recformat == 'i4'
# With specified widths, integer precision should be set appropriately
c = fits.Column('TEST', 'I4', ascii=True)
assert c.format == 'I4'
assert c.format.recformat == 'i2'
c = fits.Column('TEST', 'I9', ascii=True)
assert c.format == 'I9'
assert c.format.recformat == 'i4'
c = fits.Column('TEST', 'I12', ascii=True)
assert c.format == 'I12'
assert c.format.recformat == 'i8'
c = fits.Column('TEST', 'E')
assert c.format == 'E'
assert c.format.recformat == 'f4'
c = fits.Column('TEST', 'E', ascii=True)
assert c.format == 'E15.7'
# F is not a valid binary table format so it should be unambiguously
# treated as an ASCII column
c = fits.Column('TEST', 'F')
assert c.format == 'F16.7'
c = fits.Column('TEST', 'D')
assert c.format == 'D'
assert c.format.recformat == 'f8'
c = fits.Column('TEST', 'D', ascii=True)
assert c.format == 'D25.17'
def test_zero_precision_float_column(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3422
"""
c = fits.Column('TEST', 'F5.0', array=[1.1, 2.2, 3.3])
# The decimal places will be clipped
t = fits.TableHDU.from_columns([c])
t.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['TFORM1'] == 'F5.0'
assert hdul[1].data['TEST'].dtype == np.dtype('float64')
assert np.all(hdul[1].data['TEST'] == [1.0, 2.0, 3.0])
# Check how the raw data looks
raw = np.rec.recarray.field(hdul[1].data, 'TEST')
assert raw.tobytes() == b' 1. 2. 3.'
def test_column_array_type_mismatch(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218"""
arr = [-99] * 20
col = fits.Column('mag', format='E', array=arr)
assert (arr == col.array).all()
    def test_new_coldefs_with_invalid_sequence(self):
"""Test that a TypeError is raised when a ColDefs is instantiated with
a sequence of non-Column objects.
"""
pytest.raises(TypeError, fits.ColDefs, [1, 2, 3])
def test_coldefs_init_from_array(self):
"""Test that ColDefs._init_from_array works with single element data-
types as well as multi-element data-types
"""
nd_array = np.ndarray((1,), dtype=[('A', '<u4', (2,)), ('B', '>u2')])
col_defs = fits.column.ColDefs(nd_array)
assert 2**31 == col_defs['A'].bzero
assert 2**15 == col_defs['B'].bzero
def test_pickle(self):
"""
Regression test for https://github.com/astropy/astropy/issues/1597
Tests for pickling FITS_rec objects
"""
# open existing FITS tables (images pickle by default, no test needed):
with fits.open(self.data('tb.fits')) as btb:
# Test column array is delayed and can pickle
assert isinstance(btb[1].columns._arrays[0], Delayed)
btb_pd = pickle.dumps(btb[1].data)
btb_pl = pickle.loads(btb_pd)
# It should not be delayed any more
assert not isinstance(btb[1].columns._arrays[0], Delayed)
assert comparerecords(btb_pl, btb[1].data)
with fits.open(self.data('ascii.fits')) as asc:
asc_pd = pickle.dumps(asc[1].data)
asc_pl = pickle.loads(asc_pd)
assert comparerecords(asc_pl, asc[1].data)
with fits.open(self.data('random_groups.fits')) as rgr:
rgr_pd = pickle.dumps(rgr[0].data)
rgr_pl = pickle.loads(rgr_pd)
assert comparerecords(rgr_pl, rgr[0].data)
with fits.open(self.data('zerowidth.fits')) as zwc:
            # Doesn't pickle zero-width (_phantom) column 'ORBPARM'
zwc_pd = pickle.dumps(zwc[2].data)
zwc_pl = pickle.loads(zwc_pd)
with pytest.warns(UserWarning, match=r'Field 2 has a repeat count '
r'of 0 in its format code'):
assert comparerecords(zwc_pl, zwc[2].data)
def test_column_lookup_by_name(self):
"""Tests that a `ColDefs` can be indexed by column name."""
a = fits.Column(name='a', format='D')
b = fits.Column(name='b', format='D')
cols = fits.ColDefs([a, b])
assert cols['a'] == cols[0]
assert cols['b'] == cols[1]
def test_column_attribute_change_after_removal(self):
"""
This is a test of the column attribute change notification system.
After a column has been removed from a table (but other references
are kept to that same column) changes to that column's attributes
should not trigger a notification on the table it was removed from.
"""
# One way we can check this is to ensure there are no further changes
# to the header
table = fits.BinTableHDU.from_columns([
fits.Column('a', format='D'),
fits.Column('b', format='D')])
b = table.columns['b']
table.columns.del_col('b')
assert table.data.dtype.names == ('a',)
b.name = 'HELLO'
assert b.name == 'HELLO'
assert 'TTYPE2' not in table.header
assert table.header['TTYPE1'] == 'a'
assert table.columns.names == ['a']
with pytest.raises(KeyError):
table.columns['b']
# Make sure updates to the remaining column still work
table.columns.change_name('a', 'GOODBYE')
with pytest.raises(KeyError):
table.columns['a']
assert table.columns['GOODBYE'].name == 'GOODBYE'
assert table.data.dtype.names == ('GOODBYE',)
assert table.columns.names == ['GOODBYE']
assert table.data.columns.names == ['GOODBYE']
table.columns['GOODBYE'].name = 'foo'
with pytest.raises(KeyError):
table.columns['GOODBYE']
assert table.columns['foo'].name == 'foo'
assert table.data.dtype.names == ('foo',)
assert table.columns.names == ['foo']
assert table.data.columns.names == ['foo']
def test_x_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the X (bit array) format can be deep-copied.
"""
c = fits.Column('xcol', format='5X', array=[1, 0, 0, 1, 0])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array == c.array)
def test_p_column_deepcopy(self):
"""
Regression test for https://github.com/astropy/astropy/pull/4514
Tests that columns with the P/Q formats (variable length arrays) can be
deep-copied.
"""
c = fits.Column('pcol', format='PJ', array=[[1, 2], [3, 4, 5]])
c2 = copy.deepcopy(c)
assert c2.name == c.name
assert c2.format == c.format
assert np.all(c2.array[0] == c.array[0])
assert np.all(c2.array[1] == c.array[1])
c3 = fits.Column('qcol', format='QJ', array=[[1, 2], [3, 4, 5]])
c4 = copy.deepcopy(c3)
assert c4.name == c3.name
assert c4.format == c3.format
assert np.all(c4.array[0] == c3.array[0])
assert np.all(c4.array[1] == c3.array[1])
def test_column_verify_keywords(self):
"""
Test that the keyword arguments used to initialize a Column, specifically
        those that are typically read from a FITS header (so excluding array),
are verified to have a valid value.
"""
with pytest.raises(AssertionError) as err:
_ = fits.Column(1, format='I', array=[1, 2, 3, 4, 5])
assert 'Column name must be a string able to fit' in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('col', format=0, null='Nan', disp=1, coord_type=1,
coord_unit=2, coord_inc='1', time_ref_pos=1,
coord_ref_point='1', coord_ref_value='1')
err_msgs = ['keyword arguments to Column were invalid',
'TFORM', 'TNULL', 'TDISP', 'TCTYP', 'TCUNI', 'TCRPX',
'TCRVL', 'TCDLT', 'TRPOS']
for msg in err_msgs:
assert msg in str(err.value)
def test_column_verify_start(self):
"""
Regression test for https://github.com/astropy/astropy/pull/6359
Test the validation of the column start position option (ASCII table only),
corresponding to ``TBCOL`` keyword.
Test whether the VerifyError message generated is the one with highest priority,
i.e. the order of error messages to be displayed is maintained.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='B', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) is not allowed for binary table columns" in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='I', start='a', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got 'a')." in str(err.value)
with pytest.raises(VerifyError) as err:
_ = fits.Column('a', format='I', start='-56', array=[1, 2, 3])
assert "start option (TBCOLn) must be a positive integer (got -56)." in str(err.value)
@pytest.mark.parametrize('keys',
[{'TFORM': 'Z', 'TDISP': 'E'},
{'TFORM': '2', 'TDISP': '2E'},
{'TFORM': 3, 'TDISP': 6.3},
{'TFORM': float, 'TDISP': np.float64},
{'TFORM': '', 'TDISP': 'E.5'}])
def test_column_verify_formats(self, keys):
"""
Additional tests for verification of 'TFORM' and 'TDISP' keyword
arguments used to initialize a Column.
"""
with pytest.raises(VerifyError) as err:
_ = fits.Column('col', format=keys['TFORM'], disp=keys['TDISP'])
for key in keys.keys():
assert key in str(err.value)
assert str(keys[key]) in str(err.value)
def test_regression_5383():
# Regression test for an undefined variable
x = np.array([1, 2, 3])
col = fits.Column(name='a', array=x, format='E')
hdu = fits.BinTableHDU.from_columns([col])
del hdu._header['TTYPE1']
hdu.columns[0].name = 'b'
def test_table_to_hdu():
from astropy.table import Table
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
table.meta['foo'] = 'bar'
with pytest.warns(UnitsWarning, match="'not-a-unit' did not parse as"
" fits unit") as w:
hdu = fits.BinTableHDU(table, header=fits.Header({'TEST': 1}))
assert len(w) == 1
for name in 'abc':
assert np.array_equal(table[name], hdu.data[name])
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert hdu.header['FOO'] == 'bar'
assert hdu.header['TEST'] == 1
def test_regression_scalar_indexing():
# Indexing a FITS_rec with a tuple that returns a scalar record
# should work
x = np.array([(1.0, 2), (3.0, 4)],
dtype=[('x', float), ('y', int)]).view(fits.FITS_rec)
x1a = x[1]
# this should succeed.
x1b = x[(1,)]
# FITS_record does not define __eq__; so test elements.
assert all(a == b for a, b in zip(x1a, x1b))
def test_new_column_attributes_preserved(tmpdir):
# Regression test for https://github.com/astropy/astropy/issues/7145
# This makes sure that for now we don't clear away keywords that have
# newly been recognized (in Astropy 3.0) as special column attributes but
# instead just warn that we might do so in future. The new keywords are:
# TCTYP, TCUNI, TCRPX, TCRVL, TCDLT, TRPOS
col = []
col.append(fits.Column(name="TIME", format="1E", unit="s"))
col.append(fits.Column(name="RAWX", format="1I", unit="pixel"))
col.append(fits.Column(name="RAWY", format="1I"))
cd = fits.ColDefs(col)
hdr = fits.Header()
# Keywords that will get ignored in favor of these in the data
hdr['TUNIT1'] = 'pixel'
hdr['TUNIT2'] = 'm'
hdr['TUNIT3'] = 'm'
# Keywords that were added in Astropy 3.0 that should eventually be
# ignored and set on the data instead
hdr['TCTYP2'] = 'RA---TAN'
hdr['TCTYP3'] = 'ANGLE'
hdr['TCRVL2'] = -999.0
hdr['TCRVL3'] = -999.0
hdr['TCRPX2'] = 1.0
hdr['TCRPX3'] = 1.0
hdr['TALEN2'] = 16384
hdr['TALEN3'] = 1024
hdr['TCUNI2'] = 'angstrom'
hdr['TCUNI3'] = 'deg'
# Other non-relevant keywords
hdr['RA'] = 1.5
hdr['DEC'] = 3.0
with pytest.warns(AstropyDeprecationWarning) as warning_list:
hdu = fits.BinTableHDU.from_columns(cd, hdr)
assert str(warning_list[0].message).startswith(
"The following keywords are now recognized as special")
# First, check that special keywords such as TUNIT are ignored in the header
# We may want to change that behavior in future, but this is the way it's
# been for a while now.
assert hdu.columns[0].unit == 's'
assert hdu.columns[1].unit == 'pixel'
assert hdu.columns[2].unit is None
assert hdu.header['TUNIT1'] == 's'
assert hdu.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu.columns[0].coord_type is None
assert hdu.columns[1].coord_type is None
assert hdu.columns[2].coord_type is None
assert 'TCTYP1' not in hdu.header
assert hdu.header['TCTYP2'] == 'RA---TAN'
assert hdu.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu.header['RA'] == 1.5
assert hdu.header['DEC'] == 3.0
# Now we can write this HDU to a file and re-load. Re-loading *should*
    # cause the special column attributes to be picked up (it's just that when a
# header is manually specified, these values are ignored)
filename = tmpdir.join('test.fits').strpath
hdu.writeto(filename)
# Make sure we don't emit a warning in this case
with warnings.catch_warnings(record=True) as warning_list:
with fits.open(filename) as hdul:
hdu2 = hdul[1]
assert len(warning_list) == 0
# Check that column attributes are now correctly set
assert hdu2.columns[0].unit == 's'
assert hdu2.columns[1].unit == 'pixel'
assert hdu2.columns[2].unit is None
assert hdu2.header['TUNIT1'] == 's'
assert hdu2.header['TUNIT2'] == 'pixel'
assert 'TUNIT3' not in hdu2.header # TUNIT3 was removed
# Now, check that the new special keywords are actually still there
# but weren't used to set the attributes on the data
assert hdu2.columns[0].coord_type is None
assert hdu2.columns[1].coord_type == 'RA---TAN'
assert hdu2.columns[2].coord_type == 'ANGLE'
assert 'TCTYP1' not in hdu2.header
assert hdu2.header['TCTYP2'] == 'RA---TAN'
assert hdu2.header['TCTYP3'] == 'ANGLE'
# Make sure that other keywords are still there
assert hdu2.header['RA'] == 1.5
assert hdu2.header['DEC'] == 3.0
def test_empty_table(tmpdir):
ofile = str(tmpdir.join('emptytable.fits'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
ofile = str(tmpdir.join('emptytable.fits.gz'))
hdu = fits.BinTableHDU(header=None, data=None, name='TEST')
hdu.writeto(ofile, overwrite=True)
with fits.open(ofile) as hdul:
assert hdul['TEST'].data.size == 0
def test_a3dtable(tmpdir):
testfile = str(tmpdir.join('test.fits'))
hdu = fits.BinTableHDU.from_columns([
fits.Column(name='FOO', format='J', array=np.arange(10))
])
hdu.header['XTENSION'] = 'A3DTABLE'
hdu.writeto(testfile, output_verify='ignore')
with fits.open(testfile) as hdul:
assert hdul[1].header['XTENSION'] == 'A3DTABLE'
with pytest.warns(AstropyUserWarning) as w:
hdul.verify('fix')
assert str(w[0].message) == 'Verification reported errors:'
assert str(w[2].message).endswith(
'Converted the XTENSION keyword to BINTABLE.')
assert hdul[1].header['XTENSION'] == 'BINTABLE'
def test_invalid_file(tmp_path):
hdu = fits.BinTableHDU()
# little trick to write an invalid card ...
hdu.header['FOO'] = None
hdu.header.cards['FOO']._value = np.nan
testfile = tmp_path / 'test.fits'
hdu.writeto(testfile, output_verify='ignore')
with fits.open(testfile) as hdul:
assert hdul[1].data is not None
def test_unit_parse_strict(tmp_path):
path = tmp_path / 'invalid_unit.fits'
# this is a unit parseable by the generic format but invalid for FITS
invalid_unit = '1 / (MeV sr s)'
unit = Unit(invalid_unit)
t = Table({'a': [1, 2, 3]})
t.write(path)
with fits.open(path, mode='update') as hdul:
hdul[1].header['TUNIT1'] = invalid_unit
# default is "warn"
with pytest.warns(UnitsWarning):
t = Table.read(path)
assert isinstance(t['a'].unit, UnrecognizedUnit)
t = Table.read(path, unit_parse_strict='silent')
assert isinstance(t['a'].unit, UnrecognizedUnit)
with pytest.raises(ValueError):
Table.read(path, unit_parse_strict='raise')
with pytest.warns(UnitsWarning):
Table.read(path, unit_parse_strict='warn')
|
989b772db36ab19b3fae3420f85d89125496787be45ad4ab01304c8bb12929af | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``ECSV``
reader/writer.
"""
from astropy.table.column import MaskedColumn
import os
import copy
import sys
from io import StringIO
from contextlib import nullcontext
import pytest
import numpy as np
import yaml
from astropy.table import Table, Column, QTable
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units import QuantityInfo
from astropy.utils.compat import NUMPY_LT_1_19_1
from astropy.io.ascii.ecsv import DELIMITERS, InvalidEcsvDatatypeWarning
from astropy.io import ascii
from astropy import units as u
from astropy.io.tests.mixin_columns import mixin_cols, compare_attrs
from .common import TEST_DIR
DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
'uint64', 'float16', 'float32', 'float64', 'float128',
'str']
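# float128 is not available on Windows or on 32-bit platforms, hence the
# hasattr / os.name / sys.maxsize checks below.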
if not hasattr(np, 'float128') or os.name == 'nt' or sys.maxsize <= 2**32:
DTYPES.remove('float128')
T_DTYPES = Table()
for dtype in DTYPES:
if dtype == 'bool':
data = np.array([False, True, False])
elif dtype == 'str':
data = np.array(['ab 0', 'ab, 1', 'ab2'])
else:
data = np.arange(3, dtype=dtype)
c = Column(data, unit='m / s', description='descr_' + dtype,
meta={'meta ' + dtype: 1})
T_DTYPES[dtype] = c
T_DTYPES.meta['comments'] = ['comment1', 'comment2']
# Corresponds to simple_table()
SIMPLE_LINES = ['# %ECSV 1.0',
'# ---',
'# datatype:',
'# - {name: a, datatype: int64}',
'# - {name: b, datatype: float64}',
'# - {name: c, datatype: string}',
'# schema: astropy-2.0',
'a b c',
'1 1.0 c',
'2 2.0 d',
'3 3.0 e']
def test_write_simple():
"""
Write a simple table with common types. This shows the compact version
of serialization with one line per column.
"""
t = simple_table()
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == SIMPLE_LINES
def test_write_full():
"""
    Write a full-featured table with common types and explicitly check the output
"""
t = T_DTYPES['bool', 'int64', 'float64', 'str']
lines = ['# %ECSV 1.0',
'# ---',
'# datatype:',
'# - name: bool',
'# unit: m / s',
'# datatype: bool',
'# description: descr_bool',
'# meta: {meta bool: 1}',
'# - name: int64',
'# unit: m / s',
'# datatype: int64',
'# description: descr_int64',
'# meta: {meta int64: 1}',
'# - name: float64',
'# unit: m / s',
'# datatype: float64',
'# description: descr_float64',
'# meta: {meta float64: 1}',
'# - name: str',
'# unit: m / s',
'# datatype: string',
'# description: descr_str',
'# meta: {meta str: 1}',
'# meta: !!omap',
'# - comments: [comment1, comment2]',
'# schema: astropy-2.0',
'bool int64 float64 str',
'False 0 0.0 "ab 0"',
'True 1 1.0 "ab, 1"',
'False 2 2.0 ab2']
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == lines
def test_write_read_roundtrip():
"""
Write a full-featured table with all types and see that it round-trips on
readback. Use both space and comma delimiters.
"""
t = T_DTYPES
for delimiter in DELIMITERS:
out = StringIO()
t.write(out, format='ascii.ecsv', delimiter=delimiter)
t2s = [Table.read(out.getvalue(), format='ascii.ecsv'),
Table.read(out.getvalue(), format='ascii'),
ascii.read(out.getvalue()),
ascii.read(out.getvalue(), format='ecsv', guess=False),
ascii.read(out.getvalue(), format='ecsv')]
for t2 in t2s:
assert t.meta == t2.meta
for name in t.colnames:
assert t[name].attrs_equal(t2[name])
assert np.all(t[name] == t2[name])
def test_bad_delimiter():
"""
Passing a delimiter other than space or comma gives an exception
"""
out = StringIO()
with pytest.raises(ValueError) as err:
T_DTYPES.write(out, format='ascii.ecsv', delimiter='|')
assert 'only space and comma are allowed' in str(err.value)
def test_bad_header_start():
"""
Bad header without initial # %ECSV x.x
"""
lines = copy.copy(SIMPLE_LINES)
lines[0] = '# %ECV 0.9'
with pytest.raises(ascii.InconsistentTableError):
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
def test_bad_delimiter_input():
"""
Illegal delimiter in input
"""
lines = copy.copy(SIMPLE_LINES)
lines.insert(2, '# delimiter: |')
with pytest.raises(ValueError) as err:
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
assert 'only space and comma are allowed' in str(err.value)
def test_multidim_input():
"""
Multi-dimensional column in input
"""
t = Table()
t['a'] = np.arange(24).reshape(2, 3, 4)
t['a'].info.description = 'description'
t['a'].info.meta = {1: 2}
t['b'] = [1, 2]
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert np.all(t2['a'] == t['a'])
assert t2['a'].shape == t['a'].shape
assert t2['a'].dtype == t['a'].dtype
assert t2['a'].info.description == t['a'].info.description
assert t2['a'].info.meta == t['a'].info.meta
assert np.all(t2['b'] == t['b'])
def test_round_trip_empty_table():
"""Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)"""
t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c'])
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.dtype == t2.dtype
assert len(t2) == 0
def test_csv_ecsv_colnames_mismatch():
"""
Test that mismatch in column names from normal CSV header vs.
ECSV YAML header raises the expected exception.
"""
lines = copy.copy(SIMPLE_LINES)
header_index = lines.index('a b c')
lines[header_index] = 'a b d'
with pytest.raises(ValueError) as err:
ascii.read(lines, format='ecsv')
assert "column names from ECSV header ['a', 'b', 'c']" in str(err.value)
def test_regression_5604():
"""
See https://github.com/astropy/astropy/issues/5604 for more.
"""
t = Table()
t.meta = {"foo": 5 * u.km, "foo2": u.s}
t["bar"] = [7] * u.km
out = StringIO()
t.write(out, format="ascii.ecsv")
assert '!astropy.units.Unit' in out.getvalue()
assert '!astropy.units.Quantity' in out.getvalue()
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
# For a column that is a native astropy Column, ignore the specified
# `attrs`. This happens for a mixin like Quantity that is stored in a
# `Table` (not QTable).
if isinstance(obj1, Column):
attrs = []
assert obj1.shape == obj2.shape
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-10)
else:
assert np.all(a1 == a2)
    # If there are no attrs, compare the objects directly.
if not attrs:
if isinstance(obj1, np.ndarray) and obj1.dtype.kind == 'f':
assert quantity_allclose(obj1, obj2, rtol=1e-15)
else:
assert np.all(obj1 == obj2)
def test_ecsv_mixins_ascii_read_class():
"""Ensure that ascii.read(ecsv_file) returns the correct class
(QTable if any Quantity subclasses, Table otherwise).
"""
# Make a table with every mixin type except Quantities
t = QTable({name: col for name, col in mixin_cols.items()
if not isinstance(col.info, QuantityInfo)})
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is Table
# Add a single quantity column
t['lon'] = mixin_cols['lon']
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is QTable
def test_ecsv_mixins_qtable_to_table():
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.allclose(col.value, col2, rtol=1e-10)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_mixins_as_one(table_cls):
"""Test write/read all cols at once and validate intermediate column names"""
names = sorted(mixin_cols)
serialized_names = ['ang',
'cr.x', 'cr.y', 'cr.z',
'dt',
'el.x', 'el.y', 'el.z',
'lat',
'lon',
'nd',
'obj',
'qdb',
'qdex',
'qmag',
'sc.ra', 'sc.dec',
'scd.ra', 'scd.dec', 'scd.distance',
'scd.obstime',
'scdc.x', 'scdc.y', 'scdc.z',
'scdc.obstime',
'scpm.ra', 'scpm.dec', 'scpm.distance',
'scpm.pm_ra_cosdec', 'scpm.pm_dec',
'scpmrv.ra', 'scpmrv.dec', 'scpmrv.distance',
'scpmrv.pm_ra_cosdec', 'scpmrv.pm_dec',
'scpmrv.radial_velocity',
'scrv.ra', 'scrv.dec', 'scrv.distance',
'scrv.radial_velocity',
'sd.d_lon_coslat', 'sd.d_lat', 'sd.d_distance',
'sr.lon', 'sr.lat', 'sr.distance',
'srd.lon', 'srd.lat', 'srd.distance',
'srd.differentials.s.d_lon_coslat',
'srd.differentials.s.d_lat',
'srd.differentials.s.d_distance',
'tm', # serialize_method is formatted_value
'tm2', # serialize_method is formatted_value
'tm3.jd1', 'tm3.jd2', # serialize is jd1_jd2
'tm3.location.x', 'tm3.location.y', 'tm3.location.z',
'x']
t = table_cls([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
    # Read as an ascii.basic table (skip all the ECSV junk)
t3 = table_cls.read(out.getvalue(), format='ascii.basic')
assert t3.colnames == serialized_names
def make_multidim(col, ndim):
"""Take a col with length=2 and make it N-d by repeating elements.
For the special case of ndim==1 just return the original.
The output has shape [3] * ndim. By using 3 we can be sure that repeating
the two input elements gives an output that is sufficiently unique for
the multidim tests.
"""
if ndim > 1:
import itertools
idxs = [idx for idx, _ in zip(itertools.cycle([0, 1]), range(3 ** ndim))]
col = col[idxs].reshape([3] * ndim)
return col
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
@pytest.mark.parametrize('ndim', (1, 2, 3))
def test_ecsv_mixins_per_column(table_cls, name_col, ndim):
"""Test write/read one col at a time and do detailed validation.
This tests every input column type as 1-d, 2-d and 3-d.
"""
name, col = name_col
c = make_multidim(np.array([1.0, 2.0]), ndim)
col = make_multidim(col, ndim)
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'description'
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for colname in t.colnames:
assert len(t2[colname].shape) == ndim
compare = ['data'] if colname in ('c1', 'c2') else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
def test_round_trip_masked_table_default(tmpdir):
"""Test (mostly) round-trip of MaskedColumn through ECSV using default serialization
that uses an empty string "" to mark NULL values. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = str(tmpdir.join('test.ecsv'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t.write(filename)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
        # From a formal perspective the round-trip columns are the "same"
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# But peeking under the mask shows that the underlying data are changed
# because by default ECSV uses "" to represent masked elements.
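        # (reading "" substitutes a parser fill value for the original data,
        # which is why the unmasked comparison below shows a difference)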
t[name].mask = False
t2[name].mask = False
assert not np.all(t2[name] == t[name]) # Expected diff
def test_round_trip_masked_table_serialize_mask(tmpdir):
"""Same as prev but set the serialize_method to 'data_mask' so mask is written out"""
filename = str(tmpdir.join('test.ecsv'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t['c'][0] = '' # This would come back as masked for default "" NULL marker
    # MaskedColumn with no masked elements. See the MaskedColumnInfo class
    # _represent_as_dict() method for info about how a column with no masked
    # elements is tested.
t['d'] = [1, 2, 3]
t.write(filename, serialize_method='data_mask')
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_round_trip_user_defined_unit(table_cls, tmpdir):
"""Ensure that we can read-back enabled user-defined units."""
# Test adapted from #8897, where it was noted that this works
# but was not tested.
filename = str(tmpdir.join('test.ecsv'))
unit = u.def_unit('bandpass_sol_lum')
t = table_cls()
t['l'] = np.arange(5) * unit
t.write(filename)
# without the unit enabled, get UnrecognizedUnit
if table_cls is QTable:
ctx = pytest.warns(u.UnitsWarning, match=r"'bandpass_sol_lum' did not parse .*")
else:
ctx = nullcontext()
# Note: The read might also generate ResourceWarning, in addition to UnitsWarning
with ctx:
t2 = table_cls.read(filename)
assert isinstance(t2['l'].unit, u.UnrecognizedUnit)
assert str(t2['l'].unit) == 'bandpass_sol_lum'
if table_cls is QTable:
assert np.all(t2['l'].value == t['l'].value)
else:
assert np.all(t2['l'] == t['l'])
# But with it enabled, it works.
with u.add_enabled_units(unit):
t3 = table_cls.read(filename)
assert t3['l'].unit is unit
assert np.all(t3['l'] == t['l'])
    # Just to be sure, also try writing with the unit enabled.
filename2 = str(tmpdir.join('test2.ecsv'))
t3.write(filename2)
    t4 = table_cls.read(filename2)
assert t4['l'].unit is unit
assert np.all(t4['l'] == t['l'])
def test_read_masked_bool():
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: col0, datatype: bool}
# schema: astropy-2.0
col0
1
0
True
""
False
"""
dat = ascii.read(txt, format='ecsv')
col = dat['col0']
assert isinstance(col, MaskedColumn)
assert np.all(col.mask == [False, False, False, True, False])
assert np.all(col == [True, False, True, False, False])
@pytest.mark.parametrize('serialize_method', ['null_value', 'data_mask'])
@pytest.mark.parametrize('dtype', [np.int64, np.float64, bool, str])
@pytest.mark.parametrize('delimiter', [',', ' '])
def test_roundtrip_multidim_masked_array(serialize_method, dtype, delimiter):
# TODO also test empty string with null value
t = Table()
col = MaskedColumn(np.arange(12).reshape(2, 3, 2), dtype=dtype)
if dtype is str:
        # Converting int64 to str gives an overly wide dtype (U21), so force U2.
col = col.astype('U2')
col.mask[0, 0, 0] = True
col.mask[1, 1, 1] = True
t['a'] = col
t['b'] = ['x', 'y'] # Add another column for kicks
out = StringIO()
t.write(out, format='ascii.ecsv', serialize_method=serialize_method)
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
if hasattr(t[name], 'mask'):
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize('subtype', ['some-user-type', 'complex'])
def test_multidim_unknown_subtype(subtype):
"""Test an ECSV file with a string type but unknown subtype"""
txt = f"""\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: {subtype}
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.warns(InvalidEcsvDatatypeWarning,
match=rf"unexpected subtype '{subtype}' set for column 'a'"):
t = ascii.read(txt, format='ecsv')
assert t['a'].dtype.kind == 'U'
assert t['a'][0] == '[1,2]'
def test_multidim_bad_shape():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: int64[3]
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.raises(ValueError, match="column 'a' failed to convert: shape mismatch"):
Table.read(txt, format='ascii.ecsv')
def test_write_not_json_serializable():
t = Table()
t['a'] = np.array([set([1, 2]), 1], dtype=object)
match = "could not convert column 'a' to string: Object of type set is not JSON serializable"
out = StringIO()
with pytest.raises(TypeError, match=match):
t.write(out, format='ascii.ecsv')
def test_read_not_json_serializable():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: string, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: column value is not valid JSON"
with pytest.raises(ValueError, match=match):
Table.read(txt, format='ascii.ecsv')
def test_read_bad_datatype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: object}
# schema: astropy-2.0
a
fail
[3,4]"""
with pytest.warns(InvalidEcsvDatatypeWarning,
match="unexpected datatype 'object' of column 'a' is not in allowed"):
t = Table.read(txt, format='ascii.ecsv')
assert t['a'][0] == "fail"
assert type(t['a'][1]) is str
    assert t['a'].dtype == np.dtype("O")
@pytest.mark.skipif(NUMPY_LT_1_19_1,
reason="numpy cannot parse 'complex' as string until 1.19+")
def test_read_complex():
"""Test an ECSV v1.0 file with a complex column"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: complex}
# schema: astropy-2.0
a
1+1j
2+2j"""
with pytest.warns(InvalidEcsvDatatypeWarning,
match="unexpected datatype 'complex' of column 'a' is not in allowed"):
t = Table.read(txt, format='ascii.ecsv')
assert t['a'].dtype.type is np.complex128
def test_read_str():
"""Test an ECSV file with a 'str' instead of 'string' datatype """
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: str}
# schema: astropy-2.0
a
sometext
S""" # also testing single character text
with pytest.warns(InvalidEcsvDatatypeWarning,
match="unexpected datatype 'str' of column 'a' is not in allowed"):
t = Table.read(txt, format='ascii.ecsv')
assert isinstance(t['a'][1], str)
assert isinstance(t['a'][0], np.str_)
def test_read_bad_datatype_for_object_subtype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: int64, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: datatype of column 'a' must be \"string\""
with pytest.raises(ValueError, match=match):
Table.read(txt, format='ascii.ecsv')
def test_full_repr_roundtrip():
"""Test round-trip of float values to full precision even with format
specified"""
t = Table()
t['a'] = np.array([np.pi, 1/7], dtype=np.float64)
t['a'].info.format = '.2f'
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert np.all(t['a'] == t2['a'])
assert t2['a'].info.format == '.2f'
#############################################################################
# Define a number of specialized columns for testing and the expected values
# of `datatype` for each column.
#############################################################################
# First here is some helper code used to make the expected outputs code.
def _get_ecsv_header_dict(text):
lines = [line.strip() for line in text.splitlines()]
lines = [line[2:] for line in lines if line.startswith('#')]
    lines = lines[2:]  # Drop the '%ECSV 1.0' and '---' header lines
out = yaml.safe_load('\n'.join(lines))
return out
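# For SIMPLE_LINES above this helper returns (roughly):
# {'datatype': [{'name': 'a', 'datatype': 'int64'}, ...], 'schema': 'astropy-2.0'}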
def _make_expected_values(cols):
from pprint import pformat
for name, col in cols.items():
t = Table()
t[name] = col
out = StringIO()
t.write(out, format='ascii.ecsv')
hdr = _get_ecsv_header_dict(out.getvalue())
fmt_hdr = pformat(hdr['datatype'])
print(f'exps[{name!r}] =', fmt_hdr[:1])
print(fmt_hdr[1:])
print()
# Expected values of `datatype` for each column
exps = {}
cols = {}
# Run of the mill scalar for completeness
cols['scalar'] = np.array([1, 2], dtype=np.int16)
exps['scalar'] = [
{'datatype': 'int16', 'name': 'scalar'}]
# Array of lists that works as a 2-d variable array. This is just treated
# as an object.
cols['2-d variable array lists'] = c = np.empty(shape=(2,), dtype=object)
c[0] = [[1, 2], ["a", 4]]
c[1] = [[1, 2, 3], [4, 5.25, 6]]
exps['2-d variable array lists'] = [
{'datatype': 'string',
'name': '2-d variable array lists',
'subtype': 'json'}]
# Array of numpy arrays that is a 2-d variable array
cols['2-d variable array numpy'] = c = np.empty(shape=(2,), dtype=object)
c[0] = np.array([[1, 2], [3, 4]], dtype=np.float32)
c[1] = np.array([[1, 2, 3], [4, 5.5, 6]], dtype=np.float32)
exps['2-d variable array numpy'] = [
{'datatype': 'string',
'name': '2-d variable array numpy',
'subtype': 'float32[2,null]'}]
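# In subtype specs like 'float32[2,null]' above, 'null' marks a dimension of
# variable length.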
cols['1-d variable array lists'] = np.array([[1, 2], [3, 4, 5]], dtype=object)
exps['1-d variable array lists'] = [
{'datatype': 'string',
'name': '1-d variable array lists',
'subtype': 'json'}]
# Variable-length array
cols['1-d variable array numpy'] = np.array(
[np.array([1, 2], dtype=np.uint8),
np.array([3, 4, 5], dtype=np.uint8)], dtype=object)
exps['1-d variable array numpy'] = [
{'datatype': 'string',
'name': '1-d variable array numpy',
'subtype': 'uint8[null]'}]
cols['1-d variable array numpy str'] = np.array(
[np.array(['a', 'b']),
np.array(['c', 'd', 'e'])], dtype=object)
exps['1-d variable array numpy str'] = [
{'datatype': 'string',
'name': '1-d variable array numpy str',
'subtype': 'string[null]'}]
cols['1-d variable array numpy bool'] = np.array(
[np.array([True, False]),
np.array([True, False, True])], dtype=object)
exps['1-d variable array numpy bool'] = [
{'datatype': 'string',
'name': '1-d variable array numpy bool',
'subtype': 'bool[null]'}]
cols['1-d regular array'] = np.array([[1, 2], [3, 4]], dtype=np.int8)
exps['1-d regular array'] = [
{'datatype': 'string',
'name': '1-d regular array',
'subtype': 'int8[2]'}]
cols['2-d regular array'] = np.arange(8, dtype=np.float16).reshape(2, 2, 2)
exps['2-d regular array'] = [
{'datatype': 'string',
'name': '2-d regular array',
'subtype': 'float16[2,2]'}]
cols['scalar object'] = np.array([{'a': 1}, {'b':2}], dtype=object)
exps['scalar object'] = [
{'datatype': 'string', 'name': 'scalar object', 'subtype': 'json'}]
cols['1-d object'] = np.array(
[[{'a': 1}, {'b':2}],
[{'a': 1}, {'b':2}]], dtype=object)
exps['1-d object'] = [
{'datatype': 'string',
'name': '1-d object',
'subtype': 'json[2]'}]
@pytest.mark.parametrize('name,col,exp',
list(zip(cols, cols.values(), exps.values())))
def test_specialized_columns(name, col, exp):
"""Test variable length lists, multidim columns, object columns.
"""
t = Table()
t[name] = col
out = StringIO()
t.write(out, format='ascii.ecsv')
hdr = _get_ecsv_header_dict(out.getvalue())
assert hdr['datatype'] == exp
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
assert np.all(val1 == val2)
def test_full_subtypes():
"""Read ECSV file created by M. Taylor that includes scalar, fixed array,
variable array for all datatypes. This file has missing values for all
    columns, both as per-value nulls and as blank entries for the entire
    column value.
Note: original file was modified to include blank values in f_float and
f_double columns.
"""
t = Table.read(os.path.join(TEST_DIR, 'data', 'subtypes.ecsv'))
colnames = ('i_index,'
's_byte,s_short,s_int,s_long,s_float,s_double,s_string,s_boolean,'
'f_byte,f_short,f_int,f_long,f_float,f_double,f_string,f_boolean,'
'v_byte,v_short,v_int,v_long,v_float,v_double,v_string,v_boolean,'
'm_int,m_double').split(',')
assert t.colnames == colnames
type_map = {'byte': 'int8',
'short': 'int16',
'int': 'int32',
'long': 'int64',
'float': 'float32',
'double': 'float64',
'string': 'str',
'boolean': 'bool'}
for col in t.itercols():
info = col.info
if info.name == 'i_index':
continue
assert isinstance(col, MaskedColumn)
type_name = info.name[2:] # short, int, etc
subtype = info.name[:1]
if subtype == 's': # Scalar
assert col.shape == (16,)
if subtype == 'f': # Fixed array
assert col.shape == (16, 3)
if subtype == 'v': # Variable array
assert col.shape == (16,)
assert info.dtype.name == 'object'
for val in col:
assert isinstance(val, np.ndarray)
assert val.dtype.name.startswith(type_map[type_name])
assert len(val) in [0, 1, 2, 3]
else:
assert info.dtype.name.startswith(type_map[type_name])
def test_masked_empty_subtypes():
"""Test blank field in subtypes. Similar to previous test but with explicit
checks of values"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: o, datatype: string, subtype: json}
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
o f v
null [0,1] [1]
"" "" ""
[1,2] [2,3] [2,3]
"""
t = Table.read(txt, format='ascii.ecsv')
assert np.all(t['o'] == np.array([None, -1, [1, 2]], dtype=object))
assert np.all(t['o'].mask == [False, True, False])
exp = np.ma.array([[0, 1], [-1, -1], [2, 3]], mask=[[0, 0], [1, 1], [0, 0]])
assert np.all(t['f'] == exp)
assert np.all(t['f'].mask == exp.mask)
assert np.all(t['v'][0] == [1])
assert np.all(t['v'][2] == [2, 3])
assert np.all(t['v'].mask == [False, True, False])
def test_masked_vals_in_array_subtypes():
"""Test null values in fixed and variable array subtypes."""
t = Table()
t['f'] = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]], dtype=np.int64)
t['v'] = np.empty(2, dtype=object)
t['v'][0] = np.ma.array([1, 2], mask=[0, 1], dtype=np.int64)
t['v'][1] = np.ma.array([3, 4, 5], mask=[1, 0, 0], dtype=np.int64)
out = StringIO()
t.write(out, format='ascii.ecsv')
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
f v
[1,null] [1,null]
[null,4] [null,4,5]
"""
hdr = _get_ecsv_header_dict(out.getvalue())
hdr_exp = _get_ecsv_header_dict(txt)
assert hdr == hdr_exp
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
assert type(t2[name]) is type(t[name])
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
if isinstance(val1, np.ma.MaskedArray):
assert np.all(val1.mask == val2.mask)
assert np.all(val1 == val2)
def test_guess_ecsv_with_one_column():
"""Except for ECSV, guessing always requires at least 2 columns"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: col, datatype: string, description: hello}
# schema: astropy-2.0
col
1
2
"""
t = ascii.read(txt)
assert t['col'].dtype.kind == 'U' # would be int with basic format
assert t['col'].description == 'hello'
|
19d16430b791324b5426aee75d4409c61b2705a1d6accd42bd950025adc4bcdc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
# This file connects ASDF to the astropy.table.Table class
import warnings
from astropy.io import registry as io_registry
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
from astropy.table import Table
from astropy.utils.compat import optional_deps
def read_table(filename, data_key=None, find_table=None, **kwargs):
"""
Read a `~astropy.table.Table` object from an ASDF file
This requires `asdf <https://pypi.org/project/asdf/>`_ to be installed.
By default, this function will look for a Table object with the key of
``data`` in the top-level ASDF tree. The parameters ``data_key`` and
        ``find_table`` can be used to override the default behavior.
This function is registered as the Table reader for ASDF files with the
unified I/O interface.
Parameters
----------
    filename : str or :class:`py.path:local`
Name of the file to be read
data_key : str
Optional top-level key to use for finding the Table in the tree. If not
provided, uses ``data`` by default. Use of this parameter is not
compatible with ``find_table``.
find_table : function
Optional function to be used for locating the Table in the tree. The
function takes a single parameter, which is a dictionary representing
the top of the ASDF tree. The function must return a
`~astropy.table.Table` instance.
Returns
-------
table : `~astropy.table.Table`
`~astropy.table.Table` instance
"""
warnings.warn(create_asdf_deprecation_warning())
try:
import asdf
except ImportError:
raise Exception(
"The asdf module is required to read and write ASDF files")
if data_key and find_table:
raise ValueError("Options 'data_key' and 'find_table' are not compatible")
with asdf.open(filename, **kwargs) as af:
if find_table:
return find_table(af.tree)
else:
return af[data_key or 'data']
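# A minimal usage sketch (hypothetical file name; assumes asdf is installed and
# the reader below is registered):
#
#     from astropy.table import Table
#     t = Table.read('observations.asdf')                   # uses the 'data' key
#     t2 = Table.read('observations.asdf', data_key='obs')  # custom top-level key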
def write_table(table, filename, data_key=None, make_tree=None, **kwargs):
"""
Write a `~astropy.table.Table` object to an ASDF file.
This requires `asdf <https://pypi.org/project/asdf/>`_ to be installed.
By default, this function will write a Table object in the top-level ASDF
tree using the key of ``data``. The parameters ``data_key`` and
``make_tree`` can be used to override the default behavior.
This function is registered as the Table writer for ASDF files with the
unified I/O interface.
Parameters
----------
table : `~astropy.table.Table`
`~astropy.table.Table` instance to be written
filename : str or :class:`py.path:local`
Name of the new ASDF file to be created
data_key : str
Optional top-level key in the ASDF tree to use when writing the Table.
If not provided, uses ``data`` by default. Use of this parameter is not
compatible with ``make_tree``.
make_tree : function
Optional function to be used for creating the ASDF tree. The function
takes a single parameter, which is the `~astropy.table.Table` instance
to be written. The function must return a `dict` representing the ASDF
tree to be created.
"""
warnings.warn(create_asdf_deprecation_warning())
try:
import asdf
except ImportError:
raise Exception(
"The asdf module is required to read and write ASDF files")
if data_key and make_tree:
raise ValueError("Options 'data_key' and 'make_tree' are not compatible")
if make_tree:
tree = make_tree(table)
else:
        tree = {data_key or 'data': table}
with asdf.AsdfFile(tree) as af:
af.write_to(filename, **kwargs)
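# Writing follows the same pattern, e.g. (hypothetical names):
#
#     t.write('observations.asdf', data_key='obs')
#     t.write('observations.asdf', make_tree=lambda tbl: {'obs': tbl})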
def asdf_identify(origin, filepath, fileobj, *args, **kwargs):
try:
import asdf
except ImportError:
return False
return filepath is not None and filepath.endswith('.asdf')
if not optional_deps.HAS_ASDF_ASTROPY:
io_registry.register_reader('asdf', Table, read_table)
io_registry.register_writer('asdf', Table, write_table)
io_registry.register_identifier('asdf', Table, asdf_identify)
|
d21492b23e3b12c70989f930504d70cc140a7868bc564b4369e399aeaf2b5c53 | from astropy.utils.exceptions import AstropyDeprecationWarning
def create_asdf_deprecation_warning():
return AstropyDeprecationWarning(
"ASDF functionality for astropy is being moved out of the astropy"
" package to the new asdf-astropy package. Please use this package"
" instead. astropy.io.misc.asdf is deprecated since astropy 5.1 and will be removed in a future release."
)
|
c75a871ef2a5c4ebd86649f6d47b3d834e6e7fffdf7007740d2b6fd42ddc2096 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import warnings
from asdf.types import CustomType, ExtensionTypeMeta
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
__all__ = ['AstropyType', 'AstropyAsdfType']
# Names of AstropyType or AstropyAsdfType subclasses that are base classes
# and aren't used directly for serialization.
_TYPE_BASE_CLASS_NAMES = {'PolynomialTypeBase'}
_astropy_types = set()
_astropy_asdf_types = set()
class AstropyTypeMeta(ExtensionTypeMeta):
"""
Keeps track of `AstropyType` subclasses that are created so that they can
be stored automatically by astropy extensions for ASDF.
"""
def __new__(mcls, name, bases, attrs):
cls = super().__new__(mcls, name, bases, attrs)
# Classes using this metaclass are automatically added to the list of
# astropy extensions
if cls.__name__ not in _TYPE_BASE_CLASS_NAMES:
if cls.organization == 'astropy.org' and cls.standard == 'astropy':
_astropy_types.add(cls)
elif cls.organization == 'stsci.edu' and cls.standard == 'asdf':
_astropy_asdf_types.add(cls)
return cls
class AstropyType(CustomType, metaclass=AstropyTypeMeta):
"""
This class represents types that have schemas and tags that are defined by
Astropy.
IMPORTANT: This parent class should **not** be used for types that have
schemas that are defined by the ASDF standard.
"""
organization = 'astropy.org'
standard = 'astropy'
@classmethod
def to_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().to_tree_tagged(node, ctx)
@classmethod
def from_tree_tagged(cls, tree, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().from_tree_tagged(tree, ctx)
class AstropyAsdfType(CustomType, metaclass=AstropyTypeMeta):
"""
This class represents types that have schemas that are defined in the ASDF
standard, but have tags that are implemented within astropy.
IMPORTANT: This parent class should **not** be used for types that also
have schemas that are defined by astropy.
"""
organization = 'stsci.edu'
standard = 'asdf'
@classmethod
def to_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().to_tree_tagged(node, ctx)
@classmethod
def from_tree_tagged(cls, tree, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().from_tree_tagged(tree, ctx)
|
1d101f79126a220bd419c64604b07a9c421e6089a1b5b5f50302eaccb9c746c0 | from pathlib import Path
from astropy.utils.introspection import minversion
def get_asdf_tests():
asdf_dir = Path(__file__).parent.resolve()
paths = Path(asdf_dir).rglob("test_*.py")
return [str(p.relative_to(asdf_dir)) for p in paths]
collect_ignore = get_asdf_tests()
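# The legacy tag tests only work with asdf < 3.0 (asdf 3 removed the
# asdf.types.CustomType extension API they rely on), so they are collected
# only when an older asdf is installed.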
try:
import asdf
except ImportError:
pass
else:
if not minversion(asdf, "3.0.0"):
collect_ignore = []
|
8508279ce8196001afe163c265858720bdea02c1701334d120c12d2efad21e3f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.coordinates.tests.helper import skycoord_equal as _skycoord_equal
from astropy.utils.decorators import deprecated
__all__ = ['skycoord_equal']
@deprecated("5.1", alternative="astropy.coordinates.tests.helper.skycoord_equal")
def skycoord_equal(sc1, sc2):
return _skycoord_equal(sc1, sc2)
|
39caa0483e1e12e176284e7ef93fbbd749c72240ec0c52a6738d364261a57c94 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from astropy import table
from astropy.io import fits
from astropy.io.misc.asdf.types import AstropyType, AstropyAsdfType
class FitsType:
name = 'fits/fits'
types = ['astropy.io.fits.HDUList']
requires = ['astropy']
@classmethod
def from_tree(cls, data, ctx):
hdus = []
first = True
for hdu_entry in data:
header = fits.Header([fits.Card(*x) for x in hdu_entry['header']])
data = hdu_entry.get('data')
if data is not None:
try:
data = data.__array__()
except ValueError:
data = None
if first:
hdu = fits.PrimaryHDU(data=data, header=header)
first = False
elif data.dtype.names is not None:
hdu = fits.BinTableHDU(data=data, header=header)
else:
hdu = fits.ImageHDU(data=data, header=header)
hdus.append(hdu)
hdulist = fits.HDUList(hdus)
return hdulist
@classmethod
def to_tree(cls, hdulist, ctx):
units = []
for hdu in hdulist:
header_list = []
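            # Serialize each card as a list, dropping trailing empty fields:
            # [keyword, value, comment] -> [keyword, value] -> [keyword] -> []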
for card in hdu.header.cards:
if card.comment:
new_card = [card.keyword, card.value, card.comment]
else:
if card.value:
new_card = [card.keyword, card.value]
else:
if card.keyword:
new_card = [card.keyword]
else:
new_card = []
header_list.append(new_card)
hdu_dict = {}
hdu_dict['header'] = header_list
if hdu.data is not None:
if hdu.data.dtype.names is not None:
data = table.Table(hdu.data)
else:
data = hdu.data
hdu_dict['data'] = data
units.append(hdu_dict)
return units
@classmethod
def reserve_blocks(cls, data, ctx):
for hdu in data:
if hdu.data is not None:
yield ctx.blocks.find_or_create_block_for_array(hdu.data, ctx)
@classmethod
def assert_equal(cls, old, new):
for hdua, hdub in zip(old, new):
assert_array_equal(hdua.data, hdub.data)
for carda, cardb in zip(hdua.header.cards, hdub.header.cards):
assert tuple(carda) == tuple(cardb)
class AstropyFitsType(FitsType, AstropyType):
"""
This class implements ASDF serialization/deserialization that corresponds
to the FITS schema defined by Astropy. It will be used by default when
writing new HDUs to ASDF files.
"""
class AsdfFitsType(FitsType, AstropyAsdfType):
"""
This class implements ASDF serialization/deserialization that corresponds
to the FITS schema defined by the ASDF Standard. It will not be used by
default, except when reading files that use the ASDF Standard definition
rather than the one defined in Astropy. It will primarily be used for
backwards compatibility for reading older files. In the unlikely case that
another ASDF implementation uses the FITS schema from the ASDF Standard,
this tag could also be used to read a file it generated.
"""
|
3d8a23982cc506efaeaf6b72b7e47b15dea65c4ed222d21529413d369d1231b4 | from astropy.modeling.models import Spline1D
from astropy.io.misc.asdf.tags.transform.basic import TransformType
__all__ = ['SplineType']
class SplineType(TransformType):
name = 'transform/spline1d'
version = '1.0.0'
types = ['astropy.modeling.spline.Spline1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return Spline1D(knots=node['knots'],
coeffs=node['coefficients'],
degree=node['degree'])
@classmethod
def to_tree_transform(cls, model, ctx):
return {
"knots": model.t,
"coefficients": model.c,
"degree": model.degree
}
|
1463919b5128564bbc9bb91588efd173824db293d3e873ac5d5591f990499a7a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.modeling.math_functions import __all__ as math_classes
from astropy.modeling.math_functions import *
from astropy.modeling import math_functions
from astropy.io.misc.asdf.tags.transform.basic import TransformType
__all__ = ['NpUfuncType']
class NpUfuncType(TransformType):
name = "transform/math_functions"
version = '1.0.0'
    types = ['astropy.modeling.math_functions.' + kl for kl in math_classes]
@classmethod
def from_tree_transform(cls, node, ctx):
klass_name = math_functions._make_class_name(node['func_name'])
klass = getattr(math_functions, klass_name)
return klass()
@classmethod
def to_tree_transform(cls, model, ctx):
return {'func_name': model.func.__name__}
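# For instance, a model wrapping np.add would serialize as
# {'func_name': 'add'} and be rebuilt from the class that
# _make_class_name('add') resolves to (hypothetically 'AddUfunc').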
|
eb7a0f9124f9e29e96e7bfea06f321ada27672c33d913bf19876968c4b536a36 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from astropy.modeling import functional_models
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['AiryDisk2DType', 'Box1DType', 'Box2DType',
'Disk2DType', 'Ellipse2DType', 'Exponential1DType',
'Gaussian1DType', 'Gaussian2DType', 'KingProjectedAnalytic1DType',
'Logarithmic1DType', 'Lorentz1DType', 'Moffat1DType',
'Moffat2DType', 'Planar2D', 'RedshiftScaleFactorType',
'RickerWavelet1DType', 'RickerWavelet2DType', 'Ring2DType',
'Sersic1DType', 'Sersic2DType',
'Sine1DType', 'Cosine1DType', 'Tangent1DType',
'ArcSine1DType', 'ArcCosine1DType', 'ArcTangent1DType',
'Trapezoid1DType', 'TrapezoidDisk2DType', 'Voigt1DType']
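# Every tag class below follows the same pattern: from_tree_transform rebuilds
# the model from its parameter dict, to_tree_transform dumps the parameters via
# _parameter_to_value (plain values, or Quantity when a unit is attached), and
# assert_equal supports the round-trip tests.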
class AiryDisk2DType(TransformType):
name = 'transform/airy_disk2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.AiryDisk2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.AiryDisk2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
radius=node['radius'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'radius': _parameter_to_value(model.radius)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.AiryDisk2D) and
isinstance(b, functional_models.AiryDisk2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.radius, b.radius)
class Box1DType(TransformType):
name = 'transform/box1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Box1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Box1D(amplitude=node['amplitude'],
x_0=node['x_0'],
width=node['width'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'width': _parameter_to_value(model.width)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Box1D) and
isinstance(b, functional_models.Box1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.width, b.width)
class Box2DType(TransformType):
name = 'transform/box2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Box2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Box2D(amplitude=node['amplitude'],
x_0=node['x_0'],
x_width=node['x_width'],
y_0=node['y_0'],
y_width=node['y_width'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'x_width': _parameter_to_value(model.x_width),
'y_0': _parameter_to_value(model.y_0),
'y_width': _parameter_to_value(model.y_width)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Box2D) and
isinstance(b, functional_models.Box2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.x_width, b.x_width)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.y_width, b.y_width)
class Disk2DType(TransformType):
name = 'transform/disk2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Disk2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Disk2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
R_0=node['R_0'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'R_0': _parameter_to_value(model.R_0)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Disk2D) and
isinstance(b, functional_models.Disk2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.R_0, b.R_0)
class Ellipse2DType(TransformType):
name = 'transform/ellipse2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Ellipse2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Ellipse2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
a=node['a'],
b=node['b'],
theta=node['theta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'a': _parameter_to_value(model.a),
'b': _parameter_to_value(model.b),
'theta': _parameter_to_value(model.theta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Ellipse2D) and
isinstance(b, functional_models.Ellipse2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.a, b.a)
assert_array_equal(a.b, b.b)
assert_array_equal(a.theta, b.theta)
class Exponential1DType(TransformType):
name = 'transform/exponential1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Exponential1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Exponential1D(amplitude=node['amplitude'],
tau=node['tau'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'tau': _parameter_to_value(model.tau)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Exponential1D) and
isinstance(b, functional_models.Exponential1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.tau, b.tau)
class Gaussian1DType(TransformType):
name = 'transform/gaussian1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Gaussian1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Gaussian1D(amplitude=node['amplitude'],
mean=node['mean'],
stddev=node['stddev'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'mean': _parameter_to_value(model.mean),
'stddev': _parameter_to_value(model.stddev)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Gaussian1D) and
isinstance(b, functional_models.Gaussian1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.mean, b.mean)
assert_array_equal(a.stddev, b.stddev)
class Gaussian2DType(TransformType):
name = 'transform/gaussian2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Gaussian2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Gaussian2D(amplitude=node['amplitude'],
x_mean=node['x_mean'],
y_mean=node['y_mean'],
x_stddev=node['x_stddev'],
y_stddev=node['y_stddev'],
theta=node['theta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_mean': _parameter_to_value(model.x_mean),
'y_mean': _parameter_to_value(model.y_mean),
'x_stddev': _parameter_to_value(model.x_stddev),
'y_stddev': _parameter_to_value(model.y_stddev),
'theta': _parameter_to_value(model.theta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Gaussian2D) and
isinstance(b, functional_models.Gaussian2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_mean, b.x_mean)
assert_array_equal(a.y_mean, b.y_mean)
assert_array_equal(a.x_stddev, b.x_stddev)
assert_array_equal(a.y_stddev, b.y_stddev)
assert_array_equal(a.theta, b.theta)
class KingProjectedAnalytic1DType(TransformType):
name = 'transform/king_projected_analytic1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.KingProjectedAnalytic1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.KingProjectedAnalytic1D(
amplitude=node['amplitude'],
r_core=node['r_core'],
r_tide=node['r_tide'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'r_core': _parameter_to_value(model.r_core),
'r_tide': _parameter_to_value(model.r_tide)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.KingProjectedAnalytic1D) and
isinstance(b, functional_models.KingProjectedAnalytic1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.r_core, b.r_core)
assert_array_equal(a.r_tide, b.r_tide)
class Logarithmic1DType(TransformType):
name = 'transform/logarithmic1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Logarithmic1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Logarithmic1D(amplitude=node['amplitude'],
tau=node['tau'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'tau': _parameter_to_value(model.tau)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Logarithmic1D) and
isinstance(b, functional_models.Logarithmic1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.tau, b.tau)
class Lorentz1DType(TransformType):
name = 'transform/lorentz1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Lorentz1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Lorentz1D(amplitude=node['amplitude'],
x_0=node['x_0'],
fwhm=node['fwhm'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'fwhm': _parameter_to_value(model.fwhm)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Lorentz1D) and
isinstance(b, functional_models.Lorentz1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.fwhm, b.fwhm)
class Moffat1DType(TransformType):
name = 'transform/moffat1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Moffat1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Moffat1D(amplitude=node['amplitude'],
x_0=node['x_0'],
gamma=node['gamma'],
alpha=node['alpha'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'gamma': _parameter_to_value(model.gamma),
'alpha': _parameter_to_value(model.alpha)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Moffat1D) and
isinstance(b, functional_models.Moffat1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.gamma, b.gamma)
assert_array_equal(a.alpha, b.alpha)
class Moffat2DType(TransformType):
name = 'transform/moffat2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Moffat2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Moffat2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
gamma=node['gamma'],
alpha=node['alpha'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'gamma': _parameter_to_value(model.gamma),
'alpha': _parameter_to_value(model.alpha)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Moffat2D) and
isinstance(b, functional_models.Moffat2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.gamma, b.gamma)
assert_array_equal(a.alpha, b.alpha)
class Planar2D(TransformType):
name = 'transform/planar2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Planar2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Planar2D(slope_x=node['slope_x'],
slope_y=node['slope_y'],
intercept=node['intercept'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'slope_x': _parameter_to_value(model.slope_x),
'slope_y': _parameter_to_value(model.slope_y),
'intercept': _parameter_to_value(model.intercept)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Planar2D) and
isinstance(b, functional_models.Planar2D))
assert_array_equal(a.slope_x, b.slope_x)
assert_array_equal(a.slope_y, b.slope_y)
assert_array_equal(a.intercept, b.intercept)
class RedshiftScaleFactorType(TransformType):
name = 'transform/redshift_scale_factor'
version = '1.0.0'
types = ['astropy.modeling.functional_models.RedshiftScaleFactor']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.RedshiftScaleFactor(z=node['z'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'z': _parameter_to_value(model.z)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.RedshiftScaleFactor) and
isinstance(b, functional_models.RedshiftScaleFactor))
assert_array_equal(a.z, b.z)
class RickerWavelet1DType(TransformType):
name = 'transform/ricker_wavelet1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.RickerWavelet1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.RickerWavelet1D(amplitude=node['amplitude'],
x_0=node['x_0'],
sigma=node['sigma'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'sigma': _parameter_to_value(model.sigma)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.RickerWavelet1D) and
isinstance(b, functional_models.RickerWavelet1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.sigma, b.sigma)
class RickerWavelet2DType(TransformType):
name = 'transform/ricker_wavelet2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.RickerWavelet2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.RickerWavelet2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
sigma=node['sigma'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'sigma': _parameter_to_value(model.sigma)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.RickerWavelet2D) and
isinstance(b, functional_models.RickerWavelet2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.sigma, b.sigma)
class Ring2DType(TransformType):
name = 'transform/ring2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Ring2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Ring2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
r_in=node['r_in'],
width=node['width'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'r_in': _parameter_to_value(model.r_in),
'width': _parameter_to_value(model.width)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Ring2D) and
isinstance(b, functional_models.Ring2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.r_in, b.r_in)
assert_array_equal(a.width, b.width)
class Sersic1DType(TransformType):
name = 'transform/sersic1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Sersic1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Sersic1D(amplitude=node['amplitude'],
r_eff=node['r_eff'],
n=node['n'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'r_eff': _parameter_to_value(model.r_eff),
'n': _parameter_to_value(model.n)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Sersic1D) and
isinstance(b, functional_models.Sersic1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.r_eff, b.r_eff)
assert_array_equal(a.n, b.n)
class Sersic2DType(TransformType):
name = 'transform/sersic2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Sersic2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Sersic2D(amplitude=node['amplitude'],
r_eff=node['r_eff'],
n=node['n'],
x_0=node['x_0'],
y_0=node['y_0'],
ellip=node['ellip'],
theta=node['theta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'r_eff': _parameter_to_value(model.r_eff),
'n': _parameter_to_value(model.n),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'ellip': _parameter_to_value(model.ellip),
'theta': _parameter_to_value(model.theta)
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Sersic2D) and
isinstance(b, functional_models.Sersic2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.r_eff, b.r_eff)
assert_array_equal(a.n, b.n)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.ellip, b.ellip)
assert_array_equal(a.theta, b.theta)
class Trigonometric1DType(TransformType):
_model = None
@classmethod
def from_tree_transform(cls, node, ctx):
return cls._model(amplitude=node['amplitude'],
frequency=node['frequency'],
phase=node['phase'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'frequency': _parameter_to_value(model.frequency),
'phase': _parameter_to_value(model.phase)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, cls._model) and
isinstance(b, cls._model))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.frequency, b.frequency)
assert_array_equal(a.phase, b.phase)
class Sine1DType(Trigonometric1DType):
name = 'transform/sine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Sine1D']
_model = functional_models.Sine1D
class Cosine1DType(Trigonometric1DType):
name = 'transform/cosine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Cosine1D']
_model = functional_models.Cosine1D
class Tangent1DType(Trigonometric1DType):
name = 'transform/tangent1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Tangent1D']
_model = functional_models.Tangent1D
class ArcSine1DType(Trigonometric1DType):
name = 'transform/arcsine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.ArcSine1D']
_model = functional_models.ArcSine1D
class ArcCosine1DType(Trigonometric1DType):
name = 'transform/arccosine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.ArcCosine1D']
_model = functional_models.ArcCosine1D
class ArcTangent1DType(Trigonometric1DType):
name = 'transform/arctangent1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.ArcTangent1D']
_model = functional_models.ArcTangent1D
class Trapezoid1DType(TransformType):
name = 'transform/trapezoid1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Trapezoid1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Trapezoid1D(amplitude=node['amplitude'],
x_0=node['x_0'],
width=node['width'],
slope=node['slope'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'width': _parameter_to_value(model.width),
'slope': _parameter_to_value(model.slope)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Trapezoid1D) and
isinstance(b, functional_models.Trapezoid1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.width, b.width)
assert_array_equal(a.slope, b.slope)
class TrapezoidDisk2DType(TransformType):
name = 'transform/trapezoid_disk2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.TrapezoidDisk2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.TrapezoidDisk2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
R_0=node['R_0'],
slope=node['slope'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'R_0': _parameter_to_value(model.R_0),
'slope': _parameter_to_value(model.slope)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.TrapezoidDisk2D) and
isinstance(b, functional_models.TrapezoidDisk2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.R_0, b.R_0)
assert_array_equal(a.slope, b.slope)
class Voigt1DType(TransformType):
name = 'transform/voigt1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Voigt1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Voigt1D(x_0=node['x_0'],
amplitude_L=node['amplitude_L'],
fwhm_L=node['fwhm_L'],
fwhm_G=node['fwhm_G'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'x_0': _parameter_to_value(model.x_0),
'amplitude_L': _parameter_to_value(model.amplitude_L),
'fwhm_L': _parameter_to_value(model.fwhm_L),
'fwhm_G': _parameter_to_value(model.fwhm_G)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Voigt1D) and
isinstance(b, functional_models.Voigt1D))
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.amplitude_L, b.amplitude_L)
assert_array_equal(a.fwhm_L, b.fwhm_L)
assert_array_equal(a.fwhm_G, b.fwhm_G)
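# --- Hedged usage sketch (added for illustration; not from the original module) ---
# Minimal round trip through one of the tag classes above, assuming the
# classmethods can be called directly and that `ctx` is unused (none of the
# implementations in this file touch it). The same pattern applies to every
# parameter-dict tag class here.
if __name__ == '__main__':
    model = functional_models.Voigt1D(x_0=1.0, amplitude_L=2.0,
                                      fwhm_L=0.5, fwhm_G=0.9)
    node = Voigt1DType.to_tree_transform(model, ctx=None)
    restored = Voigt1DType.from_tree_transform(node, ctx=None)
    # Parameters survive the tree round trip.
    assert restored.x_0.value == model.x_0.value
    assert restored.fwhm_G.value == model.fwhm_G.value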
|
249b400dd91b33fdb7d262ada437fa44da4e9eda8e78a7a5784f137dcaefb83e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from astropy import modeling
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['AffineType', 'Rotate2DType', 'Rotate3DType',
'RotationSequenceType']
class AffineType(TransformType):
name = "transform/affine"
version = '1.3.0'
types = ['astropy.modeling.projections.AffineTransformation2D']
@classmethod
def from_tree_transform(cls, node, ctx):
matrix = node['matrix']
translation = node['translation']
if matrix.shape != (2, 2):
raise NotImplementedError(
"asdf currently only supports 2x2 (2D) rotation transformation "
"matrices")
if translation.shape != (2,):
raise NotImplementedError(
"asdf currently only supports 2D translation transformations.")
return modeling.projections.AffineTransformation2D(
matrix=matrix, translation=translation)
@classmethod
def to_tree_transform(cls, model, ctx):
return {'matrix': _parameter_to_value(model.matrix),
'translation': _parameter_to_value(model.translation)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (a.__class__ == b.__class__)
assert_array_equal(a.matrix, b.matrix)
assert_array_equal(a.translation, b.translation)
class Rotate2DType(TransformType):
name = "transform/rotate2d"
version = '1.3.0'
types = ['astropy.modeling.rotations.Rotation2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return modeling.rotations.Rotation2D(node['angle'])
@classmethod
def to_tree_transform(cls, model, ctx):
return {'angle': _parameter_to_value(model.angle)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.rotations.Rotation2D) and
isinstance(b, modeling.rotations.Rotation2D))
assert_array_equal(a.angle, b.angle)
class Rotate3DType(TransformType):
name = "transform/rotate3d"
version = '1.3.0'
types = ['astropy.modeling.rotations.RotateNative2Celestial',
'astropy.modeling.rotations.RotateCelestial2Native',
'astropy.modeling.rotations.EulerAngleRotation']
@classmethod
def from_tree_transform(cls, node, ctx):
if node['direction'] == 'native2celestial':
return modeling.rotations.RotateNative2Celestial(node["phi"],
node["theta"],
node["psi"])
elif node['direction'] == 'celestial2native':
return modeling.rotations.RotateCelestial2Native(node["phi"],
node["theta"],
node["psi"])
else:
return modeling.rotations.EulerAngleRotation(node["phi"],
node["theta"],
node["psi"],
axes_order=node["direction"])
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, modeling.rotations.RotateNative2Celestial):
try:
node = {"phi": _parameter_to_value(model.lon),
"theta": _parameter_to_value(model.lat),
"psi": _parameter_to_value(model.lon_pole),
"direction": "native2celestial"
}
except AttributeError:
node = {"phi": model.lon,
"theta": model.lat,
"psi": model.lon_pole,
"direction": "native2celestial"
}
elif isinstance(model, modeling.rotations.RotateCelestial2Native):
try:
node = {"phi": _parameter_to_value(model.lon),
"theta": _parameter_to_value(model.lat),
"psi": _parameter_to_value(model.lon_pole),
"direction": "celestial2native"
}
except AttributeError:
node = {"phi": model.lon,
"theta": model.lat,
"psi": model.lon_pole,
"direction": "celestial2native"
}
else:
node = {"phi": _parameter_to_value(model.phi),
"theta": _parameter_to_value(model.theta),
"psi": _parameter_to_value(model.psi),
"direction": model.axes_order
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert a.__class__ == b.__class__
if a.__class__.__name__ == "EulerAngleRotation":
assert_array_equal(a.phi, b.phi)
assert_array_equal(a.psi, b.psi)
assert_array_equal(a.theta, b.theta)
else:
assert_array_equal(a.lon, b.lon)
assert_array_equal(a.lat, b.lat)
assert_array_equal(a.lon_pole, b.lon_pole)
class RotationSequenceType(TransformType):
name = "transform/rotate_sequence_3d"
types = ['astropy.modeling.rotations.RotationSequence3D',
'astropy.modeling.rotations.SphericalRotationSequence']
version = "1.0.0"
@classmethod
def from_tree_transform(cls, node, ctx):
angles = node['angles']
axes_order = node['axes_order']
rotation_type = node['rotation_type']
if rotation_type == 'cartesian':
return modeling.rotations.RotationSequence3D(angles, axes_order=axes_order)
elif rotation_type == 'spherical':
return modeling.rotations.SphericalRotationSequence(angles, axes_order=axes_order)
else:
raise ValueError(f"Unrecognized rotation_type: {rotation_type}")
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'angles': list(model.angles.value)}
node['axes_order'] = model.axes_order
if isinstance(model, modeling.rotations.SphericalRotationSequence):
node['rotation_type'] = "spherical"
elif isinstance(model, modeling.rotations.RotationSequence3D):
node['rotation_type'] = "cartesian"
else:
raise ValueError(f"Cannot serialize model of type {type(model)}")
return node
@classmethod
def assert_equal(cls, a, b):
TransformType.assert_equal(a, b)
assert a.__class__.__name__ == b.__class__.__name__
assert_array_equal(a.angles, b.angles)
assert a.axes_order == b.axes_order
class GenericProjectionType(TransformType):
@classmethod
def from_tree_transform(cls, node, ctx):
args = []
for param_name, default in cls.params:
args.append(node.get(param_name, default))
if node['direction'] == 'pix2sky':
return cls.types[0](*args)
else:
return cls.types[1](*args)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
if isinstance(model, cls.types[0]):
node['direction'] = 'pix2sky'
else:
node['direction'] = 'sky2pix'
for param_name, default in cls.params:
val = getattr(model, param_name).value
if val != default:
node[param_name] = val
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert a.__class__ == b.__class__
_generic_projections = {
'zenithal_perspective': ('ZenithalPerspective', (('mu', 0.0), ('gamma', 0.0)), '1.3.0'),
'gnomonic': ('Gnomonic', (), None),
'stereographic': ('Stereographic', (), None),
'slant_orthographic': ('SlantOrthographic', (('xi', 0.0), ('eta', 0.0)), None),
'zenithal_equidistant': ('ZenithalEquidistant', (), None),
'zenithal_equal_area': ('ZenithalEqualArea', (), None),
'airy': ('Airy', (('theta_b', 90.0),), '1.2.0'),
'cylindrical_perspective': ('CylindricalPerspective', (('mu', 0.0), ('lam', 0.0)), '1.3.0'),
'cylindrical_equal_area': ('CylindricalEqualArea', (('lam', 0.0),), '1.3.0'),
'plate_carree': ('PlateCarree', (), None),
'mercator': ('Mercator', (), None),
'sanson_flamsteed': ('SansonFlamsteed', (), None),
'parabolic': ('Parabolic', (), None),
'molleweide': ('Molleweide', (), None),
'hammer_aitoff': ('HammerAitoff', (), None),
'conic_perspective': ('ConicPerspective', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'conic_equal_area': ('ConicEqualArea', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'conic_equidistant': ('ConicEquidistant', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'conic_orthomorphic': ('ConicOrthomorphic', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'bonne_equal_area': ('BonneEqualArea', (('theta1', 0.0),), '1.3.0'),
'polyconic': ('Polyconic', (), None),
'tangential_spherical_cube': ('TangentialSphericalCube', (), None),
'cobe_quad_spherical_cube': ('COBEQuadSphericalCube', (), None),
'quad_spherical_cube': ('QuadSphericalCube', (), None),
'healpix': ('HEALPix', (('H', 4.0), ('X', 3.0)), None),
'healpix_polar': ('HEALPixPolar', (), None)
}
def make_projection_types():
for tag_name, (name, params, version) in _generic_projections.items():
class_name = f'{name}Type'
types = [f'astropy.modeling.projections.Pix2Sky_{name}',
f'astropy.modeling.projections.Sky2Pix_{name}']
members = {'name': f'transform/{tag_name}',
'types': types,
'params': params}
if version:
members['version'] = version
globals()[class_name] = type(
str(class_name),
(GenericProjectionType,),
members)
__all__.append(class_name)
make_projection_types()
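# --- Hedged usage sketch (added for illustration; not from the original module) ---
# make_projection_types() above synthesizes one GenericProjectionType subclass
# per entry in _generic_projections and appends it to __all__. A quick check of
# the generated Gnomonic tag class (attribute access only; the pix2sky/sky2pix
# classes listed in `types` are assumed to be resolved by the asdf machinery):
if __name__ == '__main__':
    assert 'GnomonicType' in __all__
    gnomonic_cls = globals()['GnomonicType']
    assert gnomonic_cls.name == 'transform/gnomonic'
    assert gnomonic_cls.params == ()  # Gnomonic takes no projection parameters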
|
67e7c6ab05e6364d1de2ee52f363f324765f9ed65105e105b0c21e58c71a71ad | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf.versioning import AsdfVersion
import astropy.units as u
from astropy import modeling
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['ShiftType', 'ScaleType', 'Linear1DType']
class ShiftType(TransformType):
name = "transform/shift"
version = '1.2.0'
types = ['astropy.modeling.models.Shift']
@classmethod
def from_tree_transform(cls, node, ctx):
offset = node['offset']
if not isinstance(offset, u.Quantity) and not np.isscalar(offset):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Shift transform.")
return modeling.models.Shift(offset)
@classmethod
def to_tree_transform(cls, model, ctx):
offset = model.offset
return {'offset': _parameter_to_value(offset)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Shift) and
isinstance(b, modeling.models.Shift))
assert_array_equal(a.offset.value, b.offset.value)
class ScaleType(TransformType):
name = "transform/scale"
version = '1.2.0'
types = ['astropy.modeling.models.Scale']
@classmethod
def from_tree_transform(cls, node, ctx):
factor = node['factor']
if not isinstance(factor, u.Quantity) and not np.isscalar(factor):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Scale transform.")
return modeling.models.Scale(factor)
@classmethod
def to_tree_transform(cls, model, ctx):
factor = model.factor
return {'factor': _parameter_to_value(factor)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Scale) and
isinstance(b, modeling.models.Scale))
assert_array_equal(a.factor, b.factor)
class MultiplyType(TransformType):
name = "transform/multiplyscale"
version = '1.0.0'
types = ['astropy.modeling.models.Multiply']
@classmethod
def from_tree_transform(cls, node, ctx):
factor = node['factor']
return modeling.models.Multiply(factor)
@classmethod
def to_tree_transform(cls, model, ctx):
factor = model.factor
return {'factor': _parameter_to_value(factor)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Multiply) and
isinstance(b, modeling.models.Multiply))
assert_array_equal(a.factor, b.factor)
class PolynomialTypeBase(TransformType):
DOMAIN_WINDOW_MIN_VERSION = AsdfVersion("1.2.0")
name = "transform/polynomial"
types = ['astropy.modeling.models.Polynomial1D',
'astropy.modeling.models.Polynomial2D']
@classmethod
def from_tree_transform(cls, node, ctx):
coefficients = np.asarray(node['coefficients'])
n_dim = coefficients.ndim
if n_dim == 1:
domain = node.get('domain', None)
window = node.get('window', None)
model = modeling.models.Polynomial1D(coefficients.size - 1,
domain=domain, window=window)
model.parameters = coefficients
elif n_dim == 2:
x_domain, y_domain = tuple(node.get('domain', (None, None)))
x_window, y_window = tuple(node.get('window', (None, None)))
shape = coefficients.shape
degree = shape[0] - 1
if shape[0] != shape[1]:
raise TypeError("Coefficients must be an (n+1, n+1) matrix")
coeffs = {}
for i in range(shape[0]):
for j in range(shape[0]):
if i + j < degree + 1:
name = 'c' + str(i) + '_' + str(j)
coeffs[name] = coefficients[i, j]
model = modeling.models.Polynomial2D(degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
**coeffs)
else:
raise NotImplementedError(
"Asdf currently only supports 1D or 2D polynomial transform.")
return model
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, modeling.models.Polynomial1D):
coefficients = np.array(model.parameters)
elif isinstance(model, modeling.models.Polynomial2D):
degree = model.degree
coefficients = np.zeros((degree + 1, degree + 1))
for i in range(degree + 1):
for j in range(degree + 1):
if i + j < degree + 1:
name = 'c' + str(i) + '_' + str(j)
coefficients[i, j] = getattr(model, name).value
node = {'coefficients': coefficients}
typeindex = cls.types.index(model.__class__)
ndim = (typeindex % 2) + 1
if cls.version >= PolynomialTypeBase.DOMAIN_WINDOW_MIN_VERSION:
# Schema versions prior to 1.2 included an unrelated "domain"
# property. We can't serialize the new domain values with those
# versions because they don't validate.
if ndim == 1:
if model.domain is not None:
node['domain'] = model.domain
if model.window is not None:
node['window'] = model.window
else:
                if model.x_domain is not None or model.y_domain is not None:
                    node['domain'] = (model.x_domain, model.y_domain)
                if model.x_window is not None or model.y_window is not None:
                    node['window'] = (model.x_window, model.y_window)
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)) and
isinstance(b, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)))
assert_array_equal(a.parameters, b.parameters)
if cls.version > PolynomialTypeBase.DOMAIN_WINDOW_MIN_VERSION:
# Schema versions prior to 1.2 are known not to serialize
# domain or window.
if isinstance(a, modeling.models.Polynomial1D):
assert a.domain == b.domain
assert a.window == b.window
else:
assert a.x_domain == b.x_domain
assert a.x_window == b.x_window
assert a.y_domain == b.y_domain
assert a.y_window == b.y_window
class PolynomialType1_0(PolynomialTypeBase):
version = "1.0.0"
class PolynomialType1_1(PolynomialTypeBase):
version = "1.1.0"
class PolynomialType1_2(PolynomialTypeBase):
version = "1.2.0"
class OrthoPolynomialType(TransformType):
name = "transform/ortho_polynomial"
types = ['astropy.modeling.models.Legendre1D',
'astropy.modeling.models.Legendre2D',
'astropy.modeling.models.Chebyshev1D',
'astropy.modeling.models.Chebyshev2D',
'astropy.modeling.models.Hermite1D',
'astropy.modeling.models.Hermite2D']
typemap = {
'legendre': 0,
'chebyshev': 2,
'hermite': 4,
}
    invtypemap = {v: k for k, v in typemap.items()}
version = "1.0.0"
@classmethod
def from_tree_transform(cls, node, ctx):
coefficients = np.asarray(node['coefficients'])
n_dim = coefficients.ndim
poly_type = node['polynomial_type']
if n_dim == 1:
domain = node.get('domain', None)
window = node.get('window', None)
model = cls.types[cls.typemap[poly_type]](coefficients.size - 1,
domain=domain, window=window)
model.parameters = coefficients
elif n_dim == 2:
x_domain, y_domain = tuple(node.get('domain', (None, None)))
x_window, y_window = tuple(node.get('window', (None, None)))
coeffs = {}
shape = coefficients.shape
x_degree = shape[0] - 1
y_degree = shape[1] - 1
for i in range(x_degree + 1):
for j in range(y_degree + 1):
name = f'c{i}_{j}'
coeffs[name] = coefficients[i, j]
model = cls.types[cls.typemap[poly_type]+1](x_degree, y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
**coeffs)
else:
raise NotImplementedError(
"Asdf currently only supports 1D or 2D polynomial transforms.")
return model
@classmethod
def to_tree_transform(cls, model, ctx):
typeindex = cls.types.index(model.__class__)
        poly_type = cls.invtypemap[(typeindex // 2) * 2]
ndim = (typeindex % 2) + 1
if ndim == 1:
coefficients = np.array(model.parameters)
else:
coefficients = np.zeros((model.x_degree + 1, model.y_degree + 1))
for i in range(model.x_degree + 1):
for j in range(model.y_degree + 1):
name = f'c{i}_{j}'
coefficients[i, j] = getattr(model, name).value
node = {'polynomial_type': poly_type, 'coefficients': coefficients}
if ndim == 1:
if model.domain is not None:
node['domain'] = model.domain
if model.window is not None:
node['window'] = model.window
else:
            if model.x_domain is not None or model.y_domain is not None:
                node['domain'] = (model.x_domain, model.y_domain)
            if model.x_window is not None or model.y_window is not None:
                node['window'] = (model.x_window, model.y_window)
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
# There should be a more elegant way of doing this
TransformType.assert_equal(a, b)
assert ((isinstance(a, (modeling.models.Legendre1D, modeling.models.Legendre2D)) and
isinstance(b, (modeling.models.Legendre1D, modeling.models.Legendre2D))) or
(isinstance(a, (modeling.models.Chebyshev1D, modeling.models.Chebyshev2D)) and
isinstance(b, (modeling.models.Chebyshev1D, modeling.models.Chebyshev2D))) or
(isinstance(a, (modeling.models.Hermite1D, modeling.models.Hermite2D)) and
isinstance(b, (modeling.models.Hermite1D, modeling.models.Hermite2D))))
assert_array_equal(a.parameters, b.parameters)
class Linear1DType(TransformType):
name = "transform/linear1d"
version = '1.0.0'
types = ['astropy.modeling.models.Linear1D']
@classmethod
def from_tree_transform(cls, node, ctx):
slope = node.get('slope', None)
intercept = node.get('intercept', None)
return modeling.models.Linear1D(slope=slope, intercept=intercept)
@classmethod
def to_tree_transform(cls, model, ctx):
return {
'slope': _parameter_to_value(model.slope),
'intercept': _parameter_to_value(model.intercept),
}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Linear1D) and
isinstance(b, modeling.models.Linear1D))
assert_array_equal(a.slope, b.slope)
assert_array_equal(a.intercept, b.intercept)
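# --- Hedged usage sketch (added for illustration; not from the original module) ---
# ShiftType above keeps units: _parameter_to_value is assumed to return a
# Quantity when the parameter carries one, and a plain float otherwise.
if __name__ == '__main__':
    sh = modeling.models.Shift(2.0 * u.km)
    node = ShiftType.to_tree_transform(sh, ctx=None)
    assert node['offset'] == 2.0 * u.km
    restored = ShiftType.from_tree_transform(node, ctx=None)
    assert restored.offset.quantity == 2.0 * u.km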
|
55b4f78c98e631de30e9c11ae3c5da335a80cacf5095650e2ccf91ac54f79f9c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from astropy import modeling
from astropy import units as u
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.io.misc.asdf.tags.transform.basic import TransformType
__all__ = ['TabularType']
class TabularType(TransformType):
name = "transform/tabular"
version = '1.2.0'
types = [
modeling.models.Tabular2D, modeling.models.Tabular1D
]
@classmethod
def from_tree_transform(cls, node, ctx):
lookup_table = node.pop("lookup_table")
dim = lookup_table.ndim
fill_value = node.pop("fill_value", None)
if dim == 1:
# The copy is necessary because the array is memory mapped.
points = (node['points'][0][:],)
model = modeling.models.Tabular1D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
elif dim == 2:
points = tuple([p[:] for p in node['points']])
model = modeling.models.Tabular2D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
else:
            tabular_class = modeling.models.tabular_model(dim)
points = tuple([p[:] for p in node['points']])
model = tabular_class(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
return model
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
if model.fill_value is not None:
node["fill_value"] = model.fill_value
node["lookup_table"] = model.lookup_table
node["points"] = [p for p in model.points]
node["method"] = str(model.method)
node["bounds_error"] = model.bounds_error
return node
@classmethod
def assert_equal(cls, a, b):
if isinstance(a.lookup_table, u.Quantity):
assert u.allclose(a.lookup_table, b.lookup_table)
assert u.allclose(a.points, b.points)
a_box = a.bounding_box
if isinstance(a_box, ModelBoundingBox):
a_box = a_box.bounding_box()
b_box = b.bounding_box
if isinstance(b_box, ModelBoundingBox):
b_box = b_box.bounding_box()
for i in range(len(a_box)):
assert u.allclose(a_box[i], b_box[i])
else:
assert_array_equal(a.lookup_table, b.lookup_table)
assert_array_equal(a.points, b.points)
a_box = a.bounding_box
if isinstance(a_box, ModelBoundingBox):
a_box = a_box.bounding_box()
b_box = b.bounding_box
if isinstance(b_box, ModelBoundingBox):
b_box = b_box.bounding_box()
assert_array_equal(a_box, b_box)
        assert a.method == b.method
        if a.fill_value is None:
            assert b.fill_value is None
        elif np.isnan(a.fill_value):
            assert np.isnan(b.fill_value)
        else:
            assert a.fill_value == b.fill_value
        assert a.bounds_error == b.bounds_error
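# --- Hedged usage sketch (added for illustration; not from the original module) ---
# Round trip of a Tabular1D. from_tree_transform pops keys from the node, so a
# shallow copy is passed to leave the original tree untouched (assumption:
# direct classmethod access with ctx unused).
if __name__ == '__main__':
    points = np.arange(5.0)
    tab = modeling.models.Tabular1D(points=points, lookup_table=points ** 2,
                                    bounds_error=False, fill_value=np.nan)
    node = TabularType.to_tree_transform(tab, ctx=None)
    restored = TabularType.from_tree_transform(dict(node), ctx=None)
    assert_array_equal(restored.lookup_table, tab.lookup_table)
    TabularType.assert_equal(tab, restored)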
|
4eabfdc2d999d9e0d732113add114cc9d05ad55e6b3caafa121bea848d7d5cf3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from astropy.modeling import physical_models
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['BlackBody', 'Drude1DType', 'Plummer1DType']
class BlackBody(TransformType):
name = 'transform/blackbody'
version = '1.0.0'
types = ['astropy.modeling.physical_models.BlackBody']
@classmethod
def from_tree_transform(cls, node, ctx):
return physical_models.BlackBody(scale=node['scale'],
temperature=node['temperature'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'scale': _parameter_to_value(model.scale),
'temperature': _parameter_to_value(model.temperature)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, physical_models.BlackBody) and
isinstance(b, physical_models.BlackBody))
assert_array_equal(a.scale, b.scale)
assert_array_equal(a.temperature, b.temperature)
class Drude1DType(TransformType):
name = 'transform/drude1d'
version = '1.0.0'
types = ['astropy.modeling.physical_models.Drude1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return physical_models.Drude1D(amplitude=node['amplitude'],
x_0=node['x_0'],
fwhm=node['fwhm'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'fwhm': _parameter_to_value(model.fwhm)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, physical_models.Drude1D) and
isinstance(b, physical_models.Drude1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.fwhm, b.fwhm)
class Plummer1DType(TransformType):
name = 'transform/plummer1d'
version = '1.0.0'
types = ['astropy.modeling.physical_models.Plummer1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return physical_models.Plummer1D(mass=node['mass'],
r_plum=node['r_plum'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'mass': _parameter_to_value(model.mass),
'r_plum': _parameter_to_value(model.r_plum)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, physical_models.Plummer1D) and
isinstance(b, physical_models.Plummer1D))
assert_array_equal(a.mass, b.mass)
assert_array_equal(a.r_plum, b.r_plum)
|
6fccb027d862fb913da0dfde5b869e013464b6c08a0f77da068d043fa96fc519 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import warnings
from asdf import tagged
from asdf.tests.helpers import assert_tree_match
from astropy.modeling.core import Model, CompoundModel
from astropy.modeling.models import Identity, Mapping, Const1D
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
from astropy.io.misc.asdf.tags.transform.basic import TransformType
__all__ = ['CompoundType', 'RemapAxesType']
_operator_to_tag_mapping = {
'+': 'add',
'-': 'subtract',
'*': 'multiply',
'/': 'divide',
'**': 'power',
'|': 'compose',
'&': 'concatenate',
'fix_inputs': 'fix_inputs'
}
_tag_to_method_mapping = {
'add': '__add__',
'subtract': '__sub__',
'multiply': '__mul__',
'divide': '__truediv__',
'power': '__pow__',
'compose': '__or__',
'concatenate': '__and__',
'fix_inputs': 'fix_inputs'
}
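# --- Hedged note (added for illustration; not from the original module) ---
# The two mappings above are inverses over their shared keys: a model's
# operator string is turned into a tag on write, and the tag is turned back
# into the dunder method used to rebuild the tree on read, e.g.
# getattr(left, '__or__')(right) recreates `left | right`.
if __name__ == '__main__':
    left, right = Const1D(1.0), Const1D(2.0)
    rebuilt = getattr(left, _tag_to_method_mapping['compose'])(right)
    assert isinstance(rebuilt, CompoundModel) and rebuilt.op == '|'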
class CompoundType(TransformType):
name = ['transform/' + x for x in _tag_to_method_mapping.keys()]
types = [CompoundModel]
version = '1.2.0'
handle_dynamic_subclasses = True
@classmethod
def from_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
tag = node._tag[node._tag.rfind('/')+1:]
tag = tag[:tag.rfind('-')]
oper = _tag_to_method_mapping[tag]
left = node['forward'][0]
if not isinstance(left, Model):
raise TypeError(f"Unknown model type '{node['forward'][0]._tag}'")
right = node['forward'][1]
if (not isinstance(right, Model) and
not (oper == 'fix_inputs' and isinstance(right, dict))):
raise TypeError(f"Unknown model type '{node['forward'][1]._tag}'")
if oper == 'fix_inputs':
right = dict(zip(right['keys'], right['values']))
model = CompoundModel('fix_inputs', left, right)
else:
model = getattr(left, oper)(right)
return cls._from_tree_base_transform_members(model, node, ctx)
@classmethod
def to_tree_tagged(cls, model, ctx):
warnings.warn(create_asdf_deprecation_warning())
left = model.left
if isinstance(model.right, dict):
right = {
'keys': list(model.right.keys()),
'values': list(model.right.values())
}
else:
right = model.right
node = {
'forward': [left, right]
}
try:
tag_name = 'transform/' + _operator_to_tag_mapping[model.op]
except KeyError:
raise ValueError(f"Unknown operator '{model.op}'")
node = tagged.tag_object(cls.make_yaml_tag(tag_name), node, ctx=ctx)
return cls._to_tree_base_transform_members(model, node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert_tree_match(a.left, b.left)
assert_tree_match(a.right, b.right)
class RemapAxesType(TransformType):
name = 'transform/remap_axes'
types = [Mapping]
version = '1.3.0'
@classmethod
def from_tree_transform(cls, node, ctx):
mapping = node['mapping']
n_inputs = node.get('n_inputs')
if all([isinstance(x, int) for x in mapping]):
return Mapping(tuple(mapping), n_inputs)
if n_inputs is None:
n_inputs = max([x for x in mapping
if isinstance(x, int)]) + 1
transform = Identity(n_inputs)
new_mapping = []
i = n_inputs
for entry in mapping:
if isinstance(entry, int):
new_mapping.append(entry)
else:
new_mapping.append(i)
transform = transform & Const1D(entry.value)
i += 1
return transform | Mapping(new_mapping)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'mapping': list(model.mapping)}
if model.n_inputs > max(model.mapping) + 1:
node['n_inputs'] = model.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
TransformType.assert_equal(a, b)
assert a.mapping == b.mapping
        assert a.n_inputs == b.n_inputs
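# --- Hedged usage sketch (added for illustration; not from the original module) ---
# An all-integer mapping round-trips directly; non-integer entries are assumed
# to be constants and are rebuilt above as Identity & Const1D | Mapping.
if __name__ == '__main__':
    m = Mapping((1, 0, 1))
    node = RemapAxesType.to_tree_transform(m, ctx=None)
    assert node == {'mapping': [1, 0, 1]}
    restored = RemapAxesType.from_tree_transform(node, ctx=None)
    assert restored.mapping == (1, 0, 1)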
|
8e245509ba1f371c57579e515ed59e33e6a6f63ea68483a51e8bbf32b7ecd1e3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from astropy.modeling import powerlaws
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['PowerLaw1DType', 'BrokenPowerLaw1DType',
'SmoothlyBrokenPowerLaw1DType', 'ExponentialCutoffPowerLaw1DType',
'LogParabola1DType']
class PowerLaw1DType(TransformType):
name = 'transform/power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.PowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.PowerLaw1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.PowerLaw1D) and
isinstance(b, powerlaws.PowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
class BrokenPowerLaw1DType(TransformType):
name = 'transform/broken_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.BrokenPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.BrokenPowerLaw1D(amplitude=node['amplitude'],
x_break=node['x_break'],
alpha_1=node['alpha_1'],
alpha_2=node['alpha_2'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_break': _parameter_to_value(model.x_break),
'alpha_1': _parameter_to_value(model.alpha_1),
'alpha_2': _parameter_to_value(model.alpha_2)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.BrokenPowerLaw1D) and
isinstance(b, powerlaws.BrokenPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_break, b.x_break)
assert_array_equal(a.alpha_1, b.alpha_1)
assert_array_equal(a.alpha_2, b.alpha_2)
class SmoothlyBrokenPowerLaw1DType(TransformType):
name = 'transform/smoothly_broken_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.SmoothlyBrokenPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.SmoothlyBrokenPowerLaw1D(amplitude=node['amplitude'],
x_break=node['x_break'],
alpha_1=node['alpha_1'],
alpha_2=node['alpha_2'],
delta=node['delta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_break': _parameter_to_value(model.x_break),
'alpha_1': _parameter_to_value(model.alpha_1),
'alpha_2': _parameter_to_value(model.alpha_2),
'delta': _parameter_to_value(model.delta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.SmoothlyBrokenPowerLaw1D) and
isinstance(b, powerlaws.SmoothlyBrokenPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_break, b.x_break)
assert_array_equal(a.alpha_1, b.alpha_1)
assert_array_equal(a.alpha_2, b.alpha_2)
assert_array_equal(a.delta, b.delta)
class ExponentialCutoffPowerLaw1DType(TransformType):
name = 'transform/exponential_cutoff_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.ExponentialCutoffPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.ExponentialCutoffPowerLaw1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'],
x_cutoff=node['x_cutoff'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha),
'x_cutoff': _parameter_to_value(model.x_cutoff)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.ExponentialCutoffPowerLaw1D) and
isinstance(b, powerlaws.ExponentialCutoffPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
assert_array_equal(a.x_cutoff, b.x_cutoff)
class LogParabola1DType(TransformType):
name = 'transform/log_parabola1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.LogParabola1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.LogParabola1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'],
beta=node['beta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha),
'beta': _parameter_to_value(model.beta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.LogParabola1D) and
isinstance(b, powerlaws.LogParabola1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
assert_array_equal(a.beta, b.beta)
|
3d4af61f41d42cda7c66fa736cf2ba90c7312fb193027efd9a1418219cba951a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import functools
import numpy as np
from astropy.time import TimeDelta
from astropy.io.misc.asdf.types import AstropyType
__all__ = ['TimeDeltaType']
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
class TimeDeltaType(AstropyType):
name = 'time/timedelta'
types = [TimeDelta]
version = '1.0.0'
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, node, ctx):
return TimeDelta.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
assert allclose_jd(old.jd, new.jd)
assert allclose_jd2(old.jd2, new.jd2)
assert allclose_sec(old.sec, new.sec)
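# --- Hedged note and sketch (added for illustration; not from the original module) ---
# rtol=2**-52 is the double-precision machine epsilon, so the absolute
# tolerances above correspond to about 2**-52 day (~19 ps, quoted as "20 ps").
if __name__ == '__main__':
    dt = TimeDelta(1.5, format='jd')
    node = TimeDeltaType.to_tree(dt, ctx=None)
    TimeDeltaType.assert_equal(dt, TimeDeltaType.from_tree(node, ctx=None))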
|
961e7bfc8906d23b975f729fcacb28daff901df92b12253a487a8f141e6f8b72 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf.versioning import AsdfSpec
from astropy import time
from astropy import units as u
from astropy.units import Quantity
from astropy.coordinates import EarthLocation
from astropy.io.misc.asdf.types import AstropyAsdfType
__all__ = ['TimeType']
_guessable_formats = set(['iso', 'byear', 'jyear', 'yday'])
_astropy_format_to_asdf_format = {
'isot': 'iso',
'byear_str': 'byear',
'jyear_str': 'jyear'
}
def _assert_earthlocation_equal(a, b):
assert_array_equal(a.x, b.x)
assert_array_equal(a.y, b.y)
assert_array_equal(a.z, b.z)
assert_array_equal(a.lat, b.lat)
assert_array_equal(a.lon, b.lon)
class TimeType(AstropyAsdfType):
name = 'time/time'
version = '1.1.0'
supported_versions = ['1.0.0', AsdfSpec('>=1.1.0')]
types = ['astropy.time.core.Time']
requires = ['astropy']
@classmethod
def to_tree(cls, node, ctx):
fmt = node.format
if fmt == 'byear':
node = time.Time(node, format='byear_str')
elif fmt == 'jyear':
node = time.Time(node, format='jyear_str')
elif fmt in ('fits', 'datetime', 'plot_date'):
node = time.Time(node, format='isot')
fmt = node.format
fmt = _astropy_format_to_asdf_format.get(fmt, fmt)
guessable_format = fmt in _guessable_formats
if node.scale == 'utc' and guessable_format and node.isscalar:
return node.value
d = {'value': node.value}
if not guessable_format:
d['format'] = fmt
if node.scale != 'utc':
d['scale'] = node.scale
if node.location is not None:
x, y, z = node.location.x, node.location.y, node.location.z
# Preserve backwards compatibility for writing the old schema
# This allows WCS to test backwards compatibility with old frames
# This code does get tested in CI, but we don't run a coverage test
if cls.version == '1.0.0': # pragma: no cover
unit = node.location.unit
d['location'] = {
'x': x.value,
'y': y.value,
'z': z.value,
'unit': unit
}
else:
d['location'] = {
# It seems like EarthLocations can be represented either in
# terms of Cartesian coordinates or latitude and longitude, so
# we rather arbitrarily choose the former for our representation
'x': x,
'y': y,
'z': z
}
return d
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, (str, list, np.ndarray)):
t = time.Time(node)
fmt = _astropy_format_to_asdf_format.get(t.format, t.format)
if fmt not in _guessable_formats:
raise ValueError(f"Invalid time '{node}'")
return t
value = node['value']
fmt = node.get('format')
scale = node.get('scale')
location = node.get('location')
if location is not None:
unit = location.get('unit', u.m)
# This ensures that we can read the v.1.0.0 schema and convert it
# to the new EarthLocation object, which expects Quantity components
for comp in ['x', 'y', 'z']:
if not isinstance(location[comp], Quantity):
location[comp] = Quantity(location[comp], unit=unit)
location = EarthLocation.from_geocentric(
location['x'], location['y'], location['z'])
return time.Time(value, format=fmt, scale=scale, location=location)
@classmethod
def assert_equal(cls, old, new):
assert old.format == new.format
assert old.scale == new.scale
if isinstance(old.location, EarthLocation):
assert isinstance(new.location, EarthLocation)
_assert_earthlocation_equal(old.location, new.location)
else:
assert old.location == new.location
assert_array_equal(old, new)
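# --- Hedged usage sketch (added for illustration; not from the original module) ---
# 'iso' is a guessable format, so only the value and any non-UTC scale are
# stored; Time itself recovers the format on read.
if __name__ == '__main__':
    t = time.Time('2020-01-01T00:00:00', scale='tai')
    node = TimeType.to_tree(t, ctx=None)
    assert node == {'value': '2020-01-01T00:00:00.000', 'scale': 'tai'}
    TimeType.assert_equal(t, TimeType.from_tree(node, ctx=None))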
|
9c278d53ff411faad44c939b92f70edc27844848e8fc370b3b825ff25923c4ba | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf.tags.core import NDArrayType
from astropy.coordinates.spectral_coordinate import SpectralCoord
from astropy.io.misc.asdf.types import AstropyType
from astropy.io.misc.asdf.tags.unit.unit import UnitType
__all__ = ['SpectralCoordType']
class SpectralCoordType(AstropyType):
"""
    ASDF tag implementation used to serialize/deserialize SpectralCoord objects
"""
name = 'coordinates/spectralcoord'
types = [SpectralCoord]
version = '1.0.0'
@classmethod
def to_tree(cls, spec_coord, ctx):
node = {}
if isinstance(spec_coord, SpectralCoord):
node['value'] = spec_coord.value
node['unit'] = spec_coord.unit
if spec_coord.observer is not None:
node['observer'] = spec_coord.observer
if spec_coord.target is not None:
node['target'] = spec_coord.target
return node
raise TypeError(f"'{spec_coord}' is not a valid SpectralCoord")
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, SpectralCoord):
return node
unit = UnitType.from_tree(node['unit'], ctx)
value = node['value']
observer = node['observer'] if 'observer' in node else None
        target = node['target'] if 'target' in node else None
if isinstance(value, NDArrayType):
value = value._make_array()
return SpectralCoord(value, unit=unit, observer=observer, target=target)
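# --- Hedged usage sketch (added for illustration; not from the original module) ---
# Round trip of a bare SpectralCoord (no observer/target), assuming ctx is
# unused by the classmethods above.
if __name__ == '__main__':
    import astropy.units as u
    sc = SpectralCoord([100.0, 200.0], unit=u.GHz)
    node = SpectralCoordType.to_tree(sc, ctx=None)
    restored = SpectralCoordType.from_tree(node, ctx=None)
    assert restored.unit == u.GHz and restored.observer is None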
|
25e47b0af741bc069847da1a7f0d6bc50a0631e5d0c73afd52194fe2cbd57d40 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import os
import glob
import warnings
from asdf import tagged
import astropy.units as u
import astropy.coordinates
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.units import Quantity
from astropy.coordinates import ICRS, Longitude, Latitude, Angle
from astropy.io.misc.asdf.types import AstropyType
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
__all__ = ['CoordType']
SCHEMA_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'schemas', 'astropy.org', 'astropy'))
def _get_frames():
"""
By reading the schema files, get the list of all the frames we can
save/load.
"""
search = os.path.join(SCHEMA_PATH, 'coordinates', 'frames', '*.yaml')
files = glob.glob(search)
names = []
for fpath in files:
path, fname = os.path.split(fpath)
frame, _ = fname.split('-')
# Skip baseframe because we cannot directly save / load it.
        # Skip icrs because it has an explicit tag of its own: there are two
        # schema versions.
if frame not in ['baseframe', 'icrs']:
names.append(frame)
return names
class BaseCoordType:
"""
This defines the base methods for coordinates, without defining anything
related to asdf types. This allows subclasses with different types and
schemas to use this without confusing the metaclass machinery.
"""
@staticmethod
def _tag_to_frame(tag):
"""
Extract the frame name from the tag.
"""
tag = tag[tag.rfind('/')+1:]
tag = tag[:tag.rfind('-')]
return frame_transform_graph.lookup_name(tag)
@classmethod
def _frame_name_to_tag(cls, frame_name):
return cls.make_yaml_tag(cls._tag_prefix + frame_name)
@classmethod
def from_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
frame = cls._tag_to_frame(node._tag)
data = node.get('data', None)
if data is not None:
return frame(node['data'], **node['frame_attributes'])
return frame(**node['frame_attributes'])
@classmethod
def to_tree_tagged(cls, frame, ctx):
warnings.warn(create_asdf_deprecation_warning())
if type(frame) not in frame_transform_graph.frame_set:
raise ValueError("Can only save frames that are registered with the "
"transformation graph.")
node = {}
if frame.has_data:
node['data'] = frame.data
frame_attributes = {}
for attr in frame.frame_attributes.keys():
value = getattr(frame, attr, None)
if value is not None:
frame_attributes[attr] = value
node['frame_attributes'] = frame_attributes
return tagged.tag_object(cls._frame_name_to_tag(frame.name), node, ctx=ctx)
@classmethod
def assert_equal(cls, old, new):
assert isinstance(new, type(old))
if new.has_data:
assert u.allclose(new.data.lon, old.data.lon)
assert u.allclose(new.data.lat, old.data.lat)
class CoordType(BaseCoordType, AstropyType):
_tag_prefix = "coordinates/frames/"
name = ["coordinates/frames/" + f for f in _get_frames()]
types = [astropy.coordinates.BaseCoordinateFrame]
handle_dynamic_subclasses = True
requires = ['astropy']
version = "1.0.0"
class ICRSType(CoordType):
"""
Define a special tag for ICRS so we can make it version 1.1.0.
"""
name = "coordinates/frames/icrs"
types = ['astropy.coordinates.ICRS']
version = "1.1.0"
class ICRSType10(AstropyType):
name = "coordinates/frames/icrs"
types = [astropy.coordinates.ICRS]
requires = ['astropy']
version = "1.0.0"
@classmethod
def from_tree(cls, node, ctx):
wrap_angle = Angle(node['ra']['wrap_angle'])
ra = Longitude(
node['ra']['value'],
unit=node['ra']['unit'],
wrap_angle=wrap_angle)
dec = Latitude(node['dec']['value'], unit=node['dec']['unit'])
return ICRS(ra=ra, dec=dec)
@classmethod
def to_tree(cls, frame, ctx):
node = {}
wrap_angle = Quantity(frame.ra.wrap_angle)
node['ra'] = {
'value': frame.ra.value,
'unit': frame.ra.unit.to_string(),
'wrap_angle': wrap_angle
}
node['dec'] = {
'value': frame.dec.value,
'unit': frame.dec.unit.to_string()
}
return node
@classmethod
def assert_equal(cls, old, new):
assert isinstance(old, ICRS)
assert isinstance(new, ICRS)
assert u.allclose(new.ra, old.ra)
assert u.allclose(new.dec, old.dec)
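# --- Hedged usage sketch (added for illustration; not from the original module) ---
# The 1.0.0 ICRS schema stores ra/dec as explicit value/unit/wrap_angle nodes:
if __name__ == '__main__':
    frame = ICRS(ra=Longitude(25.0, unit=u.deg),
                 dec=Latitude(45.0, unit=u.deg))
    node = ICRSType10.to_tree(frame, ctx=None)
    assert node['ra']['value'] == 25.0 and node['dec']['unit'] == 'deg'
    ICRSType10.assert_equal(frame, ICRSType10.from_tree(node, ctx=None))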
|
b4accf2d735bcdbb434d33f81d38543eecacb451446822c337f418179935652d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.coordinates import SkyCoord
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.io.misc.asdf.types import AstropyType
class SkyCoordType(AstropyType):
name = 'coordinates/skycoord'
types = [SkyCoord]
version = "1.0.0"
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, tree, ctx):
return SkyCoord.info._construct_from_dict(tree)
@classmethod
def assert_equal(cls, old, new):
assert skycoord_equal(old, new)
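# --- Hedged usage sketch (added for illustration; not from the original module) ---
# SkyCoord serialization delegates to the info machinery; skycoord_equal then
# verifies the round trip frame by frame.
if __name__ == '__main__':
    import astropy.units as u
    coord = SkyCoord(10.0 * u.deg, 20.0 * u.deg, frame='icrs')
    tree = SkyCoordType.to_tree(coord, ctx=None)
    assert skycoord_equal(coord, SkyCoordType.from_tree(tree, ctx=None))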
|
eff71f04d6bd25fc461e51941266bcd30829a35ed4540fbebbfac74d9554740c | import astropy.units as u
import astropy.coordinates.representation
from astropy.coordinates.representation import BaseRepresentationOrDifferential
from astropy.io.misc.asdf.types import AstropyType
class RepresentationType(AstropyType):
name = "coordinates/representation"
types = [BaseRepresentationOrDifferential]
version = "1.0.0"
_representation_module = astropy.coordinates.representation
@classmethod
def to_tree(cls, representation, ctx):
comps = representation.components
components = {}
for c in comps:
value = getattr(representation, '_' + c, None)
if value is not None:
components[c] = value
t = type(representation)
node = {}
node['type'] = t.__name__
node['components'] = components
return node
@classmethod
def from_tree(cls, node, ctx):
rep_type = getattr(cls._representation_module, node['type'])
return rep_type(**node['components'])
@classmethod
def assert_equal(cls, old, new):
assert isinstance(new, type(old))
assert new.components == old.components
for comp in new.components:
nc = getattr(new, comp)
oc = getattr(old, comp)
assert u.allclose(nc, oc)
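# --- Hedged usage sketch (added for illustration; not from the original module) ---
# to_tree introspects the private per-component attributes, so the node stores
# the class name plus one entry per component:
if __name__ == '__main__':
    rep = astropy.coordinates.representation.CartesianRepresentation(
        1.0 * u.m, 2.0 * u.m, 3.0 * u.m)
    node = RepresentationType.to_tree(rep, ctx=None)
    assert node['type'] == 'CartesianRepresentation'
    RepresentationType.assert_equal(rep, RepresentationType.from_tree(node, ctx=None))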
|
2c42a917a0806d1671c151e88efab95d0b7d3e9201d2474c66df894609196ecc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.coordinates import Angle, Latitude, Longitude
from astropy.io.misc.asdf.tags.unit.quantity import QuantityType
__all__ = ['AngleType', 'LatitudeType', 'LongitudeType']
class AngleType(QuantityType):
name = "coordinates/angle"
types = [Angle]
requires = ['astropy']
version = "1.0.0"
organization = 'astropy.org'
standard = 'astropy'
@classmethod
def from_tree(cls, node, ctx):
return Angle(super().from_tree(node, ctx))
class LatitudeType(AngleType):
name = "coordinates/latitude"
types = [Latitude]
@classmethod
def from_tree(cls, node, ctx):
return Latitude(super().from_tree(node, ctx))
class LongitudeType(AngleType):
name = "coordinates/longitude"
types = [Longitude]
@classmethod
def from_tree(cls, node, ctx):
wrap_angle = node['wrap_angle']
return Longitude(super().from_tree(node, ctx), wrap_angle=wrap_angle)
@classmethod
def to_tree(cls, longitude, ctx):
tree = super().to_tree(longitude, ctx)
tree['wrap_angle'] = longitude.wrap_angle
return tree
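# --- Hedged usage sketch (added for illustration; not from the original module) ---
# Longitude adds wrap_angle on top of the plain quantity tree:
if __name__ == '__main__':
    import astropy.units as u
    lon = Longitude(350.0 * u.deg, wrap_angle=180.0 * u.deg)
    tree = LongitudeType.to_tree(lon, ctx=None)
    restored = LongitudeType.from_tree(tree, ctx=None)
    assert restored.wrap_angle == lon.wrap_angle and restored == lon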
|
8306fa294bcf8ccb506ed745e014c7e23067f4659bfd2dbda25ddd30b535611d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.coordinates import EarthLocation
from astropy.io.misc.asdf.types import AstropyType
class EarthLocationType(AstropyType):
name = 'coordinates/earthlocation'
types = [EarthLocation]
version = '1.0.0'
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, node, ctx):
return EarthLocation.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
        assert (old == new).all()
|
3cd0fc14d3877908ec63f7822811d5f6a7fa3c1f6b5c538ec5dfb47ba2e7d212 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.units.equivalencies import Equivalency
from astropy.units import equivalencies
from astropy.io.misc.asdf.types import AstropyType
class EquivalencyType(AstropyType):
name = "units/equivalency"
types = [Equivalency]
version = '1.0.0'
@classmethod
def to_tree(cls, equiv, ctx):
node = {}
if not isinstance(equiv, Equivalency):
raise TypeError(f"'{equiv}' is not a valid Equivalency")
eqs = []
for e, kwargs in zip(equiv.name, equiv.kwargs):
kwarg_names = list(kwargs.keys())
kwarg_values = list(kwargs.values())
eq = {'name': e, 'kwargs_names': kwarg_names, 'kwargs_values': kwarg_values}
eqs.append(eq)
return eqs
@classmethod
def from_tree(cls, node, ctx):
eqs = []
for eq in node:
equiv = getattr(equivalencies, eq['name'])
kwargs = dict(zip(eq['kwargs_names'], eq['kwargs_values']))
eqs.append(equiv(**kwargs))
return sum(eqs[1:], eqs[0])
@classmethod
def assert_equal(cls, a, b):
assert a == b
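# --- Hedged usage sketch (added for illustration; not from the original module) ---
# Combined equivalencies carry parallel name/kwargs lists; from_tree rebuilds
# each piece and sums them back into a single Equivalency (assumes Equivalency
# equality compares names and kwargs rather than the underlying callables).
if __name__ == '__main__':
    import astropy.units as u
    eq = u.spectral() + u.parallax()
    node = EquivalencyType.to_tree(eq, ctx=None)
    assert [e['name'] for e in node] == ['spectral', 'parallax']
    assert EquivalencyType.from_tree(node, ctx=None) == eq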
|
84be84162eafd822e0d11ac062d4a675ff146ed2cf43d8b1412c42b80ff62956 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.units import Unit, UnitBase
from astropy.io.misc.asdf.types import AstropyAsdfType
class UnitType(AstropyAsdfType):
name = 'unit/unit'
types = ['astropy.units.UnitBase']
requires = ['astropy']
@classmethod
def to_tree(cls, node, ctx):
if isinstance(node, str):
node = Unit(node, format='vounit', parse_strict='warn')
if isinstance(node, UnitBase):
return node.to_string(format='vounit')
raise TypeError(f"'{node}' is not a valid unit")
@classmethod
def from_tree(cls, node, ctx):
return Unit(node, format='vounit', parse_strict='silent')
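# --- Hedged usage sketch (added for illustration; not from the original module) ---
# Units are stored as VOUnit strings, e.g. 'km / s' is assumed to serialize as
# 'km.s**-1':
if __name__ == '__main__':
    tree = UnitType.to_tree(Unit('km / s'), ctx=None)
    assert tree == 'km.s**-1'
    assert UnitType.from_tree(tree, ctx=None) == Unit('km / s')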
|
af61a6ad221048f9ac9a9a60a6cb41c0a4008b0afac326a87c5ee8a29f733544 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf.tags.core import NDArrayType
from astropy.units import Quantity
from astropy.io.misc.asdf.types import AstropyAsdfType
class QuantityType(AstropyAsdfType):
name = 'unit/quantity'
types = ['astropy.units.Quantity']
requires = ['astropy']
version = '1.1.0'
@classmethod
def to_tree(cls, quantity, ctx):
node = {}
if isinstance(quantity, Quantity):
node['value'] = quantity.value
node['unit'] = quantity.unit
return node
raise TypeError(f"'{quantity}' is not a valid Quantity")
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, Quantity):
return node
unit = node['unit']
value = node['value']
if isinstance(value, NDArrayType):
value = value._make_array()
return Quantity(value, unit=unit)
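# Behavior sketch (illustrative): a Quantity is stored as a value/unit pair
# and rebuilt from it; a node that is already a Quantity passes through:
#
#     import astropy.units as u
#     node = QuantityType.to_tree(3.5 * u.m, None)  # {'value': 3.5, 'unit': Unit("m")}
#     QuantityType.from_tree(node, None)            # -> <Quantity 3.5 m>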
|
c72a9be8f7258e52c2025eaa2b7163037099812850aa42abc0be0d1c6fab7a3a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import numpy as np
from packaging.version import Version
import astropy.units as u
from astropy import table
from astropy.time import Time, TimeDelta
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.coordinates.tests.helper import skycoord_equal
from asdf.tests import helpers
from asdf.tags.core.ndarray import NDArrayType
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
def test_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 3
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_array_columns(tmpdir):
a = np.array([([[1, 2], [3, 4]], 2.0, 'x'),
([[5, 6], [7, 8]], 5.0, 'y'),
([[9, 10], [11, 12]], 8.2, 'z')],
dtype=[('a', '<i4', (2, 2)),
('b', '<f8'),
('c', '|S1')])
t = table.Table(a, copy=False)
assert t.columns['a'].shape == (3, 2, 2)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_structured_array_columns(tmpdir):
a = np.array([((1, 'a'), 2.0, 'x'),
((4, 'b'), 5.0, 'y'),
((5, 'c'), 8.2, 'z')],
dtype=[('a', [('a0', '<i4'), ('a1', '|S1')]),
('b', '<f8'), ('c', '|S1')])
t = table.Table(a, copy=False)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_row_order(tmpdir):
a = np.array([(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')],
dtype=[('a', '<i4'), ('b', '<f8'), ('c', '|S1')])
t = table.Table(a, copy=False)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_inline(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(list(ff.blocks.internal_blocks)) == 0
if Version(asdf.__version__) >= Version('2.8.0'):
# The auto_inline argument is deprecated as of asdf 2.8.0.
with asdf.config_context() as config:
config.array_inline_threshold = 64
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
else:
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check,
write_options={'auto_inline': 64})
def test_mismatched_columns():
yaml = """
table: !<tag:astropy.org:astropy/table/table-1.0.0>
columns:
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2]
name: a
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2, 3]
name: b
colnames: [a, b]
"""
buff = helpers.yaml_to_asdf(yaml)
with pytest.raises(ValueError) as err:
with asdf.open(buff) as ff:
pass
assert 'Inconsistent data column lengths' in str(err.value)
def test_masked_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'), masked=True)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['a'].mask = [True, False, True]
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 4
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_quantity_mixin(tmpdir):
t = table.QTable()
t['a'] = [1, 2, 3]
t['b'] = ['x', 'y', 'z']
t['c'] = [2.0, 5.0, 8.2] * u.m
def check(ff):
assert isinstance(ff['table']['c'], u.Quantity)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_time_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
def check(ff):
assert isinstance(ff['table']['c'], Time)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_timedelta_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = TimeDelta([1, 2] * u.day)
def check(ff):
assert isinstance(ff['table']['c'], TimeDelta)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_skycoord_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4', obstime='J1990.5')
def check(ff):
assert isinstance(ff['table']['c'], SkyCoord)
def tree_match(old, new):
NDArrayType.assert_equal(new['a'], old['a'])
NDArrayType.assert_equal(new['b'], old['b'])
assert skycoord_equal(new['c'], old['c'])
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check,
tree_match_func=tree_match)
def test_earthlocation_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
def check(ff):
assert isinstance(ff['table']['c'], EarthLocation)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_ndarray_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = table.NdarrayMixin([5, 6])
helpers.assert_roundtrip_tree({'table': t}, tmpdir)
def test_backwards_compat():
"""
Make sure that we can continue to read tables that use the schema from
the ASDF Standard.
This test uses the examples in the table schema from the ASDF Standard,
since these make no reference to Astropy's own table definition.
"""
def check(asdffile):
assert isinstance(asdffile['example'], table.Table)
run_schema_example_test('stsci.edu', 'asdf', 'core/table', '1.0.0', check)
|
b830465d091919975d8e5a8b993601266b61c704578808862adafa0bb7717c8f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class to handle a validation result for a single VOTable
file.
"""
# STDLIB
from xml.parsers.expat import ExpatError
import hashlib
import os
import shutil
import socket
import subprocess
import warnings
import pickle
import urllib.request
import urllib.error
import http.client
# VO
from astropy.io.votable import table
from astropy.io.votable import exceptions
from astropy.io.votable import xmlutil
class Result:
def __init__(self, url, root='results', timeout=10):
self.url = url
m = hashlib.md5()
m.update(url)
self._hash = m.hexdigest()
self._root = root
self._path = os.path.join(
self._hash[0:2], self._hash[2:4], self._hash[4:])
if not os.path.exists(self.get_dirpath()):
os.makedirs(self.get_dirpath())
self.timeout = timeout
self.load_attributes()
def __enter__(self):
return self
def __exit__(self, *args):
self.save_attributes()
def get_dirpath(self):
return os.path.join(self._root, self._path)
def get_htmlpath(self):
return self._path
def get_attribute_path(self):
return os.path.join(self.get_dirpath(), "values.dat")
def get_vo_xml_path(self):
return os.path.join(self.get_dirpath(), "vo.xml")
# ATTRIBUTES
def load_attributes(self):
path = self.get_attribute_path()
if os.path.exists(path):
try:
with open(path, 'rb') as fd:
self._attributes = pickle.load(fd)
except Exception:
shutil.rmtree(self.get_dirpath())
os.makedirs(self.get_dirpath())
self._attributes = {}
else:
self._attributes = {}
def save_attributes(self):
path = self.get_attribute_path()
with open(path, 'wb') as fd:
pickle.dump(self._attributes, fd)
def __getitem__(self, key):
return self._attributes[key]
def __setitem__(self, key, val):
self._attributes[key] = val
def __contains__(self, key):
return key in self._attributes
# VO XML
def download_xml_content(self):
path = self.get_vo_xml_path()
if 'network_error' not in self._attributes:
self['network_error'] = None
if os.path.exists(path):
return
def fail(reason):
reason = str(reason)
with open(path, 'wb') as fd:
fd.write(f'FAILED: {reason}\n'.encode('utf-8'))
self['network_error'] = reason
r = None
try:
r = urllib.request.urlopen(
self.url.decode('ascii'), timeout=self.timeout)
except urllib.error.URLError as e:
if hasattr(e, 'reason'):
reason = e.reason
else:
reason = e.code
fail(reason)
return
except http.client.HTTPException as e:
fail(f"HTTPException: {str(e)}")
return
except (socket.timeout, socket.error) as e:
fail("Timeout")
return
if r is None:
fail("Invalid URL")
return
try:
content = r.read()
except socket.timeout as e:
fail("Timeout")
return
else:
r.close()
with open(path, 'wb') as fd:
fd.write(content)
def get_xml_content(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
with open(path, 'rb') as fd:
content = fd.read()
return content
def validate_vo(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
self['version'] = ''
if 'network_error' in self and self['network_error'] is not None:
self['nwarnings'] = 0
self['nexceptions'] = 0
self['warnings'] = []
self['xmllint'] = None
self['warning_types'] = set()
return
nexceptions = 0
nwarnings = 0
t = None
lines = []
with open(path, 'rb') as input:
with warnings.catch_warnings(record=True) as warning_lines:
try:
t = table.parse(input, verify='warn', filename=path)
except (ValueError, TypeError, ExpatError) as e:
lines.append(str(e))
nexceptions += 1
lines = [str(x.message) for x in warning_lines] + lines
if t is not None:
self['version'] = version = t.version
else:
self['version'] = version = "1.0"
if 'xmllint' not in self:
# Now check the VO schema based on the version in
# the file.
try:
success, stdout, stderr = xmlutil.validate_schema(path, version)
            # OSError is raised when the XML file eats all memory and the
            # system sends a kill signal.
except OSError as e:
self['xmllint'] = None
self['xmllint_content'] = str(e)
else:
self['xmllint'] = (success == 0)
self['xmllint_content'] = stderr
warning_types = set()
for line in lines:
w = exceptions.parse_vowarning(line)
if w['is_warning']:
nwarnings += 1
if w['is_exception']:
nexceptions += 1
warning_types.add(w['warning'])
self['nwarnings'] = nwarnings
self['nexceptions'] = nexceptions
self['warnings'] = lines
self['warning_types'] = warning_types
def has_warning(self, warning_code):
return warning_code in self['warning_types']
def match_expectations(self):
if 'network_error' not in self:
self['network_error'] = None
if self['expected'] == 'good':
return (not self['network_error'] and
self['nwarnings'] == 0 and
self['nexceptions'] == 0)
elif self['expected'] == 'incorrect':
return (not self['network_error'] and
(self['nwarnings'] > 0 or
self['nexceptions'] > 0))
elif self['expected'] == 'broken':
return self['network_error'] is not None
def validate_with_votlint(self, path_to_stilts_jar):
filename = self.get_vo_xml_path()
p = subprocess.Popen(
["java", "-jar", path_to_stilts_jar, "votlint", "validate=false", filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if len(stdout) or p.returncode:
self['votlint'] = False
else:
self['votlint'] = True
self['votlint_content'] = stdout
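# Typical driver usage (sketch; the URL is hypothetical and must be bytes,
# since it is hashed for the cache path and decoded for the request):
#
#     with Result(b'http://example.com/vo.xml', root='results') as r:
#         r.download_xml_content()
#         r.validate_vo()
#         print(r['nwarnings'], r['nexceptions'], r['version'])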
def get_result_subsets(results, root, s=None):
all_results = []
correct = []
not_expected = []
fail_schema = []
schema_mismatch = []
fail_votlint = []
votlint_mismatch = []
network_failures = []
version_10 = []
version_11 = []
version_12 = []
version_unknown = []
has_warnings = []
warning_set = {}
has_exceptions = []
exception_set = {}
for url in results:
if s:
next(s)
if isinstance(url, Result):
x = url
else:
x = Result(url, root=root)
all_results.append(x)
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['xmllint'] is True):
correct.append(x)
if not x.match_expectations():
not_expected.append(x)
if x['xmllint'] is False:
fail_schema.append(x)
if (x['xmllint'] is False and
x['nwarnings'] == 0 and
x['nexceptions'] == 0):
schema_mismatch.append(x)
if 'votlint' in x and x['votlint'] is False:
fail_votlint.append(x)
if 'network_error' not in x:
x['network_error'] = None
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['network_error'] is None):
votlint_mismatch.append(x)
if 'network_error' in x and x['network_error'] is not None:
network_failures.append(x)
version = x['version']
if version == '1.0':
version_10.append(x)
elif version == '1.1':
version_11.append(x)
elif version == '1.2':
version_12.append(x)
else:
version_unknown.append(x)
if x['nwarnings'] > 0:
has_warnings.append(x)
for warning in x['warning_types']:
if (warning is not None and
len(warning) == 3 and
warning.startswith('W')):
warning_set.setdefault(warning, [])
warning_set[warning].append(x)
if x['nexceptions'] > 0:
has_exceptions.append(x)
for exc in x['warning_types']:
if exc is not None and len(exc) == 3 and exc.startswith('E'):
exception_set.setdefault(exc, [])
exception_set[exc].append(x)
warning_set = list(warning_set.items())
warning_set.sort()
exception_set = list(exception_set.items())
exception_set.sort()
tables = [
('all', 'All tests', all_results),
('correct', 'Correct', correct),
('unexpected', 'Unexpected', not_expected),
('schema', 'Invalid against schema', fail_schema),
('schema_mismatch', 'Invalid against schema/Passed vo.table',
schema_mismatch, ['ul']),
('fail_votlint', 'Failed votlint', fail_votlint),
('votlint_mismatch', 'Failed votlint/Passed vo.table',
votlint_mismatch, ['ul']),
('network_failures', 'Network failures', network_failures),
('version1.0', 'Version 1.0', version_10),
('version1.1', 'Version 1.1', version_11),
('version1.2', 'Version 1.2', version_12),
('version_unknown', 'Version unknown', version_unknown),
('warnings', 'Warnings', has_warnings)]
for warning_code, warning in warning_set:
if s:
next(s)
warning_class = getattr(exceptions, warning_code, None)
if warning_class:
warning_descr = warning_class.get_short_name()
tables.append(
(warning_code,
f'{warning_code}: {warning_descr}',
warning, ['ul', 'li']))
tables.append(
('exceptions', 'Exceptions', has_exceptions))
for exception_code, exc in exception_set:
if s:
next(s)
exception_class = getattr(exceptions, exception_code, None)
if exception_class:
exception_descr = exception_class.get_short_name()
tables.append(
(exception_code,
f'{exception_code}: {exception_descr}',
exc, ['ul', 'li']))
return tables
|
213f56c11209d24fcd9b18116508a364f3466253ff5c8cad35371733e6e1e383 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions, included here **FOR REFERENCE ONLY** since
the io registry cannot be displayed. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
""" # this is shown in the docs.
import abc
import copy
import inspect
import numpy as np
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.modeling import FittableModel, Model
from astropy.utils.decorators import classproperty
from .utils import convert_parameter_to_model_parameter
__all__ = [] # nothing is publicly scoped
class _CosmologyModel(FittableModel):
"""Base class for Cosmology redshift-method Models.
.. note::
This class is not publicly scoped so should not be used directly.
Instead, from a Cosmology instance use ``.to_format("astropy.model")``
to create an instance of a subclass of this class.
`_CosmologyModel` (subclasses) wrap a redshift-method of a
:class:`~astropy.cosmology.Cosmology` class, converting each non-`None`
|Cosmology| :class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
and the redshift-method to the model's ``__call__ / evaluate``.
See Also
--------
astropy.cosmology.Cosmology.to_format
"""
@abc.abstractmethod
def _cosmology_class(self):
"""Cosmology class as a private attribute. Set in subclasses."""
@abc.abstractmethod
def _method_name(self):
"""Cosmology method name as a private attribute. Set in subclasses."""
@classproperty
def cosmology_class(cls):
"""|Cosmology| class."""
return cls._cosmology_class
@property
def cosmology(self):
"""Return |Cosmology| using `~astropy.modeling.Parameter` values."""
cosmo = self._cosmology_class(
name=self.name,
**{k: (v.value if not (v := getattr(self, k)).unit else v.quantity)
for k in self.param_names})
return cosmo
@classproperty
def method_name(self):
"""Redshift-method name on |Cosmology| instance."""
return self._method_name
# ---------------------------------------------------------------
def evaluate(self, *args, **kwargs):
"""Evaluate method {method!r} of {cosmo_cls!r} Cosmology.
The Model wraps the :class:`~astropy.cosmology.Cosmology` method,
converting each |Cosmology| :class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
(unless the Parameter is None, in which case it is skipped).
Here an instance of the cosmology is created using the current
Parameter values and the method is evaluated given the input.
Parameters
----------
*args, **kwargs
The first ``n_inputs`` of ``*args`` are for evaluating the method
of the cosmology. The remaining args and kwargs are passed to the
cosmology class constructor.
Any unspecified Cosmology Parameter use the current value of the
corresponding Model Parameter.
Returns
-------
Any
Results of evaluating the Cosmology method.
"""
# create BoundArgument with all available inputs beyond the Parameters,
# which will be filled in next
ba = self.cosmology_class._init_signature.bind_partial(*args[self.n_inputs:], **kwargs)
# fill in missing Parameters
for k in self.param_names:
if k not in ba.arguments:
v = getattr(self, k)
ba.arguments[k] = v.value if not v.unit else v.quantity
# unvectorize, since Cosmology is not vectorized
# TODO! remove when vectorized
if np.shape(ba.arguments[k]): # only in __call__
# m_nu is a special case # TODO! fix by making it 'structured'
if k == "m_nu" and len(ba.arguments[k].shape) == 1:
continue
ba.arguments[k] = ba.arguments[k][0]
# make instance of cosmology
cosmo = self._cosmology_class(**ba.arguments)
# evaluate method
result = getattr(cosmo, self._method_name)(*args[:self.n_inputs])
return result
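# Calling sketch: instances are invoked like any other Model, so the wrapped
# redshift-method is evaluated with the current Parameter values, e.g.
#
#     from astropy.cosmology import Planck18
#     model = Planck18.to_format("astropy.model", method="lookback_time")
#     model(1.0)  # equivalent to Planck18.lookback_time(1.0)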
##############################################################################
def from_model(model):
"""Load |Cosmology| from `~astropy.modeling.Model` object.
Parameters
----------
model : `_CosmologyModel` subclass instance
        See ``Cosmology.to_format.help("astropy.model")`` for details.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
>>> from astropy.cosmology import Cosmology, Planck18
>>> model = Planck18.to_format("astropy.model", method="lookback_time")
>>> Cosmology.from_format(model)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
"""
cosmology = model.cosmology_class
meta = copy.deepcopy(model.meta)
# assemble the Parameters
params = {}
for n in model.param_names:
p = getattr(model, n)
params[p.name] = p.quantity if p.unit else p.value
# put all attributes in a dict
meta[p.name] = {n: getattr(p, n) for n in dir(p)
if not (n.startswith("_") or callable(getattr(p, n)))}
ba = cosmology._init_signature.bind(name=model.name, **params, meta=meta)
return cosmology(*ba.args, **ba.kwargs)
def to_model(cosmology, *_, method):
"""Convert a `~astropy.cosmology.Cosmology` to a `~astropy.modeling.Model`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
method : str, keyword-only
The name of the method on the ``cosmology``.
Returns
-------
`_CosmologyModel` subclass instance
The Model wraps the |Cosmology| method, converting each non-`None`
:class:`~astropy.cosmology.Parameter` to a
:class:`astropy.modeling.Model` :class:`~astropy.modeling.Parameter`
and the method to the model's ``__call__ / evaluate``.
Examples
--------
>>> from astropy.cosmology import Planck18
>>> model = Planck18.to_format("astropy.model", method="lookback_time")
>>> model
<FlatLambdaCDMCosmologyLookbackTimeModel(H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. , 0. , 0.06] eV, Ob0=0.04897,
name='Planck18')>
"""
cosmo_cls = cosmology.__class__
# get bound method & sig from cosmology (unbound if class).
if not hasattr(cosmology, method):
raise AttributeError(f"{method} is not a method on {cosmology.__class__}.")
func = getattr(cosmology, method)
if not callable(func):
raise ValueError(f"{cosmology.__class__}.{method} is not callable.")
msig = inspect.signature(func)
    # introspect for number of positional inputs, ignoring "self"
    # (kind 0 = POSITIONAL_ONLY, kind 1 = POSITIONAL_OR_KEYWORD)
    n_inputs = len([p for p in tuple(msig.parameters.values()) if (p.kind in (0, 1))])
attrs = {} # class attributes
attrs["_cosmology_class"] = cosmo_cls
attrs["_method_name"] = method
attrs["n_inputs"] = n_inputs
attrs["n_outputs"] = 1
params = {} # Parameters (also class attributes)
for n in cosmology.__parameters__:
v = getattr(cosmology, n) # parameter value
if v is None: # skip unspecified parameters
continue
# add as Model Parameter
params[n] = convert_parameter_to_model_parameter(getattr(cosmo_cls, n), v,
cosmology.meta.get(n))
# class name is cosmology name + Cosmology + method name + Model
clsname = (cosmo_cls.__qualname__.replace(".", "_")
+ "Cosmology"
+ method.replace("_", " ").title().replace(" ", "")
+ "Model")
# make Model class
CosmoModel = type(clsname, (_CosmologyModel, ), {**attrs, **params})
# override __signature__ and format the doc.
setattr(CosmoModel.evaluate, "__signature__", msig)
CosmoModel.evaluate.__doc__ = CosmoModel.evaluate.__doc__.format(
cosmo_cls=cosmo_cls.__qualname__, method=method)
# instantiate class using default values
ps = {n: getattr(cosmology, n) for n in params.keys()}
model = CosmoModel(**ps, name=cosmology.name, meta=copy.deepcopy(cosmology.meta))
return model
def model_identify(origin, format, *args, **kwargs):
"""Identify if object uses the :class:`~astropy.modeling.Model` format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Model) and (format in (None, "astropy.model"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.model", Cosmology, from_model)
convert_registry.register_writer("astropy.model", Cosmology, to_model)
convert_registry.register_identifier("astropy.model", Cosmology, model_identify)
|
a88330e1609f66f63653f599d860471e7db8d55b1af20644d6a9610481b77662 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
from collections import defaultdict
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.table import QTable, Row
from .mapping import from_mapping
def from_row(row, *, move_to_meta=False, cosmology=None):
"""Instantiate a `~astropy.cosmology.Cosmology` from a `~astropy.table.Row`.
Parameters
----------
row : `~astropy.table.Row`
The object containing the Cosmology information.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'table'.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a Row with
``from_row``, we will first make a `~astropy.table.Row` using
:func:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> cr = Planck18.to_format("astropy.row")
>>> cr
<Row index=0>
cosmology name H0 Om0 Tcmb0 Neff m_nu [3] Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64 float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Now this row can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(cr, format="astropy.row")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
"""
# special values
name = row['name'] if 'name' in row.columns else None # get name from column
meta = defaultdict(dict, copy.deepcopy(row.meta))
# Now need to add the Columnar metadata. This is only available on the
# parent table. If Row is ever separated from Table, this should be moved
# to ``to_table``.
for col in row._table.itercols():
if col.info.meta: # Only add metadata if not empty
meta[col.name].update(col.info.meta)
# turn row into mapping, filling cosmo if not in a column
mapping = dict(row)
mapping["name"] = name
mapping.setdefault("cosmology", meta.pop("cosmology", None))
mapping["meta"] = dict(meta)
# build cosmology from map
return from_mapping(mapping, move_to_meta=move_to_meta, cosmology=cosmology)
def to_row(cosmology, *args, cosmology_in_meta=False, table_cls=QTable):
"""Serialize the cosmology into a `~astropy.table.Row`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
table_cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` class or subclass type to use.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool
Whether to put the cosmology class in the Table metadata (if `True`) or
as the first column (if `False`, default).
Returns
-------
`~astropy.table.Row`
With columns for the cosmology parameters, and metadata in the Table's
``meta`` attribute. The cosmology class name will either be a column
or in ``meta``, depending on 'cosmology_in_meta'.
Examples
--------
A Cosmology as a `~astropy.table.Row` will have the cosmology's name and
parameters as columns.
>>> from astropy.cosmology import Planck18
>>> cr = Planck18.to_format("astropy.row")
>>> cr
<Row index=0>
cosmology name H0 Om0 Tcmb0 Neff m_nu [3] Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64 float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
The cosmological class and other metadata, e.g. a paper reference, are in
the Table's metadata.
"""
from .table import to_table
table = to_table(cosmology, cls=table_cls, cosmology_in_meta=cosmology_in_meta)
return table[0] # extract row from table
def row_identify(origin, format, *args, **kwargs):
"""Identify if object uses the `~astropy.table.Row` format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Row) and (format in (None, "astropy.row"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.row", Cosmology, from_row)
convert_registry.register_writer("astropy.row", Cosmology, to_row)
convert_registry.register_identifier("astropy.row", Cosmology, row_identify)
|
85f606f4dfe1af6f40d2f5a800afb9363c89a08aec66eb87592b869b4a30d602 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
"""
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
__all__ = [] # nothing is publicly scoped
def from_cosmology(cosmo, /, **kwargs):
"""Return the |Cosmology| unchanged.
Parameters
----------
cosmo : `~astropy.cosmology.Cosmology`
The cosmology to return.
**kwargs
This argument is required for compatibility with the standard set of
keyword arguments in format `~astropy.cosmology.Cosmology.from_format`,
e.g. "cosmology". If "cosmology" is included and is not `None`,
``cosmo`` is checked for correctness.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Just ``cosmo`` passed through.
Raises
------
TypeError
        If ``cosmo`` is not an instance of the ``cosmology`` class (and
        ``cosmology`` is not `None`).
"""
# Check argument `cosmology`
cosmology = kwargs.get("cosmology")
if isinstance(cosmology, str):
cosmology = _COSMOLOGY_CLASSES[cosmology]
if cosmology is not None and not isinstance(cosmo, cosmology):
raise TypeError(f"cosmology {cosmo} is not an {cosmology} instance.")
return cosmo
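# Pass-through sketch: a |Cosmology| handed to ``from_format`` is returned
# unchanged; calling from a subclass additionally type-checks the input:
#
#     from astropy.cosmology import Cosmology, FlatLambdaCDM, Planck18
#     Cosmology.from_format(Planck18) is Planck18       # True
#     FlatLambdaCDM.from_format(Planck18) is Planck18   # True (and type-checked)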
def to_cosmology(cosmo, *args):
"""Return the |Cosmology| unchanged.
Parameters
----------
cosmo : `~astropy.cosmology.Cosmology`
The cosmology to return.
*args
Not used.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Just ``cosmo`` passed through.
"""
return cosmo
def cosmology_identify(origin, format, *args, **kwargs):
"""Identify if object is a `~astropy.cosmology.Cosmology`.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Cosmology) and (format in (None, "astropy.cosmology"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.cosmology", Cosmology, from_cosmology)
convert_registry.register_writer("astropy.cosmology", Cosmology, to_cosmology)
convert_registry.register_identifier("astropy.cosmology", Cosmology, cosmology_identify)
|
e313dcf332530a27bc2bb06566bfc8cec4ec6c09063275a46591085abd04a54a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import Cosmology
from astropy.table import Column, QTable, Table
from .mapping import to_mapping
from .row import from_row
from .utils import convert_parameter_to_column
def from_table(table, index=None, *, move_to_meta=False, cosmology=None):
"""Instantiate a `~astropy.cosmology.Cosmology` from a |QTable|.
Parameters
----------
table : `~astropy.table.Table`
The object to parse into a |Cosmology|.
index : int, str, or None, optional
Needed to select the row in tables with multiple rows. ``index`` can be
an integer for the row number or, if the table is indexed by a column,
the value of that column. If the table is not indexed and ``index``
is a string, the "name" column is used as the indexing column.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'table'.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a Table with
``from_table``, we will first make a |QTable| using
:func:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> ct = Planck18.to_format("astropy.table")
>>> ct
<QTable length=1>
name H0 Om0 Tcmb0 Neff m_nu [3] Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64 float64
-------- ------------ ------- ------- ------- ----------- -------
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Now this table can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(ct, format="astropy.table")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
Specific cosmology classes can be used to parse the data. The class'
default parameter values are used to fill in any information missing in the
data.
>>> from astropy.cosmology import FlatLambdaCDM
>>> del ct["Tcmb0"] # show FlatLambdaCDM provides default
>>> FlatLambdaCDM.from_format(ct)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897)
For tables with multiple rows of cosmological parameters, the ``index``
argument is needed to select the correct row. The index can be an integer
for the row number or, if the table is indexed by a column, the value of
that column. If the table is not indexed and ``index`` is a string, the
"name" column is used as the indexing column.
Here is an example where ``index`` is needed and can be either an integer
(for the row number) or the name of one of the cosmologies, e.g. 'Planck15'.
>>> from astropy.cosmology import Planck13, Planck15, Planck18
>>> from astropy.table import vstack
>>> cts = vstack([c.to_format("astropy.table")
... for c in (Planck13, Planck15, Planck18)],
... metadata_conflicts='silent')
>>> cts
<QTable length=3>
name H0 Om0 Tcmb0 Neff m_nu [3] Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64 float64
-------- ------------ ------- ------- ------- ----------- --------
Planck13 67.77 0.30712 2.7255 3.046 0.0 .. 0.06 0.048252
Planck15 67.74 0.3075 2.7255 3.046 0.0 .. 0.06 0.0486
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
>>> cosmo = Cosmology.from_format(cts, index=1, format="astropy.table")
>>> cosmo == Planck15
True
For further examples, see :doc:`astropy:cosmology/io`.
"""
# Get row from table
# string index uses the indexed column on the table to find the row index.
if isinstance(index, str):
if not table.indices: # no indexing column, find by string match
indices = np.where(table['name'] == index)[0]
else: # has indexing column
indices = table.loc_indices[index] # need to convert to row index (int)
if isinstance(indices, (int, np.integer)): # loc_indices
index = indices
elif len(indices) == 1: # only happens w/ np.where
index = indices[0]
        elif len(indices) == 0:  # nothing matched the string key
            raise KeyError(f"No matches found for key {index}")
        else:  # like the Highlander, there can be only 1 Cosmology
            raise ValueError(f"more than one cosmology found for key {index}")
# no index is needed for a 1-row table. For a multi-row table...
if index is None:
if len(table) != 1: # multi-row table and no index
raise ValueError("need to select a specific row (e.g. index=1) when "
"constructing a Cosmology from a multi-row table.")
else: # single-row table
index = 0
row = table[index] # index is now the row index (int)
# parse row to cosmo
return from_row(row, move_to_meta=move_to_meta, cosmology=cosmology)
def to_table(cosmology, *args, cls=QTable, cosmology_in_meta=True):
"""Serialize the cosmology into a `~astropy.table.QTable`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` class or subclass type to return.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool
Whether to put the cosmology class in the Table metadata (if `True`,
default) or as the first column (if `False`).
Returns
-------
`~astropy.table.QTable`
With columns for the cosmology parameters, and metadata and
cosmology class name in the Table's ``meta`` attribute
Raises
------
TypeError
If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table`
Examples
--------
A Cosmology as a `~astropy.table.QTable` will have the cosmology's name and
parameters as columns.
>>> from astropy.cosmology import Planck18
>>> ct = Planck18.to_format("astropy.table")
>>> ct
<QTable length=1>
name H0 Om0 Tcmb0 Neff m_nu [3] Ob0
km / (Mpc s) K eV
str8 float64 float64 float64 float64 float64 float64
-------- ------------ ------- ------- ------- ----------- -------
Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
The cosmological class and other metadata, e.g. a paper reference, are in
the Table's metadata.
>>> ct.meta
OrderedDict([..., ('cosmology', 'FlatLambdaCDM')])
To move the cosmology class from the metadata to a Table row, set the
``cosmology_in_meta`` argument to `False`:
>>> Planck18.to_format("astropy.table", cosmology_in_meta=False)
<QTable length=1>
cosmology name H0 Om0 Tcmb0 Neff m_nu [3] Ob0
km / (Mpc s) K eV
str13 str8 float64 float64 float64 float64 float64 float64
------------- -------- ------------ ------- ------- ------- ----------- -------
FlatLambdaCDM Planck18 67.66 0.30966 2.7255 3.046 0.0 .. 0.06 0.04897
Astropy recommends `~astropy.table.QTable` for tables with
`~astropy.units.Quantity` columns. However the returned type may be
overridden using the ``cls`` argument:
>>> from astropy.table import Table
>>> Planck18.to_format("astropy.table", cls=Table)
<Table length=1>
...
"""
if not issubclass(cls, Table):
raise TypeError(f"'cls' must be a (sub)class of Table, not {type(cls)}")
# Start by getting a map representation.
data = to_mapping(cosmology)
data["cosmology"] = data["cosmology"].__qualname__ # change to str
# Metadata
meta = data.pop("meta") # remove the meta
if cosmology_in_meta:
meta["cosmology"] = data.pop("cosmology")
# Need to turn everything into something Table can process:
# - Column for Parameter
# - list for anything else
cosmo_cls = cosmology.__class__
for k, v in data.items():
if k in cosmology.__parameters__:
col = convert_parameter_to_column(getattr(cosmo_cls, k), v,
cosmology.meta.get(k))
else:
col = Column([v])
data[k] = col
tbl = cls(data, meta=meta)
tbl.add_index("name", unique=True)
return tbl
def table_identify(origin, format, *args, **kwargs):
"""Identify if object uses the Table format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Table) and (format in (None, "astropy.table"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("astropy.table", Cosmology, from_table)
convert_registry.register_writer("astropy.table", Cosmology, to_table)
convert_registry.register_identifier("astropy.table", Cosmology, table_identify)
|
942724e611a04ccb94389d2500c9da714e547e94bb86508bb8595189787708c4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions, included here **FOR REFERENCE ONLY** since
the io registry cannot be displayed. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
""" # this is shown in the docs.
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
from astropy.io.misc.yaml import AstropyDumper, AstropyLoader, dump, load
from .mapping import from_mapping
from .utils import FULLQUALNAME_SUBSTITUTIONS as QNS
__all__ = [] # nothing is publicly scoped
##############################################################################
# Serializer Functions
# these do Cosmology <-> YAML through a modified dictionary representation of
# the Cosmology object. The Unified-I/O functions are just wrappers to the YAML
# that calls these functions.
def yaml_representer(tag):
""":mod:`yaml` representation of |Cosmology| object.
Parameters
----------
tag : str
The class tag, e.g. '!astropy.cosmology.LambdaCDM'
Returns
-------
representer : callable[[`~astropy.io.misc.yaml.AstropyDumper`, |Cosmology|], str]
Function to construct :mod:`yaml` representation of |Cosmology| object.
"""
def representer(dumper, obj):
"""Cosmology yaml representer function for {}.
Parameters
----------
dumper : `~astropy.io.misc.yaml.AstropyDumper`
obj : `~astropy.cosmology.Cosmology`
Returns
-------
str
:mod:`yaml` representation of |Cosmology| object.
"""
# convert to mapping
map = obj.to_format("mapping")
# remove the cosmology class info. It's already recorded in `tag`
map.pop("cosmology")
# make the metadata serializable in an order-preserving way.
map["meta"] = tuple(map["meta"].items())
return dumper.represent_mapping(tag, map)
representer.__doc__ = representer.__doc__.format(tag)
return representer
def yaml_constructor(cls):
"""Cosmology| object from :mod:`yaml` representation.
Parameters
----------
cls : type
The class type, e.g. `~astropy.cosmology.LambdaCDM`.
Returns
-------
constructor : callable
Function to construct |Cosmology| object from :mod:`yaml` representation.
"""
def constructor(loader, node):
"""Cosmology yaml constructor function.
Parameters
----------
loader : `~astropy.io.misc.yaml.AstropyLoader`
node : `yaml.nodes.MappingNode`
yaml representation of |Cosmology| object.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
"""
# create mapping from YAML node
map = loader.construct_mapping(node)
# restore metadata to dict
map["meta"] = dict(map["meta"])
# get cosmology class qualified name from node
cosmology = str(node.tag).split(".")[-1]
# create Cosmology from mapping
return from_mapping(map, move_to_meta=False, cosmology=cosmology)
return constructor
def register_cosmology_yaml(cosmo_cls):
"""Register :mod:`yaml` for Cosmology class.
Parameters
----------
cosmo_cls : `~astropy.cosmology.Cosmology` class
"""
fqn = f"{cosmo_cls.__module__}.{cosmo_cls.__qualname__}"
tag = "!" + QNS.get(fqn, fqn) # Possibly sub fully qualified name for a preferred path
AstropyDumper.add_representer(cosmo_cls, yaml_representer(tag))
AstropyLoader.add_constructor(tag, yaml_constructor(cosmo_cls))
##############################################################################
# Unified-I/O Functions
def from_yaml(yml, *, cosmology=None):
"""Load `~astropy.cosmology.Cosmology` from :mod:`yaml` object.
Parameters
----------
yml : str
:mod:`yaml` representation of |Cosmology| object
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
        The expected cosmology class (or string name thereof). This argument
        is only checked for correctness if not `None`.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Raises
------
TypeError
If the |Cosmology| object loaded from ``yml`` is not an instance of
        the ``cosmology`` class (and ``cosmology`` is not `None`).
"""
with u.add_enabled_units(cu):
cosmo = load(yml)
# Check argument `cosmology`, if not None
# This kwarg is required for compatibility with |Cosmology.from_format|
if isinstance(cosmology, str):
cosmology = _COSMOLOGY_CLASSES[cosmology]
if cosmology is not None and not isinstance(cosmo, cosmology):
raise TypeError(f"cosmology {cosmo} is not an {cosmology} instance.")
return cosmo
def to_yaml(cosmology, *args):
"""Return the cosmology class, parameters, and metadata as a :mod:`yaml` object.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
Returns
-------
str
:mod:`yaml` representation of |Cosmology| object
"""
return dump(cosmology)
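# Round-trip sketch. The yaml identifier is currently disabled (see the note
# just below), so the format must be given explicitly when reading:
#
#     from astropy.cosmology import Cosmology, Planck18
#     yml = Planck18.to_format("yaml")
#     cosmo = Cosmology.from_format(yml, format="yaml")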
# ``read`` cannot handle non-path strings.
# TODO! this says there should be different types of I/O registries.
# not just hacking object conversion on top of file I/O.
# def yaml_identify(origin, format, *args, **kwargs):
# """Identify if object uses the yaml format.
#
# Returns
# -------
# bool
# """
# itis = False
# if origin == "read":
# itis = isinstance(args[1], str) and args[1][0].startswith("!")
# itis &= format in (None, "yaml")
#
# return itis
# ===================================================================
# Register
for cosmo_cls in _COSMOLOGY_CLASSES.values():
register_cosmology_yaml(cosmo_cls)
convert_registry.register_reader("yaml", Cosmology, from_yaml)
convert_registry.register_writer("yaml", Cosmology, to_yaml)
# convert_registry.register_identifier("yaml", Cosmology, yaml_identify)
|
4dda94c820a33a1af38991e6972d0c16523e2111a4d62050ae06c85d14993c8b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The following are private functions, included here **FOR REFERENCE ONLY** since
the io registry cannot be displayed. These functions are registered into
:meth:`~astropy.cosmology.Cosmology.to_format` and
:meth:`~astropy.cosmology.Cosmology.from_format` and should only be accessed
via these methods.
""" # this is shown in the docs.
import copy
from collections.abc import Mapping
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.core import _COSMOLOGY_CLASSES, Cosmology
__all__ = [] # nothing is publicly scoped
def from_mapping(map, *, move_to_meta=False, cosmology=None):
"""Load `~astropy.cosmology.Cosmology` from mapping object.
Parameters
----------
map : mapping
Arguments into the class -- like "name" or "meta".
If 'cosmology' is None, must have field "cosmology" which can be either
the string name of the cosmology class (e.g. "FlatLambdaCDM") or the
class itself.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
Cosmology does NOT have a keyword-only argument (e.g. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
the cosmology instance. The class also provides default parameter values,
filling in any non-mandatory arguments missing in 'map'.
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
Examples
--------
To see loading a `~astropy.cosmology.Cosmology` from a dictionary with
``from_mapping``, we will first make a mapping using
:meth:`~astropy.cosmology.Cosmology.to_format`.
>>> from astropy.cosmology import Cosmology, Planck18
>>> cm = Planck18.to_format('mapping')
>>> cm
{'cosmology': <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>,
'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966,
'Tcmb0': <Quantity 2.7255 K>, 'Neff': 3.046,
'm_nu': <Quantity [0. , 0. , 0.06] eV>, 'Ob0': 0.04897,
'meta': ...
Now this dict can be used to load a new cosmological instance identical
to the ``Planck18`` cosmology from which it was generated.
>>> cosmo = Cosmology.from_format(cm, format="mapping")
>>> cosmo
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=2.7255 K, Neff=3.046, m_nu=[0. 0. 0.06] eV, Ob0=0.04897)
Specific cosmology classes can be used to parse the data. The class'
default parameter values are used to fill in any information missing in the
data.
>>> from astropy.cosmology import FlatLambdaCDM
>>> del cm["Tcmb0"] # show FlatLambdaCDM provides default
>>> FlatLambdaCDM.from_format(cm)
FlatLambdaCDM(name="Planck18", H0=67.66 km / (Mpc s), Om0=0.30966,
Tcmb0=0.0 K, Neff=3.046, m_nu=None, Ob0=0.04897)
"""
params = dict(map) # so we are guaranteed to have a poppable map
# get cosmology
# 1st from argument. Allows for override of the cosmology, if on file.
# 2nd from params. This MUST have the cosmology if 'kwargs' did not.
if cosmology is None:
cosmology = params.pop("cosmology")
else:
params.pop("cosmology", None) # pop, but don't use
# if string, parse to class
if isinstance(cosmology, str):
cosmology = _COSMOLOGY_CLASSES[cosmology]
# select arguments from mapping that are in the cosmo's signature.
ba = cosmology._init_signature.bind_partial() # blank set of args
ba.apply_defaults() # fill in the defaults
for k in cosmology._init_signature.parameters.keys():
if k in params: # transfer argument, if in params
ba.arguments[k] = params.pop(k)
# deal with remaining params. If there is a **kwargs use that, else
# allow to transfer to metadata. Raise TypeError if can't.
lastp = tuple(cosmology._init_signature.parameters.values())[-1]
    if lastp.kind == 4:  # inspect.Parameter.VAR_KEYWORD, i.e. **kwargs
ba.arguments[lastp.name] = params
elif move_to_meta: # prefers current meta, which was explicitly set
meta = ba.arguments["meta"] or {} # (None -> dict)
ba.arguments["meta"] = {**params, **meta}
elif params:
raise TypeError(f"there are unused parameters {params}.")
# else: pass # no kwargs, no move-to-meta, and all the params are used
return cosmology(*ba.args, **ba.kwargs)
def to_mapping(cosmology, *args, cls=dict, cosmology_as_str=False, move_from_meta=False):
"""Return the cosmology class, parameters, and metadata as a `dict`.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
*args
Not used. Needed for compatibility with
`~astropy.io.registry.UnifiedReadWriteMethod`
cls : type (optional, keyword-only)
`dict` or `collections.Mapping` subclass.
The mapping type to return. Default is `dict`.
cosmology_as_str : bool (optional, keyword-only)
Whether the cosmology value is the class (if `False`, default) or
the semi-qualified name (if `True`).
move_from_meta : bool (optional, keyword-only)
Whether to add the Cosmology's metadata as an item to the mapping (if
`False`, default) or to merge with the rest of the mapping, preferring
the original values (if `True`)
Returns
-------
dict
with key-values for the cosmology parameters and also:
- 'cosmology' : the class
- 'meta' : the contents of the cosmology's metadata attribute.
If ``move_from_meta`` is `True`, this key is missing and the
contained metadata are added to the main `dict`.
Examples
--------
A Cosmology as a mapping will have the cosmology's name and
parameters as items, and the metadata as a nested dictionary.
>>> from astropy.cosmology import Planck18
>>> Planck18.to_format('mapping')
{'cosmology': <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>,
'name': 'Planck18', 'H0': <Quantity 67.66 km / (Mpc s)>, 'Om0': 0.30966,
'Tcmb0': <Quantity 2.7255 K>, 'Neff': 3.046,
'm_nu': <Quantity [0. , 0. , 0.06] eV>, 'Ob0': 0.04897,
'meta': ...
The dictionary type may be changed with the ``cls`` keyword argument:
>>> from collections import OrderedDict
>>> Planck18.to_format('mapping', cls=OrderedDict)
OrderedDict([('cosmology', <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>),
('name', 'Planck18'), ('H0', <Quantity 67.66 km / (Mpc s)>),
('Om0', 0.30966), ('Tcmb0', <Quantity 2.7255 K>), ('Neff', 3.046),
('m_nu', <Quantity [0. , 0. , 0.06] eV>), ('Ob0', 0.04897),
('meta', ...
Sometimes it is more useful to have the name of the cosmology class, not
the object itself. The keyword argument ``cosmology_as_str`` may be used:
>>> Planck18.to_format('mapping', cosmology_as_str=True)
{'cosmology': 'FlatLambdaCDM', ...
The metadata is normally included as a nested mapping. To move the metadata
into the main mapping, use the keyword argument ``move_from_meta``. This
kwarg inverts ``move_to_meta`` in
``Cosmology.to_format("mapping", move_to_meta=...)`` where extra items
are moved to the metadata (if the cosmology constructor does not have a
variable keyword-only argument -- ``**kwargs``).
>>> from astropy.cosmology import Planck18
>>> Planck18.to_format('mapping', move_from_meta=True)
{'cosmology': <class 'astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM'>,
'name': 'Planck18', 'Oc0': 0.2607, 'n': 0.9665, 'sigma8': 0.8102, ...
"""
if not issubclass(cls, (dict, Mapping)):
raise TypeError(f"'cls' must be a (sub)class of dict or Mapping, not {cls}")
m = cls()
# start with the cosmology class & name
m["cosmology"] = cosmology.__class__.__qualname__ if cosmology_as_str else cosmology.__class__
m["name"] = cosmology.name # here only for dict ordering
meta = copy.deepcopy(cosmology.meta) # metadata (mutable)
if move_from_meta:
# Merge the mutable metadata. Since params are added later they will
# be preferred in cases of overlapping keys. Likewise, need to pop
# cosmology and name from meta.
meta.pop("cosmology", None)
meta.pop("name", None)
m.update(meta)
# Add all the immutable inputs
m.update({k: v for k, v in cosmology._init_arguments.items()
if k not in ("meta", "name")})
# Lastly, add the metadata, if haven't already (above)
if not move_from_meta:
m["meta"] = meta # TODO? should meta be type(cls)
return m
def mapping_identify(origin, format, *args, **kwargs):
"""Identify if object uses the mapping format.
Returns
-------
bool
"""
itis = False
if origin == "read":
itis = isinstance(args[1], Mapping) and (format in (None, "mapping"))
return itis
# ===================================================================
# Register
convert_registry.register_reader("mapping", Cosmology, from_mapping)
convert_registry.register_writer("mapping", Cosmology, to_mapping)
convert_registry.register_identifier("mapping", Cosmology, mapping_identify)
|
ffc04a13085ff1bcaedb1a7374fbeb51e5a1f5724ea1e0ac4d88828eeeb994c3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy.modeling import Parameter as ModelParameter
from astropy.table import Column
FULLQUALNAME_SUBSTITUTIONS = {
"astropy.cosmology.flrw.base.FLRW": "astropy.cosmology.flrw.FLRW",
"astropy.cosmology.flrw.lambdacdm.LambdaCDM": "astropy.cosmology.flrw.LambdaCDM",
"astropy.cosmology.flrw.lambdacdm.FlatLambdaCDM": "astropy.cosmology.flrw.FlatLambdaCDM",
"astropy.cosmology.flrw.w0wacdm.w0waCDM": "astropy.cosmology.flrw.w0waCDM",
"astropy.cosmology.flrw.w0wacdm.Flatw0waCDM": "astropy.cosmology.flrw.Flatw0waCDM",
"astropy.cosmology.flrw.w0wzcdm.w0wzCDM": "astropy.cosmology.flrw.w0wzCDM",
"astropy.cosmology.flrw.w0cdm.wCDM": "astropy.cosmology.flrw.wCDM",
"astropy.cosmology.flrw.w0cdm.FlatwCDM": "astropy.cosmology.flrw.FlatwCDM",
"astropy.cosmology.flrw.wpwazpcdm.wpwaCDM": "astropy.cosmology.flrw.wpwaCDM",
}
"""Substitutions mapping the actual qualitative name to its preferred value."""
def convert_parameter_to_column(parameter, value, meta=None):
"""Convert a |Cosmology| Parameter to a Table |Column|.
Parameters
----------
parameter : `astropy.cosmology.parameter.Parameter`
value : Any
meta : dict or None, optional
Information from the Cosmology's metadata.
Returns
-------
`astropy.table.Column`
"""
format = None if value is None else parameter.format_spec
shape = (1,) + np.shape(value) # minimum of 1d
col = Column(data=np.reshape(value, shape),
name=parameter.name,
dtype=None, # inferred from the data
description=parameter.__doc__,
format=format,
meta=meta)
return col
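# Usage sketch (illustrative): Parameter descriptors live on the cosmology
# class, so a column for e.g. H0 can be built as
#
#     from astropy.cosmology import Planck18
#     col = convert_parameter_to_column(type(Planck18).H0, Planck18.H0)
#     col.name   # 'H0'
#     col.shape  # (1,) -- scalar values are promoted to length-1 columns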
def convert_parameter_to_model_parameter(parameter, value, meta=None):
"""Convert a Cosmology Parameter to a Model Parameter.
Parameters
----------
parameter : `astropy.cosmology.parameter.Parameter`
value : Any
meta : dict or None, optional
Information from the Cosmology's metadata.
This function will use any of: 'getter', 'setter', 'fixed', 'tied',
'min', 'max', 'bounds', 'prior', 'posterior'.
Returns
-------
`astropy.modeling.Parameter`
"""
    # Pull from the metadata the items relevant to a Model Parameter
extra = {k: v for k, v in (meta or {}).items()
if k in ('getter', 'setter', 'fixed', 'tied', 'min', 'max',
'bounds', 'prior', 'posterior')}
return ModelParameter(description=parameter.__doc__,
default=value,
unit=getattr(value, "unit", None),
**extra)
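# Usage sketch (illustrative): recognized metadata keys such as 'fixed' are
# forwarded to the Model Parameter; anything else is ignored:
#
#     from astropy.cosmology import Planck18
#     mp = convert_parameter_to_model_parameter(type(Planck18).H0, Planck18.H0,
#                                               meta={'fixed': True, 'note': 'x'})
#     mp.fixed   # True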
|
fae4702c662967ca1f8bf81bf38825d34b2368c254cf63d4e604c8a69eeb7c45 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from astropy.table import QTable
from .table import from_table, to_table
def read_ecsv(filename, index=None, *, move_to_meta=False, cosmology=None, **kwargs):
"""Read a `~astropy.cosmology.Cosmology` from an ECSV file.
Parameters
----------
filename : path-like or file-like
From where to read the Cosmology.
index : int, str, or None, optional
Needed to select the row in tables with multiple rows. ``index`` can be
an integer for the row number or, if the table is indexed by a column,
the value of that column. If the table is not indexed and ``index``
is a string, the "name" column is used as the indexing column.
move_to_meta : bool (optional, keyword-only)
Whether to move keyword arguments that are not in the Cosmology class'
signature to the Cosmology's metadata. This will only be applied if the
        Cosmology does NOT have a variable-keyword argument (i.e. ``**kwargs``).
Arguments moved to the metadata will be merged with existing metadata,
preferring specified metadata in the case of a merge conflict
(e.g. for ``Cosmology(meta={'key':10}, key=42)``, the ``Cosmology.meta``
will be ``{'key': 10}``).
cosmology : str, `~astropy.cosmology.Cosmology` class, or None (optional, keyword-only)
The cosmology class (or string name thereof) to use when constructing
        the cosmology instance. The class also provides default parameter values,
        filling in any non-mandatory arguments missing from the saved table.
**kwargs
Passed to :attr:`astropy.table.QTable.read`
Returns
-------
`~astropy.cosmology.Cosmology` subclass instance
"""
kwargs["format"] = "ascii.ecsv"
with u.add_enabled_units(cu):
table = QTable.read(filename, **kwargs)
# build cosmology from table
return from_table(table, index=index, move_to_meta=move_to_meta, cosmology=cosmology)
def write_ecsv(cosmology, file, *, overwrite=False, cls=QTable, cosmology_in_meta=True, **kwargs):
"""Serialize the cosmology into a ECSV.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology` subclass instance
file : path-like or file-like
Location to save the serialized cosmology.
overwrite : bool
Whether to overwrite the file, if it exists.
cls : type (optional, keyword-only)
Astropy :class:`~astropy.table.Table` (sub)class to use when writing.
Default is :class:`~astropy.table.QTable`.
cosmology_in_meta : bool
Whether to put the cosmology class in the Table metadata (if `True`,
default) or as the first column (if `False`).
**kwargs
Passed to ``cls.write``
Raises
------
TypeError
If kwarg (optional) 'cls' is not a subclass of `astropy.table.Table`
"""
table = to_table(cosmology, cls=cls, cosmology_in_meta=cosmology_in_meta)
kwargs["format"] = "ascii.ecsv"
table.write(file, overwrite=overwrite, **kwargs)
def ecsv_identify(origin, filepath, fileobj, *args, **kwargs):
"""Identify if object uses the Table format.
Returns
-------
bool
"""
return filepath is not None and filepath.endswith(".ecsv")
# ===================================================================
# Register
readwrite_registry.register_reader("ascii.ecsv", Cosmology, read_ecsv)
readwrite_registry.register_writer("ascii.ecsv", Cosmology, write_ecsv)
readwrite_registry.register_identifier("ascii.ecsv", Cosmology, ecsv_identify)
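# A minimal read/write sketch using the registration above (assuming the
# built-in Planck18 realization; the path is illustrative and the ".ecsv"
# suffix lets ``ecsv_identify`` autodetect the format):
#
#     >>> from astropy.cosmology import Cosmology, Planck18
#     >>> Planck18.write("planck18.ecsv")              # doctest: +SKIP
#     >>> Cosmology.read("planck18.ecsv") == Planck18  # doctest: +SKIP
#     True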
|
f0c025661e1eada1c55346df0508f40f4d93c4ad48cbfa418b4df921ea361dbe | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import inf
import numpy as np
import pytest
from astropy.cosmology.utils import aszarr, inf_like, vectorize_if_needed, vectorize_redshift_method
from astropy.utils.exceptions import AstropyDeprecationWarning
from .test_core import _zarr, invalid_zs, valid_zs
def test_vectorize_redshift_method():
"""Test :func:`astropy.cosmology.utils.vectorize_redshift_method`."""
class Class:
@vectorize_redshift_method
def method(self, z):
return z
c = Class()
assert hasattr(c.method, "__vectorized__")
assert isinstance(c.method.__vectorized__, np.vectorize)
# calling with Number
assert c.method(1) == 1
assert isinstance(c.method(1), int)
# calling with a numpy scalar
assert c.method(np.float64(1)) == np.float64(1)
assert isinstance(c.method(np.float64(1)), np.float64)
# numpy array
assert all(c.method(np.array([1, 2])) == np.array([1, 2]))
assert isinstance(c.method(np.array([1, 2])), np.ndarray)
# non-scalar
assert all(c.method([1, 2]) == np.array([1, 2]))
assert isinstance(c.method([1, 2]), np.ndarray)
def test_vectorize_if_needed():
"""
Test :func:`astropy.cosmology.utils.vectorize_if_needed`.
    There's no need to test 'veckw' because that is passed directly to
    `numpy.vectorize`, which thoroughly tests the various inputs.
"""
func = lambda x: x ** 2
with pytest.warns(AstropyDeprecationWarning):
# not vectorized
assert vectorize_if_needed(func, 2) == 4
# vectorized
assert all(vectorize_if_needed(func, [2, 3]) == [4, 9])
@pytest.mark.parametrize("arr, expected",
[(0.0, inf), # float scalar
(1, inf), # integer scalar should give float output
([0.0, 1.0, 2.0, 3.0], (inf, inf, inf, inf)),
([0, 1, 2, 3], (inf, inf, inf, inf)), # integer list
])
def test_inf_like(arr, expected):
"""
Test :func:`astropy.cosmology.utils.inf_like`.
All inputs should give a float output.
These tests are also in the docstring, but it's better to have them also
in one consolidated location.
"""
with pytest.warns(AstropyDeprecationWarning):
assert np.all(inf_like(arr) == expected)
# -------------------------------------------------------------------
class Test_aszarr:
@pytest.mark.parametrize("z, expect", list(zip(valid_zs, [
0, 1, 1100, np.float64(3300), 2.0, 3.0, _zarr, _zarr, _zarr, _zarr
])))
def test_valid(self, z, expect):
"""Test :func:`astropy.cosmology.utils.aszarr`."""
got = aszarr(z)
assert np.array_equal(got, expect)
@pytest.mark.parametrize("z, exc", invalid_zs)
def test_invalid(self, z, exc):
"""Test :func:`astropy.cosmology.utils.aszarr`."""
with pytest.raises(exc):
aszarr(z)
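# Quick behavior sketch for ``aszarr``, inferred from the parametrization
# above (an assumption, not asserted by this module): scalars pass through,
# array-likes become ndarrays, and redshift/dimensionless units are stripped.
#
#     >>> aszarr(1)                                    # doctest: +SKIP
#     1
#     >>> aszarr([1, 2])                               # doctest: +SKIP
#     array([1, 2])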
|
17f3527b26aa20409d592d88d1e86939c56d4122053540690feeab80a64362bf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Configure the tests for :mod:`astropy.cosmology`."""
from astropy.cosmology.tests.helper import clean_registry
from astropy.tests.helper import pickle_protocol
|
49519a568150d95a20960334c45878e3c141458b924227d09eeccb24cf5d1cde | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.parameter`."""
##############################################################################
# IMPORTS
# STDLIB
import inspect
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import Cosmology
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter, _validate_to_float, _validate_with_unit
##############################################################################
# TESTS
##############################################################################
class ParameterTestMixin:
"""Tests for a :class:`astropy.cosmology.Parameter` on a Cosmology.
:class:`astropy.cosmology.Parameter` is a descriptor and this test suite
tests descriptors by class inheritance, so ``ParameterTestMixin`` is mixed
into ``TestCosmology`` (tests :class:`astropy.cosmology.Cosmology`).
"""
@pytest.fixture
def parameter(self, cosmo_cls):
"""Cosmological Parameters"""
# I wish this would work
# yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__parameters__}
# just return one parameter at random
yield getattr(cosmo_cls, set(cosmo_cls.__parameters__).pop())
@pytest.fixture
def all_parameter(self, cosmo_cls):
"""Cosmological All Parameter instances"""
# I wish this would work
# yield from {getattr(cosmo_cls, n) for n in cosmo_cls.__all_parameters__}
# just return one parameter at random
yield getattr(cosmo_cls, set(cosmo_cls.__all_parameters__).pop())
# ===============================================================
# Method Tests
def test_Parameter_class_attributes(self, all_parameter):
"""Test :class:`astropy.cosmology.Parameter` attributes on class."""
# _registry_validators
assert hasattr(all_parameter, "_registry_validators")
assert isinstance(all_parameter._registry_validators, dict)
assert all(isinstance(k, str) for k in all_parameter._registry_validators.keys())
assert all(callable(v) for v in all_parameter._registry_validators.values())
def test_Parameter_init(self):
"""Test :class:`astropy.cosmology.Parameter` instantiation."""
# defaults
parameter = Parameter()
assert parameter.fvalidate is _validate_with_unit
assert parameter.unit is None
assert parameter.equivalencies == []
assert parameter.format_spec == ""
assert parameter.derived is False
assert parameter.name is None
# setting all kwargs
parameter = Parameter(fvalidate="float", doc="DOCSTRING",
unit="km", equivalencies=[u.mass_energy()],
fmt=".4f", derived=True)
assert parameter.fvalidate is _validate_to_float
assert parameter.unit is u.km
assert parameter.equivalencies == [u.mass_energy()]
assert parameter.format_spec == ".4f"
assert parameter.derived is True
def test_Parameter_instance_attributes(self, all_parameter):
"""Test :class:`astropy.cosmology.Parameter` attributes from init."""
assert hasattr(all_parameter, "fvalidate")
assert callable(all_parameter.fvalidate)
assert hasattr(all_parameter, "__doc__")
# Parameter
assert hasattr(all_parameter, "_unit")
assert hasattr(all_parameter, "_equivalencies")
assert hasattr(all_parameter, "_fmt")
assert hasattr(all_parameter, "_derived")
# __set_name__
assert hasattr(all_parameter, "_attr_name")
assert hasattr(all_parameter, "_attr_name_private")
def test_Parameter_fvalidate(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
assert hasattr(all_parameter, "fvalidate")
assert callable(all_parameter.fvalidate)
def test_Parameter_name(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.name`."""
assert hasattr(all_parameter, "name")
assert isinstance(all_parameter.name, str)
assert all_parameter.name is all_parameter._attr_name
def test_Parameter_unit(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.unit`."""
assert hasattr(all_parameter, "unit")
assert isinstance(all_parameter.unit, (u.UnitBase, type(None)))
assert all_parameter.unit is all_parameter._unit
def test_Parameter_equivalencies(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
assert hasattr(all_parameter, "equivalencies")
assert isinstance(all_parameter.equivalencies, (list, u.Equivalency))
assert all_parameter.equivalencies is all_parameter._equivalencies
def test_Parameter_format_spec(self, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.format_spec`."""
assert hasattr(all_parameter, "format_spec")
assert isinstance(all_parameter.format_spec, str)
assert all_parameter.format_spec is all_parameter._fmt
def test_Parameter_derived(self, cosmo_cls, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.derived`."""
assert hasattr(all_parameter, "derived")
assert isinstance(all_parameter.derived, bool)
assert all_parameter.derived is all_parameter._derived
# test value
if all_parameter.name in cosmo_cls.__parameters__:
assert all_parameter.derived is False
else:
assert all_parameter.derived is True
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.__get__`."""
# from class
parameter = getattr(cosmo_cls, all_parameter.name)
assert isinstance(parameter, Parameter)
assert parameter is all_parameter
# from instance
parameter = getattr(cosmo, all_parameter.name)
assert np.all(parameter == getattr(cosmo, all_parameter._attr_name_private))
def test_Parameter_descriptor_set(self, cosmo, all_parameter):
"""Test :attr:`astropy.cosmology.Parameter.__set__`."""
# test it's already set
assert hasattr(cosmo, all_parameter._attr_name_private)
# and raises an error if set again
with pytest.raises(AttributeError, match="can't set attribute"):
setattr(cosmo, all_parameter._attr_name, None)
# -------------------------------------------
# validate value
# tested later.
# ===============================================================
# Usage Tests
def test_Parameter_listed(self, cosmo_cls, all_parameter):
"""Test each `astropy.cosmology.Parameter` attached to Cosmology."""
# just double check that each entry is a Parameter
assert isinstance(all_parameter, Parameter)
# the reverse: check that if it is a Parameter, it's listed.
# note have to check the more inclusive ``__all_parameters__``
assert all_parameter.name in cosmo_cls.__all_parameters__
if not all_parameter.derived:
assert all_parameter.name in cosmo_cls.__parameters__
def test_parameter_related_attributes_on_Cosmology(self, cosmo_cls):
"""Test `astropy.cosmology.Parameter`-related on Cosmology."""
# establish has expected attribute
assert hasattr(cosmo_cls, "__parameters__")
assert hasattr(cosmo_cls, "__all_parameters__")
def test_Parameter_not_unique(self, cosmo_cls, clean_registry):
"""Cosmology Parameter not unique to class when subclass defined."""
# define subclass to show param is same
class ExampleBase(cosmo_cls):
param = Parameter()
class Example(ExampleBase): pass
assert Example.param is ExampleBase.param
assert Example.__parameters__ == ExampleBase.__parameters__
def test_Parameters_reorder_by_signature(self, cosmo_cls, clean_registry):
"""Test parameters are reordered."""
class Example(cosmo_cls):
param = Parameter()
def __init__(self, param, *, name=None, meta=None):
pass # never actually initialized
# param should be 1st, all other parameters next
        assert Example.__parameters__[0] == "param"
# Check the other parameters are as expected.
# only run this test if "param" is not already on the cosmology
if cosmo_cls.__parameters__[0] != "param":
assert set(Example.__parameters__[1:]) == set(cosmo_cls.__parameters__)
def test_make_from_Parameter(self, cosmo_cls, clean_registry):
"""Test the parameter creation process. Uses ``__set__``."""
class Example(cosmo_cls):
param = Parameter(unit=u.eV, equivalencies=u.mass_energy())
def __init__(self, param, *, name=None, meta=None):
self.param = param
@property
def is_flat(self):
return super().is_flat()
assert Example(1).param == 1 * u.eV
assert Example(1 * u.eV).param == 1 * u.eV
assert Example(1 * u.J).param == (1 * u.J).to(u.eV)
assert Example(1 * u.kg).param == (1 * u.kg).to(u.eV, u.mass_energy())
# ========================================================================
class TestParameter(ParameterTestMixin):
"""
Test `astropy.cosmology.Parameter` directly. Adds a lot of specific tests
that wouldn't be covered by the per-cosmology tests.
"""
def setup_class(self):
class Example1(Cosmology):
param = Parameter(doc="Description of example parameter.",
unit=u.m, equivalencies=u.mass_energy())
def __init__(self, param=15):
self.param = param
@property
def is_flat(self):
return super().is_flat()
# with validator
class Example2(Example1):
def __init__(self, param=15 * u.m):
self.param = param
@Example1.param.validator
def param(self, param, value):
return value.to(u.km)
# attributes
self.classes = {"Example1": Example1, "Example2": Example2}
def teardown_class(self):
for cls in self.classes.values():
_COSMOLOGY_CLASSES.pop(cls.__qualname__)
@pytest.fixture(scope="class", params=["Example1", "Example2"])
def cosmo_cls(self, request):
"""Cosmology class."""
return self.classes[request.param]
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
"""Cosmology instance"""
return cosmo_cls()
@pytest.fixture(scope="class")
def param(self, cosmo_cls):
"""Get Parameter 'param' from cosmology class."""
return cosmo_cls.param
# ==============================================================
def test_Parameter_instance_attributes(self, param):
"""Test :class:`astropy.cosmology.Parameter` attributes from init."""
super().test_Parameter_instance_attributes(param)
# property
assert param.__doc__ == "Description of example parameter."
# custom from init
assert param._unit == u.m
assert param._equivalencies == u.mass_energy()
assert param._fmt == ""
        assert param._derived is False
# custom from set_name
assert param._attr_name == "param"
assert param._attr_name_private == "_param"
def test_Parameter_fvalidate(self, cosmo, param):
"""Test :attr:`astropy.cosmology.Parameter.fvalidate`."""
super().test_Parameter_fvalidate(param)
value = param.fvalidate(cosmo, param, 1000 * u.m)
assert value == 1 * u.km
def test_Parameter_name(self, param):
"""Test :attr:`astropy.cosmology.Parameter.name`."""
super().test_Parameter_name(param)
assert param.name == "param"
def test_Parameter_unit(self, param):
"""Test :attr:`astropy.cosmology.Parameter.unit`."""
super().test_Parameter_unit(param)
assert param.unit == u.m
def test_Parameter_equivalencies(self, param):
"""Test :attr:`astropy.cosmology.Parameter.equivalencies`."""
super().test_Parameter_equivalencies(param)
assert param.equivalencies == u.mass_energy()
def test_Parameter_format_spec(self, param):
"""Test :attr:`astropy.cosmology.Parameter.format_spec`."""
super().test_Parameter_format_spec(param)
assert param.format_spec == ""
def test_Parameter_derived(self, cosmo_cls, param):
"""Test :attr:`astropy.cosmology.Parameter.derived`."""
super().test_Parameter_derived(cosmo_cls, param)
assert param.derived is False
# -------------------------------------------
# descriptor methods
def test_Parameter_descriptor_get(self, cosmo_cls, cosmo, param):
"""Test :meth:`astropy.cosmology.Parameter.__get__`."""
super().test_Parameter_descriptor_get(cosmo_cls, cosmo, param)
# from instance
value = getattr(cosmo, param.name)
assert value == 15 * u.m
# -------------------------------------------
# validation
def test_Parameter_validator(self, param):
"""Test :meth:`astropy.cosmology.Parameter.validator`."""
for k in Parameter._registry_validators:
newparam = param.validator(k)
assert newparam.fvalidate == newparam._registry_validators[k]
# error for non-registered str
with pytest.raises(ValueError, match="`fvalidate`, if str"):
Parameter(fvalidate="NOT REGISTERED")
# error if wrong type
with pytest.raises(TypeError, match="`fvalidate` must be a function or"):
Parameter(fvalidate=object())
def test_Parameter_validate(self, cosmo, param):
"""Test :meth:`astropy.cosmology.Parameter.validate`."""
value = param.validate(cosmo, 1000 * u.m)
# whether has custom validator
if param.fvalidate is param._registry_validators["default"]:
assert value.unit == u.m
assert value.value == 1000
else:
assert value.unit == u.km
assert value.value == 1
def test_Parameter_register_validator(self, param):
"""Test :meth:`astropy.cosmology.Parameter.register_validator`."""
# already registered
with pytest.raises(KeyError, match="validator 'default' already"):
param.__class__.register_validator("default", None)
# validator not None
try:
func = lambda x: x
validator = param.__class__.register_validator("newvalidator", func)
assert validator is func
finally:
param.__class__._registry_validators.pop("newvalidator", None)
# used as decorator
try:
@param.__class__.register_validator("newvalidator")
def func(cosmology, param, value):
return value
assert param.__class__._registry_validators["newvalidator"] is func
finally:
param.__class__._registry_validators.pop("newvalidator", None)
# -------------------------------------------
def test_Parameter_clone(self, param):
"""Test :meth:`astropy.cosmology.Parameter.clone`."""
# this implicitly relies on `__eq__` testing properly. Which is tested.
# basic test that nothing changes
assert param.clone() == param
assert param.clone() is not param # but it's not a 'singleton'
# passing kwargs will change stuff
newparam = param.clone(unit="km/(yr sr)")
assert newparam.unit == u.km / u.yr / u.sr
assert param.unit != u.km / u.yr / u.sr # original is unchanged
# expected failure for not-an-argument
with pytest.raises(TypeError):
param.clone(not_a_valid_parameter=True)
# -------------------------------------------
def test_Parameter_equality(self):
"""
Test Parameter equality.
Determined from the processed initialization args (including defaults).
"""
p1 = Parameter(unit="km / (s Mpc)")
p2 = Parameter(unit="km / (s Mpc)")
assert p1 == p2
# not equal parameters
p3 = Parameter(unit="km / s")
assert p3 != p1
# misc
assert p1 != 2 # show doesn't error
# -------------------------------------------
def test_Parameter_repr(self, cosmo_cls, param):
"""Test Parameter repr."""
r = repr(param)
assert "Parameter(" in r
for subs in ("derived=False", 'unit=Unit("m")', 'equivalencies=[(Unit("kg"), Unit("J")',
"fmt=''", "doc='Description of example parameter.'"):
assert subs in r, subs
        # `fvalidate` is a little trickier b/c one of them is custom!
if param.fvalidate in param._registry_validators.values(): # not custom
assert "fvalidate='default'" in r
else:
assert "fvalidate=<" in r # Some function, don't care about details.
def test_Parameter_repr_roundtrip(self, param):
"""Test ``eval(repr(Parameter))`` can round trip to ``Parameter``."""
P = Parameter(doc="A description of this parameter.", derived=True)
NP = eval(repr(P)) # Evaluate string representation back into a param.
assert P == NP
# ==============================================================
def test_Parameter_doesnt_change_with_generic_class(self):
"""Descriptors are initialized once and not updated on subclasses."""
class ExampleBase:
def __init__(self, param=15):
self._param = param
sig = inspect.signature(__init__)
_init_signature = sig.replace(parameters=list(sig.parameters.values())[1:])
param = Parameter(doc="example parameter")
class Example(ExampleBase): pass
assert Example.param is ExampleBase.param
def test_Parameter_doesnt_change_with_cosmology(self, cosmo_cls):
"""Cosmology reinitializes all descriptors when a subclass is defined."""
# define subclass to show param is same
class Example(cosmo_cls): pass
assert Example.param is cosmo_cls.param
# unregister
_COSMOLOGY_CLASSES.pop(Example.__qualname__)
assert Example.__qualname__ not in _COSMOLOGY_CLASSES
|
6ecf70bedf368be2ee3fea8c6863036e7805629c8f33398bcff6bc377ca26070 | # -*- coding: utf-8 -*-
"""Testing :mod:`astropy.cosmology.units`."""
##############################################################################
# IMPORTS
import pytest
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Planck13, default_cosmology
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_ASDF, HAS_SCIPY
from astropy.utils.exceptions import AstropyDeprecationWarning
##############################################################################
# TESTS
##############################################################################
def test_has_expected_units():
"""
Test that this module has the expected set of units. Some of the units are
imported from :mod:`astropy.units`, or vice versa. Here we test presence,
not usage. Units from :mod:`astropy.units` are tested in that module. Units
defined in :mod:`astropy.cosmology` will be tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`littleh`"):
assert u.astrophys.littleh is cu.littleh
def test_has_expected_equivalencies():
"""
Test that this module has the expected set of equivalencies. Many of the
equivalencies are imported from :mod:`astropy.units`, so here we test
presence, not usage. Equivalencies from :mod:`astropy.units` are tested in
that module. Equivalencies defined in :mod:`astropy.cosmology` will be
tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`with_H0`"):
assert u.equivalencies.with_H0 is cu.with_H0
def test_littleh():
"""Test :func:`astropy.cosmology.units.with_H0`."""
H0_70 = 70 * u.km / u.s / u.Mpc
h70dist = 70 * u.Mpc / cu.littleh
assert_quantity_allclose(h70dist.to(u.Mpc, cu.with_H0(H0_70)), 100 * u.Mpc)
# make sure using the default cosmology works
cosmodist = default_cosmology.get().H0.value * u.Mpc / cu.littleh
assert_quantity_allclose(cosmodist.to(u.Mpc, cu.with_H0()), 100 * u.Mpc)
# Now try a luminosity scaling
h1lum = 0.49 * u.Lsun * cu.littleh ** -2
assert_quantity_allclose(h1lum.to(u.Lsun, cu.with_H0(H0_70)), 1 * u.Lsun)
# And the trickiest one: magnitudes. Using H0=10 here for the round numbers
H0_10 = 10 * u.km / u.s / u.Mpc
# assume the "true" magnitude M = 12.
# Then M - 5*log_10(h) = M + 5 = 17
withlittlehmag = 17 * (u.mag - u.MagUnit(cu.littleh ** 2))
assert_quantity_allclose(withlittlehmag.to(u.mag, cu.with_H0(H0_10)), 12 * u.mag)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_dimensionless_redshift():
"""Test :func:`astropy.cosmology.units.dimensionless_redshift`."""
z = 3 * cu.redshift
val = 3 * u.one
# show units not equal
assert z.unit == cu.redshift
assert z.unit != u.one
# test equivalency enabled by default
assert z == val
# also test that it works for powers
assert (3 * cu.redshift ** 3) == val
# and in composite units
assert (3 * u.km / cu.redshift ** 3) == 3 * u.km
# test it also works as an equivalency
with u.set_enabled_equivalencies([]): # turn off default equivalencies
assert z.to(u.one, equivalencies=cu.dimensionless_redshift()) == val
with pytest.raises(ValueError):
z.to(u.one)
# if this fails, something is really wrong
with u.add_enabled_equivalencies(cu.dimensionless_redshift()):
assert z == val
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_temperature():
"""Test :func:`astropy.cosmology.units.redshift_temperature`."""
cosmo = Planck13.clone(Tcmb0=3 * u.K)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.redshift_temperature(cosmo)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_temperature(cosmo, ztol=1e-10)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_hubble():
"""Test :func:`astropy.cosmology.units.redshift_hubble`."""
unit = u.km / u.s / u.Mpc
cosmo = Planck13.clone(H0=100 * unit)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
H = cosmo.H(z)
h = H.to_value(u.km/u.s/u.Mpc) / 100 * cu.littleh
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_hubble()
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_hubble()
assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
assert default_cosmo.H(z) != H
# 2) Specifying the cosmology
equivalency = cu.redshift_hubble(cosmo)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_hubble(cosmo, ztol=1e-10)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H
assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # little-h
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
@pytest.mark.parametrize(
"kind",
[cu.redshift_distance.__defaults__[-1], "comoving", "lookback", "luminosity"]
)
def test_redshift_distance(kind):
"""Test :func:`astropy.cosmology.units.redshift_distance`."""
z = 15 * cu.redshift
d = getattr(Planck13, kind + "_distance")(z)
equivalency = cu.redshift_distance(cosmology=Planck13, kind=kind)
# properties of Equivalency
assert equivalency.name[0] == "redshift_distance"
assert equivalency.kwargs[0]["cosmology"] == Planck13
assert equivalency.kwargs[0]["distance"] == kind
# roundtrip
assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
assert_quantity_allclose(d.to(cu.redshift, equivalency), z)
def test_redshift_distance_wrong_kind():
"""Test :func:`astropy.cosmology.units.redshift_distance` wrong kind."""
with pytest.raises(ValueError, match="`kind`"):
cu.redshift_distance(kind=None)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
class Test_with_redshift:
"""Test `astropy.cosmology.units.with_redshift`."""
@pytest.fixture(scope="class")
def cosmo(self):
"""Test cosmology."""
return Planck13.clone(Tcmb0=3 * u.K)
# ===========================================
def test_cosmo_different(self, cosmo):
"""The default is different than the test cosmology."""
default_cosmo = default_cosmology.get()
assert default_cosmo != cosmo # shows changing default
def test_no_equivalency(self, cosmo):
"""Test the equivalency ``with_redshift`` without any enabled."""
equivalency = cu.with_redshift(distance=None, hubble=False, Tcmb=False)
assert len(equivalency) == 0
# -------------------------------------------
def test_temperature_off(self, cosmo):
"""Test ``with_redshift`` with the temperature off."""
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(Tcmb=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'K'"):
z.to(u.K, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, Tcmb=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'K'"):
z.to(u.K, equivalency)
def test_temperature(self, cosmo):
"""Test temperature equivalency component."""
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, Tcmb=True, atzkw={"ztol": 1e-10})
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# -------------------------------------------
def test_hubble_off(self, cosmo):
"""Test ``with_redshift`` with Hubble off."""
unit = u.km / u.s / u.Mpc
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
H = cosmo.H(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(hubble=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'km / "):
z.to(unit, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, hubble=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'km / "):
z.to(unit, equivalency)
def test_hubble(self, cosmo):
"""Test Hubble equivalency component."""
unit = u.km/u.s/u.Mpc
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
H = cosmo.H(z)
h = H.to_value(u.km / u.s / u.Mpc) / 100 * cu.littleh
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(hubble=True)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(hubble=True)
assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
assert default_cosmo.H(z) != H
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, hubble=True)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# Test `atzkw`
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, hubble=True, atzkw={"ztol": 1e-10})
assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H
assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # h
# -------------------------------------------
def test_distance_off(self, cosmo):
"""Test ``with_redshift`` with the distance off."""
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(distance=None)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'Mpc'"):
z.to(u.Mpc, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, distance=None)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'Mpc'"):
z.to(u.Mpc, equivalency)
def test_distance_default(self):
"""Test distance equivalency default."""
z = 15 * cu.redshift
d = default_cosmology.get().comoving_distance(z)
equivalency = cu.with_redshift()
assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
assert_quantity_allclose(d.to(cu.redshift, equivalency), z)
def test_distance_wrong_kind(self):
"""Test distance equivalency, but the wrong kind."""
with pytest.raises(ValueError, match="`kind`"):
cu.with_redshift(distance=ValueError)
@pytest.mark.parametrize("kind", ["comoving", "lookback", "luminosity"])
def test_distance(self, kind):
"""Test distance equivalency."""
cosmo = Planck13
z = 15 * cu.redshift
dist = getattr(cosmo, kind + "_distance")(z)
default_cosmo = default_cosmology.get()
assert default_cosmo != cosmo # shows changing default
# 1) without specifying the cosmology
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency), dist)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency),
getattr(default_cosmo, kind + "_distance")(z))
assert not u.allclose(getattr(default_cosmo, kind + "_distance")(z), dist)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency), dist)
assert_quantity_allclose(dist.to(cu.redshift, equivalency), z)
# Test atzkw
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, distance=kind, atzkw={"ztol": 1e-10})
assert_quantity_allclose(dist.to(cu.redshift, equivalency), z)
# FIXME! get "dimensionless_redshift", "with_redshift" to work in this
# they are not in ``astropy.units.equivalencies``, so the following fails
@pytest.mark.skipif(not HAS_ASDF, reason="requires ASDF")
@pytest.mark.parametrize("equiv", [cu.with_H0])
def test_equivalencies_asdf(tmpdir, equiv, recwarn):
from asdf.tests import helpers
tree = {"equiv": equiv()}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
# check starting with only the dimensionless_redshift equivalency.
assert len(base_registry.equivalencies) == 1
assert str(base_registry.equivalencies[0][0]) == "redshift"
|
c575071a35eb4bc3a079f1893681914ed8c00558a8e5dfe735470ea8d7a3d204 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import os
import pytest
from astropy import cosmology
from astropy.cosmology import Cosmology, w0wzCDM
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.io.tests import (test_cosmology, test_ecsv, test_json, test_mapping,
test_model, test_row, test_table, test_yaml)
from astropy.table import QTable, Row
###############################################################################
# SETUP
cosmo_instances = cosmology.realizations.available
# Collect the registered read/write formats.
readwrite_formats = {"ascii.ecsv", "json"}
# Collect all the registered to/from formats. Unfortunately this is NOT
# automatic since the output format class is not stored on the registry.
# (format, data type)
tofrom_formats = [("mapping", dict), ("yaml", str),
("astropy.cosmology", Cosmology),
("astropy.row", Row), ("astropy.table", QTable)]
###############################################################################
class ReadWriteTestMixin(test_ecsv.ReadWriteECSVTestMixin, test_json.ReadWriteJSONTestMixin):
"""
Tests for a CosmologyRead/Write on a |Cosmology|.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestReadWriteCosmology`` or ``TestCosmology`` for examples.
"""
@pytest.mark.parametrize("format", readwrite_formats)
def test_readwrite_complete_info(self, cosmo, tmpdir, format):
"""
Test writing from an instance and reading from the base class.
This requires full information.
The round-tripped metadata can be in a different order, so the
OrderedDict must be converted to a dict before testing equality.
"""
fname = str(tmpdir / f"{cosmo.name}.{format}")
cosmo.write(fname, format=format)
# Also test kwarg "overwrite"
assert os.path.exists(fname) # file exists
with pytest.raises(IOError):
cosmo.write(fname, format=format, overwrite=False)
        assert os.path.exists(fname)  # overwrite the existing file
cosmo.write(fname, format=format, overwrite=True)
# Read back
got = Cosmology.read(fname, format=format)
assert got == cosmo
assert dict(got.meta) == dict(cosmo.meta)
@pytest.mark.parametrize("format", readwrite_formats)
def test_readwrite_from_subclass_complete_info(self, cosmo_cls, cosmo, tmpdir, format):
"""
Test writing from an instance and reading from that class, when there's
full information saved.
"""
fname = str(tmpdir / f"{cosmo.name}.{format}")
cosmo.write(fname, format=format)
# read with the same class that wrote.
got = cosmo_cls.read(fname, format=format)
assert got == cosmo
assert got.meta == cosmo.meta
# this should be equivalent to
got = Cosmology.read(fname, format=format, cosmology=cosmo_cls)
assert got == cosmo
assert got.meta == cosmo.meta
# and also
got = Cosmology.read(fname, format=format, cosmology=cosmo_cls.__qualname__)
assert got == cosmo
assert got.meta == cosmo.meta
class TestCosmologyReadWrite(ReadWriteTestMixin):
"""Test the classes CosmologyRead/Write."""
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
return getattr(cosmology.realizations, request.param)
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
return cosmo.__class__
# ==============================================================
@pytest.mark.parametrize("format", readwrite_formats)
def test_write_methods_have_explicit_kwarg_overwrite(self, format):
writer = readwrite_registry.get_writer(format, Cosmology)
# test in signature
sig = inspect.signature(writer)
assert "overwrite" in sig.parameters
# also in docstring
assert "overwrite : bool" in writer.__doc__
@pytest.mark.parametrize("format", readwrite_formats)
def test_readwrite_reader_class_mismatch(self, cosmo, tmpdir, format):
"""Test when the reader class doesn't match the file."""
fname = tmpdir / f"{cosmo.name}.{format}"
cosmo.write(str(fname), format=format)
# class mismatch
# when reading directly
with pytest.raises(TypeError, match="missing 1 required"):
w0wzCDM.read(fname, format=format)
with pytest.raises(TypeError, match="missing 1 required"):
Cosmology.read(fname, format=format, cosmology=w0wzCDM)
# when specifying the class
with pytest.raises(ValueError, match="`cosmology` must be either"):
w0wzCDM.read(fname, format=format, cosmology="FlatLambdaCDM")
###############################################################################
# To/From_Format Tests
class ToFromFormatTestMixin(test_cosmology.ToFromCosmologyTestMixin,
test_mapping.ToFromMappingTestMixin, test_model.ToFromModelTestMixin,
test_row.ToFromRowTestMixin, test_table.ToFromTableTestMixin,
test_yaml.ToFromYAMLTestMixin):
"""
Tests for a Cosmology[To/From]Format on a |Cosmology|.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.mark.parametrize("format, totype", tofrom_formats)
def test_tofromformat_complete_info(self, cosmo, format, totype,
xfail_if_not_registered_with_yaml):
"""Read tests happen later."""
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# test from_format
got = Cosmology.from_format(obj, format=format)
# Test autodetect, if enabled
if self.can_autodentify(format):
got2 = Cosmology.from_format(obj)
assert got2 == got # internal consistency
assert got == cosmo # external consistency
assert got.meta == cosmo.meta
@pytest.mark.parametrize("format, totype", tofrom_formats)
def test_fromformat_subclass_complete_info(self, cosmo_cls, cosmo, format, totype,
xfail_if_not_registered_with_yaml):
"""
Test transforming an instance and parsing from that class, when there's
full information available.
Partial information tests are handled in the Mixin super classes.
"""
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# read with the same class that wrote.
got = cosmo_cls.from_format(obj, format=format)
if self.can_autodentify(format):
got2 = Cosmology.from_format(obj) # and autodetect
assert got2 == got # internal consistency
assert got == cosmo # external consistency
assert got.meta == cosmo.meta
# this should be equivalent to
got = Cosmology.from_format(obj, format=format, cosmology=cosmo_cls)
assert got == cosmo
assert got.meta == cosmo.meta
# and also
got = Cosmology.from_format(obj, format=format, cosmology=cosmo_cls.__qualname__)
assert got == cosmo
assert got.meta == cosmo.meta
class TestCosmologyToFromFormat(ToFromFormatTestMixin):
"""Test Cosmology[To/From]Format classes."""
@pytest.fixture(scope="class", params=cosmo_instances)
def cosmo(self, request):
return getattr(cosmology.realizations, request.param)
@pytest.fixture(scope="class")
def cosmo_cls(self, cosmo):
return cosmo.__class__
# ==============================================================
@pytest.mark.parametrize("format_type", tofrom_formats)
def test_fromformat_class_mismatch(self, cosmo, format_type):
format, totype = format_type
# test to_format
obj = cosmo.to_format(format)
assert isinstance(obj, totype)
# class mismatch
with pytest.raises(TypeError):
w0wzCDM.from_format(obj, format=format)
with pytest.raises(TypeError):
Cosmology.from_format(obj, format=format, cosmology=w0wzCDM)
# when specifying the class
with pytest.raises(ValueError, match="`cosmology` must be either"):
w0wzCDM.from_format(obj, format=format, cosmology="FlatLambdaCDM")
|
f4ef22fda5e24850eebc91a465f289c4db33d5e8178482cc7e99d24398de181e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.core`."""
##############################################################################
# IMPORTS
# STDLIB
import abc
import inspect
import pickle
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Cosmology
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter
from astropy.table import Column, QTable, Table
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.metadata import MetaData
from .test_connect import ReadWriteTestMixin, ToFromFormatTestMixin
from .test_parameter import ParameterTestMixin
##############################################################################
# SETUP / TEARDOWN
scalar_zs = [
0, 1, 1100, # interesting times
# FIXME! np.inf breaks some funcs. 0 * inf is an error
np.float64(3300), # different type
2 * cu.redshift, 3 * u.one # compatible units
]
_zarr = np.linspace(0, 1e5, num=20)
array_zs = [
_zarr, # numpy
_zarr.tolist(), # pure python
Column(_zarr), # table-like
_zarr * cu.redshift # Quantity
]
valid_zs = scalar_zs + array_zs
invalid_zs = [
(None, TypeError), # wrong type
    # Wrong units (the TypeError comes from Cython code paths, which can differ)
(4 * u.MeV, (u.UnitConversionError, TypeError)), # scalar
([0, 1] * u.m, (u.UnitConversionError, TypeError)), # array
]
class SubCosmology(Cosmology):
"""Defined here to be serializable."""
H0 = Parameter(unit="km/(s Mpc)")
Tcmb0 = Parameter(unit=u.K)
m_nu = Parameter(unit=u.eV)
def __init__(self, H0, Tcmb0=0*u.K, m_nu=0*u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
self.H0 = H0
self.Tcmb0 = Tcmb0
self.m_nu = m_nu
@property
def is_flat(self):
return super().is_flat()
##############################################################################
# TESTS
##############################################################################
class MetaTestMixin:
"""Tests for a :class:`astropy.utils.metadata.MetaData` on a Cosmology."""
def test_meta_on_class(self, cosmo_cls):
assert isinstance(cosmo_cls.meta, MetaData)
def test_meta_on_instance(self, cosmo):
assert isinstance(cosmo.meta, dict) # test type
# value set at initialization
assert cosmo.meta == self.cls_kwargs.get("meta", {})
def test_meta_mutable(self, cosmo):
"""The metadata is NOT immutable on a cosmology"""
key = tuple(cosmo.meta.keys())[0] # select some key
cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable
class TestCosmology(ParameterTestMixin, MetaTestMixin,
ReadWriteTestMixin, ToFromFormatTestMixin,
metaclass=abc.ABCMeta):
"""Test :class:`astropy.cosmology.Cosmology`.
Subclasses should define tests for:
- ``test_clone_change_param()``
- ``test_repr()``
"""
def setup_class(self):
"""
Setup for testing.
Cosmology should not be instantiated, so tests are done on a subclass.
"""
# make sure SubCosmology is known
_COSMOLOGY_CLASSES["SubCosmology"] = SubCosmology
self.cls = SubCosmology
self._cls_args = dict(H0=70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, m_nu=0.6 * u.eV)
self.cls_kwargs = dict(name=self.__class__.__name__, meta={"a": "b"})
def teardown_class(self):
_COSMOLOGY_CLASSES.pop("SubCosmology", None)
@property
def cls_args(self):
return tuple(self._cls_args.values())
@pytest.fixture(scope="class")
def cosmo_cls(self):
"""The Cosmology class as a :func:`pytest.fixture`."""
return self.cls
@pytest.fixture(scope="function") # ensure not cached.
def ba(self):
"""Return filled `inspect.BoundArguments` for cosmology."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return ba
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
"""The cosmology instance with which to test."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return cosmo_cls(*ba.args, **ba.kwargs)
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test creating subclasses registers classes and manages Parameters."""
class InitSubclassTest(cosmo_cls):
pass
# test parameters
assert InitSubclassTest.__parameters__ == cosmo_cls.__parameters__
# test and cleanup registry
registrant = _COSMOLOGY_CLASSES.pop(InitSubclassTest.__qualname__)
assert registrant is InitSubclassTest
def test_init_signature(self, cosmo_cls, cosmo):
"""Test class-property ``_init_signature``."""
# test presence
assert hasattr(cosmo_cls, "_init_signature")
assert hasattr(cosmo, "_init_signature")
# test internal consistency, so following tests can use either cls or instance.
assert cosmo_cls._init_signature == cosmo._init_signature
# test matches __init__, but without 'self'
sig = inspect.signature(cosmo.__init__) # (instances don't have self)
assert set(sig.parameters.keys()) == set(cosmo._init_signature.parameters.keys())
assert all(np.all(sig.parameters[k].default == p.default) for k, p in
cosmo._init_signature.parameters.items())
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
"""Test initialization."""
        # Cosmology only does name & meta, but this subclass adds H0, Tcmb0 & m_nu.
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta={"m": 1})
assert cosmo.name == "test_init"
assert cosmo.meta["m"] == 1
# if meta is None, it is changed to a dict
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta=None)
assert cosmo.meta == {}
def test_name(self, cosmo):
"""Test property ``name``."""
assert cosmo.name is cosmo._name # accesses private attribute
assert cosmo.name is None or isinstance(cosmo.name, str) # type
assert cosmo.name == self.cls_kwargs["name"] # test has expected value
# immutable
with pytest.raises(AttributeError, match="can't set"):
cosmo.name = None
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``. It's an ABC."""
with pytest.raises(NotImplementedError, match="is_flat is not implemented"):
cosmo.is_flat
# ------------------------------------------------
# clone
def test_clone_identical(self, cosmo):
"""Test method ``.clone()`` if no (kw)args."""
assert cosmo.clone() is cosmo
def test_clone_name(self, cosmo):
"""Test method ``.clone()`` name argument."""
# test changing name. clone treats 'name' differently (see next test)
c = cosmo.clone(name="cloned cosmo")
assert c.name == "cloned cosmo" # changed
# show name is the only thing changed
c._name = cosmo.name # first change name back
assert c == cosmo
assert c.meta == cosmo.meta
# now change a different parameter and see how 'name' changes
c = cosmo.clone(meta={})
assert c.name == cosmo.name + " (modified)"
def test_clone_meta(self, cosmo):
"""Test method ``.clone()`` meta argument: updates meta, doesn't clear."""
# start with no change
c = cosmo.clone(meta=None)
assert c.meta == cosmo.meta
# add something
c = cosmo.clone(meta=dict(test_clone_meta=True))
assert c.meta["test_clone_meta"] is True
c.meta.pop("test_clone_meta") # remove from meta
assert c.meta == cosmo.meta # now they match
def test_clone_change_param(self, cosmo):
"""
Test method ``.clone()`` changing a(many) Parameter(s).
Nothing here b/c no Parameters.
"""
def test_clone_fail_unexpected_arg(self, cosmo):
"""Test when ``.clone()`` gets an unexpected argument."""
with pytest.raises(TypeError, match="unexpected keyword argument"):
            cosmo.clone(not_an_arg=4)
def test_clone_fail_positional_arg(self, cosmo):
with pytest.raises(TypeError, match="1 positional argument"):
cosmo.clone(None)
# ---------------------------------------------------------------
# comparison methods
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`."""
# to self
assert cosmo.is_equivalent(cosmo)
# same class, different instance
newclone = cosmo.clone(name="test_is_equivalent")
assert cosmo.is_equivalent(newclone)
assert newclone.is_equivalent(cosmo)
# different class and not convertible to Cosmology.
assert not cosmo.is_equivalent(2)
def test_equality(self, cosmo):
"""Test method ``.__eq__()."""
# wrong class
assert (cosmo != 2) and (2 != cosmo)
# correct
assert cosmo == cosmo
# different name <= not equal, but equivalent
newcosmo = cosmo.clone(name="test_equality")
assert (cosmo != newcosmo) and (newcosmo != cosmo)
assert cosmo.__equiv__(newcosmo) and newcosmo.__equiv__(cosmo)
# ---------------------------------------------------------------
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``.
This is a very general test and it is probably good to have a
hard-coded comparison.
"""
r = repr(cosmo)
# class in string rep
assert cosmo_cls.__qualname__ in r
assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing
r = r[len(cosmo_cls.__qualname__) + 1:] # remove
# name in string rep
if cosmo.name is not None:
assert f"name=\"{cosmo.name}\"" in r
assert r.index("name=") == 0
r = r[6 + len(cosmo.name) + 3:] # remove
# parameters in string rep
ps = {k: getattr(cosmo, k) for k in cosmo.__parameters__}
cps = {k: getattr(cosmo_cls, k) for k in cosmo.__parameters__}
for k, v in ps.items():
sv = format(v, cps[k].format_spec if v is not None else '')
assert (k + '=' + sv) in r
assert r.index(k) == 0
r = r[len((k + '=' + sv)) + 2:] # remove
# ------------------------------------------------
@pytest.mark.parametrize("in_meta", [True, False])
@pytest.mark.parametrize("table_cls", [Table, QTable])
def test_astropy_table(self, cosmo, table_cls, in_meta):
"""Test ``astropy.table.Table(cosmology)``."""
tbl = table_cls(cosmo, cosmology_in_meta=in_meta)
assert isinstance(tbl, table_cls)
# the name & all parameters are columns
for n in ("name", *cosmo.__parameters__):
assert n in tbl.colnames
assert np.all(tbl[n] == getattr(cosmo, n))
# check if Cosmology is in metadata or a column
if in_meta:
assert tbl.meta["cosmology"] == cosmo.__class__.__qualname__
assert "cosmology" not in tbl.colnames
else:
assert "cosmology" not in tbl.meta
assert tbl["cosmology"][0] == cosmo.__class__.__qualname__
# the metadata is transferred
for k, v in cosmo.meta.items():
assert np.all(tbl.meta[k] == v)
# ===============================================================
# Usage Tests
def test_immutability(self, cosmo):
"""
Test immutability of cosmologies.
The metadata is mutable: see ``test_meta_mutable``.
"""
for n in cosmo.__all_parameters__:
with pytest.raises(AttributeError):
setattr(cosmo, n, getattr(cosmo, n))
def test_pickle_class(self, cosmo_cls, pickle_protocol):
"""Test classes can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo_cls, protocol=pickle_protocol)
unpickled = pickle.loads(f)
# test equality
assert unpickled == cosmo_cls
def test_pickle_instance(self, cosmo, pickle_protocol):
"""Test instances can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == cosmo
assert unpickled.meta == cosmo.meta
class CosmologySubclassTest(TestCosmology):
"""
Test subclasses of :class:`astropy.cosmology.Cosmology`.
This is broken away from ``TestCosmology``, because |Cosmology| is/will be
an ABC and subclasses must override some methods.
"""
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# instance-level
@abc.abstractmethod
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# -----------------------------------------------------------------------------
class FlatCosmologyMixinTest:
"""Tests for :class:`astropy.cosmology.core.FlatCosmologyMixin` subclasses.
    E.g. to use this class::
class TestFlatSomeCosmology(FlatCosmologyMixinTest, TestSomeCosmology):
...
"""
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
super().test_is_flat(cosmo_cls, cosmo)
# it's always True
assert cosmo.is_flat is True
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.is_equivalent`.
Normally this would pass up via super(), but ``__equiv__`` is meant
to be overridden, so we skip super().
e.g. FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestCosmology
vs FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestFLRW -> TestCosmology
"""
CosmologySubclassTest.test_is_equivalent(self, cosmo)
# -----------------------------------------------------------------------------
def test_flrw_moved_deprecation():
"""Test the deprecation warning about the move of FLRW classes."""
from astropy.cosmology import flrw
# it's deprecated to import `flrw/*` from `core.py`
with pytest.warns(AstropyDeprecationWarning):
from astropy.cosmology.core import FLRW
# but they are the same object
assert FLRW is flrw.FLRW
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the cosmology test suite
from the installed astropy. It makes use of the `pytest`_ testing framework.
"""
##############################################################################
# IMPORTS
# STDLIB
import inspect
# THIRD PARTY
import pytest
# LOCAL
from astropy.cosmology import core
__all__ = ["get_redshift_methods", "clean_registry"]
###############################################################################
# FUNCTIONS
def get_redshift_methods(cosmology, include_private=True, include_z2=True):
"""Get redshift methods from a cosmology.
Parameters
----------
cosmology : |Cosmology| class or instance
include_private : bool
Whether to include private methods, i.e. starts with an underscore.
include_z2 : bool
Whether to include methods that are functions of 2 (or more) redshifts,
not the more common 1 redshift argument.
Returns
-------
set[str]
The names of the redshift methods on `cosmology`, satisfying
`include_private` and `include_z2`.
"""
# Get all the method names, optionally sieving out private methods
methods = set()
for n in dir(cosmology):
try: # get method, some will error on ABCs
m = getattr(cosmology, n)
except NotImplementedError:
continue
# Add anything callable, optionally excluding private methods.
if callable(m) and (not n.startswith('_') or include_private):
methods.add(n)
# Sieve out incompatible methods.
# The index to check for redshift depends on whether cosmology is a class
# or instance and does/doesn't include 'self'.
iz1 = 1 if inspect.isclass(cosmology) else 0
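    # e.g. for a class, inspect.signature(cosmology.age).parameters gives
    # ('self', 'z'), so the redshift sits at index 1; on an instance the bound
    # method gives ('z',), i.e. index 0. ('age' is purely illustrative here.)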
for n in tuple(methods):
try:
sig = inspect.signature(getattr(cosmology, n))
except ValueError: # Remove non-introspectable methods.
methods.discard(n)
continue
else:
params = list(sig.parameters.keys())
        # Remove non-redshift methods:
if len(params) <= iz1: # Check there are enough arguments.
methods.discard(n)
elif len(params) >= iz1 + 1 and not params[iz1].startswith("z"): # First non-self arg is z.
methods.discard(n)
# If methods with 2 z args are not allowed, the following arg is checked.
elif not include_z2 and (len(params) >= iz1 + 2) and params[iz1 + 1].startswith("z"):
methods.discard(n)
return methods
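# A minimal usage sketch (illustrative only; FlatLambdaCDM is not imported in
# this module):
#
#     cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
#     methods = get_redshift_methods(cosmo, include_private=False,
#                                    include_z2=False)
#     assert {'age', 'comoving_distance'} <= methods
#     assert '_comoving_distance_z1z2' not in methods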
###############################################################################
# FIXTURES
@pytest.fixture
def clean_registry():
"""`pytest.fixture` for clearing and restoring ``_COSMOLOGY_CLASSES``."""
# TODO! with monkeypatch instead for thread safety.
ORIGINAL_COSMOLOGY_CLASSES = core._COSMOLOGY_CLASSES
core._COSMOLOGY_CLASSES = {} # set as empty dict
yield core._COSMOLOGY_CLASSES
core._COSMOLOGY_CLASSES = ORIGINAL_COSMOLOGY_CLASSES
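# Typical use, as a sketch (this assumes Cosmology subclasses self-register in
# ``_COSMOLOGY_CLASSES`` on definition):
#
#     def test_something(clean_registry):
#         class MyCosmology(Cosmology):
#             ...
#         assert "MyCosmology" in clean_registry
#
# and the original registry is restored once the test finishes.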
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Stand-alone overall systems tests for :mod:`astropy.cosmology`."""
from io import StringIO
import numpy as np
import pytest
import astropy.constants as const
import astropy.units as u
from astropy.cosmology import flrw
from astropy.cosmology.realizations import Planck18
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.exceptions import AstropyUserWarning
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.")
def test_flat_z1():
"""Test a flat cosmology at z=1 against several other on-line calculators.
Test values were taken from the following web cosmology calculators on
2012-02-11:
Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
(https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)
Kempner: http://www.kempner.net/cosmic.php
iCosmos: http://www.icosmos.co.uk/index.html
"""
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
    # The order of values below is Wright, Kempner, iCosmos
assert allclose(cosmo.comoving_distance(1),
[3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.angular_diameter_distance(1),
[1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.luminosity_distance(1),
[6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.lookback_time(1),
[7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3)
assert allclose(cosmo.lookback_distance(1),
[2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3)
@pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy.")
def test_varyde_lumdist_mathematica():
"""Tests a few varying dark energy EOS models against a Mathematica computation."""
z = np.array([0.2, 0.4, 0.9, 1.2])
# w0wa models
cosmo = flrw.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),
[1.0, 0.9246310669529021, 0.9184087000251957])
cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4)
cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4)
# wpwa models
cosmo = flrw.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4)
cosmo = flrw.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4)
###############################################################################
# TODO! sort and refactor following tests.
# overall systems tests stay here, specific tests go to new test suite.
@pytest.mark.skipif('not HAS_SCIPY')
def test_units():
""" Test if the right units are being returned"""
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)
assert cosmo.comoving_distance(1.0).unit == u.Mpc
assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.luminosity_distance(1.0).unit == u.Mpc
assert cosmo.lookback_time(1.0).unit == u.Gyr
assert cosmo.lookback_distance(1.0).unit == u.Mpc
assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
assert cosmo.Tcmb(1.0).unit == u.K
assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
assert cosmo.Tnu(1.0).unit == u.K
assert cosmo.Tnu([0.0, 1.0]).unit == u.K
assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3
assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3
assert cosmo.age(1.0).unit == u.Gyr
assert cosmo.distmod(1.0).unit == u.mag
@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_broadcast():
""" Test array shape broadcasting for functions with single
redshift inputs"""
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.27,
m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))
z = np.linspace(0.1, 1, 6)
z_reshape2d = z.reshape(2, 3)
z_reshape3d = z.reshape(3, 2, 1)
# Things with units
methods = ['comoving_distance', 'luminosity_distance',
'comoving_transverse_distance', 'angular_diameter_distance',
'distmod', 'lookback_time', 'age', 'comoving_volume',
'differential_comoving_volume', 'kpc_comoving_per_arcmin']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert value_flat.unit == value_2d.unit
assert value_flat.unit == value_3d.unit
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Also test unitless ones
methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H',
'w', 'de_density_scale', 'Onu', 'Ogamma',
'nu_relative_density']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Test some dark energy models
methods = ['Om', 'Ode', 'w', 'de_density_scale']
for tcosmo in [flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),
flrw.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),
flrw.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),
flrw.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5,
wp=-1.2, wa=-0.2, zp=0.9),
flrw.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]:
for method in methods:
            g = getattr(tcosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
def test_equality():
"""Test equality and equivalence."""
# mismatched signatures, both directions.
newcosmo = flrw.w0waCDM(**Planck18._init_arguments, Ode0=0.6)
assert newcosmo != Planck18
assert Planck18 != newcosmo
def test_xtfuncs():
""" Test of absorption and lookback integrand"""
cosmo = flrw.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
z = np.array([2.0, 3.2])
assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378,
rtol=1e-4)
assert allclose(cosmo.lookback_time_integrand(z),
[0.10333179, 0.04644541], rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402,
rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(z),
[2.7899584, 3.44104758], rtol=1e-4)
# This class is to test whether the routines work correctly
# if one only overloads w(z)
class test_cos_sub(flrw.FLRW):
def __init__(self):
super().__init__(70.0, 0.27, 0.73, Tcmb0=0.0, name="test_cos")
self._w0 = -0.9
def w(self, z):
return self._w0 * np.ones_like(z)
# Similar, but with neutrinos
class test_cos_subnu(flrw.FLRW):
def __init__(self):
super().__init__(70.0, 0.27, 0.73, Tcmb0=3.0, m_nu=0.1 * u.eV, name="test_cos_nu")
self._w0 = -0.8
def w(self, z):
return self._w0 * np.ones_like(z)
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_subclass():
# This is the comparison object
z = [0.2, 0.4, 0.6, 0.9]
cosmo = flrw.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)
    # Values taken from Ned Wright's advanced cosmology calculator, Aug 17 2012
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Now try the subclass that only gives w(z)
cosmo = test_cos_sub()
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Test efunc
assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
assert allclose(cosmo.efunc([0.5, 1.0]),
[1.31744953, 1.7489240754], rtol=1e-5)
assert allclose(cosmo.inv_efunc([0.5, 1.0]),
[0.75904236, 0.57178011], rtol=1e-5)
# Test de_density_scale
assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
assert allclose(cosmo.de_density_scale([0.5, 1.0]),
[1.12934694, 1.23114444], rtol=1e-4)
# Add neutrinos for efunc, inv_efunc
@pytest.mark.skipif('not HAS_SCIPY')
def test_matter():
# Test non-relativistic matter evolution
tcos = flrw.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
assert allclose(tcos.Om0, 0.3)
assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
assert allclose(tcos.Om(0), 0.3)
assert allclose(tcos.Ob(0), 0.045)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455],
rtol=1e-4)
assert allclose(tcos.Ob(z),
[0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4)
assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636],
rtol=1e-4)
# Consistency of dark and baryonic matter evolution with all
# non-relativistic matter
assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_ocurv():
# Test Ok evolution
# Flat, boring case
tcos = flrw.FlatLambdaCDM(70.0, 0.3)
assert allclose(tcos.Ok0, 0.0)
assert allclose(tcos.Ok(0), 0.0)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],
rtol=1e-6)
# Not flat
tcos = flrw.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
assert allclose(tcos.Ok0, 0.2)
assert allclose(tcos.Ok(0), 0.2)
assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
rtol=1e-4)
# Test the sum; note that Ogamma/Onu are 0
assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
[1.0, 1.0, 1.0, 1.0], rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ode():
# Test Ode evolution, turn off neutrinos, cmb
tcos = flrw.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
assert allclose(tcos.Ode0, 0.7)
assert allclose(tcos.Ode(0), 0.7)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],
rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ogamma():
"""Tests the effects of changing the temperature of the CMB"""
# Tested against Ned Wright's advanced cosmology calculator,
# Sep 7 2012. The accuracy of our comparison is limited by
# how many digits it outputs, which limits our test to about
# 0.2% accuracy. The NWACC does not allow one
    # to change the number of neutrino species, fixing that at 3.
# Also, inspection of the NWACC code shows it uses inaccurate
# constants at the 0.2% level (specifically, a_B),
# so we shouldn't expect to match it that well. The integral is
# also done rather crudely. Therefore, we should not expect
# the NWACC to be accurate to better than about 0.5%, which is
# unfortunate, but reflects a problem with it rather than this code.
# More accurate tests below using Mathematica
z = np.array([1.0, 10.0, 500.0, 1000.0])
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4)
# Next compare with doing the integral numerically in Mathematica,
# which allows more precision in the test. It is at least as
# good as 0.01%, possibly better
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5)
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5)
# Just to be really sure, we also do a version where the integral
# is analytic, which is a Ode = 0 flat universe. In this case
# Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
# Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26
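    # i.e. Ogamma0 h^2 = (4 sigma_SB Tcmb0^4 / c^3) / (rho_crit / h^2), with
    # sigma_SB in W m^-2 K^-4, c in m / s, and rho_crit / h^2 ~ 1.878e-26 kg m^-3.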
Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2
Om0 = 1.0 - Or0
hubdis = (299792.458 / 70.0) * u.Mpc
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
# And integers for z
assert allclose(cosmo.comoving_distance(z.astype(int)),
targvals, rtol=1e-5)
# Try Tcmb0 = 4
Or0 *= (4.0 / 2.725) ** 4
Om0 = 1.0 - Or0
cosmo = flrw.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tcmb():
cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)
assert allclose(cosmo.Tcmb0, 2.5 * u.K)
assert allclose(cosmo.Tcmb(2), 7.5 * u.K)
z = [0.0, 1.0, 2.0, 3.0, 9.0]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
# Make sure it's the same for integers
z = [0, 1, 2, 3, 9]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tnu():
cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)
assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)
z = [0.0, 1.0, 2.0, 3.0]
expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
# Test for integers
z = [0, 1, 2, 3]
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
@pytest.mark.skipif('not HAS_SCIPY')
def test_efunc_vs_invefunc_flrw():
""" Test that efunc and inv_efunc give inverse values"""
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# FLRW is abstract, so requires test_cos_sub defined earlier
# This requires scipy, unlike the built-ins, because it
# calls de_density_scale, which has an integral in it
cosmo = test_cos_sub()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# Add neutrinos
cosmo = test_cos_subnu()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_kpc_methods():
cosmo = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(cosmo.arcsec_per_kpc_comoving(3),
0.0317179167 * u.arcsec / u.kpc)
assert allclose(cosmo.arcsec_per_kpc_proper(3),
0.1268716668 * u.arcsec / u.kpc)
assert allclose(cosmo.kpc_comoving_per_arcmin(3),
1891.6753126 * u.kpc / u.arcmin)
assert allclose(cosmo.kpc_proper_per_arcmin(3),
472.918828 * u.kpc / u.arcmin)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_volume():
c_flat = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test against ned wright's calculator (cubic Gpc)
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
    # The Wright calculator isn't very accurate, so we use a rather
# modest precision
assert allclose(c_flat.comoving_volume(redshifts), wright_flat,
rtol=1e-2)
assert allclose(c_open.comoving_volume(redshifts),
wright_open, rtol=1e-2)
assert allclose(c_closed.comoving_volume(redshifts),
wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_differential_comoving_volume():
from scipy.integrate import quad
c_flat = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = flrw.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test that integration of differential_comoving_volume()
# yields same as comoving_volume()
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
    # The Wright calculator isn't very accurate, so we use a rather
# modest precision.
ftemp = lambda x: c_flat.differential_comoving_volume(x).value
otemp = lambda x: c_open.differential_comoving_volume(x).value
ctemp = lambda x: c_closed.differential_comoving_volume(x).value
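    # differential_comoving_volume is per steradian, so integrating it over
    # redshift and scaling by the full-sky solid angle should reproduce
    # comoving_volume.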
# Multiply by solid_angle (4 * pi)
assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_flat, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_open, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_open_closed_icosmo():
""" Test against the tabulated values generated from icosmo.org
with three example cosmologies (flat, open and closed).
"""
cosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z comoving_transverse_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 669.77536 576.15085 778.61386
0.32500000 1285.5964 970.26143 1703.4152
0.50000000 1888.6254 1259.0836 2832.9381
0.66250000 2395.5489 1440.9317 3982.6000
0.82500000 2855.5732 1564.6976 5211.4210
1.0000000 3303.8288 1651.9144 6607.6577
1.1625000 3681.1867 1702.2829 7960.5663
1.3250000 4025.5229 1731.4077 9359.3408
1.5000000 4363.8558 1745.5423 10909.640
1.6625000 4651.4830 1747.0359 12384.573
1.8250000 4916.5970 1740.3883 13889.387
2.0000000 5179.8621 1726.6207 15539.586
2.1625000 5406.0204 1709.4136 17096.540
2.3250000 5616.5075 1689.1752 18674.888
2.5000000 5827.5418 1665.0120 20396.396
2.6625000 6010.4886 1641.0890 22013.414
2.8250000 6182.1688 1616.2533 23646.796
3.0000000 6355.6855 1588.9214 25422.742
3.1625000 6507.2491 1563.3031 27086.425
3.3250000 6650.4520 1537.6768 28763.205
3.5000000 6796.1499 1510.2555 30582.674
3.6625000 6924.2096 1485.0852 32284.127
3.8250000 7045.8876 1460.2876 33996.408
4.0000000 7170.3664 1434.0733 35851.832
4.1625000 7280.3423 1410.2358 37584.767
4.3250000 7385.3277 1386.9160 39326.870
4.5000000 7493.2222 1362.4040 41212.722
4.6625000 7588.9589 1340.2135 42972.480
"""
cosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z comoving_transverse_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 643.08185 553.18868 747.58265
0.32500000 1200.9858 906.40441 1591.3062
0.50000000 1731.6262 1154.4175 2597.4393
0.66250000 2174.3252 1307.8648 3614.8157
0.82500000 2578.7616 1413.0201 4706.2399
1.0000000 2979.3460 1489.6730 5958.6920
1.1625000 3324.2002 1537.2024 7188.5829
1.3250000 3646.8432 1568.5347 8478.9104
1.5000000 3972.8407 1589.1363 9932.1017
1.6625000 4258.1131 1599.2913 11337.226
1.8250000 4528.5346 1603.0211 12793.110
2.0000000 4804.9314 1601.6438 14414.794
2.1625000 5049.2007 1596.5852 15968.097
2.3250000 5282.6693 1588.7727 17564.875
2.5000000 5523.0914 1578.0261 19330.820
2.6625000 5736.9813 1566.4113 21011.694
2.8250000 5942.5803 1553.6158 22730.370
3.0000000 6155.4289 1538.8572 24621.716
3.1625000 6345.6997 1524.4924 26413.975
3.3250000 6529.3655 1509.6799 28239.506
3.5000000 6720.2676 1493.3928 30241.204
3.6625000 6891.5474 1478.0799 32131.840
3.8250000 7057.4213 1462.6780 34052.058
4.0000000 7230.3723 1446.0745 36151.862
4.1625000 7385.9998 1430.7021 38130.224
4.3250000 7537.1112 1415.4199 40135.117
4.5000000 7695.0718 1399.1040 42322.895
4.6625000 7837.5510 1384.1150 44380.133
"""
cosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z comoving_transverse_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 601.80160 517.67879 699.59436
0.32500000 1057.9502 798.45297 1401.7840
0.50000000 1438.2161 958.81076 2157.3242
0.66250000 1718.6778 1033.7912 2857.3019
0.82500000 1948.2400 1067.5288 3555.5381
1.0000000 2152.7954 1076.3977 4305.5908
1.1625000 2312.3427 1069.2914 5000.4410
1.3250000 2448.9755 1053.3228 5693.8681
1.5000000 2575.6795 1030.2718 6439.1988
1.6625000 2677.9671 1005.8092 7130.0873
1.8250000 2768.1157 979.86398 7819.9270
2.0000000 2853.9222 951.30739 8561.7665
2.1625000 2924.8116 924.84161 9249.7167
2.3250000 2988.5333 898.80701 9936.8732
2.5000000 3050.3065 871.51614 10676.073
2.6625000 3102.1909 847.01459 11361.774
2.8250000 3149.5043 823.39982 12046.854
3.0000000 3195.9966 798.99915 12783.986
3.1625000 3235.5334 777.30533 13467.908
3.3250000 3271.9832 756.52790 14151.327
3.5000000 3308.1758 735.15017 14886.791
3.6625000 3339.2521 716.19347 15569.263
3.8250000 3368.1489 698.06195 16251.319
4.0000000 3397.0803 679.41605 16985.401
4.1625000 3422.1142 662.87926 17666.664
4.3250000 3445.5542 647.05243 18347.576
4.5000000 3469.1805 630.76008 19080.493
4.6625000 3489.7534 616.29199 19760.729
"""
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=True)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=True)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=True)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = flrw.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
@pytest.mark.skipif('not HAS_SCIPY')
def test_integral():
# Test integer vs. floating point inputs
cosmo = flrw.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)
assert allclose(cosmo.comoving_distance(3),
cosmo.comoving_distance(3.0), rtol=1e-7)
assert allclose(cosmo.comoving_distance([1, 2, 3, 5]),
cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),
rtol=1e-7)
assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
assert allclose(cosmo.efunc([1, 2, 6]),
cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7)
assert allclose(cosmo.inv_efunc([1, 2, 6]),
cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7)
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_densityscale():
cosmo = flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
assert allclose(cosmo.de_density_scale(z),
[1.0, 1.0, 1.0, 1.0, 1.0])
# Integer check
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = flrw.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)
assert allclose(cosmo.de_density_scale(z),
[1.15369, 1.31453, 1.83712, 3.95285, 6.5479],
rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = flrw.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
assert allclose(cosmo.de_density_scale(z),
[0.746048, 0.5635595, 0.25712378, 0.026664129,
0.0035916468], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = flrw.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
assert allclose(cosmo.de_density_scale(z),
[0.9934201, 0.9767912, 0.897450,
0.622236, 0.4458753], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = flrw.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
assert allclose(cosmo.de_density_scale(z),
[1.012246048, 1.0280102, 1.087439,
1.324988, 1.565746], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
@pytest.mark.skipif('not HAS_SCIPY')
def test_age():
    # WMAP7 but with Omega_relativistic = 0
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
assert allclose(tcos.age([1., 5.]),
[5.97113193, 1.20553129] * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
# Add relativistic species
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)
# And massive neutrinos
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0,
m_nu=0.1 * u.eV)
assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distmod():
    # WMAP7 but with Omega_relativistic = 0
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
assert allclose(tcos.distmod([1, 5]),
[44.124857, 48.40167258] * u.mag)
assert allclose(tcos.distmod([1., 5.]),
[44.124857, 48.40167258] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_neg_distmod():
# Cosmology with negative luminosity distances (perfectly okay,
# if obscure)
tcos = flrw.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
assert allclose(tcos.luminosity_distance([50, 100]),
[16612.44047622, -46890.79092244] * u.Mpc)
assert allclose(tcos.distmod([50, 100]),
[46.102167189, 48.355437790944] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_critical_density():
from astropy.constants import codata2014
# WMAP7 but with Omega_relativistic = 0
# These tests will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py.
# critical_density0 is inversely proportional to G.
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
fac = (const.G / codata2014.G).to(u.dimensionless_unscaled).value
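    # Since rho_crit = 3 H^2 / (8 pi G), multiplying by G / G_codata2014
    # rescales the computed density onto the codata2014-based reference
    # values used below.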
assert allclose(tcos.critical_density0 * fac,
9.309668456020899e-30 * (u.g / u.cm**3))
assert allclose(tcos.critical_density0,
tcos.critical_density(0))
assert allclose(
tcos.critical_density([1, 5]) * fac,
[2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3))
assert allclose(
tcos.critical_density([1., 5.]) * fac,
[2.70352772e-29, 5.53739080e-28] * (u.g / u.cm**3))
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_distance_z1z2():
tcos = flrw.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))
# Comoving distances are invertible
assert allclose(tcos._comoving_distance_z1z2(1, 2),
-tcos._comoving_distance_z1z2(2, 1))
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (3767.90579253,
2386.25591391,
-1381.64987862,
2893.11776663,
174.1524683) * u.Mpc
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_age_in_special_cosmologies():
"""Check that age in de Sitter and Einstein-de Sitter Universes work.
Some analytic solutions fail at these critical points.
"""
c_dS = flrw.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.age(z=0), np.inf * u.Gyr)
assert allclose(c_dS.age(z=1), np.inf * u.Gyr)
assert allclose(c_dS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr)
c_EdS = flrw.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr)
assert allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_in_special_cosmologies():
"""Check that de Sitter and Einstein-de Sitter Universes both work.
Some analytic solutions fail at these critical points.
"""
c_dS = flrw.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = flrw.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
c_dS = flrw.LambdaCDM(100, 0, 1, Tcmb0=0)
assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = flrw.LambdaCDM(100, 1, 0, Tcmb0=0)
assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_transverse_distance_z1z2():
tcos = flrw.FlatLambdaCDM(100, 0.3, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))
# Tests that should actually work, target values computed with
# http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
# Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2),
1313.2232194828466 * u.Mpc)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test Flat Universe with Omega_M > 1. Rarely used, but perfectly valid.
tcos = flrw.FlatLambdaCDM(100, 1.5, Tcmb0=0.0)
results = (2202.72682564,
1559.51679971,
-643.21002593,
1408.36365679,
85.09286258) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test non-flat cases to avoid simply testing
# comoving_distance_z1z2. Test array, array case.
tcos = flrw.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0)
results = (3535.931375645655,
2226.430046551708,
-1208.6817970036532,
2595.567367601969,
151.36592003406884) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
# Test positive curvature with scalar, array combination.
tcos = flrw.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0)
z1 = 0.1
z2 = 0, 0.1, 0.2, 0.5, 1.1, 2
results = (-281.31602666724865,
0.,
248.58093707820436,
843.9331377460543,
1618.6104987686672,
2287.5626543279927) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_angular_diameter_distance_z1z2():
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5])
# Tests that should actually work
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
646.22968662822018 * u.Mpc)
z1 = 2 # Separate test for z2<z1, returns negative value with warning
z2 = 1
results = -969.34452994 * u.Mpc
with pytest.warns(AstropyUserWarning, match='less than first redshift'):
assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2), results)
z1 = 0, 0, 0.5, 1
z2 = 2, 1, 2.5, 1.1
results = (1760.0628637762106,
1670.7497657219858,
1159.0970895962193,
115.72768186186921) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2),
results)
z1 = 0.1
z2 = 0.1, 0.2, 0.5, 1.1, 2
results = (0.,
332.09893173,
986.35635069,
1508.37010062,
1621.07937976) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2),
results)
# Non-flat (positive Ok0) test
tcos = flrw.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
620.1175337852428 * u.Mpc)
# Non-flat (negative Ok0) test
tcos = flrw.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
228.42914659246014 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_absorption_distance():
tcos = flrw.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.absorption_distance([1, 3]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance([1., 3.]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance(3), 7.98685853)
assert allclose(tcos.absorption_distance(3.), 7.98685853)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distances():
# Test distance calculations for various special case
# scenarios (no relativistic species, normal, massive neutrinos)
# These do not come from external codes -- they are just internal
# checks to make sure nothing changes if we muck with the distance
# calculators
z = np.array([1.0, 2.0, 3.0, 4.0])
# The pattern here is: no relativistic species, the relativistic
# species with massless neutrinos, then massive neutrinos
cos = flrw.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2953.93001902, 4616.7134253, 5685.07765971,
6440.80611897] * u.Mpc, rtol=1e-4)
cos = flrw.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3037.12620424, 4776.86236327, 5889.55164479,
6671.85418235] * u.Mpc, rtol=1e-4)
cos = flrw.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2471.80626824, 3567.1902565, 4207.15995626,
4638.20476018] * u.Mpc, rtol=1e-4)
# Flat
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3180.83488552, 5060.82054204, 6253.6721173,
7083.5374303] * u.Mpc, rtol=1e-4)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3180.42662867, 5059.60529655, 6251.62766102,
7080.71698117] * u.Mpc, rtol=1e-4)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.54183142, 3371.91131264, 3988.40711188,
4409.09346922] * u.Mpc, rtol=1e-4)
# Add w
cos = flrw.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3216.8296894, 5117.2097601, 6317.05995437,
7149.68648536] * u.Mpc, rtol=1e-4)
cos = flrw.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3143.56537758, 5000.32196494, 6184.11444601,
7009.80166062] * u.Mpc, rtol=1e-4)
cos = flrw.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.76035371, 3372.1971387, 3988.71362289,
4409.40817174] * u.Mpc, rtol=1e-4)
# Non-flat w
cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2849.6163356, 4428.71661565, 5450.97862778,
6179.37072324] * u.Mpc, rtol=1e-4)
cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2904.35580229, 4511.11471267, 5543.43643353,
6275.9206788] * u.Mpc, rtol=1e-4)
cos = flrw.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2473.32522734, 3581.54519631, 4232.41674426,
4671.83818117] * u.Mpc, rtol=1e-4)
# w0wa
cos = flrw.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2937.7807638, 4572.59950903, 5611.52821924,
6339.8549956] * u.Mpc, rtol=1e-4)
cos = flrw.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2907.34722624, 4539.01723198, 5593.51611281,
6342.3228444] * u.Mpc, rtol=1e-4)
cos = flrw.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2507.18336722, 3633.33231695, 4292.44746919,
4736.35404638] * u.Mpc, rtol=1e-4)
# Flatw0wa
cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3123.29892781, 4956.15204302, 6128.15563818,
6948.26480378] * u.Mpc, rtol=1e-4)
cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3122.92671907, 4955.03768936, 6126.25719576,
6945.61856513] * u.Mpc, rtol=1e-4)
cos = flrw.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.70072701, 3372.13719963, 3988.6571093,
4409.35399673] * u.Mpc, rtol=1e-4)
# wpwa
cos = flrw.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2954.68975298, 4599.83254834, 5643.04013201,
6373.36147627] * u.Mpc, rtol=1e-4)
cos = flrw.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2919.00656215, 4558.0218123, 5615.73412391,
6366.10224229] * u.Mpc, rtol=1e-4)
cos = flrw.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2629.48489827, 3874.13392319, 4614.31562397,
5116.51184842] * u.Mpc, rtol=1e-4)
# w0wz
cos = flrw.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3051.68786716, 4756.17714818, 5822.38084257,
6562.70873734] * u.Mpc, rtol=1e-4)
cos = flrw.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2997.8115653, 4686.45599916, 5764.54388557,
6524.17408738] * u.Mpc, rtol=1e-4)
cos = flrw.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2676.73467639, 3940.57967585, 4686.90810278,
5191.54178243] * u.Mpc, rtol=1e-4)
# Also test different numbers of massive neutrinos
# for FlatLambdaCDM to give the scalar nu density functions a
    # workout
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 0, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2777.71589173, 4186.91111666, 5046.0300719,
5636.10397302] * u.Mpc, rtol=1e-4)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 5, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2636.48149391, 3913.14102091, 4684.59108974,
5213.07557084] * u.Mpc, rtol=1e-4)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2563.5093049, 3776.63362071, 4506.83448243,
5006.50158829] * u.Mpc, rtol=1e-4)
cos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2,
m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2525.58017482, 3706.87633298, 4416.58398847,
4901.96669755] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_density():
# Testing neutrino density calculation
# Simple test cosmology, where we compare rho_nu and rho_gamma
# against the exact formula (eq 24/25 of Komatsu et al. 2011)
# computed using Mathematica. The approximation we use for f(y)
# is only good to ~ 0.5% (with some redshift dependence), so that's
# what we test to.
ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
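    # nuprefac is the standard relativistic neutrino-to-photon density ratio
    # per species: (7/8) (T_nu / T_gamma)^4 with T_nu / T_gamma = (4/11)^(1/3).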
# First try 3 massive neutrinos, all 100 eV -- note this is a universe
# seriously dominated by neutrinos!
tcos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(100.0, u.eV))
assert tcos.has_massive_nu
assert tcos.Neff == 3
nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323,
15633.5, 171.801])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
# Next, slightly less massive
tcos = flrw.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.25, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312,
39.1005, 1.11086])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
# For this one also test Onu directly
onu_exp = np.array([0.01890217, 0.05244681, 0.0638236,
0.06999286, 0.1344951])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# And fairly light
tcos = flrw.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.01, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348,
1.90671, 1.00021])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00066599, 0.00172677, 0.0020732,
0.00268404, 0.0978313])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],
rtol=1e-4)
assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],
rtol=1e-4)
# Now a mixture of neutrino masses, with non-integer Neff
tcos = flrw.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04,
m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV))
nurel_exp = nuprefac * tcos.Neff * \
np.array([149.386233, 74.87915, 50.0518,
14.002403, 1.03702333])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00584959, 0.01493142, 0.01772291,
0.01963451, 0.10227728])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# Integer redshifts
ztest = ztest.astype(int)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_elliptic_comoving_distance_z1z2():
"""Regression test for #8388."""
cosmo = flrw.LambdaCDM(70., 2.3, 0.05, Tcmb0=0)
z = 0.2
assert allclose(cosmo.comoving_distance(z),
cosmo._integral_comoving_distance_z1z2(0., z))
assert allclose(cosmo._elliptic_comoving_distance_z1z2(0., z),
cosmo._integral_comoving_distance_z1z2(0., z))
SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES = [
flrw.FlatLambdaCDM(H0=70, Om0=0.0, Tcmb0=0.0), # de Sitter
flrw.FlatLambdaCDM(H0=70, Om0=1.0, Tcmb0=0.0), # Einstein - de Sitter
flrw.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0.0), # Hypergeometric
flrw.LambdaCDM(H0=70, Om0=0.3, Ode0=0.6, Tcmb0=0.0), # Elliptic
]
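# For reference (not exercised directly): in the de Sitter case E(z) = 1, so
# the comoving distance reduces to (c / H0) * z; the other entries exercise
# the Einstein-de Sitter, hypergeometric, and elliptic analytic branches
# noted above.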
ITERABLE_REDSHIFTS = [
(0, 1, 2, 3, 4), # tuple
[0, 1, 2, 3, 4], # list
np.array([0, 1, 2, 3, 4]), # array
]
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('cosmo', SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES)
@pytest.mark.parametrize('z', ITERABLE_REDSHIFTS)
def test_comoving_distance_iterable_argument(cosmo, z):
"""
Regression test for #10980
Test that specialized comoving distance methods handle iterable arguments.
"""
assert allclose(cosmo.comoving_distance(z),
cosmo._integral_comoving_distance_z1z2(0., z))
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('cosmo', SPECIALIZED_COMOVING_DISTANCE_COSMOLOGIES)
def test_comoving_distance_broadcast(cosmo):
"""
Regression test for #10980
Test that specialized comoving distance methods broadcast array arguments.
"""
z1 = np.zeros((2, 5))
z2 = np.ones((3, 1, 5))
z3 = np.ones((7, 5))
output_shape = np.broadcast(z1, z2).shape
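    # (2, 5) broadcast against (3, 1, 5) gives (3, 2, 5); z3's shape (7, 5)
    # is incompatible with z1's (2, 5) and must raise.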
# Check compatible array arguments return an array with the correct shape
assert cosmo._comoving_distance_z1z2(z1, z2).shape == output_shape
# Check incompatible array arguments raise an error
with pytest.raises(ValueError, match='z1 and z2 have different shapes'):
cosmo._comoving_distance_z1z2(z1, z3)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import pickle
# THIRD PARTY
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy import cosmology
from astropy.cosmology import parameters, realizations
from astropy.cosmology.realizations import Planck13, default_cosmology
def test_realizations_in_toplevel_dir():
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology`."""
d = dir(cosmology)
assert set(d) == set(cosmology.__all__)
for n in parameters.available:
assert n in d
def test_realizations_in_realizations_dir():
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology.realizations`."""
d = dir(realizations)
assert set(d) == set(realizations.__all__)
for n in parameters.available:
assert n in d
class Test_default_cosmology:
"""Tests for :class:`~astropy.cosmology.realizations.default_cosmology`."""
# -----------------------------------------------------
# Get
def test_get_fail(self):
"""Test bad inputs to :meth:`astropy.cosmology.default_cosmology.get`."""
# a not-valid option, but still a str
with pytest.raises(ValueError, match="Unknown cosmology"):
cosmo = default_cosmology.get("fail!")
# a not-valid type
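        # note: the doubled "must be" below is kept as-is; it presumably
        # matches the raised error message verbatim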
with pytest.raises(TypeError, match="'key' must be must be"):
cosmo = default_cosmology.get(object())
def test_get_current(self):
"""Test :meth:`astropy.cosmology.default_cosmology.get` current value."""
cosmo = default_cosmology.get(None)
assert cosmo is default_cosmology.get(default_cosmology._value)
def test_get_none(self):
"""Test :meth:`astropy.cosmology.default_cosmology.get` to `None`."""
cosmo = default_cosmology.get("no_default")
assert cosmo is None
@pytest.mark.parametrize("name", parameters.available)
def test_get_valid(self, name):
"""Test :meth:`astropy.cosmology.default_cosmology.get` from str."""
cosmo = default_cosmology.get(name)
assert cosmo is getattr(realizations, name)
def test_get_cosmology_from_string(self, recwarn):
"""Test method ``get_cosmology_from_string``."""
cosmo = default_cosmology.get_cosmology_from_string("no_default")
assert cosmo is None
cosmo = default_cosmology.get_cosmology_from_string("Planck13")
assert cosmo is Planck13
with pytest.raises(ValueError):
cosmo = default_cosmology.get_cosmology_from_string("fail!")
# -----------------------------------------------------
# Validate
def test_validate_fail(self):
"""Test :meth:`astropy.cosmology.default_cosmology.validate`."""
# bad input type
with pytest.raises(TypeError, match="must be a string or Cosmology"):
default_cosmology.validate(TypeError)
def test_validate_default(self):
"""Test method ``validate`` for specific values."""
value = default_cosmology.validate(None)
assert value is realizations.Planck18
@pytest.mark.parametrize("name", parameters.available)
def test_validate_str(self, name):
"""Test method ``validate`` for string input."""
value = default_cosmology.validate(name)
assert value is getattr(realizations, name)
@pytest.mark.parametrize("name", parameters.available)
def test_validate_cosmo(self, name):
"""Test method ``validate`` for cosmology instance input."""
cosmo = getattr(realizations, name)
value = default_cosmology.validate(cosmo)
assert value is cosmo
@pytest.mark.parametrize("name", parameters.available)
def test_pickle_builtin_realizations(name, pickle_protocol):
"""
Test in-built realizations can pickle and unpickle.
Also a regression test for #12008.
"""
# get class instance
original = getattr(cosmology, name)
# pickle and unpickle
f = pickle.dumps(original, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == original
assert unpickled.meta == original.meta
    # Without the units enabled, the cosmology itself still compares equal,
    # but the meta does not, because the redshift units stored there do not
    # compare equal. This is a weird, known issue.
unpickled = pickle.loads(f)
assert unpickled == original
assert unpickled.meta != original.meta
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import sys
from io import StringIO
import numpy as np
import pytest
from astropy import units as u
from astropy.cosmology import core, flrw
from astropy.cosmology.funcs import _z_at_scalar_value, z_at_value
from astropy.cosmology.realizations import (WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15,
Planck18)
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.exceptions import AstropyUserWarning
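# For orientation: z_at_value numerically inverts a monotonic cosmological
# function f(z). A minimal sketch of the intended round trip (value
# approximate):
#
#     z = z_at_value(Planck13.age, 2 * u.Gyr)   # ~3.198
#     assert allclose(Planck13.age(z), 2 * u.Gyr)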
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_scalar():
# These are tests of expected values, and hence have less precision
# than the roundtrip tests below (test_z_at_value_roundtrip);
# here we have to worry about the cosmological calculations
# giving slightly different values on different architectures,
# there we are checking internal consistency on the same architecture
# and so can be more demanding
cosmo = Planck13
assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.19812268, rtol=1e-6)
assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr), 0.795198375, rtol=1e-6)
assert allclose(z_at_value(cosmo.distmod, 46 * u.mag), 1.991389168, rtol=1e-6)
assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc), 1.36857907, rtol=1e-6)
assert allclose(z_at_value(cosmo.luminosity_distance, 26.037193804 * u.Gpc, ztol=1e-10),
3, rtol=1e-9)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=2),
0.681277696, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=2.5),
3.7914908, rtol=1e-6)
# test behavior when the solution is outside z limits (should
# raise a CosmologyError)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.)
@pytest.mark.skipif('not HAS_SCIPY')
class Test_ZatValue:
def setup_class(self):
self.cosmo = Planck13
def test_broadcast_arguments(self):
"""Test broadcast of arguments."""
# broadcasting main argument
assert allclose(
z_at_value(self.cosmo.age, [2, 7] * u.Gyr),
[3.1981206134773115, 0.7562044333305182], rtol=1e-6)
# basic broadcast of secondary arguments
assert allclose(
z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=[0, 2.5], zmax=[2, 4]),
[0.681277696, 3.7914908], rtol=1e-6)
# more interesting broadcast
assert allclose(
z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=[[0, 2.5]], zmax=[2, 4]),
[[0.681277696, 3.7914908]], rtol=1e-6)
def test_broadcast_bracket(self):
"""`bracket` has special requirements."""
# start with an easy one
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=None),
3.1981206134773115, rtol=1e-6)
# now actually have a bracket
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4]),
3.1981206134773115, rtol=1e-6)
# now a bad length
with pytest.raises(ValueError, match="sequence"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4, 4, 5])
        # now the wrong dtype: an ndarray, but not an object array
with pytest.raises(TypeError, match="dtype"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=np.array([0, 4]))
# now an object array of brackets
        bracket = np.array([[0, 4], [0, 3, 4]], dtype=object)
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=bracket),
[3.1981206134773115, 3.1981206134773115], rtol=1e-6)
def test_bad_broadcast(self):
"""Shapes mismatch as expected"""
with pytest.raises(ValueError, match="broadcast"):
z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=[0, 2.5, 0.1], zmax=[2, 4])
def test_scalar_input_to_output(self):
"""Test scalar input returns a scalar."""
z = z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=0, zmax=2)
assert isinstance(z, u.Quantity)
assert z.dtype == np.float64
assert z.shape == ()
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_numpyvectorize():
"""Test that numpy vectorize fails on Quantities.
If this test starts failing then numpy vectorize can be used instead of
the home-brewed vectorization. Please submit a PR making the change.
"""
z_at_value = np.vectorize(_z_at_scalar_value,
excluded=["func", "method", "verbose"])
with pytest.raises(u.UnitConversionError, match="dimensionless quantities"):
z_at_value(Planck15.age, 10*u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_verbose(monkeypatch):
cosmo = Planck13
# Test the "verbose" flag. Since this uses "print", need to mod stdout
mock_stdout = StringIO()
monkeypatch.setattr(sys, 'stdout', mock_stdout)
resx = z_at_value(cosmo.age, 2 * u.Gyr, verbose=True)
assert str(resx.value) in mock_stdout.getvalue() # test "verbose" prints res
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('method', ['Brent', 'Golden', 'Bounded'])
def test_z_at_value_bracketed(method):
"""
Test 2 solutions for angular diameter distance by not constraining zmin, zmax,
but setting `bracket` on the appropriate side of the turning point z.
Setting zmin / zmax should override `bracket`.
"""
    cosmo = Planck13

    if method == 'Bounded':
        with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
            z = z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method)
        if z > 1.6:
            z = 3.7914908
            bracket = (0.9, 1.5)
        else:
            z = 0.6812777
            bracket = (1.6, 2.0)
        with pytest.warns(UserWarning, match=r"Option 'bracket' is ignored"):
            assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
                                       bracket=bracket), z, rtol=1e-6)
    else:
        assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
                                   bracket=(0.3, 1.0)), 0.6812777, rtol=1e-6)
        assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
                                   bracket=(2.0, 4.0)), 3.7914908, rtol=1e-6)
        assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
                                   bracket=(0.1, 1.5)), 0.6812777, rtol=1e-6)
        assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
                                   bracket=(0.1, 1.0, 2.0)), 0.6812777, rtol=1e-6)
        with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
            assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
                                       bracket=(0.9, 1.5)), 0.6812777, rtol=1e-6)
            assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
                                       bracket=(1.6, 2.0)), 3.7914908, rtol=1e-6)
        assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
                                   bracket=(1.6, 2.0), zmax=1.6), 0.6812777, rtol=1e-6)
        assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
                                   bracket=(0.9, 1.5), zmin=1.5), 3.7914908, rtol=1e-6)

    with pytest.raises(core.CosmologyError):
        with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
            z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
                       bracket=(3.9, 5.0), zmin=4.)


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('method', ['Brent', 'Golden', 'Bounded'])
def test_z_at_value_unconverged(method):
    """
    Test warnings on a non-converged solution when `maxfun` is set to too
    small an iteration number; only 'Bounded' returns a status value and a
    specific message.
    """
    cosmo = Planck18
    ztol = {'Brent': [1e-4, 1e-4], 'Golden': [1e-3, 1e-2], 'Bounded': [1e-3, 1e-1]}
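    # Per-method relative tolerances for the two solutions [z0, z1] checked
    # below; the looser values reflect poorer accuracy when the number of
    # function calls is capped by maxfun.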
    if method == 'Bounded':
        ctx = pytest.warns(AstropyUserWarning, match='Solver returned 1: Maximum number of '
                                                     'function calls reached')
    else:
        ctx = pytest.warns(AstropyUserWarning, match='Solver returned None')
    with ctx:
        z0 = z_at_value(cosmo.angular_diameter_distance, 1*u.Gpc, zmax=2, maxfun=13, method=method)
    with ctx:
        z1 = z_at_value(cosmo.angular_diameter_distance, 1*u.Gpc, zmin=2, maxfun=13, method=method)
    assert allclose(z0, 0.32442, rtol=ztol[method][0])
    assert allclose(z1, 8.18551, rtol=ztol[method][1])


@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('cosmo', [Planck13, Planck15, Planck18, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9,
                                   flrw.LambdaCDM, flrw.FlatLambdaCDM, flrw.wpwaCDM, flrw.w0wzCDM,
                                   flrw.wCDM, flrw.FlatwCDM, flrw.w0waCDM, flrw.Flatw0waCDM])
def test_z_at_value_roundtrip(cosmo):
    """
    Calculate values from a known redshift, and then check that
    z_at_value returns the right answer.
    """
    z = 0.5

    # Skip Ok, Otot, w, and de_density_scale because in the Planck
    # cosmologies they are redshift independent and hence uninvertible;
    # the *_distance_z1z2 methods take multiple arguments, so they require
    # special handling;
    # clone and is_equivalent are not redshift-dependent methods;
    # nu_relative_density is not redshift-dependent in the WMAP cosmologies.
    skip = ('Ok', 'Otot',
            'angular_diameter_distance_z1z2',
            'clone', 'is_equivalent',
            'de_density_scale', 'w')
    if str(cosmo.name).startswith('WMAP'):
        skip += ('nu_relative_density', )
    methods = inspect.getmembers(cosmo, predicate=inspect.ismethod)
    for name, func in methods:
        if name.startswith('_') or name in skip:
            continue
        fval = func(z)
        # we need a bracket here to pick the right solution for
        # angular_diameter_distance and related methods.
        # Be slightly more generous with the assertion rtol than the
        # ztol passed to z_at_value.
        got = z_at_value(func, fval, bracket=[0.3, 1.0], ztol=1e-12)
        assert allclose(got, z, rtol=2e-11), f'Round-trip testing {name} failed'
    # Test distance functions between two redshifts; only for realizations.
    if isinstance(cosmo.name, str):
        z2 = 2.0
        func_z1z2 = [
            lambda z1: cosmo._comoving_distance_z1z2(z1, z2),
            lambda z1: cosmo._comoving_transverse_distance_z1z2(z1, z2),
            lambda z1: cosmo.angular_diameter_distance_z1z2(z1, z2)
        ]
        for func in func_z1z2:
            fval = func(z)
            assert allclose(z, z_at_value(func, fval, zmax=1.5, ztol=1e-12), rtol=2e-11)
|
749598750c789626996fc599a4292c4b727eacbc3bafb685a9ef3fc67e147254 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Astropy FLRW classes."""
from . import base, lambdacdm, w0cdm, w0wacdm, w0wzcdm, wpwazpcdm
from .base import * # noqa: F401, F403
from .lambdacdm import * # noqa: F401, F403
from .w0cdm import * # noqa: F401, F403
from .w0wacdm import * # noqa: F401, F403
from .w0wzcdm import * # noqa: F401, F403
from .wpwazpcdm import * # noqa: F401, F403
# isort: split
# Importing private API for backward compatibility.
from .base import (H0units_to_invs, a_B_c2, critdens_const, kB_evK, quad, radian_in_arcmin,
radian_in_arcsec, sec_to_Gyr)
from .lambdacdm import ellipkinc, hyp2f1
__all__ = (base.__all__ + lambdacdm.__all__ + w0cdm.__all__ + w0wacdm.__all__
+ wpwazpcdm.__all__ + w0wzcdm.__all__)
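
# Minimal usage sketch (illustrative only; `FlatLambdaCDM` is re-exported
# from `lambdacdm` by the star imports above, and the parameter values here
# are made up for the example):
#
#     from astropy.cosmology.flrw import FlatLambdaCDM
#     cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
#     cosmo.age(0.5)  # age of the universe at z = 0.5, as a Quantity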
|