# hash: 5e282234101ff56e5e1a22a9023d6d7e5b702f31b8431fafd63c5c8b893fc905
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import numpy as np
from astropy.io import fits
from . import FitsTestCase
class TestDivisionFunctions(FitsTestCase):
"""Test code units that rely on correct integer division."""
def test_rec_from_string(self):
with fits.open(self.data('tb.fits')) as t1:
s = t1[1].data.tobytes()
np.rec.array(
s,
dtype=np.dtype([('c1', '>i4'), ('c2', '|S3'),
('c3', '>f4'), ('c4', '|i1')]),
shape=len(s) // 12)
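# Note (editor's sketch): the divisor 12 is the dtype itemsize --
# 4 ('>i4') + 3 ('|S3') + 4 ('>f4') + 1 ('|i1') -- so the floor
# division above recovers the record count exactly:
#     assert np.dtype([('c1', '>i4'), ('c2', '|S3'),
#                      ('c3', '>f4'), ('c4', '|i1')]).itemsize == 12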
def test_card_with_continue(self):
h = fits.PrimaryHDU()
h.header['abc'] = 'abcdefg' * 20
def test_valid_hdu_size(self):
with fits.open(self.data('tb.fits')) as t1:
assert type(t1[1].size) is int # noqa
def test_hdu_get_size(self):
with fits.open(self.data('tb.fits')) as _:
pass
def test_section(self, capsys):
# section testing
with fits.open(self.data('arange.fits')) as fs:
assert np.all(fs[0].section[3, 2, 5] == np.array([357]))
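# Note: ImageHDU.section reads just the requested elements from disk
# rather than loading the whole array, which is why it is a good probe
# of the integer offset arithmetic this module tests. A minimal sketch:
#     fs[0].section[0:2, :, :]   # partial read; no full-array load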
# hash: f8b296713800bc23e4b05341fbe1a33abf1ac4c8402b312e7ad8f1096e044924
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import glob
import io
import os
import sys
import copy
import subprocess
import pytest
import numpy as np
from astropy.io.fits.hdu.base import _ValidHDU, _NonstandardHDU
from astropy.io.fits.verify import VerifyError, VerifyWarning
from astropy.io import fits
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.data import get_pkg_data_filenames
from . import FitsTestCase
class TestHDUListFunctions(FitsTestCase):
def test_update_name(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdul:
hdul[4].name = 'Jim'
hdul[4].ver = 9
assert hdul[('JIM', 9)].header['extname'] == 'JIM'
def test_hdu_file_bytes(self):
with fits.open(self.data('checksum.fits')) as hdul:
res = hdul[0].filebytes()
assert res == 11520
res = hdul[1].filebytes()
assert res == 8640
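# Note: FITS files are organized in 2880-byte blocks, so the sizes
# checked above are whole multiples of that: 11520 == 4 * 2880 and
# 8640 == 3 * 2880 (header blocks plus data blocks for each HDU).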
def test_hdulist_file_info(self):
def test_fileinfo(**kwargs):
assert res['datSpan'] == kwargs.get('datSpan', 2880)
assert res['resized'] == kwargs.get('resized', False)
assert res['filename'] == self.data('checksum.fits')
assert res['datLoc'] == kwargs.get('datLoc', 8640)
assert res['hdrLoc'] == kwargs.get('hdrLoc', 0)
assert res['filemode'] == 'readonly'
with fits.open(self.data('checksum.fits')) as hdul:
res = hdul.fileinfo(0)
res = hdul.fileinfo(1)
test_fileinfo(datLoc=17280, hdrLoc=11520)
hdu = fits.ImageHDU(data=hdul[0].data)
hdul.insert(1, hdu)
res = hdul.fileinfo(0)
test_fileinfo(resized=True)
res = hdul.fileinfo(1)
test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None)
res = hdul.fileinfo(2)
test_fileinfo(resized=True, datLoc=17280, hdrLoc=11520)
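# Note: the None values above reflect that the freshly inserted HDU has
# no on-disk location yet (the file has not been flushed), and
# 'resized' is True for every HDU because the insertion changed the
# file layout.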
def test_create_from_multiple_primary(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145
Ensure that a validation error occurs when saving an HDUList containing
multiple PrimaryHDUs.
"""
hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()])
pytest.raises(VerifyError, hdul.writeto, self.temp('temp.fits'),
output_verify='exception')
def test_append_primary_to_empty_list(self):
# Tests appending a Simple PrimaryHDU to an empty HDUList.
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_empty_list(self):
"""Tests appending a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_table_extension_to_empty_list(self):
"""Tests appending a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data('tb.fits')) as hdul1:
hdul.append(hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_primary_to_non_empty_list(self):
"""Tests appending a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data('arange.fits')) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_non_empty_list(self):
"""Tests appending a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data('tb.fits')) as hdul:
hdul.append(hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_non_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.append(hdu)
def test_insert_primary_to_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_empty_list(self):
"""Tests inserting a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_table_extension_to_empty_list(self):
"""Tests inserting a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
with fits.open(self.data('tb.fits')) as hdul1:
hdul.insert(0, hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_primary_to_non_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to a non-empty HDUList."""
with fits.open(self.data('arange.fits')) as hdul:
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_non_empty_list(self):
"""Tests inserting a Simple ExtensionHDU to a non-empty HDUList."""
with fits.open(self.data('tb.fits')) as hdul:
hdul.insert(1, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_non_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters'),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
hdul.insert(0, hdu)
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self):
"""
Tests inserting a Simple GroupsHDU to the beginning of an HDUList
that already contains a GroupsHDU.
"""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
with pytest.raises(ValueError):
hdul.insert(0, hdu)
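# Note: a random-groups structure must occupy the primary HDU, so an
# HDUList can hold at most one GroupsHDU and it has to come first --
# hence the ValueError here and in the two GroupsHDU tests above.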
def test_insert_extension_to_primary_in_non_empty_list(self):
# Tests inserting a Simple ExtensionHDU to a non-empty HDUList.
with fits.open(self.data('tb.fits')) as hdul:
hdul.insert(0, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'ImageHDU', 12, (), '', ''),
(3, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_image_extension_to_primary_in_non_empty_list(self):
"""
Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList
as the primary HDU.
"""
with fits.open(self.data('tb.fits')) as hdul:
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', ''),
(1, '', 1, 'ImageHDU', 12, (), '', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_filename(self):
"""Tests the HDUList filename method."""
with fits.open(self.data('tb.fits')) as hdul:
name = hdul.filename()
assert name == self.data('tb.fits')
def test_file_like(self):
"""
Tests the use of a file-like object with no tell or seek methods
in HDUList.writeto(), HDUList.flush() or astropy.io.fits.writeto()
"""
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul = fits.HDUList()
hdul.append(hdu)
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul.writeto(tmpfile)
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_2(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul = fits.open(tmpfile, mode='ostream')
hdul.append(hdu)
hdul.flush()
tmpfile.close()
hdul.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_3(self):
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
fits.writeto(tmpfile, np.arange(100, dtype=np.int32))
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_shallow_copy(self):
"""
Tests that `HDUList.__copy__()` and `HDUList.copy()` return a
shallow copy (regression test for #7211).
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
for hdulcopy in (hdul.copy(), copy.copy(hdul)):
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
assert hdulcopy[0] is hdul[0]
assert hdulcopy[1] is hdul[1]
def test_deep_copy(self):
"""
Tests that `HDUList.__deepcopy__()` returns a deep copy.
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
hdulcopy = copy.deepcopy(hdul)
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
for index in range(len(hdul)):
assert hdulcopy[index] is not hdul[index]
assert hdulcopy[index].header == hdul[index].header
np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data)
def test_new_hdu_extname(self):
"""
Tests that new extension HDUs that are added to an HDUList can be
properly indexed by their EXTNAME/EXTVER (regression test for
ticket:48).
"""
with fits.open(self.data('test0.fits')) as f:
hdul = fits.HDUList()
hdul.append(f[0].copy())
hdu = fits.ImageHDU(header=f[1].header)
hdul.append(hdu)
assert hdul[1].header['EXTNAME'] == 'SCI'
assert hdul[1].header['EXTVER'] == 1
assert hdul.index_of(('SCI', 1)) == 1
assert hdul.index_of(hdu) == len(hdul) - 1
def test_update_filelike(self):
"""Test opening a file-like object in update mode and resizing the
HDU.
"""
sf = io.BytesIO()
arr = np.zeros((100, 100))
hdu = fits.PrimaryHDU(data=arr)
hdu.writeto(sf)
sf.seek(0)
arr = np.zeros((200, 200))
hdul = fits.open(sf, mode='update')
hdul[0].data = arr
hdul.flush()
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_flush_readonly(self):
"""Test flushing changes to a file opened in a read only mode."""
oldmtime = os.stat(self.data('test0.fits')).st_mtime
with fits.open(self.data('test0.fits')) as hdul:
hdul[0].header['FOO'] = 'BAR'
with pytest.warns(AstropyUserWarning, match='mode is not supported') as w:
hdul.flush()
assert len(w) == 1
assert oldmtime == os.stat(self.data('test0.fits')).st_mtime
def test_fix_extend_keyword(self):
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU())
del hdul[0].header['EXTEND']
hdul.verify('silentfix')
assert 'EXTEND' in hdul[0].header
assert hdul[0].header['EXTEND'] is True
def test_fix_malformed_naxisj(self):
"""
Tests that malformed NAXISj values are fixed sensibly.
"""
hdu = fits.open(self.data('arange.fits'))
# Malform NAXISj header data
hdu[0].header['NAXIS1'] = 11.0
hdu[0].header['NAXIS2'] = '10.0'
hdu[0].header['NAXIS3'] = '7'
# Axes cache needs to be malformed as well
hdu[0]._axes = [11.0, '10.0', '7']
# Perform verification including the fix
hdu.verify('silentfix')
# Check that malformed data was converted
assert hdu[0].header['NAXIS1'] == 11
assert hdu[0].header['NAXIS2'] == 10
assert hdu[0].header['NAXIS3'] == 7
hdu.close()
def test_fix_wellformed_naxisj(self):
"""
Tests that well-formed NAXISj values are not modified.
"""
hdu = fits.open(self.data('arange.fits'))
# Fake new NAXISj header data
hdu[0].header['NAXIS1'] = 768
hdu[0].header['NAXIS2'] = 64
hdu[0].header['NAXIS3'] = 8
# Axes cache needs to be faked as well
hdu[0]._axes = [768, 64, 8]
# Perform verification including the fix
hdu.verify('silentfix')
# Check that the well-formed values were not modified
assert hdu[0].header['NAXIS1'] == 768
assert hdu[0].header['NAXIS2'] == 64
assert hdu[0].header['NAXIS3'] == 8
hdu.close()
def test_new_hdulist_extend_keyword(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114
Tests that adding a PrimaryHDU to a new HDUList object updates the
EXTEND keyword on that HDU.
"""
h0 = fits.Header()
hdu = fits.PrimaryHDU(header=h0)
sci = fits.ImageHDU(data=np.array(10))
image = fits.HDUList([hdu, sci])
image.writeto(self.temp('temp.fits'))
assert 'EXTEND' in hdu.header
assert hdu.header['EXTEND'] is True
def test_replace_memmaped_array(self):
# Copy the original before we modify it
with fits.open(self.data('test0.fits')) as hdul:
hdul.writeto(self.temp('temp.fits'))
hdul = fits.open(self.temp('temp.fits'), mode='update', memmap=True)
old_data = hdul[1].data.copy()
hdul[1].data = hdul[1].data + 1
hdul.close()
with fits.open(self.temp('temp.fits'), memmap=True) as hdul:
assert ((old_data + 1) == hdul[1].data).all()
def test_open_file_with_bad_file_padding(self):
"""
Test warning when opening files with extra padding at the end.
See https://github.com/astropy/astropy/issues/4351
"""
# write some arbitrary data to a FITS file
fits.writeto(self.temp('temp.fits'), np.arange(100))
# append some arbitrary number of zeros to the end
with open(self.temp('temp.fits'), 'ab') as fobj:
fobj.write(b'\x00' * 1234)
with pytest.warns(
AstropyUserWarning,
match='Unexpected extra padding at the end of the file.'
) as w:
with fits.open(self.temp('temp.fits')) as fobj:
fobj.info()
assert len(w) == 1
@pytest.mark.filterwarnings('ignore:Unexpected extra padding')
def test_open_file_with_end_padding(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106
Open files with end padding bytes.
"""
with fits.open(self.data('test0.fits'),
do_not_scale_image_data=True) as hdul:
info = hdul.info(output=False)
hdul.writeto(self.temp('temp.fits'))
with open(self.temp('temp.fits'), 'ab') as f:
f.seek(0, os.SEEK_END)
f.write(b'\0' * 2880)
assert info == fits.info(self.temp('temp.fits'), output=False,
do_not_scale_image_data=True)
def test_open_file_with_bad_header_padding(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136
Open files with nulls for header block padding instead of spaces.
"""
a = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=a)
hdu.writeto(self.temp('temp.fits'))
# Figure out where the header padding begins and fill it with nulls
end_card_pos = str(hdu.header).index('END' + ' ' * 77)
padding_start = end_card_pos + 80
padding_len = 2880 - padding_start
with open(self.temp('temp.fits'), 'r+b') as f:
f.seek(padding_start)
f.write(b'\0' * padding_len)
with pytest.warns(AstropyUserWarning, match='contains null bytes instead of spaces') as w:
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == a).all()
assert len(w) == 1
assert len(hdul) == 1
assert str(hdul[0].header) == str(hdu.header)
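# Note: the FITS standard pads the header out to a 2880-byte boundary
# with ASCII spaces after the END card; this test checks that files
# padded with NULs instead are still readable, just with a warning.
# The padding math above: each card image is 80 characters, so the
# fill runs from the byte after the END card's 80-character slot to
# the end of the 2880-byte block.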
def test_update_with_truncated_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148
Test that saving an update where the header is shorter than the
original header doesn't leave a stump from the old header in the file.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(hdu.header) < 34:
hdu.header[f'TEST{idx}'] = idx
idx += 1
hdu.writeto(self.temp('temp.fits'), checksum=True)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
# Modify the header, forcing it to be rewritten
hdul[0].header['TEST1'] = 2
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data).all()
def test_update_resized_header(self):
"""
Test saving updates to a file where the header is one block smaller
than before, and in the case where the header is one block larger than
before.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(str(hdu.header)) <= 2880:
hdu.header[f'TEST{idx}'] = idx
idx += 1
orig_header = hdu.header.copy()
hdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
while len(str(hdul[0].header)) > 2880:
del hdul[0].header[-1]
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header == orig_header[:-1]
assert (hdul[0].data == data).all()
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 101
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header[f'TEST{idx}'] = idx
idx += 1
# Touch something in the data too so that it has to be rewritten
hdul[0].data[0] = 27
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header[:-37] == orig_header[:-1]
assert hdul[0].data[0] == 27
assert (hdul[0].data[1:] == data[1:]).all()
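# Note on the block arithmetic above: a header block is 2880 bytes ==
# 36 card images of 80 characters each. Growing the stringified header
# past 2880 (or 2 * 2880) bytes forces an extra block, and deleting
# cards until it drops back under 2880 forces a one-block shrink --
# both of which require the file to be rewritten in place.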
def test_update_resized_header2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150
This is similar to test_update_resized_header, but specifically tests a
case of multiple consecutive flush() calls on the same HDUList object,
where each flush() requires a resize.
"""
data1 = np.arange(100)
data2 = np.arange(100) + 100
phdu = fits.PrimaryHDU(data=data1)
hdu = fits.ImageHDU(data=data2)
phdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='append') as hdul:
hdul.append(hdu)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 1
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header[f'TEST{idx}'] = idx
idx += 1
hdul.flush()
hdul.append(hdu)
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data1).all()
assert hdul[1].header == hdu.header
assert (hdul[1].data == data2).all()
assert (hdul[2].data == data2).all()
def test_hdul_fromstring(self):
"""
Test creating the HDUList structure in memory from a string containing
an entire FITS file. This is similar to test_hdu_fromstring but for an
entire multi-extension FITS file at once.
"""
# Tests HDUList.fromstring for all of Astropy's built in test files
def test_fromstring(filename):
with fits.open(filename) as hdul:
orig_info = hdul.info(output=False)
with open(filename, 'rb') as f:
dat = f.read()
hdul2 = fits.HDUList.fromstring(dat)
assert orig_info == hdul2.info(output=False)
for idx in range(len(hdul)):
assert hdul[idx].header == hdul2[idx].header
if hdul[idx].data is None or hdul2[idx].data is None:
assert hdul[idx].data == hdul2[idx].data
elif (hdul[idx].data.dtype.fields and
hdul2[idx].data.dtype.fields):
# Compare tables
for n in hdul[idx].data.names:
c1 = hdul[idx].data[n]
c2 = hdul2[idx].data[n]
assert (c1 == c2).all()
elif (any(dim == 0 for dim in hdul[idx].data.shape) or
any(dim == 0 for dim in hdul2[idx].data.shape)):
# For some reason some combinations of Python and Numpy
# on Windows result in MemoryErrors when trying to work
# on memmap arrays with more than one dimension but
# some dimensions of size zero, so include a special
# case for that
assert hdul[idx].data.shape == hdul2[idx].data.shape
else:
np.testing.assert_array_equal(hdul[idx].data,
hdul2[idx].data)
for filename in get_pkg_data_filenames('data', pattern='*.fits'):
if sys.platform == 'win32' and filename.endswith('zerowidth.fits'):
# Running this test on this file causes a crash in some
# versions of Numpy on Windows. See ticket:
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174
continue
elif filename.endswith(('variable_length_table.fits',
'theap-gap.fits')):
# Comparing variable length arrays is non-trivial and thus
# skipped at this point.
# TODO: That's probably possible, so one could make it work.
continue
test_fromstring(filename)
# Test that creating an HDUList from something silly raises a TypeError
pytest.raises(TypeError, fits.HDUList.fromstring, ['a', 'b', 'c'])
@pytest.mark.filterwarnings('ignore:Saving a backup')
def test_save_backup(self):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121
Save backup of file before flushing changes.
"""
self.copy_file('scale.fits')
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# Make some changes to the original file to force its header
# and data to be rewritten
hdul[0].header['TEST'] = 'TEST'
# This emits warning that needs to be ignored at the
# pytest.mark.filterwarnings level.
hdul[0].data[0] = 0
assert os.path.exists(self.temp('scale.fits.bak'))
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul1:
with fits.open(self.temp('scale.fits.bak'),
do_not_scale_image_data=True) as hdul2:
assert hdul1[0].header == hdul2[0].header
assert (hdul1[0].data == hdul2[0].data).all()
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# One more time to see if multiple backups are made
hdul[0].header['TEST2'] = 'TEST'
hdul[0].data[0] = 1
assert os.path.exists(self.temp('scale.fits.bak'))
assert os.path.exists(self.temp('scale.fits.bak.1'))
def test_replace_mmap_data(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
hdu_a = fits.PrimaryHDU(data=arr_a)
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.PrimaryHDU(data=arr_b)
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
with fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a) as hdul_a:
with fits.open(self.temp('test_b.fits'),
memmap=mmap_b) as hdul_b:
hdul_a[0].data = hdul_b[0].data
with fits.open(self.temp('test_a.fits')) as hdul_a:
assert np.all(hdul_a[0].data == arr_b)
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmapping B
test(True, False)
def test_replace_mmap_data_2(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work. Like test_replace_mmap_data but with
table data instead of image data.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
col_a = fits.Column(name='a', format='J', array=arr_a)
col_b = fits.Column(name='b', format='J', array=arr_b)
hdu_a = fits.BinTableHDU.from_columns([col_a])
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.BinTableHDU.from_columns([col_b])
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
with fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a) as hdul_a:
with fits.open(self.temp('test_b.fits'),
memmap=mmap_b) as hdul_b:
hdul_a[1].data = hdul_b[1].data
with fits.open(self.temp('test_a.fits')) as hdul_a:
assert 'b' in hdul_a[1].columns.names
assert 'a' not in hdul_a[1].columns.names
assert np.all(hdul_a[1].data['b'] == arr_b)
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmapping B
test(True, False)
def test_extname_in_hdulist(self):
"""
Tests to make sure that the 'in' operator works.
Regression test for https://github.com/astropy/astropy/issues/3060
"""
with fits.open(self.data('o4sp040b0_raw.fits')) as hdulist:
hdulist.append(fits.ImageHDU(name='a'))
assert 'a' in hdulist
assert 'A' in hdulist
assert ('a', 1) in hdulist
assert ('A', 1) in hdulist
assert 'b' not in hdulist
assert ('a', 2) not in hdulist
assert ('b', 1) not in hdulist
assert ('b', 2) not in hdulist
assert hdulist[0] in hdulist
assert fits.ImageHDU() not in hdulist
def test_overwrite(self):
hdulist = fits.HDUList([fits.PrimaryHDU()])
hdulist.writeto(self.temp('test_overwrite.fits'))
hdulist.writeto(self.temp('test_overwrite.fits'), overwrite=True)
def test_invalid_hdu_key_in_contains(self):
"""
Make sure invalid keys in the 'in' operator return False.
Regression test for https://github.com/astropy/astropy/issues/5583
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
# A more or less random assortment of things which are not valid keys.
bad_keys = [None, 3.5, {}]
for key in bad_keys:
assert key not in hdulist
def test_iteration_of_lazy_loaded_hdulist(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5585
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='nada'))
hdulist.append(fits.ImageHDU(name='SCI'))
filename = self.temp('many_extension.fits')
hdulist.writeto(filename)
f = fits.open(filename)
# Check that all extensions are read if f is not sliced
all_exts = [ext for ext in f]
assert len(all_exts) == 5
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Try a simple slice with no conditional on the ext. This is essentially
# the reported failure.
all_exts_but_zero = [ext for ext in f[1:]]
assert len(all_exts_but_zero) == 4
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Check whether behavior is proper if the upper end of the slice is not
# omitted.
read_exts = [ext for ext in f[1:4] if ext.header['EXTNAME'] == 'SCI']
assert len(read_exts) == 2
f.close()
def test_read_non_standard_hdu(self):
filename = self.temp('bad-fits.fits')
hdu = fits.PrimaryHDU()
hdu.header['FOO'] = 'BAR'
buf = io.BytesIO()
hdu.writeto(buf)
buf.seek(0)
hdustr = buf.read()
hdustr = hdustr.replace(b'SIMPLE = T',
b'SIMPLE = F')
with open(filename, mode='wb') as f:
f.write(hdustr)
with fits.open(filename) as hdul:
assert isinstance(hdul[0], _NonstandardHDU)
assert hdul[0].header['FOO'] == 'BAR'
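# Note: SIMPLE = F declares that the file does not claim to conform to
# the FITS standard; astropy still parses the header but wraps the HDU
# as _NonstandardHDU (asserted above) rather than a regular PrimaryHDU.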
def test_proper_error_raised_on_non_fits_file(self):
filename = self.temp('not-fits.fits')
with open(filename, mode='w', encoding='utf-8') as f:
f.write('Not a FITS file')
match = ('No SIMPLE card found, this file '
'does not appear to be a valid FITS file')
# This should raise an OSError because there is no end card.
with pytest.raises(OSError, match=match):
fits.open(filename)
with pytest.raises(OSError, match=match):
fits.open(filename, mode='append')
with pytest.raises(OSError, match=match):
fits.open(filename, mode='update')
def test_proper_error_raised_on_invalid_fits_file(self):
filename = self.temp('bad-fits.fits')
hdu = fits.PrimaryHDU()
hdu.header['FOO'] = 'BAR'
buf = io.BytesIO()
hdu.writeto(buf)
# write 80 additional bytes so the block will have the correct size
buf.write(b' '*80)
buf.seek(0)
buf.seek(80) # now remove the SIMPLE card
with open(filename, mode='wb') as f:
f.write(buf.read())
match = ('No SIMPLE card found, this file '
'does not appear to be a valid FITS file')
# This should raise an OSError because there is no end card.
with pytest.raises(OSError, match=match):
fits.open(filename)
with pytest.raises(OSError, match=match):
fits.open(filename, mode='append')
with pytest.raises(OSError, match=match):
fits.open(filename, mode='update')
with fits.open(filename, ignore_missing_simple=True) as hdul:
assert isinstance(hdul[0], _ValidHDU)
assert hdul[0].header['FOO'] == 'BAR'
def test_warning_raised_on_non_standard_simple_card(self):
filename = self.temp('bad-fits.fits')
hdu = fits.PrimaryHDU()
hdu.header['FOO'] = 'BAR'
buf = io.BytesIO()
hdu.writeto(buf)
# change the simple card format
buf.seek(0)
buf.write(b'SIMPLE = T ')
buf.seek(0)
with open(filename, mode='wb') as f:
f.write(buf.read())
match = ("Found a SIMPLE card but its format doesn't"
" respect the FITS Standard")
with pytest.warns(VerifyWarning, match=match):
fits.open(filename)
with pytest.warns(VerifyWarning, match=match):
fits.open(filename, mode='append')
with pytest.warns(VerifyWarning, match=match):
fits.open(filename, mode='update')
with fits.open(filename, ignore_missing_simple=True) as hdul:
assert isinstance(hdul[0], _ValidHDU)
assert hdul[0].header['FOO'] == 'BAR'
# change the simple card format
buf.seek(0)
buf.write(b'SIMPLE = T / This is a FITS file')
buf.seek(0)
with open(filename, mode='wb') as f:
f.write(buf.read())
with pytest.warns(VerifyWarning, match=match):
fits.open(filename)
def test_proper_error_raised_on_non_fits_file_with_unicode(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5594
The failure shows up when (in python 3+) you try to open a file
with unicode content that is not actually a FITS file. See:
https://github.com/astropy/astropy/issues/5594#issuecomment-266583218
"""
filename = self.temp('not-fits-with-unicode.fits')
with open(filename, mode='w', encoding='utf-8') as f:
f.write('Ce\xe7i ne marche pas')
# This should raise an OSError because there is no end card.
with pytest.raises(OSError, match='No SIMPLE card found, this file '
'does not appear to be a valid FITS file'):
fits.open(filename)
def test_no_resource_warning_raised_on_non_fits_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6168
The ResourceWarning shows up when (in python 3+) you try to
open a non-FITS file when using a filename.
"""
# To avoid creating the file multiple times the tests are
# all included in one test function. See the discussion on the
# PR at https://github.com/astropy/astropy/issues/6168
#
filename = self.temp('not-fits.fits')
with open(filename, mode='w') as f:
f.write('# header line\n')
f.write('0.1 0.2\n')
# Opening the file should raise an OSError however the file
# is opened (there are two distinct code paths, depending on
# whether ignore_missing_end is True or False).
#
# Explicit tests are added to make sure the file handle is not
# closed when passed in to fits.open. In this case the ResourceWarning
# was not raised.
# Make sure that files opened by the user are not closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=False)
assert not f.closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError), pytest.warns(VerifyWarning):
fits.open(f, ignore_missing_end=True)
assert not f.closed
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=False)
with pytest.raises(OSError), pytest.warns(VerifyWarning):
fits.open(filename, ignore_missing_end=True)
def test_pop_with_lazy_load(self):
filename = self.data('checksum.fits')
with fits.open(filename) as hdul:
# Try popping the hdulist before doing anything else. This makes sure
# that https://github.com/astropy/astropy/issues/7185 is fixed.
hdu = hdul.pop()
assert len(hdul) == 1
# Read the file again and try popping from the beginning
with fits.open(filename) as hdul2:
hdu2 = hdul2.pop(0)
assert len(hdul2) == 1
# Just a sanity check
with fits.open(filename) as hdul3:
assert len(hdul3) == 2
assert hdul3[0].header == hdu2.header
assert hdul3[1].header == hdu.header
def test_pop_extname(self):
with fits.open(self.data('o4sp040b0_raw.fits')) as hdul:
assert len(hdul) == 7
hdu1 = hdul[1]
hdu4 = hdul[4]
hdu_popped = hdul.pop(('SCI', 2))
assert len(hdul) == 6
assert hdu_popped is hdu4
hdu_popped = hdul.pop('SCI')
assert len(hdul) == 5
assert hdu_popped is hdu1
# Skip due to https://github.com/astropy/astropy/issues/8916
@pytest.mark.skipif('sys.platform.startswith("win32")')
def test_write_hdulist_to_stream(self):
"""
Unit test for https://github.com/astropy/astropy/issues/7435
to ensure that an HDUList can be written to a stream.
"""
data = np.array([[1, 2, 3], [4, 5, 6]])
hdu = fits.PrimaryHDU(data)
hdulist = fits.HDUList([hdu])
with open(self.temp('test.fits'), 'wb') as fout:
with subprocess.Popen(["cat"], stdin=subprocess.PIPE,
stdout=fout) as p:
hdulist.writeto(p.stdin)
def test_output_verify(self):
hdul = fits.HDUList([fits.PrimaryHDU()])
hdul[0].header['FOOBAR'] = 42
hdul.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
data = f.read()
# create invalid card
data = data.replace(b'FOOBAR =', b'FOOBAR = ')
with open(self.temp('test2.fits'), 'wb') as f:
f.write(data)
with pytest.raises(VerifyError):
with fits.open(self.temp('test2.fits'), mode='update') as hdul:
hdul[0].header['MORE'] = 'here'
with pytest.warns(VerifyWarning) as ww:
with fits.open(self.temp('test2.fits'), mode='update',
output_verify='fix+warn') as hdul:
hdul[0].header['MORE'] = 'here'
assert len(ww) == 6
msg = "Card 'FOOBAR ' is not FITS standard (equal sign not at column 8)"
assert msg in str(ww[3].message)
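# Note: output_verify accepts 'exception', 'ignore', 'fix', 'silentfix'
# and 'warn', plus '+'-combinations such as the 'fix+warn' used here.
# The first block above relies on the exception behavior; the second
# fixes what it can and reports each problem as a VerifyWarning.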
# hash: a1d4423cd9f4f189f236c66caee96b934da1c25e48e89519c7e97c44286c9e9a
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import collections
import warnings
from io import StringIO, BytesIO
import pytest
import numpy as np
from astropy.io import fits
from astropy.io.fits.verify import VerifyWarning, VerifyError
from astropy.utils.exceptions import AstropyUserWarning
from . import FitsTestCase
from astropy.io.fits.card import _pad
from astropy.io.fits.header import _pad_length
from astropy.io.fits.util import encode_ascii
def test_shallow_copy():
"""Make sure that operations on a shallow copy do not alter the original.
#4990."""
original_header = fits.Header([('a', 1), ('b', 1)])
copied_header = copy.copy(original_header)
# Modifying the original dict should not alter the copy
original_header['c'] = 100
assert 'c' not in copied_header
# and changing the copy should not change the original.
copied_header['a'] = 0
assert original_header['a'] == 1
def test_init_with_header():
"""Make sure that creating a Header from another Header makes a copy if
copy is True."""
original_header = fits.Header([('a', 10)])
new_header = fits.Header(original_header, copy=True)
original_header['a'] = 20
assert new_header['a'] == 10
new_header['a'] = 0
assert original_header['a'] == 20
def test_init_with_dict():
dict1 = {'a': 11, 'b': 12, 'c': 13, 'd': 14, 'e': 15}
h1 = fits.Header(dict1)
for i in dict1:
assert dict1[i] == h1[i]
def test_init_with_ordereddict():
# Create a list of tuples, each consisting of a letter and its index.
list1 = [(i, j) for j, i in enumerate('abcdefghijklmnopqrstuvwxyz')]
# Create an ordered dictionary and a header from this dictionary
dict1 = collections.OrderedDict(list1)
h1 = fits.Header(dict1)
# Check that the order of the initial list is preserved
assert all(h1[val] == list1[i][1] for i, val in enumerate(h1))
class TestHeaderFunctions(FitsTestCase):
"""Test Header and Card objects."""
def test_rename_keyword(self):
"""Test renaming keyword with rename_keyword."""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
header.rename_keyword('A', 'B')
assert 'A' not in header
assert 'B' in header
assert header[0] == 'B'
assert header['B'] == 'B'
assert header.comments['B'] == 'C'
@pytest.mark.parametrize('key', ['A', 'a'])
def test_indexing_case(self, key):
"""Check that indexing is case insensitive"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
assert key in header
assert header[key] == 'B'
assert header.get(key) == 'B'
assert header.index(key) == 0
assert header.comments[key] == 'C'
assert header.count(key) == 1
header.remove(key, ignore_missing=False)
def test_card_constructor_default_args(self):
"""Test Card constructor with default argument values."""
c = fits.Card()
assert '' == c.keyword
def test_card_from_bytes(self):
"""
Test loading a Card from a `bytes` object (assuming latin-1 encoding).
"""
c = fits.Card.fromstring(b"ABC = 'abc'")
assert c.keyword == 'ABC'
assert c.value == 'abc'
def test_string_value_card(self):
"""Test Card constructor with string value"""
c = fits.Card('abc', '<8 ch')
assert str(c) == _pad("ABC = '<8 ch '")
c = fits.Card('nullstr', '')
assert str(c) == _pad("NULLSTR = ''")
def test_boolean_value_card(self):
"""Test Card constructor with boolean value"""
c = fits.Card("abc", True)
assert str(c) == _pad("ABC = T")
c = fits.Card.fromstring('ABC = F')
assert c.value is False
def test_long_integer_value_card(self):
"""Test Card constructor with long integer value"""
c = fits.Card('long_int', -467374636747637647347374734737437)
assert str(c) == _pad("LONG_INT= -467374636747637647347374734737437")
def test_floating_point_value_card(self):
"""Test Card constructor with floating point value"""
c = fits.Card('floatnum', -467374636747637647347374734737437.)
if (str(c) != _pad("FLOATNUM= -4.6737463674763E+32") and
str(c) != _pad("FLOATNUM= -4.6737463674763E+032")):
assert str(c) == _pad("FLOATNUM= -4.6737463674763E+32")
def test_complex_value_card(self):
"""Test Card constructor with complex value"""
c = fits.Card('abc',
(1.2345377437887837487e88 + 6324767364763746367e-33j))
f1 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
f2 = _pad("ABC = (1.2345377437887E+088, 6.3247673647637E-015)")
f3 = _pad("ABC = (1.23453774378878E+88, 6.32476736476374E-15)")
if str(c) != f1 and str(c) != f2:
assert str(c) == f3
def test_card_image_constructed_too_long(self):
"""Test that over-long cards truncate the comment"""
# card image constructed from key/value/comment is too long
# (non-string value)
c = fits.Card('abc', 9, 'abcde' * 20)
with pytest.warns(fits.verify.VerifyWarning):
assert (str(c) ==
"ABC = 9 "
"/ abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeab")
c = fits.Card('abc', 'a' * 68, 'abcdefg')
with pytest.warns(fits.verify.VerifyWarning):
assert str(c) == f"ABC = '{'a' * 68}'"
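# Note: a card image is exactly 80 characters -- keyword in columns
# 1-8, '= ' in 9-10, value and optional '/ comment' in 11-80 -- so a
# non-continued card can only ever truncate the comment to fit, as the
# two cases above demonstrate.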
def test_constructor_filter_illegal_data_structures(self):
"""Test that Card constructor raises exceptions on bad arguments"""
pytest.raises(ValueError, fits.Card, ('abc',), {'value': (2, 3)})
pytest.raises(ValueError, fits.Card, 'key', [], 'comment')
def test_keyword_too_long(self):
"""Test that long Card keywords are allowed, but with a warning"""
pytest.warns(UserWarning, fits.Card, 'abcdefghi', 'long')
def test_illegal_characters_in_key(self):
"""
Test that Card constructor allows illegal characters in the keyword,
but creates a HIERARCH card.
"""
# This test used to check that a ValueError was raised, because a
# keyword like 'abc+' was simply not allowed. Now it should create a
# HIERARCH card.
with pytest.warns(AstropyUserWarning) as w:
c = fits.Card('abc+', 9)
assert len(w) == 1
assert c.image == _pad('HIERARCH abc+ = 9')
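# Note: HIERARCH is a convention (originally from ESO) that stores the
# whole keyword inside the value field area of the card, which is what
# allows keywords longer than 8 characters or containing characters
# outside the standard [A-Z0-9_-] set.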
def test_add_history(self):
header = fits.Header([('A', 'B', 'C'), ('HISTORY', 1),
('HISTORY', 2), ('HISTORY', 3), ('', '', ''),
('', '', '')])
header.add_history(4)
# One of the blanks should get used, so the length shouldn't change
assert len(header) == 6
assert header.cards[4].value == 4
assert header['HISTORY'] == [1, 2, 3, 4]
assert repr(header['HISTORY']) == '1\n2\n3\n4'
header.add_history(0, after='A')
assert len(header) == 6
assert header.cards[1].value == 0
assert header['HISTORY'] == [0, 1, 2, 3, 4]
def test_add_blank(self):
header = fits.Header([('A', 'B', 'C'), ('', 1), ('', 2), ('', 3),
('', '', ''), ('', '', '')])
header.add_blank(4)
# This time a new blank should be added, and the existing blanks don't
# get used... (though this is really kinda sketchy--there's a
# distinction between truly blank cards, and cards with blank keywords
# that isn't currently made in the code)
assert len(header) == 7
assert header.cards[6].value == 4
assert header[''] == [1, 2, 3, '', '', 4]
assert repr(header['']) == '1\n2\n3\n\n\n4'
header.add_blank(0, after='A')
assert len(header) == 8
assert header.cards[1].value == 0
assert header[''] == [0, 1, 2, 3, '', '', 4]
header[''] = 5
header[' '] = 6
assert header[''] == [0, 1, 2, 3, '', '', 4, 5, 6]
assert header[' '] == [0, 1, 2, 3, '', '', 4, 5, 6]
def test_update(self):
class FakeHeader(list):
def keys(self):
return [l[0] for l in self]
def __getitem__(self, key):
return next(l[1:] for l in self if l[0] == key)
header = fits.Header()
header.update({'FOO': ('BAR', 'BAZ')})
header.update(FakeHeader([('A', 1), ('B', 2, 'comment')]))
assert set(header.keys()) == {'FOO', 'A', 'B'}
assert header.comments['B'] == 'comment'
# test that comments are preserved
tmphdr = fits.Header()
tmphdr['HELLO'] = (1, 'this is a comment')
header.update(tmphdr)
assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO'}
assert header.comments['HELLO'] == 'this is a comment'
header.update(NAXIS1=100, NAXIS2=100)
assert set(header.keys()) == {'FOO', 'A', 'B', 'HELLO', 'NAXIS1', 'NAXIS2'}
assert set(header.values()) == {'BAR', 1, 2, 100, 100}
def test_update_comment(self):
hdul = fits.open(self.data('arange.fits'))
hdul[0].header.update({'FOO': ('BAR', 'BAZ')})
assert hdul[0].header['FOO'] == 'BAR'
assert hdul[0].header.comments['FOO'] == 'BAZ'
with pytest.raises(ValueError):
hdul[0].header.update({'FOO2': ('BAR', 'BAZ', 'EXTRA')})
hdul.writeto(self.temp('test.fits'))
hdul.close()
hdul = fits.open(self.temp('test.fits'), mode='update')
hdul[0].header.comments['FOO'] = 'QUX'
hdul.close()
hdul = fits.open(self.temp('test.fits'))
assert hdul[0].header.comments['FOO'] == 'QUX'
hdul[0].header.add_comment(0, after='FOO')
assert str(hdul[0].header.cards[-1]).strip() == 'COMMENT 0'
hdul.close()
def test_commentary_cards(self):
# commentary cards
val = "A commentary card's value has no quotes around it."
c = fits.Card("HISTORY", val)
assert str(c) == _pad('HISTORY ' + val)
val = "A commentary card has no comment."
c = fits.Card("COMMENT", val, "comment")
assert str(c) == _pad('COMMENT ' + val)
def test_commentary_card_created_by_fromstring(self):
# commentary card created by fromstring()
c = fits.Card.fromstring(
"COMMENT card has no comments. "
"/ text after slash is still part of the value.")
assert (c.value == 'card has no comments. '
'/ text after slash is still part of the value.')
assert c.comment == ''
def test_commentary_card_will_not_parse_numerical_value(self):
# commentary card will not parse the numerical value
c = fits.Card.fromstring("HISTORY (1, 2)")
assert str(c) == _pad("HISTORY (1, 2)")
def test_equal_sign_after_column8(self):
# equal sign after column 8 of a commentary card will be part of the
# string value
c = fits.Card.fromstring("HISTORY = (1, 2)")
assert str(c) == _pad("HISTORY = (1, 2)")
def test_blank_keyword(self):
c = fits.Card('', ' / EXPOSURE INFORMATION')
assert str(c) == _pad(' / EXPOSURE INFORMATION')
c = fits.Card.fromstring(str(c))
assert c.keyword == ''
assert c.value == ' / EXPOSURE INFORMATION'
def test_specify_undefined_value(self):
# this is how to specify an undefined value
c = fits.Card("undef", fits.card.UNDEFINED)
assert str(c) == _pad("UNDEF =")
def test_complex_number_using_string_input(self):
# complex number using string input
c = fits.Card.fromstring('ABC = (8, 9)')
assert str(c) == _pad("ABC = (8, 9)")
def test_fixable_non_standard_fits_card(self, capsys):
# fixable non-standard FITS card will keep the original format
c = fits.Card.fromstring('abc = + 2.1 e + 12')
assert c.value == 2100000000000.0
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert str(c) == _pad("ABC = +2.1E+12")
def test_fixable_non_fsc(self):
# fixable non-FSC: if the card is not parsable, its value will be
# assumed to be a string and everything after the first slash will be
# the comment
c = fits.Card.fromstring(
"no_quote= this card's value has no quotes "
"/ let's also try the comment")
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(c) == "NO_QUOTE= 'this card''s value has no quotes' "
"/ let's also try the comment ")
def test_undefined_value_using_string_input(self):
# undefined value using string input
c = fits.Card.fromstring('ABC = ')
assert str(c) == _pad("ABC =")
def test_undefined_keys_values(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['UNDEF'] = None
assert list(header.values()) == ['BAR', None]
assert list(header.items()) == [('FOO', 'BAR'), ('UNDEF', None)]
def test_mislocated_equal_sign(self, capsys):
# test mislocated "=" sign
c = fits.Card.fromstring('XYZ= 100')
assert c.keyword == 'XYZ'
assert c.value == 100
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert str(c) == _pad("XYZ = 100")
def test_equal_only_up_to_column_10(self, capsys):
# the test of "=" location is only up to column 10
# This test used to check if Astropy rewrote this card to a new format,
# something like "HISTO = '= (1, 2)". But since ticket #109 if the
# format is completely wrong we don't make any assumptions and the card
# should be left alone
c = fits.Card.fromstring("HISTO = (1, 2)")
with pytest.warns(AstropyUserWarning,
match=r'header keyword is invalid'):
assert str(c) == _pad("HISTO = (1, 2)")
# Likewise this card should just be left in its original form and
# we shouldn't guess how to parse it or rewrite it.
c = fits.Card.fromstring(" HISTORY (1, 2)")
with pytest.warns(AstropyUserWarning,
match=r'header keyword is invalid'):
assert str(c) == _pad(" HISTORY (1, 2)")
def test_verify_invalid_equal_sign(self):
# verification
c = fits.Card.fromstring('ABC= a6')
with pytest.warns(AstropyUserWarning) as w:
c.verify()
err_text1 = ("Card 'ABC' is not FITS standard (equal sign not at "
"column 8)")
err_text2 = ("Card 'ABC' is not FITS standard (invalid value "
"string: 'a6'")
assert len(w) == 4
assert err_text1 in str(w[1].message)
assert err_text2 in str(w[2].message)
def test_fix_invalid_equal_sign(self):
fix_text = "Fixed 'ABC' card to meet the FITS standard."
c = fits.Card.fromstring('ABC= a6')
with pytest.warns(AstropyUserWarning, match=fix_text) as w:
c.verify('fix')
assert len(w) == 4
assert str(c) == _pad("ABC = 'a6 '")
def test_long_string_value(self):
# test long string value
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment ")
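# Note: this is the long-string (CONTINUE) convention -- the value is
# split into chunks that fit the 80-character card image, every
# non-final chunk ends with '&' inside the quotes, and each follow-on
# card uses the CONTINUE keyword; over-long comments are continued the
# same way, as the trailing '&' / '' cards above show.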
def test_long_string_value_with_multiple_long_words(self):
"""
Regression test for https://github.com/astropy/astropy/issues/11298
"""
c = fits.Card('WHATEVER',
'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_'
'03)-AAABBBCCC.n.h5 SuperNavigationParameters_XXXX_YYYY'
'_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.xml')
assert (str(c) ==
"WHATEVER= 'SuperCalibrationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n&'"
"CONTINUE '.h5 &' "
"CONTINUE 'SuperNavigationParameters_XXXX_YYYY_ZZZZZ_KK_01_02_03)-AAABBBCCC.n.&'"
"CONTINUE 'xml' ")
def test_long_unicode_string(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/1
So long as a unicode string can be converted to ASCII it should have no
different behavior in this regard from a byte string.
"""
h1 = fits.Header()
h1['TEST'] = 'abcdefg' * 30
h2 = fits.Header()
h2['TEST'] = 'abcdefg' * 30
assert str(h1) == str(h2)
def test_long_string_repr(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/193
Ensure that the __repr__() for cards represented with CONTINUE cards is
split across multiple lines (broken at each *physical* card).
"""
header = fits.Header()
header['TEST1'] = ('Regular value', 'Regular comment')
header['TEST2'] = ('long string value ' * 10, 'long comment ' * 10)
header['TEST3'] = ('Regular value', 'Regular comment')
assert (repr(header).splitlines() ==
[str(fits.Card('TEST1', 'Regular value', 'Regular comment')),
"TEST2 = 'long string value long string value long string value long string &' ",
"CONTINUE 'value long string value long string value long string value long &' ",
"CONTINUE 'string value long string value long string value &' ",
"CONTINUE '&' / long comment long comment long comment long comment long ",
"CONTINUE '&' / comment long comment long comment long comment long comment ",
"CONTINUE '' / long comment ",
str(fits.Card('TEST3', 'Regular value', 'Regular comment'))])
def test_blank_keyword_long_value(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/194
Test that a blank keyword ('') can be assigned a too-long value that is
continued across multiple cards with blank keywords, just like COMMENT
and HISTORY cards.
"""
value = 'long string value ' * 10
header = fits.Header()
header[''] = value
assert len(header) == 3
assert ' '.join(header['']) == value.rstrip()
# Ensure that this works like other commentary keywords
header['COMMENT'] = value
header['HISTORY'] = value
assert header['COMMENT'] == header['HISTORY']
assert header['COMMENT'] == header['']
def test_long_string_from_file(self):
c = fits.Card('abc', 'long string value ' * 10, 'long comment ' * 10)
hdu = fits.PrimaryHDU()
hdu.header.append(c)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
c = hdul[0].header.cards['abc']
hdul.close()
assert (str(c) ==
"ABC = 'long string value long string value long string value long string &' "
"CONTINUE 'value long string value long string value long string value long &' "
"CONTINUE 'string value long string value long string value &' "
"CONTINUE '&' / long comment long comment long comment long comment long "
"CONTINUE '&' / comment long comment long comment long comment long comment "
"CONTINUE '' / long comment ")
def test_word_in_long_string_too_long(self):
# if a word in a long string is too long, it will be cut in the middle
c = fits.Card('abc', 'longstringvalue' * 10, 'longcomment' * 10)
assert (str(c) ==
"ABC = 'longstringvaluelongstringvaluelongstringvaluelongstringvaluelongstr&'"
"CONTINUE 'ingvaluelongstringvaluelongstringvaluelongstringvaluelongstringvalu&'"
"CONTINUE 'elongstringvalue&' "
"CONTINUE '&' / longcommentlongcommentlongcommentlongcommentlongcommentlongcomme"
"CONTINUE '' / ntlongcommentlongcommentlongcommentlongcomment ")
def test_long_string_value_via_fromstring(self, capsys):
# long string value via fromstring() method
c = fits.Card.fromstring(
_pad("abc = 'longstring''s testing & ' "
"/ comments in line 1") +
_pad("continue 'continue with long string but without the "
"ampersand at the end' /") +
_pad("continue 'continue must have string value (with quotes)' "
"/ comments with ''. "))
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(c) ==
"ABC = 'longstring''s testing continue with long string but without the &' "
"CONTINUE 'ampersand at the endcontinue must have string value (with quotes)&' "
"CONTINUE '' / comments in line 1 comments with ''. ")
def test_continue_card_with_equals_in_value(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/117
"""
c = fits.Card.fromstring(
_pad("EXPR = '/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits * &'") +
_pad("CONTINUE '5.87359e-12 * MWAvg(Av=0.12)&'") +
_pad("CONTINUE '&' / pysyn expression"))
assert c.keyword == 'EXPR'
assert (c.value ==
'/grp/hst/cdbs//grid/pickles/dat_uvk/pickles_uk_10.fits '
'* 5.87359e-12 * MWAvg(Av=0.12)')
assert c.comment == 'pysyn expression'
def test_final_continue_card_lacks_ampersand(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
h = fits.Header()
h['SVALUE'] = 'A' * 69
assert repr(h).splitlines()[-1] == _pad("CONTINUE 'AA'")
def test_final_continue_card_ampersand_removal_on_long_comments(self):
"""
Regression test for https://github.com/astropy/astropy/issues/3282
"""
c = fits.Card('TEST', 'long value' * 10, 'long comment &' * 10)
assert (str(c) ==
"TEST = 'long valuelong valuelong valuelong valuelong valuelong valuelong &' "
"CONTINUE 'valuelong valuelong valuelong value&' "
"CONTINUE '&' / long comment &long comment &long comment &long comment &long "
"CONTINUE '&' / comment &long comment &long comment &long comment &long comment "
"CONTINUE '' / &long comment & ")
def test_hierarch_card_creation(self):
# Test automatic upgrade to hierarch card
with pytest.warns(AstropyUserWarning, match='HIERARCH card will be created') as w:
c = fits.Card('ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert len(w) == 1
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
# Test manual creation of hierarch card
c = fits.Card('hierarch abcdefghi', 10)
assert str(c) == _pad("HIERARCH abcdefghi = 10")
c = fits.Card('HIERARCH ESO INS SLIT2 Y1FRML',
'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)')
assert (str(c) ==
"HIERARCH ESO INS SLIT2 Y1FRML= "
"'ENC=OFFSET+RESOL*acos((WID-(MAX+MIN))/(MAX-MIN)'")
def test_hierarch_with_abbrev_value_indicator(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/5
"""
c = fits.Card.fromstring("HIERARCH key.META_4='calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_hierarch_not_warn(self):
"""Check that compressed image headers do not issue HIERARCH warnings.
"""
filename = fits.util.get_testdata_filepath('compressed_image.fits')
with fits.open(filename) as hdul:
header = hdul[1].header
with warnings.catch_warnings(record=True) as warning_list:
header["HIERARCH LONG KEYWORD"] = 42
assert len(warning_list) == 0
assert header["LONG KEYWORD"] == 42
assert header["HIERARCH LONG KEYWORD"] == 42
# Check that it still warns if we do not use HIERARCH
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header["LONG KEYWORD2"] = 1
assert header["LONG KEYWORD2"] == 1
def test_hierarch_keyword_whitespace(self):
"""
Regression test for
https://github.com/spacetelescope/PyFITS/issues/6
Make sure any leading or trailing whitespace around HIERARCH
keywords is stripped from the actual keyword value.
"""
c = fits.Card.fromstring(
"HIERARCH key.META_4 = 'calFileVersion'")
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
# Test also with creation via the Card constructor
c = fits.Card('HIERARCH key.META_4', 'calFileVersion')
assert c.keyword == 'key.META_4'
assert c.value == 'calFileVersion'
assert c.comment == ''
def test_verify_mixed_case_hierarch(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/7
Assures that HIERARCH keywords with lower-case characters and other
normally invalid keyword characters are not considered invalid.
"""
c = fits.Card('HIERARCH WeirdCard.~!@#_^$%&', 'The value', 'a comment')
# This should not raise any exceptions
c.verify('exception')
assert c.keyword == 'WeirdCard.~!@#_^$%&'
assert c.value == 'The value'
assert c.comment == 'a comment'
# Test also the specific case from the original bug report
header = fits.Header([
('simple', True),
('BITPIX', 8),
('NAXIS', 0),
('EXTEND', True, 'May contain datasets'),
('HIERARCH key.META_0', 'detRow')
])
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
header2 = hdul[0].header
assert (str(header.cards[header.index('key.META_0')]) ==
str(header2.cards[header2.index('key.META_0')]))
def test_missing_keyword(self):
"""Test that accessing a non-existent keyword raises a KeyError."""
header = fits.Header()
# De-referencing header through the inline function should behave
# identically to accessing it in the pytest.raises context below.
pytest.raises(KeyError, lambda k: header[k], 'NAXIS')
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'NAXIS' not found."):
header['NAXIS']
def test_hierarch_card_lookup(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
assert 'abcdefghi' in header
assert header['abcdefghi'] == 10
# This used to be assert_false, but per ticket
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/155 hierarch keywords
# should be treated case-insensitively when performing lookups
assert 'ABCDEFGHI' in header
def test_hierarch_card_delete(self):
header = fits.Header()
header['hierarch abcdefghi'] = 10
del header['hierarch abcdefghi']
def test_hierarch_card_insert_delete(self):
header = fits.Header()
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header['abcdefghi'] = 10
header['abcdefgh'] = 10
header['abcdefg'] = 10
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header.insert(2, ('abcdefghij', 10))
del header['abcdefghij']
with pytest.warns(fits.verify.VerifyWarning,
match=r'greater than 8 characters'):
header.insert(2, ('abcdefghij', 10))
del header[2]
assert list(header.keys())[2] == 'abcdefg'.upper()
def test_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
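# pytest.warns records every warning raised inside this block, so the
# len(w) checks below count the cumulative number of warnings emitted.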
header.update({'HIERARCH BLAH BLAH': 'TESTA'})
assert len(w) == 0
assert 'BLAH BLAH' in header
assert header['BLAH BLAH'] == 'TESTA'
header.update({'HIERARCH BLAH BLAH': 'TESTB'})
assert len(w) == 0
assert header['BLAH BLAH'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLAH BLAH': 'TESTC'})
assert len(w) == 1
assert len(header) == 1
assert header['BLAH BLAH'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH blah blah': 'TESTD'})
assert len(w) == 1
assert len(header) == 1
assert header['blah blah'] == 'TESTD'
header.update({'blah blah': 'TESTE'})
assert len(w) == 2
assert len(header) == 1
assert header['blah blah'] == 'TESTE'
# Create a HIERARCH card > 8 characters without explicitly stating
# 'HIERARCH'
header.update({'BLAH BLAH BLAH': 'TESTA'})
assert len(w) == 3
assert msg in str(w[0].message)
header.update({'HIERARCH BLAH BLAH BLAH': 'TESTB'})
assert len(w) == 3
assert header['BLAH BLAH BLAH'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLAH BLAH BLAH': 'TESTC'})
assert len(w) == 4
assert header['BLAH BLAH BLAH'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH blah blah blah': 'TESTD'})
assert len(w) == 4
assert header['blah blah blah'] == 'TESTD'
header.update({'blah blah blah': 'TESTE'})
assert len(w) == 5
assert header['blah blah blah'] == 'TESTE'
def test_short_hierarch_create_and_update(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/158
Tests several additional use cases for working with HIERARCH cards,
specifically where the keyword is fewer than 8 characters, but contains
invalid characters such that it can only be created as a HIERARCH card.
"""
msg = 'a HIERARCH card will be created'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
header.update({'HIERARCH BLA BLA': 'TESTA'})
assert len(w) == 0
assert 'BLA BLA' in header
assert header['BLA BLA'] == 'TESTA'
header.update({'HIERARCH BLA BLA': 'TESTB'})
assert len(w) == 0
assert header['BLA BLA'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLA BLA': 'TESTC'})
assert len(w) == 1
assert header['BLA BLA'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH bla bla': 'TESTD'})
assert len(w) == 1
assert len(header) == 1
assert header['bla bla'] == 'TESTD'
header.update({'bla bla': 'TESTE'})
assert len(w) == 2
assert len(header) == 1
assert header['bla bla'] == 'TESTE'
header = fits.Header()
with pytest.warns(VerifyWarning) as w:
# Create a HIERARCH card containing invalid characters without
# explicitly stating 'HIERARCH'
header.update({'BLA BLA': 'TESTA'})
assert len(w) == 1
assert msg in str(w[0].message)
header.update({'HIERARCH BLA BLA': 'TESTB'})
assert len(w) == 1
assert header['BLA BLA'] == 'TESTB'
# Update without explicitly stating 'HIERARCH':
header.update({'BLA BLA': 'TESTC'})
assert len(w) == 2
assert header['BLA BLA'] == 'TESTC'
# Test case-insensitivity
header.update({'HIERARCH bla bla': 'TESTD'})
assert len(w) == 2
assert len(header) == 1
assert header['bla bla'] == 'TESTD'
header.update({'bla bla': 'TESTE'})
assert len(w) == 3
assert len(header) == 1
assert header['bla bla'] == 'TESTE'
def test_header_setitem_invalid(self):
header = fits.Header()
def test():
header['FOO'] = ('bar', 'baz', 'qux')
pytest.raises(ValueError, test)
def test_header_setitem_1tuple(self):
header = fits.Header()
header['FOO'] = ('BAR',)
header['FOO2'] = (None,)
assert header['FOO'] == 'BAR'
assert header['FOO2'] is None
assert header[0] == 'BAR'
assert header.comments[0] == ''
assert header.comments['FOO'] == ''
def test_header_setitem_2tuple(self):
header = fits.Header()
header['FOO'] = ('BAR', 'BAZ')
header['FOO2'] = (None, None)
assert header['FOO'] == 'BAR'
assert header['FOO2'] is None
assert header[0] == 'BAR'
assert header.comments[0] == 'BAZ'
assert header.comments['FOO'] == 'BAZ'
assert header.comments['FOO2'] == ''
def test_header_set_value_to_none(self):
"""
Setting the value of a card to None should simply give that card an
undefined value. Undefined value should map to None.
"""
header = fits.Header()
header['FOO'] = 'BAR'
assert header['FOO'] == 'BAR'
header['FOO'] = None
assert header['FOO'] is None
# Create a header that contains an undefined value and a defined
# value.
hstr = "UNDEF = \nDEFINED = 42"
header = fits.Header.fromstring(hstr, sep='\n')
# Explicitly add a card with an UNDEFINED value
c = fits.Card("UNDEF2", fits.card.UNDEFINED)
header.extend([c])
# And now assign an undefined value to the header through setitem
header['UNDEF3'] = fits.card.UNDEFINED
# Tuple assignment
header.append(("UNDEF5", None, "Undefined value"), end=True)
header.append("UNDEF6")
assert header['DEFINED'] == 42
assert header['UNDEF'] is None
assert header['UNDEF2'] is None
assert header['UNDEF3'] is None
assert header['UNDEF5'] is None
assert header['UNDEF6'] is None
# Assign an undefined value to a new card
header['UNDEF4'] = None
# Overwrite an existing value with None
header["DEFINED"] = None
# All headers now should be undefined
for c in header.cards:
assert c.value == fits.card.UNDEFINED
def test_set_comment_only(self):
header = fits.Header([('A', 'B', 'C')])
header.set('A', comment='D')
assert header['A'] == 'B'
assert header.comments['A'] == 'D'
def test_header_iter(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header) == ['A', 'C']
def test_header_slice(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
newheader = header[1:]
assert len(newheader) == 2
assert 'A' not in newheader
assert 'C' in newheader
assert 'E' in newheader
newheader = header[::-1]
assert len(newheader) == 3
assert newheader[0] == 'F'
assert newheader[1] == 'D'
assert newheader[2] == 'B'
newheader = header[::2]
assert len(newheader) == 2
assert 'A' in newheader
assert 'C' not in newheader
assert 'E' in newheader
def test_header_slice_assignment(self):
"""
Assigning to a slice should just assign new values to the cards
included in the slice.
"""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header[1:] = 1
assert header[1] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header[1:] = 'GH'
assert header[1] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header[1:] = ['H', 'I']
assert header[1] == 'H'
assert header[2] == 'I'
def test_header_slice_delete(self):
"""Test deleting a slice of cards from the header."""
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
del header[1:]
assert len(header) == 1
assert header[0] == 'B'
del header[:]
assert len(header) == 0
def test_wildcard_slice(self):
"""Test selecting a subsection of a header via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
newheader = header['AB*']
assert len(newheader) == 2
assert newheader[0] == 0
assert newheader[1] == 2
def test_wildcard_with_hyphen(self):
"""
Regression test for issue where wildcards did not work on keywords
containing hyphens.
"""
header = fits.Header([('DATE', 1), ('DATE-OBS', 2), ('DATE-FOO', 3)])
assert len(header['DATE*']) == 3
assert len(header['DATE?*']) == 2
assert len(header['DATE-*']) == 2
def test_wildcard_slice_assignment(self):
"""Test assigning to a header slice selected via wildcard matching."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
# Test assigning slice to the same value; this works similarly to numpy
# arrays
header['AB*'] = 1
assert header[0] == 1
assert header[2] == 1
# Though strings are iterable they should be treated as a scalar value
header['AB*'] = 'GH'
assert header[0] == 'GH'
assert header[2] == 'GH'
# Now assign via an iterable
header['AB*'] = ['H', 'I']
assert header[0] == 'H'
assert header[2] == 'I'
def test_wildcard_slice_deletion(self):
"""Test deleting cards from a header that match a wildcard pattern."""
header = fits.Header([('ABC', 0), ('DEF', 1), ('ABD', 2)])
del header['AB*']
assert len(header) == 1
assert header[0] == 1
def test_header_history(self):
header = fits.Header([('ABC', 0), ('HISTORY', 1), ('HISTORY', 2),
('DEF', 3), ('HISTORY', 4), ('HISTORY', 5)])
assert header['HISTORY'] == [1, 2, 4, 5]
def test_header_clear(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
header.clear()
assert 'A' not in header
assert 'C' not in header
assert len(header) == 0
@pytest.mark.parametrize('fitsext', [fits.ImageHDU(), fits.CompImageHDU()])
def test_header_clear_write(self, fitsext):
hdulist = fits.HDUList([fits.PrimaryHDU(), fitsext])
hdulist[1].header['FOO'] = 'BAR'
hdulist[1].header.clear()
with pytest.raises(VerifyError) as err:
hdulist.writeto(self.temp('temp.fits'), overwrite=True)
err_msg = "'XTENSION' card does not exist."
assert err_msg in str(err.value)
def test_header_fromkeys(self):
header = fits.Header.fromkeys(['A', 'B'])
assert 'A' in header
assert header['A'] is None
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] is None
assert header.comments['B'] == ''
def test_header_fromkeys_with_value(self):
header = fits.Header.fromkeys(['A', 'B'], 'C')
assert 'A' in header
assert header['A'] == 'C'
assert header.comments['A'] == ''
assert 'B' in header
assert header['B'] == 'C'
assert header.comments['B'] == ''
def test_header_fromkeys_with_value_and_comment(self):
header = fits.Header.fromkeys(['A'], ('B', 'C'))
assert 'A' in header
assert header['A'] == 'B'
assert header.comments['A'] == 'C'
def test_header_fromkeys_with_duplicates(self):
header = fits.Header.fromkeys(['A', 'B', 'A'], 'C')
assert 'A' in header
assert ('A', 0) in header
assert ('A', 1) in header
assert ('A', 2) not in header
assert header[0] == 'C'
assert header['A'] == 'C'
assert header[('A', 0)] == 'C'
assert header[2] == 'C'
assert header[('A', 1)] == 'C'
def test_header_items(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
assert list(header.items()) == [('A', 'B'), ('C', 'D')]
def test_header_iterkeys(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(header.keys(), header):
assert a == b
def test_header_itervalues(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
for a, b in zip(header.values(), ['B', 'D']):
assert a == b
def test_header_keys(self):
with fits.open(self.data('arange.fits')) as hdul:
assert (list(hdul[0].header) ==
['SIMPLE', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2', 'NAXIS3',
'EXTEND'])
def test_header_list_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
last = header.pop()
assert last == 'H'
assert len(header) == 3
assert list(header) == ['A', 'C', 'E']
mid = header.pop(1)
assert mid == 'D'
assert len(header) == 2
assert list(header) == ['A', 'E']
first = header.pop(0)
assert first == 'B'
assert len(header) == 1
assert list(header) == ['E']
pytest.raises(IndexError, header.pop, 42)
def test_header_dict_like_pop(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F'),
('G', 'H')])
pytest.raises(TypeError, header.pop, 'A', 'B', 'C')
last = header.pop('G')
assert last == 'H'
assert len(header) == 3
assert list(header) == ['A', 'C', 'E']
mid = header.pop('C')
assert mid == 'D'
assert len(header) == 2
assert list(header) == ['A', 'E']
first = header.pop('A')
assert first == 'B'
assert len(header) == 1
assert list(header) == ['E']
default = header.pop('X', 'Y')
assert default == 'Y'
assert len(header) == 1
pytest.raises(KeyError, header.pop, 'X')
def test_popitem(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 2
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 1
keyword, value = header.popitem()
assert keyword not in header
assert len(header) == 0
pytest.raises(KeyError, header.popitem)
def test_setdefault(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.setdefault('A') == 'B'
assert header.setdefault('C') == 'D'
assert header.setdefault('E') == 'F'
assert len(header) == 3
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
assert 'G' in header
assert header.setdefault('G', 'H') == 'H'
assert len(header) == 4
def test_update_from_dict(self):
"""
Test adding new cards and updating existing cards from a dict using
Header.update()
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update({'A': 'E', 'F': 'G'})
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
# Same as above but this time pass the update dict as keyword arguments
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update(A='E', F='G')
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_update_from_iterable(self):
"""
Test adding new cards and updating existing cards from an iterable of
cards and card tuples.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.update([('A', 'E'), fits.Card('F', 'G')])
assert header['A'] == 'E'
assert header[0] == 'E'
assert 'F' in header
assert header['F'] == 'G'
assert header[-1] == 'G'
def test_header_extend(self):
"""
Test extending a header both with and without stripping cards from the
extension header.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu2.header['MYKEY'] = ('some val', 'some comment')
hdu.header += hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Same thing, but using + instead of +=
hdu = fits.PrimaryHDU()
hdu.header = hdu.header + hdu2.header
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
# Directly append the other header in full--not usually a desirable
# operation when the header is coming from another HDU
hdu.header.extend(hdu2.header, strip=False)
assert len(hdu.header) == 11
assert list(hdu.header)[5] == 'XTENSION'
assert hdu.header[-1] == 'some val'
assert ('MYKEY', 1) in hdu.header
def test_header_extend_unique(self):
"""
Test extending the header with and without unique=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 6
assert hdu.header[-2] == 'some val'
assert hdu.header[-1] == 'some other val'
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu.header.extend(hdu2.header, unique=True)
assert len(hdu.header) == 5
assert hdu.header[-1] == 'some val'
def test_header_extend_unique_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added. Issue astropy/astropy#3967
"""
for commentary_card in ['', 'COMMENT', 'HISTORY']:
for is_unique in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = 'My text'
hdu.header.extend(hdu2.header, unique=is_unique)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == 'My text'
def test_header_extend_update(self):
"""
Test extending the header with and without update=True.
"""
hdu = fits.PrimaryHDU()
hdu2 = fits.ImageHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu2.header['MYKEY'] = ('some other val', 'some other comment')
hdu2.header['HISTORY'] = 'history 1'
hdu2.header['HISTORY'] = 'history 2'
hdu.header.extend(hdu2.header)
assert len(hdu.header) == 9
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) in hdu.header
assert hdu.header[('MYKEY', 1)] == 'some other val'
assert len(hdu.header['HISTORY']) == 3
assert hdu.header[-1] == 'history 2'
hdu = fits.PrimaryHDU()
hdu.header['MYKEY'] = ('some val', 'some comment')
hdu.header['HISTORY'] = 'history 1'
hdu.header.extend(hdu2.header, update=True)
assert len(hdu.header) == 7
assert ('MYKEY', 0) in hdu.header
assert ('MYKEY', 1) not in hdu.header
assert hdu.header['MYKEY'] == 'some other val'
assert len(hdu.header['HISTORY']) == 2
assert hdu.header[-1] == 'history 2'
def test_header_extend_update_commentary(self):
"""
Test extending header with and without unique=True and commentary
cards in the header being added.
Though not quite the same as astropy/astropy#3967, update=True hits
the same if statement as that issue.
"""
for commentary_card in ['', 'COMMENT', 'HISTORY']:
for is_update in [True, False]:
hdu = fits.PrimaryHDU()
# Make sure we are testing the case we want.
assert commentary_card not in hdu.header
hdu2 = fits.ImageHDU()
hdu2.header[commentary_card] = 'My text'
hdu.header.extend(hdu2.header, update=is_update)
assert len(hdu.header) == 5
assert hdu.header[commentary_card][0] == 'My text'
def test_header_extend_exact(self):
"""
Test that extending an empty header with the contents of an existing
header can exactly duplicate that header, given strip=False and
end=True.
"""
header = fits.getheader(self.data('test0.fits'))
header2 = fits.Header()
header2.extend(header, strip=False, end=True)
assert header == header2
def test_header_count(self):
header = fits.Header([('A', 'B'), ('C', 'D'), ('E', 'F')])
assert header.count('A') == 1
assert header.count('C') == 1
assert header.count('E') == 1
header['HISTORY'] = 'a'
header['HISTORY'] = 'b'
assert header.count('HISTORY') == 2
pytest.raises(KeyError, header.count, 'G')
def test_header_append_use_blanks(self):
"""
Tests that blank cards can be appended, and that future appends will
use blank cards when available (unless useblanks=False)
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
assert len(header) == 4
assert header[-1] == ''
assert header[-2] == ''
# New card should fill the first blank by default
header.append(('E', 'F'))
assert len(header) == 4
assert header[-2] == 'F'
assert header[-1] == ''
# This card should not use up a blank spot
header.append(('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[-1] == ''
assert header[-2] == 'H'
def test_header_append_keyword_only(self):
"""
Test appending a new card with just the keyword, and no value or
comment given.
"""
header = fits.Header([('A', 'B'), ('C', 'D')])
header.append('E')
assert len(header) == 3
assert list(header)[-1] == 'E'
assert header[-1] is None
assert header.comments['E'] == ''
# Try appending a blank--normally this can be accomplished with just
# header.append(), but header.append('') should also work (and is maybe
# a little more clear)
header.append('')
assert len(header) == 4
assert list(header)[-1] == ''
assert header[''] == ''
assert header.comments[''] == ''
def test_header_insert_use_blanks(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# Append a couple blanks
header.append()
header.append()
# Insert a new card; should use up one of the blanks
header.insert(1, ('E', 'F'))
assert len(header) == 4
assert header[1] == 'F'
assert header[-1] == ''
assert header[-2] == 'D'
# Insert a new card without using blanks
header.insert(1, ('G', 'H'), useblanks=False)
assert len(header) == 5
assert header[1] == 'H'
assert header[-1] == ''
def test_header_insert_before_keyword(self):
"""
Test that a keyword name or tuple can be used to insert new keywords.
Also tests the ``after`` keyword argument.
Regression test for https://github.com/spacetelescope/PyFITS/issues/12
"""
header = fits.Header([
('NAXIS1', 10), ('COMMENT', 'Comment 1'),
('COMMENT', 'Comment 3')])
header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
assert list(header.keys())[0] == 'NAXIS'
assert header[0] == 2
assert header.comments[0] == 'Number of axes'
header.insert('NAXIS1', ('NAXIS2', 20), after=True)
assert list(header.keys())[1] == 'NAXIS1'
assert list(header.keys())[2] == 'NAXIS2'
assert header[2] == 20
header.insert(('COMMENT', 1), ('COMMENT', 'Comment 2'))
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3']
header.insert(('COMMENT', 2), ('COMMENT', 'Comment 4'), after=True)
assert header['COMMENT'] == ['Comment 1', 'Comment 2', 'Comment 3',
'Comment 4']
header.insert(-1, ('TEST1', True))
assert list(header.keys())[-2] == 'TEST1'
header.insert(-1, ('TEST2', True), after=True)
assert list(header.keys())[-1] == 'TEST2'
assert list(header.keys())[-3] == 'TEST1'
def test_remove(self):
header = fits.Header([('A', 'B'), ('C', 'D')])
# When keyword is present in the header it should be removed.
header.remove('C')
assert len(header) == 1
assert list(header) == ['A']
assert 'C' not in header
# When keyword is not present in the header and ignore_missing is
# False, KeyError should be raised
with pytest.raises(KeyError):
header.remove('F')
# When keyword is not present and ignore_missing is True, KeyError
# will be ignored
header.remove('F', ignore_missing=True)
assert len(header) == 1
# Test for removing all instances of a keyword
header = fits.Header([('A', 'B'), ('C', 'D'), ('A', 'F')])
header.remove('A', remove_all=True)
assert 'A' not in header
assert len(header) == 1
assert list(header) == ['C']
assert header[0] == 'D'
def test_header_comments(self):
header = fits.Header([('A', 'B', 'C'), ('DEF', 'G', 'H')])
assert (repr(header.comments) ==
' A C\n'
' DEF H')
def test_comment_slices_and_filters(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
s = header.comments[1:]
assert list(s) == ['H', 'K']
s = header.comments[::-1]
assert list(s) == ['K', 'H', 'D']
s = header.comments['A*']
assert list(s) == ['D', 'K']
def test_comment_slice_filter_assign(self):
header = fits.Header([('AB', 'C', 'D'), ('EF', 'G', 'H'),
('AI', 'J', 'K')])
header.comments[1:] = 'L'
assert list(header.comments) == ['D', 'L', 'L']
assert header.cards[header.index('AB')].comment == 'D'
assert header.cards[header.index('EF')].comment == 'L'
assert header.cards[header.index('AI')].comment == 'L'
header.comments[::-1] = header.comments[:]
assert list(header.comments) == ['L', 'L', 'D']
header.comments['A*'] = ['M', 'N']
assert list(header.comments) == ['M', 'L', 'N']
def test_commentary_slicing(self):
header = fits.Header()
indices = list(range(5))
for idx in indices:
header['HISTORY'] = idx
# Just a few sample slice types; this won't get all corner cases but if
# these all work we should be in good shape
assert header['HISTORY'][1:] == indices[1:]
assert header['HISTORY'][:3] == indices[:3]
assert header['HISTORY'][:6] == indices[:6]
assert header['HISTORY'][:-2] == indices[:-2]
assert header['HISTORY'][::-1] == indices[::-1]
assert header['HISTORY'][1::-1] == indices[1::-1]
assert header['HISTORY'][1:5:2] == indices[1:5:2]
# Same tests, but copy the values first; as it turns out this is
# different from just directly doing an __eq__ as in the first set of
# assertions
header.insert(0, ('A', 'B', 'C'))
header.append(('D', 'E', 'F'), end=True)
assert list(header['HISTORY'][1:]) == indices[1:]
assert list(header['HISTORY'][:3]) == indices[:3]
assert list(header['HISTORY'][:6]) == indices[:6]
assert list(header['HISTORY'][:-2]) == indices[:-2]
assert list(header['HISTORY'][::-1]) == indices[::-1]
assert list(header['HISTORY'][1::-1]) == indices[1::-1]
assert list(header['HISTORY'][1:5:2]) == indices[1:5:2]
def test_update_commentary(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['HISTORY'] = 'ABC'
header['FRED'] = 'BARNEY'
header['HISTORY'] = 'DEF'
header['HISTORY'] = 'GHI'
assert header['HISTORY'] == ['ABC', 'DEF', 'GHI']
# Single value update
header['HISTORY'][0] = 'FOO'
assert header['HISTORY'] == ['FOO', 'DEF', 'GHI']
# Single value partial slice update
header['HISTORY'][1:] = 'BAR'
assert header['HISTORY'] == ['FOO', 'BAR', 'BAR']
# Multi-value update
header['HISTORY'][:] = ['BAZ', 'QUX']
assert header['HISTORY'] == ['BAZ', 'QUX', 'BAR']
def test_commentary_comparison(self):
"""
Regression test for an issue found in *writing* the regression test for
https://github.com/astropy/astropy/issues/2363, where comparison of
the list of values for a commentary keyword did not always compare
correctly with other iterables.
"""
header = fits.Header()
header['HISTORY'] = 'hello world'
header['HISTORY'] = 'hello world'
header['COMMENT'] = 'hello world'
assert header['HISTORY'] != header['COMMENT']
header['COMMENT'] = 'hello world'
assert header['HISTORY'] == header['COMMENT']
def test_long_commentary_card(self):
header = fits.Header()
header['FOO'] = 'BAR'
header['BAZ'] = 'QUX'
longval = 'ABC' * 30
header['HISTORY'] = longval
header['FRED'] = 'BARNEY'
header['HISTORY'] = longval
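# longval is 90 characters, so each HISTORY entry is split over two
# cards: the 8-character 'HISTORY ' keyword field leaves room for 72
# characters of text per 80-character card.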
assert len(header) == 7
assert list(header)[2] == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.set('HISTORY', longval, after='FOO')
assert len(header) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
header = fits.Header()
header.update({'FOO': 'BAR'})
header.update({'BAZ': 'QUX'})
longval = 'ABC' * 30
header.add_history(longval)
header.update({'FRED': 'BARNEY'})
header.add_history(longval)
assert len(header.cards) == 7
assert header.cards[2].keyword == 'FRED'
assert str(header.cards[3]) == 'HISTORY ' + longval[:72]
assert str(header.cards[4]).rstrip() == 'HISTORY ' + longval[72:]
header.add_history(longval, after='FOO')
assert len(header.cards) == 9
assert str(header.cards[1]) == 'HISTORY ' + longval[:72]
assert str(header.cards[2]).rstrip() == 'HISTORY ' + longval[72:]
def test_totxtfile(self):
with fits.open(self.data('test0.fits')) as hdul:
hdul[0].header.totextfile(self.temp('header.txt'))
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')),
update=True, update_first=True)
# Write the hdu out and read it back in again--it should be recognized
# as a PrimaryHDU
hdu.writeto(self.temp('test.fits'), output_verify='ignore')
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[0], fits.PrimaryHDU)
hdu = fits.ImageHDU()
hdu.header.update({'MYKEY': 'FOO'})
hdu.header.extend(hdu.header.fromtextfile(self.temp('header.txt')),
update=True, update_first=True, strip=False)
assert 'MYKEY' in hdu.header
assert 'XTENSION' not in hdu.header
assert 'SIMPLE' in hdu.header
hdu.writeto(self.temp('test.fits'), output_verify='ignore',
overwrite=True)
with fits.open(self.temp('test.fits')) as hdul2:
assert len(hdul2) == 2
assert 'MYKEY' in hdul2[1].header
def test_fromfile(self):
"""Regression test for https://github.com/astropy/astropy/issues/8711
"""
filename = self.data('scale.fits')
hdr = fits.Header.fromfile(filename)
assert hdr['DATASET'] == '2MASS'
def test_header_fromtextfile(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/122
Manually write a text file containing some header cards ending with
newlines and ensure that fromtextfile can read them back in.
"""
header = fits.Header()
header['A'] = ('B', 'C')
header['B'] = ('C', 'D')
header['C'] = ('D', 'E')
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert header == header2
def test_header_fromtextfile_with_end_card(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Make sure that when a Header is read from a text file that the END card
is ignored.
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
# We don't use header.totextfile here because it writes each card with
# trailing spaces to pad them out to 80 characters. But this bug only
# presents itself when each card ends immediately with a newline, and
# no trailing spaces
with open(self.temp('test.hdr'), 'w') as f:
f.write('\n'.join(str(c).strip() for c in header.cards))
f.write('\nEND')
new_header = fits.Header.fromtextfile(self.temp('test.hdr'))
assert 'END' not in new_header
assert header == new_header
def test_append_end_card(self):
"""
Regression test 2 for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/154
Manually adding an END card to a header should simply result in a
ValueError (as was the case in PyFITS 3.0 and earlier).
"""
header = fits.Header([('A', 'B', 'C'), ('D', 'E', 'F')])
def setitem(k, v):
header[k] = v
pytest.raises(ValueError, setitem, 'END', '')
pytest.raises(ValueError, header.append, 'END')
pytest.raises(ValueError, header.append, 'END', end=True)
pytest.raises(ValueError, header.insert, len(header), 'END')
pytest.raises(ValueError, header.set, 'END')
def test_invalid_end_cards(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/217
This tests the case where the END card looks like a normal card like
'END = ' and other similar oddities. As long as a card starts with END
and looks like it was intended to be the END card we allow it, but with
a warning.
"""
horig = fits.PrimaryHDU(data=np.arange(100)).header
def invalid_header(end, pad):
# Build up a goofy invalid header
# Start from a seemingly normal header
s = horig.tostring(sep='', endcard=False, padding=False)
# append the bogus end card
s += end
# add additional padding if requested
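# (FITS files are organized in 2880-byte blocks; _pad_length, a helper
# from astropy.io.fits.util, gives the number of characters needed to
# reach the next block boundary.)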
if pad:
s += ' ' * _pad_length(len(s))
# This will differ between Python versions
if isinstance(s, bytes):
return BytesIO(s)
else:
return StringIO(s)
# Basic case motivated by the original issue; it's as if the END card
# was appended by software that doesn't know to treat it specially, and
# it is given an = after it
s = invalid_header('END =', True)
with pytest.warns(AstropyUserWarning, match="Unexpected bytes trailing "
"END keyword: ' ='") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# A case similar to the last but with more spaces between END and the
# =, as though the '= ' value indicator were placed like that of a
# normal card
s = invalid_header('END = ', True)
with pytest.warns(AstropyUserWarning, match="Unexpected bytes trailing "
"END keyword: ' ='") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# END card with trailing gibberish
s = invalid_header('END$%&%^*%*', True)
with pytest.warns(AstropyUserWarning, match=r"Unexpected bytes trailing "
r"END keyword: '\$%&%\^\*%\*'") as w:
h = fits.Header.fromfile(s)
assert h == horig
assert len(w) == 1
# 'END' at the very end of a truncated file without padding; the way
# the block reader works currently this can only happen if the 'END'
# is at the very end of the file.
s = invalid_header('END', False)
with pytest.warns(AstropyUserWarning, match="Missing padding to end of "
"the FITS block") as w:
# Don't raise an exception on missing padding, but still produce a
# warning that the END card is incomplete
h = fits.Header.fromfile(s, padding=False)
assert h == horig
assert len(w) == 1
def test_invalid_characters(self):
"""
Test header with invalid characters
"""
# Generate invalid file with non-ASCII character
h = fits.Header()
h['FOO'] = 'BAR'
h['COMMENT'] = 'hello'
hdul = fits.PrimaryHDU(header=h, data=np.arange(5))
hdul.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
out = f.read()
out = out.replace(b'hello', 'héllo'.encode('latin1'))
out = out.replace(b'BAR', 'BÀR'.encode('latin1'))
with open(self.temp('test2.fits'), 'wb') as f2:
f2.write(out)
with pytest.warns(AstropyUserWarning, match="non-ASCII characters are "
"present in the FITS file") as w:
h = fits.getheader(self.temp('test2.fits'))
assert h['FOO'] == 'B?R'
assert h['COMMENT'] == 'h?llo'
assert len(w) == 1
def test_unnecessary_move(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/125
Ensures that a header is not modified when setting the position of a
keyword that's already in its correct position.
"""
header = fits.Header([('A', 'B'), ('B', 'C'), ('C', 'D')])
header.set('B', before=2)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after=0)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before='C')
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', after='A')
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('B', before=2)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
# 123 is well past the end, and C is already at the end, so it's in the
# right place already
header.set('C', before=123)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
header.set('C', after=123)
assert list(header) == ['A', 'B', 'C']
assert not header._modified
def test_invalid_float_cards(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137"""
# Create a header containing two of the problematic cards in the test
# case where this came up:
hstr = "FOCALLEN= +1.550000000000e+002\nAPERTURE= +0.000000000000e+000"
h = fits.Header.fromstring(hstr, sep='\n')
# First the case that *does* work prior to fixing this issue
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
# Now if this were reserialized, would new values for these cards be
# written with repaired exponent signs?
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert h.cards['FOCALLEN']._modified
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
assert h.cards['APERTURE']._modified
assert h._modified
# This is the case that was specifically causing problems; generating
# the card strings *before* parsing the values. Also, the card strings
# really should be "fixed" before being returned to the user
h = fits.Header.fromstring(hstr, sep='\n')
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert h.cards['FOCALLEN']._modified
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
assert h.cards['APERTURE']._modified
assert h['FOCALLEN'] == 155.0
assert h['APERTURE'] == 0.0
assert h._modified
# For the heck of it, try assigning the identical values and ensure
# that the newly fixed value strings are left intact
h['FOCALLEN'] = 155.0
h['APERTURE'] = 0.0
assert (str(h.cards['FOCALLEN']) ==
_pad("FOCALLEN= +1.550000000000E+002"))
assert (str(h.cards['APERTURE']) ==
_pad("APERTURE= +0.000000000000E+000"))
def test_invalid_float_cards2(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/140
"""
# The example for this test requires creating a FITS file containing a
# slightly misformatted float value. I can't actually even find a way
# to do that directly through Astropy--it won't let me.
hdu = fits.PrimaryHDU()
hdu.header['TEST'] = 5.0022221e-07
hdu.writeto(self.temp('test.fits'))
# Here we manually make the file invalid
with open(self.temp('test.fits'), 'rb+') as f:
f.seek(346) # Location of the exponent 'E' symbol
f.write(encode_ascii('e'))
with fits.open(self.temp('test.fits')) as hdul, \
pytest.warns(AstropyUserWarning) as w:
hdul.writeto(self.temp('temp.fits'), output_verify='warn')
assert len(w) == 5
# The first two warnings are just the headers to the actual warning
# message (HDU 0, Card 4). I'm still not sure things like that
# should be output as separate warning messages, but that's
# something to think about...
msg = str(w[3].message)
assert "(invalid value string: '5.0022221e-07')" in msg
def test_leading_zeros(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137, part 2
Ticket https://aeon.stsci.edu/ssb/trac/pyfits/ticket/137 also showed that in
float values like 0.001 the leading zero was unnecessarily being
stripped off when rewriting the header. Though leading zeros should be
removed from integer values to prevent misinterpretation as octal by
Python (for now Astropy will still maintain the leading zeros if no
changes are made to the value, but will drop them if changes are made).
"""
c = fits.Card.fromstring("APERTURE= +0.000000000000E+000")
assert str(c) == _pad("APERTURE= +0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 0.000000000000E+000")
assert str(c) == _pad("APERTURE= 0.000000000000E+000")
assert c.value == 0.0
c = fits.Card.fromstring("APERTURE= 017")
assert str(c) == _pad("APERTURE= 017")
assert c.value == 17
def test_assign_boolean(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/123
Tests assigning Python and Numpy boolean values to keyword values.
"""
fooimg = _pad('FOO     =                    T')
barimg = _pad('BAR     =                    F')
h = fits.Header()
h['FOO'] = True
h['BAR'] = False
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h['FOO'] = np.bool_(True)
h['BAR'] = np.bool_(False)
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
h = fits.Header()
h.append(fits.Card.fromstring(fooimg))
h.append(fits.Card.fromstring(barimg))
assert h['FOO'] is True
assert h['BAR'] is False
assert str(h.cards['FOO']) == fooimg
assert str(h.cards['BAR']) == barimg
def test_header_method_keyword_normalization(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/149
Basically ensures that all public Header methods are case-insensitive
w.r.t. keywords.
Provides a reasonably comprehensive test of several methods at once.
"""
h = fits.Header([('abC', 1), ('Def', 2), ('GeH', 3)])
assert list(h) == ['ABC', 'DEF', 'GEH']
assert 'abc' in h
assert 'dEf' in h
assert h['geh'] == 3
# Case insensitivity of wildcards
assert len(h['g*']) == 1
h['aBc'] = 2
assert h['abc'] == 2
# ABC already existed so assigning to aBc should not have added any new
# cards
assert len(h) == 3
del h['gEh']
assert list(h) == ['ABC', 'DEF']
assert len(h) == 2
assert h.get('def') == 2
h.set('Abc', 3)
assert h['ABC'] == 3
h.set('gEh', 3, before='Abc')
assert list(h) == ['GEH', 'ABC', 'DEF']
assert h.pop('abC') == 3
assert len(h) == 2
assert h.setdefault('def', 3) == 2
assert len(h) == 2
assert h.setdefault('aBc', 1) == 1
assert len(h) == 3
assert list(h) == ['GEH', 'DEF', 'ABC']
h.update({'GeH': 1, 'iJk': 4})
assert len(h) == 4
assert list(h) == ['GEH', 'DEF', 'ABC', 'IJK']
assert h['GEH'] == 1
assert h.count('ijk') == 1
assert h.index('ijk') == 3
h.remove('Def')
assert len(h) == 3
assert list(h) == ['GEH', 'ABC', 'IJK']
def test_end_in_comment(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/142
Tests a case where the comment of a card ends with END, and is followed
by several blank cards.
"""
data = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=data)
hdu.header['TESTKW'] = ('Test val', 'This is the END')
# Add a couple blanks after the END string
hdu.header.append()
hdu.header.append()
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), memmap=False) as hdul:
# memmap = False to avoid leaving open a mmap to the file when we
# access the data--this causes problems on Windows when we try to
# overwrite the file later
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Add blanks until the header is extended to two block sizes
while len(hdu.header) < 36:
hdu.header.append()
hdu.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as hdul:
assert 'TESTKW' in hdul[0].header
assert hdul[0].header == hdu.header
assert (hdul[0].data == data).all()
# Test parsing the same header when it's written to a text file
hdu.header.totextfile(self.temp('test.hdr'))
header2 = fits.Header.fromtextfile(self.temp('test.hdr'))
assert hdu.header == header2
def test_assign_unicode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/134
Assigning a unicode literal as a header value should not fail silently.
If the value can be converted to ASCII then it should just work.
Otherwise it should fail with an appropriate value error.
Also tests unicode for keywords and comments.
"""
erikku = '\u30a8\u30ea\u30c3\u30af'
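# Katakana spelling of 'Erik' -- guaranteed to contain non-ASCII
# characters.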
def assign(keyword, val):
h[keyword] = val
h = fits.Header()
h['FOO'] = 'BAR'
assert 'FOO' in h
assert h['FOO'] == 'BAR'
assert repr(h) == _pad("FOO = 'BAR '")
pytest.raises(ValueError, assign, erikku, 'BAR')
h['FOO'] = 'BAZ'
assert h['FOO'] == 'BAZ'
assert repr(h) == _pad("FOO = 'BAZ '")
pytest.raises(ValueError, assign, 'FOO', erikku)
h['FOO'] = ('BAR', 'BAZ')
assert h['FOO'] == 'BAR'
assert h.comments['FOO'] == 'BAZ'
assert repr(h) == _pad("FOO = 'BAR ' / BAZ")
pytest.raises(ValueError, assign, 'FOO', ('BAR', erikku))
pytest.raises(ValueError, assign, 'FOO', (erikku, 'BAZ'))
pytest.raises(ValueError, assign, 'FOO', (erikku, erikku))
def test_assign_non_ascii(self):
"""
First regression test for
https://github.com/spacetelescope/PyFITS/issues/37
Although test_assign_unicode ensures that `str` objects containing
non-ASCII characters cannot be assigned to headers, this test ensures
that it is not possible to assign bytes to a header at all.
"""
h = fits.Header()
with pytest.raises(ValueError, match="Illegal value: b'Hello'."):
h.set('TEST', b'Hello')
def test_header_strip_whitespace(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/146, and
for the solution that is optional stripping of whitespace from the end
of a header value.
By default extra whitespace is stripped off, but if
`fits.conf.strip_header_whitespace` = False it should not be
stripped.
"""
h = fits.Header()
h['FOO'] = 'Bar '
assert h['FOO'] == 'Bar'
c = fits.Card.fromstring("QUX = 'Bar '")
h.append(c)
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO = 'Bar '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
with fits.conf.set_temp('strip_header_whitespace', False):
assert h['FOO'] == 'Bar '
assert h['QUX'] == 'Bar '
assert h.cards['FOO'].image.rstrip() == "FOO     = 'Bar     '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
assert h['FOO'] == 'Bar'
assert h['QUX'] == 'Bar'
assert h.cards['FOO'].image.rstrip() == "FOO     = 'Bar     '"
assert h.cards['QUX'].image.rstrip() == "QUX = 'Bar '"
def test_keep_duplicate_history_in_orig_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/156
When creating a new HDU from an existing Header read from an existing
FITS file, if the original header contains duplicate HISTORY values
those duplicates should be preserved just as in the original header.
This bug occurred due to naivete in Header.extend.
"""
history = ['CCD parameters table ...',
' reference table oref$n951041ko_ccd.fits',
' INFLIGHT 12/07/2001 25/02/2002',
' all bias frames'] * 3
hdu = fits.PrimaryHDU()
# Add the history entries twice
for item in history:
hdu.header['HISTORY'] = item
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
new_hdu = fits.PrimaryHDU(header=hdu.header)
assert new_hdu.header['HISTORY'] == hdu.header['HISTORY']
new_hdu.writeto(self.temp('test2.fits'))
with fits.open(self.temp('test2.fits')) as hdul:
assert hdul[0].header['HISTORY'] == history
def test_invalid_keyword_cards(self):
"""
Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/109
Allow opening files with headers containing invalid keywords.
"""
# Create a header containing a few different types of BAD headers.
c1 = fits.Card.fromstring('CLFIND2D: contour = 0.30')
c2 = fits.Card.fromstring('Just some random text.')
c3 = fits.Card.fromstring('A' * 80)
hdu = fits.PrimaryHDU()
# This should work with some warnings
with pytest.warns(AstropyUserWarning) as w:
hdu.header.append(c1)
hdu.header.append(c2)
hdu.header.append(c3)
assert len(w) == 3
hdu.writeto(self.temp('test.fits'))
with pytest.warns(AstropyUserWarning) as w:
with fits.open(self.temp('test.fits')) as hdul:
# Merely opening the file should blast some warnings about the
# invalid keywords
assert len(w) == 3
header = hdul[0].header
assert 'CLFIND2D' in header
assert 'Just som' in header
assert 'AAAAAAAA' in header
assert header['CLFIND2D'] == ': contour = 0.30'
assert header['Just som'] == 'e random text.'
assert header['AAAAAAAA'] == 'A' * 72
# It should not be possible to assign to the invalid keywords
pytest.raises(ValueError, header.set, 'CLFIND2D', 'foo')
pytest.raises(ValueError, header.set, 'Just som', 'foo')
pytest.raises(ValueError, header.set, 'AAAAAAAA', 'foo')
def test_fix_hierarch_with_invalid_value(self, capsys):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/172
Ensures that when fixing a hierarch card it remains a hierarch card.
"""
c = fits.Card.fromstring('HIERARCH ESO DET CHIP PXSPACE = 5e6')
with pytest.warns(fits.verify.VerifyWarning,
match=r'Verification reported errors'):
c.verify('fix')
assert str(c) == _pad('HIERARCH ESO DET CHIP PXSPACE = 5E6')
def test_assign_inf_nan(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/11
For the time being it should not be possible to assign the floating
point values inf or nan to a header value, since this is not defined by
the FITS standard.
"""
h = fits.Header()
pytest.raises(ValueError, h.set, 'TEST', float('nan'))
pytest.raises(ValueError, h.set, 'TEST', np.nan)
pytest.raises(ValueError, h.set, 'TEST', np.float32('nan'))
pytest.raises(ValueError, h.set, 'TEST', float('inf'))
pytest.raises(ValueError, h.set, 'TEST', np.inf)
def test_update_bool(self):
"""
Regression test for an issue where a value of True in a header
cannot be updated to a value of 1, and likewise for False/0.
"""
h = fits.Header([('TEST', True)])
h['TEST'] = 1
assert h['TEST'] is not True
assert isinstance(h['TEST'], int)
assert h['TEST'] == 1
h['TEST'] = np.bool_(True)
assert h['TEST'] is True
h['TEST'] = False
assert h['TEST'] is False
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
h['TEST'] = 0
assert h['TEST'] is not False
assert isinstance(h['TEST'], int)
assert h['TEST'] == 0
h['TEST'] = np.bool_(False)
assert h['TEST'] is False
def test_update_numeric(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/49
Ensure that numeric values can be upcast/downcast between int, float,
and complex by assigning values that compare equal to the existing
value but are a different type.
"""
h = fits.Header()
h['TEST'] = 1
# int -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST    =                  1.0')
# float -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST    =                    1')
# int -> complex
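# (FITS represents a complex keyword value as a '(real, imaginary)'
# pair, which is what the startswith checks below expect.)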
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST    =           (1.0, 0.0)')
# complex -> float
h['TEST'] = 1.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST    =                  1.0')
# float -> complex
h['TEST'] = 1.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST    =           (1.0, 0.0)')
# complex -> int
h['TEST'] = 1
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST    =                    1')
# Now the same tests but with zeros
h['TEST'] = 0
# int -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST    =                  0.0')
# float -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST    =                    0')
# int -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST    =           (0.0, 0.0)')
# complex -> float
h['TEST'] = 0.0
assert isinstance(h['TEST'], float)
assert str(h).startswith('TEST    =                  0.0')
# float -> complex
h['TEST'] = 0.0+0.0j
assert isinstance(h['TEST'], complex)
assert str(h).startswith('TEST    =           (0.0, 0.0)')
# complex -> int
h['TEST'] = 0
assert isinstance(h['TEST'], int)
assert str(h).startswith('TEST    =                    0')
def test_newlines_in_commentary(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/51
Test data extracted from a header in an actual FITS file found in the
wild. Names have been changed to protect the innocent.
"""
# First ensure that we can't assign new keyword values with newlines in
# them
h = fits.Header()
pytest.raises(ValueError, h.set, 'HISTORY', '\n')
pytest.raises(ValueError, h.set, 'HISTORY', '\nabc')
pytest.raises(ValueError, h.set, 'HISTORY', 'abc\n')
pytest.raises(ValueError, h.set, 'HISTORY', 'abc\ndef')
test_cards = [
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18 "
"HISTORY File modified by user ' fred' with fv on 2013-04-23T11:16:29 "
"HISTORY File modified by user ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14 "
"HISTORY File modified by user 'wilma' with fv on 2013-04-22T21:42:18\nFile modif"
"HISTORY ied by user 'wilma' with fv on 2013-04-23T11:16:29\nFile modified by use"
"HISTORY r ' fred' with fv on 2013-11-04T16:59:14\nFile modified by user 'wilma' "
"HISTORY with fv on 2013-04-22T21:42:18\nFile modif\nied by user 'wilma' with fv "
"HISTORY on 2013-04-23T11:16:29\nFile modified by use\nr ' fred' with fv on 2013-1"
"HISTORY 1-04T16:59:14 "
]
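# Each quoted literal above is one raw HISTORY card image; adjacent
# literals concatenate into a single multi-card string, and any element
# containing '\n' must fail verification below.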
for card_image in test_cards:
c = fits.Card.fromstring(card_image)
if '\n' in card_image:
pytest.raises(fits.VerifyError, c.verify, 'exception')
else:
c.verify('exception')
def test_long_commentary_card_appended_to_header(self):
"""
If a HISTORY or COMMENT card with a too-long value is appended to a
header with Header.append (as opposed to assigning to hdr['HISTORY']),
it fails verification.
Regression test for https://github.com/astropy/astropy/issues/11486
"""
header = fits.Header()
value = 'abc' * 90
# this is what Table does when saving its history metadata key to a
# FITS file
header.append(('history', value))
assert len(header.cards) == 1
# Test Card._split() directly since this was the main problem area
key, val = header.cards[0]._split()
assert key == 'HISTORY' and val == value
# Try writing adding this header to an HDU and writing it to a file
hdu = fits.PrimaryHDU(header=header)
hdu.writeto(self.temp('test.fits'), overwrite=True)
def test_header_fromstring_bytes(self):
"""
Test reading a Header from a `bytes` string.
See https://github.com/astropy/astropy/issues/8706
"""
with open(self.data('test0.fits'), 'rb') as fobj:
pri_hdr_from_bytes = fits.Header.fromstring(fobj.read())
pri_hdr = fits.getheader(self.data('test0.fits'))
assert pri_hdr['NAXIS'] == pri_hdr_from_bytes['NAXIS']
assert pri_hdr == pri_hdr_from_bytes
assert pri_hdr.tostring() == pri_hdr_from_bytes.tostring()
def test_set_keyword_with_space(self):
"""
Regression test for https://github.com/astropy/astropy/issues/10479
"""
hdr = fits.Header()
hdr['KEY2 '] = 2
hdr['KEY2 '] = 4
assert len(hdr) == 1
assert hdr['KEY2'] == 4
assert hdr['KEY2 '] == 4
def test_strip(self):
hdr = fits.getheader(self.data('tb.fits'), ext=1)
hdr['FOO'] = 'bar'
hdr.strip()
assert set(hdr) == {'HISTORY', 'FOO'}
hdr = fits.getheader(self.data('tb.fits'), ext=1)
hdr['FOO'] = 'bar'
hdr = hdr.copy(strip=True)
assert set(hdr) == {'HISTORY', 'FOO'}
def test_update_invalid_card(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5408
Tests updating the value of a card that is malformatted (with an
invalid value literal).
This tests two ways of reproducing the problem, one working with a
Card object directly, and one when reading/writing a header containing
such an invalid card.
"""
card = fits.Card.fromstring('KW = INF / Comment')
card.value = 'FIXED'
assert tuple(card) == ('KW', 'FIXED', 'Comment')
card.verify('fix')
assert tuple(card) == ('KW', 'FIXED', 'Comment')
card = fits.Card.fromstring('KW = INF')
hdu = fits.PrimaryHDU()
# This is a loophole to write a header containing a malformatted card
card._verified = True
hdu.header.append(card)
hdu.header.tofile(self.temp('bogus.fits'))
with fits.open(self.temp('bogus.fits')) as hdul:
hdul[0].header['KW'] = -1
hdul.writeto(self.temp('bogus_fixed.fits'))
with fits.open(self.temp('bogus_fixed.fits')) as hdul:
assert hdul[0].header['KW'] == -1
def test_index_numpy_int(self):
header = fits.Header([('A', 'FOO'), ('B', 2), ('C', 'BAR')])
idx = np.int8(2)
assert header[idx] == 'BAR'
header[idx] = 'BAZ'
assert header[idx] == 'BAZ'
header.insert(idx, ('D', 42))
assert header[idx] == 42
header.add_comment('HELLO')
header.add_comment('WORLD')
assert header['COMMENT'][np.int64(1)] == 'WORLD'
header.append(('C', 'BAZBAZ'))
assert header[('C', np.int16(0))] == 'BAZ'
assert header[('C', np.uint32(1))] == 'BAZBAZ'
def test_header_data_size(self):
"""
Tests data size calculation, both with and without padding, given a Header.
"""
hdu = fits.PrimaryHDU()
header = hdu.header
assert header.data_size == 0
header['BITPIX'] = 32
header['NAXIS'] = 2
header['NAXIS1'] = 100
header['NAXIS2'] = 100
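# 100 x 100 pixels at 32 bits (4 bytes) each gives 40000 bytes of data,
# padded up to the next multiple of the 2880-byte FITS block:
# 14 * 2880 = 40320.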
assert header.data_size == 40000
assert header.data_size_padded == 40320
class TestRecordValuedKeywordCards(FitsTestCase):
"""
Tests for handling of record-valued keyword cards as used by the
`FITS WCS distortion paper
<https://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
These tests are derived primarily from the release notes for PyFITS 1.4 (in
which this feature was first introduced).
Note that extra leading spaces in the `value` fields should be parsed on input,
but will be stripped in the cards.
"""
def setup(self):
super().setup()
self._test_header = fits.Header()
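# Each value of the form '<field-specifier>: <number>' creates a
# record-valued keyword card; e.g. the first set() below is readable as
# self._test_header['DP1.NAXIS'] == 2.0.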
self._test_header.set('DP1', 'NAXIS: 2')
self._test_header.set('DP1', 'AXIS.1: 1')
self._test_header.set('DP1', 'AXIS.2: 2')
self._test_header.set('DP1', 'NAUX: 2')
self._test_header.set('DP1', 'AUX.1.COEFF.0: 0')
self._test_header.set('DP1', 'AUX.1.POWER.0: 1')
self._test_header.set('DP1', 'AUX.1.COEFF.1: 0.00048828125')
self._test_header.set('DP1', 'AUX.1.POWER.1: 1')
def test_initialize_rvkc(self):
"""
Test different methods for initializing a card that should be
recognized as a RVKC
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
assert c.comment == 'A comment'
c = fits.Card.fromstring("DP1 = 'NAXIS: 2.1'")
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.1
assert c.field_specifier == 'NAXIS'
c = fits.Card.fromstring("DP1 = 'NAXIS: a'")
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1', 'NAXIS: 2')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: 2.0')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1', 'NAXIS: a')
assert c.keyword == 'DP1'
assert c.value == 'NAXIS: a'
assert c.field_specifier is None
c = fits.Card('DP1.NAXIS', 2)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.field_specifier == 'NAXIS'
with pytest.warns(fits.verify.VerifyWarning):
c = fits.Card('DP1.NAXIS', 'a')
assert c.keyword == 'DP1.NAXIS'
assert c.value == 'a'
assert c.field_specifier is None
def test_parse_field_specifier(self):
"""
Tests that the field_specifier can be accessed from a card read from a
string before any other attributes are accessed.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
assert c.keyword == 'DP1.NAXIS'
assert c.value == 2.0
assert c.comment == 'A comment'
def test_update_field_specifier(self):
"""
Test setting the field_specifier attribute and updating the card image
to reflect the new value.
"""
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.field_specifier == 'NAXIS'
c.field_specifier = 'NAXIS1'
assert c.field_specifier == 'NAXIS1'
assert c.keyword == 'DP1.NAXIS1'
assert c.value == 2.0
assert c.comment == 'A comment'
assert str(c).rstrip() == "DP1 = 'NAXIS1: 2' / A comment"
def test_field_specifier_case_sensitivity(self):
"""
The keyword portion of an RVKC should still be case-insensitive, but
the field-specifier portion should be case-sensitive.
"""
header = fits.Header()
header.set('abc.def', 1)
header.set('abc.DEF', 2)
assert header['abc.def'] == 1
assert header['ABC.def'] == 1
assert header['aBc.def'] == 1
assert header['ABC.DEF'] == 2
assert 'ABC.dEf' not in header
def test_get_rvkc_by_index(self):
"""
Returning an RVKC from a header via index lookup should return the
float value of the card.
"""
assert self._test_header[0] == 2.0
assert isinstance(self._test_header[0], float)
assert self._test_header[1] == 1.0
assert isinstance(self._test_header[1], float)
def test_get_rvkc_by_keyword(self):
"""
Returning an RVKC just via the keyword name should return the full value
string of the first card with that keyword.
This test was changed to reflect the requirement in ticket
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184--previously it required
_test_header['DP1'] to return the parsed float value.
"""
assert self._test_header['DP1'] == 'NAXIS: 2'
def test_get_rvkc_by_keyword_and_field_specifier(self):
"""
Returning an RVKC via the full keyword/field-specifier combination
should return the floating point value associated with the RVKC.
"""
assert self._test_header['DP1.NAXIS'] == 2.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
assert self._test_header['DP1.AUX.1.COEFF.1'] == 0.00048828125
def test_access_nonexistent_rvkc(self):
"""
Accessing a nonexistent RVKC should raise an IndexError for
index-based lookup, or a KeyError for keyword lookup (like a normal
card).
"""
pytest.raises(IndexError, lambda x: self._test_header[x], 8)
# Test exception with message
with pytest.raises(KeyError, match=r"Keyword 'DP1\.AXIS\.3' not found."):
self._test_header['DP1.AXIS.3']
def test_update_rvkc(self):
"""A RVKC can be updated either via index or keyword access."""
self._test_header[0] = 3
assert self._test_header['DP1.NAXIS'] == 3.0
assert isinstance(self._test_header['DP1.NAXIS'], float)
self._test_header['DP1.AXIS.1'] = 1.1
assert self._test_header['DP1.AXIS.1'] == 1.1
def test_update_rvkc_2(self):
"""Regression test for an issue that appeared after SVN r2412."""
h = fits.Header()
h['D2IM1.EXTVER'] = 1
assert h['D2IM1.EXTVER'] == 1.0
h['D2IM1.EXTVER'] = 2
assert h['D2IM1.EXTVER'] == 2.0
def test_raw_keyword_value(self):
c = fits.Card.fromstring("DP1 = 'NAXIS: 2' / A comment")
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2'
c = fits.Card('DP1.NAXIS', 2)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
c = fits.Card('DP1.NAXIS', 2.0)
assert c.rawkeyword == 'DP1'
assert c.rawvalue == 'NAXIS: 2.0'
def test_rvkc_insert_after(self):
"""
It should be possible to insert a new RVKC after an existing one
specified by the full keyword/field-specifier combination."""
self._test_header.set('DP1', 'AXIS.3: 1', 'a comment',
after='DP1.AXIS.2')
assert self._test_header[3] == 1
assert self._test_header['DP1.AXIS.3'] == 1
def test_rvkc_delete(self):
"""
Deleting an RVKC should work as with a normal card by using the full
keyword/field-specifier combination.
"""
del self._test_header['DP1.AXIS.1']
assert len(self._test_header) == 7
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.AXIS.2'
# Perform a subsequent delete to make sure all the index mappings were
# updated
del self._test_header['DP1.AXIS.2']
assert len(self._test_header) == 6
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_pattern_matching_keys(self):
"""Test the keyword filter strings with RVKCs."""
cl = self._test_header['DP1.AXIS.*']
assert isinstance(cl, fits.Header)
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
cl = self._test_header['DP1.N*']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'",
"DP1 = 'NAUX: 2'"])
cl = self._test_header['DP1.AUX...']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl = self._test_header['DP?.NAXIS']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'NAXIS: 2'"])
cl = self._test_header['DP1.A*S.*']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'"])
def test_pattern_matching_key_deletion(self):
"""Deletion by filter strings should work."""
del self._test_header['DP1.A*...']
assert len(self._test_header) == 2
assert list(self._test_header)[0] == 'DP1.NAXIS'
assert self._test_header[0] == 2
assert list(self._test_header)[1] == 'DP1.NAUX'
assert self._test_header[1] == 2
def test_successive_pattern_matching(self):
"""
A card list returned via a filter string should be further filterable.
"""
cl = self._test_header['DP1.A*...']
assert ([str(c).strip() for c in cl.cards] ==
["DP1 = 'AXIS.1: 1'",
"DP1 = 'AXIS.2: 2'",
"DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
cl2 = cl['*.*AUX...']
assert ([str(c).strip() for c in cl2.cards] ==
["DP1 = 'AUX.1.COEFF.0: 0'",
"DP1 = 'AUX.1.POWER.0: 1'",
"DP1 = 'AUX.1.COEFF.1: 0.00048828125'",
"DP1 = 'AUX.1.POWER.1: 1'"])
def test_rvkc_in_cardlist_keys(self):
"""
The CardList.keys() method should return full keyword/field-spec values
for RVKCs.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl) == ['DP1.AXIS.1', 'DP1.AXIS.2']
def test_rvkc_in_cardlist_values(self):
"""
The CardList.values() method should return the values of all RVKCs as
floating point values.
"""
cl = self._test_header['DP1.AXIS.*']
assert list(cl.values()) == [1.0, 2.0]
def test_rvkc_value_attribute(self):
"""
Individual card values should be accessible by the .value attribute
(which should return a float).
"""
cl = self._test_header['DP1.AXIS.*']
assert cl.cards[0].value == 1.0
assert isinstance(cl.cards[0].value, float)
def test_overly_permissive_parsing(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/183
Ensures that cards with standard commentary keywords are never treated
as RVKCs. Also ensures that cards not strictly matching the RVKC
pattern are not treated as such.
"""
h = fits.Header()
h['HISTORY'] = 'AXIS.1: 2'
h['HISTORY'] = 'AXIS.2: 2'
assert 'HISTORY.AXIS' not in h
assert 'HISTORY.AXIS.1' not in h
assert 'HISTORY.AXIS.2' not in h
assert h['HISTORY'] == ['AXIS.1: 2', 'AXIS.2: 2']
# This is an example straight out of the ticket where everything after
# the '2012' in the date value was being ignored, allowing the value to
# successfully be parsed as a "float"
h = fits.Header()
h['HISTORY'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'HISTORY.Date' not in h
assert str(h.cards[0]) == _pad('HISTORY Date: 2012-09-19T13:58:53.756061')
c = fits.Card.fromstring(
" 'Date: 2012-09-19T13:58:53.756061'")
assert c.keyword == ''
assert c.value == "'Date: 2012-09-19T13:58:53.756061'"
assert c.field_specifier is None
h = fits.Header()
h['FOO'] = 'Date: 2012-09-19T13:58:53.756061'
assert 'FOO.Date' not in h
assert (str(h.cards[0]) ==
_pad("FOO = 'Date: 2012-09-19T13:58:53.756061'"))
def test_overly_aggressive_rvkc_lookup(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/184
Ensures that looking up an RVKC by keyword only (without the
field-specifier) in a header returns the full string value of that card
without parsing it as an RVKC. Also ensures that a full field-specifier
is required to match an RVKC--a partial field-specifier that doesn't
explicitly match any record-valued keyword should result in a KeyError.
"""
c1 = fits.Card.fromstring("FOO = 'AXIS.1: 2'")
c2 = fits.Card.fromstring("FOO = 'AXIS.2: 4'")
h = fits.Header([c1, c2])
assert h['FOO'] == 'AXIS.1: 2'
assert h[('FOO', 1)] == 'AXIS.2: 4'
assert h['FOO.AXIS.1'] == 2.0
assert h['FOO.AXIS.2'] == 4.0
assert 'FOO.AXIS' not in h
assert 'FOO.AXIS.' not in h
assert 'FOO.' not in h
pytest.raises(KeyError, lambda: h['FOO.AXIS'])
pytest.raises(KeyError, lambda: h['FOO.AXIS.'])
pytest.raises(KeyError, lambda: h['FOO.'])
def test_fitsheader_script(self):
"""Tests the basic functionality of the `fitsheader` script."""
from astropy.io.fits.scripts import fitsheader
# Can an extension be specified by the EXTNAME keyword?
hf = fitsheader.HeaderFormatter(self.data('zerowidth.fits'))
output = hf.parse(extensions=['AIPS FQ'])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX" in output
# Can we limit the display to one specific keyword?
output = hf.parse(extensions=['AIPS FQ'], keywords=['EXTNAME'])
assert "EXTNAME = 'AIPS FQ" in output
assert "BITPIX =" not in output
assert len(output.split('\n')) == 3
# Can we limit the display to two specific keywords?
output = hf.parse(extensions=[1],
keywords=['EXTNAME', 'BITPIX'])
assert "EXTNAME =" in output
assert "BITPIX =" in output
assert len(output.split('\n')) == 4
# Can we use wildcards for keywords?
output = hf.parse(extensions=[1], keywords=['NAXIS*'])
assert "NAXIS =" in output
assert "NAXIS1 =" in output
assert "NAXIS2 =" in output
hf.close()
# Can an extension be specified by the EXTNAME+EXTVER keywords?
hf = fitsheader.HeaderFormatter(self.data('test0.fits'))
assert "EXTNAME = 'SCI" in hf.parse(extensions=['SCI,2'])
hf.close()
# Can we print the original header before decompression?
hf = fitsheader.HeaderFormatter(self.data('comp.fits'))
assert "XTENSION= 'IMAGE" in hf.parse(extensions=[1],
compressed=False)
assert "XTENSION= 'BINTABLE" in hf.parse(extensions=[1],
compressed=True)
hf.close()
def test_fitsheader_table_feature(self):
"""Tests the `--table` feature of the `fitsheader` script."""
from astropy.io import fits
from astropy.io.fits.scripts import fitsheader
test_filename = self.data('zerowidth.fits')
formatter = fitsheader.TableHeaderFormatter(test_filename)
with fits.open(test_filename) as fitsobj:
# Does the table contain the expected number of rows?
mytable = formatter.parse([0])
assert len(mytable) == len(fitsobj[0].header)
# Repeat the above test when multiple HDUs are requested
mytable = formatter.parse(extensions=['AIPS FQ', 2, "4"])
assert len(mytable) == (len(fitsobj['AIPS FQ'].header)
+ len(fitsobj[2].header)
+ len(fitsobj[4].header))
# Can we recover the filename and extension name from the table?
mytable = formatter.parse(extensions=['AIPS FQ'])
assert np.all(mytable['filename'] == test_filename)
assert np.all(mytable['hdu'] == 'AIPS FQ')
assert mytable['value'][mytable['keyword'] == "EXTNAME"] == "AIPS FQ"
# Can we specify a single extension/keyword?
mytable = formatter.parse(extensions=['AIPS FQ'],
keywords=['EXTNAME'])
assert len(mytable) == 1
assert mytable['hdu'][0] == "AIPS FQ"
assert mytable['keyword'][0] == "EXTNAME"
assert mytable['value'][0] == "AIPS FQ"
# Is an incorrect extension dealt with gracefully?
mytable = formatter.parse(extensions=['DOES_NOT_EXIST'])
assert mytable is None
# Is an incorrect keyword dealt with gracefully?
mytable = formatter.parse(extensions=['AIPS FQ'],
keywords=['DOES_NOT_EXIST'])
assert mytable is None
formatter.close()
@pytest.mark.parametrize('mode', ['wb', 'wb+', 'ab', 'ab+'])
def test_hdu_writeto_mode(self, mode):
with open(self.temp('mode.fits'), mode=mode) as ff:
hdu = fits.ImageHDU(data=np.ones(5))
hdu.writeto(ff)
def test_subclass():
"""Check that subclasses don't get ignored on slicing and copying."""
class MyHeader(fits.Header):
def append(self, card, *args, **kwargs):
if isinstance(card, tuple) and len(card) == 2:
# Just for our checks we add a comment if there is none.
card += ('no comment',)
return super().append(card, *args, **kwargs)
my_header = MyHeader((('a', 1., 'first'),
('b', 2., 'second'),
('c', 3.,)))
assert my_header.comments['a'] == 'first'
assert my_header.comments['b'] == 'second'
assert my_header.comments['c'] == 'no comment'
slice_ = my_header[1:]
assert type(slice_) is MyHeader
assert slice_.comments['b'] == 'second'
assert slice_.comments['c'] == 'no comment'
selection = my_header['c*']
assert type(selection) is MyHeader
assert selection.comments['c'] == 'no comment'
copy_ = my_header.copy()
assert type(copy_) is MyHeader
assert copy_.comments['b'] == 'second'
assert copy_.comments['c'] == 'no comment'
my_header.extend((('d', 4.),))
assert my_header.comments['d'] == 'no comment'
|
1f174301a1819e4356366cc5cd91f87d077dde497d3a12ae6b2617348651238e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import os
import functools
from contextlib import nullcontext
from io import BytesIO
import re
from textwrap import dedent
import pytest
import numpy as np
from numpy import ma
from astropy.table import Table, MaskedColumn
from astropy.io import ascii
from astropy.io.ascii.core import ParameterError, FastOptionsError, InconsistentTableError
from astropy.io.ascii.fastbasic import (
FastBasic, FastCsv, FastTab, FastCommentedHeader, FastRdb, FastNoHeader)
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyWarning
from .common import assert_equal, assert_almost_equal, assert_true
StringIO = lambda x: BytesIO(x.encode('ascii')) # noqa
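# Local helper shadowing io.StringIO: wraps a str as an ASCII BytesIO so the
# same test text can also be fed to the readers as a file-like byte stream.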
CI = os.environ.get('CI', False)
def assert_table_equal(t1, t2, check_meta=False, rtol=1.e-15, atol=1.e-300):
"""
Test equality of all columns in a table, with stricter tolerances for
float columns than the np.allclose default.
"""
assert_equal(len(t1), len(t2))
assert_equal(t1.colnames, t2.colnames)
if check_meta:
assert_equal(t1.meta, t2.meta)
for name in t1.colnames:
if len(t1) != 0:
assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)
if not isinstance(t1[name], MaskedColumn):
for i, el in enumerate(t1[name]):
try:
if not isinstance(el, str) and np.isnan(el):
assert_true(not isinstance(t2[name][i], str) and np.isnan(t2[name][i]))
elif isinstance(el, str):
assert_equal(el, t2[name][i])
else:
assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol)
except (TypeError, NotImplementedError):
pass # ignore for now
# Use this counter to create a unique filename for each file created in a test
# if this function is called more than once in a single test
_filename_counter = 0
def _read(tmpdir, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs):
# make sure we have a newline so table can't be misinterpreted as a filename
global _filename_counter
table += '\n'
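    # Read the same content through several equivalent paths and require
    # that they all agree: t1 reads a str, t2 a bytes stream, t3 a list of
    # lines, t4 goes through ascii.read with the given format, and t5
    # forces the pure-Python reader as a cross-check.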
reader = Reader(**kwargs)
t1 = reader.read(table)
t2 = reader.read(StringIO(table))
t3 = reader.read(table.splitlines())
t4 = ascii.read(table, format=format, guess=False, **kwargs)
t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs)
assert_table_equal(t1, t2, check_meta=check_meta)
assert_table_equal(t2, t3, check_meta=check_meta)
assert_table_equal(t3, t4, check_meta=check_meta)
assert_table_equal(t4, t5, check_meta=check_meta)
if parallel:
if CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
t6 = ascii.read(table, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t6, check_meta=check_meta)
filename = str(tmpdir.join(f'table{_filename_counter}.txt'))
_filename_counter += 1
with open(filename, 'wb') as f:
f.write(table.encode('ascii'))
f.flush()
t7 = ascii.read(filename, format=format, guess=False, **kwargs)
if parallel:
t8 = ascii.read(filename, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t7, check_meta=check_meta)
if parallel:
assert_table_equal(t1, t8, check_meta=check_meta)
return t1
@pytest.fixture(scope='function')
def read_basic(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastBasic, format='basic')
@pytest.fixture(scope='function')
def read_csv(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCsv, format='csv')
@pytest.fixture(scope='function')
def read_tab(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastTab, format='tab')
@pytest.fixture(scope='function')
def read_commented_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCommentedHeader,
format='commented_header')
@pytest.fixture(scope='function')
def read_rdb(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastRdb, format='rdb')
@pytest.fixture(scope='function')
def read_no_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastNoHeader,
format='no_header')
@pytest.mark.parametrize('delimiter', [',', '\t', ' ', 'csv'])
@pytest.mark.parametrize('quotechar', ['"', "'"])
@pytest.mark.parametrize('fast', [False, True])
def test_embedded_newlines(delimiter, quotechar, fast):
"""Test that embedded newlines are supported for io.ascii readers
and writers, both fast and Python readers."""
# Start with an assortment of values with different embedded newlines and whitespace
dat = [['\t a ', ' b \n cd ', '\n'],
[' 1\n ', '2 \n" \t 3\n4\n5', "1\n '2\n"],
[' x,y \nz\t', '\t 12\n\t34\t ', '56\t\n'],
]
dat = Table(dat, names=('a', 'b', 'c'))
# Construct a table which is our expected result of writing the table and
# reading it back. Certain stripping of whitespace is expected.
exp = {} # expected output from reading
for col in dat.itercols():
vals = []
for val in col:
# Readers and writers both strip whitespace from ends of values
val = val.strip(' \t')
if not fast:
# Pure Python reader has a "feature" where it strips trailing
# whitespace from each input line. This means a value like
# " x \ny \t\n" gets read as "x\ny".
bits = val.splitlines(keepends=True)
bits_out = []
for bit in bits:
bit = re.sub(r'[ \t]+(\n?)$', r'\1', bit.strip(' \t'))
bits_out.append(bit)
val = ''.join(bits_out)
vals.append(val)
exp[col.info.name] = vals
exp = Table(exp)
if delimiter == 'csv':
format = 'csv'
delimiter = ','
else:
format = 'basic'
# Write the table to `text`
fh = io.StringIO()
ascii.write(dat, fh, format=format, delimiter=delimiter,
quotechar=quotechar, fast_writer=fast)
text = fh.getvalue()
# Read it back and compare to the expected
dat_out = ascii.read(text, format=format, guess=False, delimiter=delimiter,
quotechar=quotechar, fast_reader=fast)
eq = dat_out.values_equal(exp)
assert all(np.all(col) for col in eq.itercols())
@pytest.mark.parametrize("parallel", [True, False])
def test_simple_data(parallel, read_basic):
"""
Make sure the fast reader works with basic input data.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
def test_read_types():
"""
Make sure that the read() function takes filenames,
strings, and lists of strings in addition to file-like objects.
"""
t1 = ascii.read("a b c\n1 2 3\n4 5 6", format='fast_basic', guess=False)
# TODO: also read from file
t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format='fast_basic', guess=False)
t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format='fast_basic', guess=False)
assert_table_equal(t1, t2)
assert_table_equal(t2, t3)
@pytest.mark.parametrize("parallel", [True, False])
def test_supplied_names(parallel, read_basic):
"""
If passed as a parameter, names should replace any
column names found in the header.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header(parallel, read_basic, read_no_header):
"""
The header should not be read when header_start=None. Unless names is
passed, the column names should be auto-generated.
"""
# Cannot set header_start=None for basic format
with pytest.raises(ValueError):
read_basic("A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel)
t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']],
names=('col1', 'col2', 'col3'))
assert_table_equal(t2, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header_supplied_names(parallel, read_basic, read_no_header):
"""
If header_start=None and names is passed as a parameter, header
data should not be read and names should be used instead.
"""
table = read_no_header("A B C\n1 2 3\n4 5 6",
names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_comment(parallel, read_basic):
"""
Make sure that line comments are ignored by the C reader.
"""
table = read_basic("# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_lines(parallel, read_basic):
"""
Make sure that empty lines are ignored by the C reader.
"""
table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_lstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the beginning of fields.
"""
text = """
1, 2, \t3
A,\t\t B, C
a, b, c
""" + ' \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_rstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the end of fields.
"""
text = ' 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_conversion(parallel, read_basic):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = read_basic(text, parallel=parallel)
assert_equal(table['A'].dtype.kind, 'f')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'i')
assert_equal(table['D'].dtype.kind, 'f')
assert table['E'].dtype.kind in ('S', 'U')
assert table['F'].dtype.kind in ('S', 'U')
assert table['G'].dtype.kind in ('S', 'U')
assert table['H'].dtype.kind in ('S', 'U')
@pytest.mark.parametrize("parallel", [True, False])
def test_delimiter(parallel, read_basic):
"""
Make sure that different delimiters work as expected.
"""
text = dedent("""
COL1 COL2 COL3
1 A -1
2 B -2
""")
expected = Table([[1, 2], ['A', 'B'], [-1, -2]], names=('COL1', 'COL2', 'COL3'))
for sep in ' ,\t#;':
table = read_basic(text.replace(' ', sep), delimiter=sep, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_names(parallel, read_basic):
"""
If include_names is not None, the parser should read only those columns in include_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", include_names=['A', 'D'], parallel=parallel)
expected = Table([[1, 5], [4, 8]], names=('A', 'D'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_exclude_names(parallel, read_basic):
"""
If exclude_names is not None, the parser should exclude the columns in exclude_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", exclude_names=['A', 'D'], parallel=parallel)
expected = Table([[2, 6], [3, 7]], names=('B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_exclude_names(parallel, read_basic):
"""
Make sure that include_names is applied before exclude_names if both are specified.
"""
text = dedent("""
A B C D E F G H
1 2 3 4 5 6 7 8
9 10 11 12 13 14 15 16
""")
table = read_basic(text, include_names=['A', 'B', 'D', 'F', 'H'],
exclude_names=['B', 'F'], parallel=parallel)
expected = Table([[1, 9], [4, 12], [8, 16]], names=('A', 'D', 'H'))
assert_table_equal(table, expected)
def test_doubled_quotes(read_csv):
"""
Test #8283 (fix for #8281), parsing doubled-quotes "ab""cd" in a quoted
field was incorrect.
"""
tbl = '\n'.join(['a,b',
'"d""","d""q"',
'"""q",""""'])
expected = Table([['d"', '"q'],
['d"q', '"']],
names=('a', 'b'))
dat = read_csv(tbl)
assert_table_equal(dat, expected)
# In addition to the local read_csv wrapper, check that default
# parsing with guessing gives the right answer.
for fast_reader in True, False:
dat = ascii.read(tbl, fast_reader=fast_reader)
assert_table_equal(dat, expected)
@pytest.mark.filterwarnings("ignore:OverflowError converting to IntType in column TIMESTAMP")
def test_doubled_quotes_segv():
"""
Test the exact example from #8281 which resulted in SEGV prior to #8283
(in contrast to the tests above that just gave the wrong answer).
Attempts to produce a more minimal example were unsuccessful, so the whole
thing is included.
"""
tbl = dedent("""
"ID","TIMESTAMP","addendum_id","bib_reference","bib_reference_url","client_application","client_category","client_sort_key","color","coordsys","creator","creator_did","data_pixel_bitpix","dataproduct_subtype","dataproduct_type","em_max","em_min","format","hips_builder","hips_copyright","hips_creation_date","hips_creation_date_1","hips_creator","hips_data_range","hips_estsize","hips_frame","hips_glu_tag","hips_hierarchy","hips_initial_dec","hips_initial_fov","hips_initial_ra","hips_lon_asc","hips_master_url","hips_order","hips_order_1","hips_order_4","hips_order_min","hips_overlay","hips_pixel_bitpix","hips_pixel_cut","hips_pixel_scale","hips_progenitor_url","hips_publisher","hips_release_date","hips_release_date_1","hips_rgb_blue","hips_rgb_green","hips_rgb_red","hips_sampling","hips_service_url","hips_service_url_1","hips_service_url_2","hips_service_url_3","hips_service_url_4","hips_service_url_5","hips_service_url_6","hips_service_url_7","hips_service_url_8","hips_skyval","hips_skyval_method","hips_skyval_value","hips_status","hips_status_1","hips_status_2","hips_status_3","hips_status_4","hips_status_5","hips_status_6","hips_status_7","hips_status_8","hips_tile_format","hips_tile_format_1","hips_tile_format_4","hips_tile_width","hips_version","hipsgen_date","hipsgen_date_1","hipsgen_date_10","hipsgen_date_11","hipsgen_date_12","hipsgen_date_2","hipsgen_date_3","hipsgen_date_4","hipsgen_date_5","hipsgen_date_6","hipsgen_date_7","hipsgen_date_8","hipsgen_date_9","hipsgen_params","hipsgen_params_1","hipsgen_params_10","hipsgen_params_11","hipsgen_params_12","hipsgen_params_2","hipsgen_params_3","hipsgen_params_4","hipsgen_params_5","hipsgen_params_6","hipsgen_params_7","hipsgen_params_8","hipsgen_params_9","label","maxOrder","moc_access_url","moc_order","moc_release_date","moc_sky_fraction","obs_ack","obs_collection","obs_copyrigh_url","obs_copyright","obs_copyright_1","obs_copyright_url","obs_copyright_url_1","obs_description","obs_description_url","obs_descrition_url","obs_id","obs_initial_dec","obs_initial_fov","obs_initial_ra","obs_provenance","obs_regime","obs_title","ohips_frame","pixelCut","pixelRange","prov_did","prov_progenitor","prov_progenitor_url","publisher_did","publisher_id","s_pixel_scale","t_max","t_min"
"CDS/P/2MASS/H","1524123841000","","2006AJ....131.1163S","http://cdsbib.u-strasbg.fr/cgi-bin/cdsbib?2006AJ....131.1163S","AladinDesktop","Image/Infrared/2MASS","04-001-03","","","","ivo://CDS/P/2MASS/H","","","image","1.798E-6","1.525E-6","","Aladin/HipsGen v9.017","CNRS/Unistra","2013-05-06T20:36Z","","CDS (A.Oberto)","","","equatorial","","mean","","","","","","9","","","","","","0 60","2.236E-4","","","2016-04-22T13:48Z","","","","","","http://alasky.u-strasbg.fr/2MASS/H","https://irsa.ipac.caltech.edu/data/hips/CDS/2MASS/H","http://alaskybis.u-strasbg.fr/2MASS/H","https://alaskybis.u-strasbg.fr/2MASS/H","","","","","","","","","public master clonableOnce","public mirror unclonable","public mirror clonableOnce","public mirror clonableOnce","","","","","","jpeg fits","","","512","1.31","","","","","","","","","","","","","","","","","","","","","","","","","","","","","http://alasky.u-strasbg.fr/2MASS/H/Moc.fits","9","","1","University of Massachusetts & IPAC/Caltech","The Two Micron All Sky Survey - H band (2MASS H)","","University of Massachusetts & IPAC/Caltech","","http://www.ipac.caltech.edu/2mass/","","2MASS has uniformly scanned the entire sky in three near-infrared bands to detect and characterize point sources brighter than about 1 mJy in each band, with signal-to-noise ratio (SNR) greater than 10, using a pixel size of 2.0"". This has achieved an 80,000-fold improvement in sensitivity relative to earlier surveys. 2MASS used two highly-automated 1.3-m telescopes, one at Mt. Hopkins, AZ, and one at CTIO, Chile. Each telescope was equipped with a three-channel camera, each channel consisting of a 256x256 array of HgCdTe detectors, capable of observing the sky simultaneously at J (1.25 microns), H (1.65 microns), and Ks (2.17 microns). The University of Massachusetts (UMass) was responsible for the overall management of the project, and for developing the infrared cameras and on-site computing systems at both facilities. The Infrared Processing and Analysis Center (IPAC) is responsible for all data processing through the Production Pipeline, and construction and distribution of the data products. Funding is provided primarily by NASA and the NSF","","","","+0","0.11451621372724685","0","","Infrared","2MASS H (1.66um)","","","","","IPAC/NASA","","","","","51941","50600"
""") # noqa
ascii.read(tbl, format='csv', fast_reader=True, guess=False)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_fields(parallel, read_basic):
"""
The character quotechar (default '"') should denote the start of a field which can
contain the field delimiter and newlines.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = dedent("""
"A B" C D
1.5 2.1 -37.1
a b " c
d"
""")
table = read_basic(text, parallel=parallel)
expected = Table([['1.5', 'a'], ['2.1', 'b'], ['-37.1', 'c\nd']], names=('A B', 'C', 'D'))
assert_table_equal(table, expected)
table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("key,val", [
('delimiter', ',,'), # multi-char delimiter
('comment', '##'), # multi-char comment
('data_start', None), # data_start=None
('data_start', -1), # data_start negative
('quotechar', '##'), # multi-char quote signifier
('header_start', -1), # negative header_start
('converters', {i + 1: ascii.convert_numpy(np.uint) for i in range(3)}), # passing converters
('Inputter', ascii.ContinuationLinesInputter), # passing Inputter
('header_Splitter', ascii.DefaultSplitter), # passing Splitter
('data_Splitter', ascii.DefaultSplitter)])
def test_invalid_parameters(key, val):
"""
Make sure the C reader raises an error if passed parameters it can't handle.
"""
with pytest.raises(ParameterError):
FastBasic(**{key: val}).read('1 2 3\n4 5 6')
with pytest.raises(ParameterError):
ascii.read('1 2 3\n4 5 6',
format='fast_basic', guess=False, **{key: val})
def test_invalid_parameters_other():
with pytest.raises(TypeError):
FastBasic(foo=7).read('1 2 3\n4 5 6') # unexpected argument
with pytest.raises(FastOptionsError): # don't fall back on the slow reader
ascii.read('1 2 3\n4 5 6', format='basic', fast_reader={'foo': 7})
with pytest.raises(ParameterError):
# Outputter cannot be specified in constructor
FastBasic(Outputter=ascii.TableOutputter).read('1 2 3\n4 5 6')
def test_too_many_cols1():
"""
If a row contains too many columns, the C reader should raise an error.
"""
text = dedent("""
A B C
1 2 3
4 5 6
7 8 9 10
11 12 13
""")
with pytest.raises(InconsistentTableError) as e:
FastBasic().read(text)
assert 'Number of header columns (3) ' \
'inconsistent with data columns in data line 2' in str(e.value)
def test_too_many_cols2():
text = """\
aaa,bbb
1,2,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert 'Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e.value)
def test_too_many_cols3():
text = """\
aaa,bbb
1,2,,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
FastCsv().read(text)
assert 'Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e.value)
def test_too_many_cols4():
# https://github.com/astropy/astropy/issues/9922
with pytest.raises(InconsistentTableError) as e:
ascii.read(get_pkg_data_filename('data/conf_py.txt'),
fast_reader=True, guess=True)
assert 'Unable to guess table format with the guesses listed below' in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_not_enough_cols(parallel, read_csv):
"""
If a row does not have enough columns, the FastCsv reader should add empty
fields while the FastBasic reader should raise an error.
"""
text = """
A,B,C
1,2,3
4,5
6,7,8
"""
table = read_csv(text, parallel=parallel)
assert table['B'][1] is not ma.masked
assert table['C'][1] is ma.masked
with pytest.raises(InconsistentTableError):
table = FastBasic(delimiter=',').read(text)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_end(parallel, read_basic, read_rdb):
"""
The parameter data_end should specify where data reading ends.
"""
text = """
A B C
1 2 3
4 5 6
7 8 9
10 11 12
"""
table = read_basic(text, data_end=3, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# data_end supports negative indexing
table = read_basic(text, data_end=-2, parallel=parallel)
assert_table_equal(table, expected)
text = """
A\tB\tC
N\tN\tS
1\t2\ta
3\t4\tb
5\t6\tc
"""
# make sure data_end works with RDB
table = read_rdb(text, data_end=-1, parallel=parallel)
expected = Table([[1, 3], [2, 4], ['a', 'b']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# positive index
table = read_rdb(text, data_end=3, parallel=parallel)
expected = Table([[1], [2], ['a']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# empty table if data_end is too small
table = read_rdb(text, data_end=1, parallel=parallel)
expected = Table([[], [], []], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_inf_nan(parallel, read_basic):
"""
Test that inf and nan-like values are correctly parsed on all platforms.
Regression test for https://github.com/astropy/astropy/pull/3525
"""
text = dedent("""\
A
nan
+nan
-nan
inf
infinity
+inf
+infinity
-inf
-infinity
""")
expected = Table({'A': [np.nan, np.nan, np.nan,
np.inf, np.inf, np.inf, np.inf,
-np.inf, -np.inf]})
table = read_basic(text, parallel=parallel)
assert table['A'].dtype.kind == 'f'
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_values(parallel, read_basic):
"""
Make sure that the parameter fill_values works as intended. If fill_values
is not specified, the default behavior should be to convert '' to 0.
"""
text = """
A, B, C
, 2, nan
a, -999, -3.4
nan, 5, -9999
8, nan, 7.6e12
"""
table = read_basic(text, delimiter=',', parallel=parallel)
# The empty value in row A should become a masked '0'
assert isinstance(table['A'], MaskedColumn)
assert table['A'][0] is ma.masked
# '0' rather than 0 because there is a string in the column
assert_equal(table['A'].data.data[0], '0')
assert table['A'][1] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=('-999', '0'), parallel=parallel)
assert isinstance(table['B'], MaskedColumn)
assert table['A'][0] is not ma.masked # empty value unaffected
assert table['C'][2] is not ma.masked # -9999 is not an exact match
assert table['B'][1] is ma.masked
# Numeric because the rest of the column contains numeric data
assert_equal(table['B'].data.data[1], 0.0)
assert table['B'][0] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=[], parallel=parallel)
# None of the columns should be masked
for name in 'ABC':
assert not isinstance(table[name], MaskedColumn)
table = read_basic(text, delimiter=',',
fill_values=[('', '0', 'A'),
('nan', '999', 'A', 'C')], parallel=parallel)
assert np.isnan(table['B'][3]) # nan filling skips column B
assert table['B'][3] is not ma.masked # should skip masking as well as replacing nan
assert table['A'][0] is ma.masked
assert table['A'][2] is ma.masked
assert_equal(table['A'].data.data[0], '0')
assert_equal(table['A'].data.data[2], '999')
assert table['C'][0] is ma.masked
assert_almost_equal(table['C'].data.data[0], 999.0)
assert_almost_equal(table['C'][1], -3.4) # column is still of type float
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_include_exclude_names(parallel, read_csv):
"""
fill_include_names and fill_exclude_names should filter missing/empty value handling
in the same way that include_names and exclude_names filter output columns.
"""
text = """
A, B, C
, 1, 2
3, , 4
5, 5,
"""
table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is ma.masked
assert table['C'][2] is not ma.masked # C not in fill_include_names
table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel)
assert table['C'][2] is ma.masked
assert table['A'][0] is not ma.masked
assert table['B'][1] is not ma.masked # A and B excluded from fill handling
table = read_csv(text, fill_include_names=['A', 'B'],
fill_exclude_names=['B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is not ma.masked # fill_exclude_names applies after fill_include_names
assert table['C'][2] is not ma.masked
@pytest.mark.parametrize("parallel", [True, False])
def test_many_rows(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of rows
is large (so that each column string is longer than INITIAL_COL_SIZE).
"""
text = 'A B C\n'
for i in range(500): # create 500 rows
text += ' '.join([str(i) for i in range(3)])
text += '\n'
table = read_basic(text, parallel=parallel)
expected = Table([[0] * 500, [1] * 500, [2] * 500], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_many_columns(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of columns
is large (so that each header string is longer than INITIAL_HEADER_SIZE).
"""
# create a string with 500 columns and two data rows
text = ' '.join([str(i) for i in range(500)])
text += ('\n' + text + '\n' + text)
table = read_basic(text, parallel=parallel)
expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])
assert_table_equal(table, expected)
def test_fast_reader():
"""
Make sure that ascii.read() works as expected by default and with
fast_reader specified.
"""
text = 'a b c\n1 2 3\n4 5 6'
with pytest.raises(ParameterError): # C reader can't handle regex comment
ascii.read(text, format='fast_basic', guess=False, comment='##')
# Enable multiprocessing and the fast converter
try:
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': True, 'use_fast_converter': True})
except NotImplementedError:
# Might get this on Windows, try without parallel...
if os.name == 'nt':
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': False,
'use_fast_converter': True})
else:
raise
# Should raise an error if fast_reader has an invalid key
with pytest.raises(FastOptionsError):
ascii.read(text, format='fast_basic', guess=False, fast_reader={'foo': True})
# Use the slow reader instead
ascii.read(text, format='basic', guess=False, comment='##', fast_reader=False)
# Will try the slow reader afterwards by default
ascii.read(text, format='basic', guess=False, comment='##')
@pytest.mark.parametrize("parallel", [True, False])
def test_read_tab(parallel, read_tab):
"""
The fast reader for tab-separated values should not strip whitespace, unlike
the basic reader.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t '
table = read_tab(text, parallel=parallel)
assert_equal(table['1'][0], ' a') # preserve line whitespace
assert_equal(table['2'][0], ' b ') # preserve field whitespace
assert table['3'][0] is ma.masked # empty value should be masked
assert_equal(table['2'][1], ' d\n e') # preserve whitespace in quoted fields
assert_equal(table['3'][1], ' ') # preserve end-of-line whitespace
@pytest.mark.parametrize("parallel", [True, False])
def test_default_data_start(parallel, read_basic):
"""
If data_start is not explicitly passed to read(), data processing should
begin right after the header.
"""
text = 'ignore this line\na b c\n1 2 3\n4 5 6'
table = read_basic(text, header_start=1, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_commented_header(parallel, read_commented_header):
"""
The FastCommentedHeader reader should mimic the behavior of the
CommentedHeader by overriding the default header behavior of FastBasic.
"""
text = """
# A B C
1 2 3
4 5 6
"""
t1 = read_commented_header(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(t1, expected)
text = '# first commented line\n # second commented line\n\n' + text
t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel)
assert_table_equal(t2, expected)
t3 = read_commented_header(text, header_start=-1, data_start=0,
parallel=parallel) # negative indexing allowed
assert_table_equal(t3, expected)
text += '7 8 9'
t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel)
expected = Table([[7], [8], [9]], names=('A', 'B', 'C'))
assert_table_equal(t4, expected)
with pytest.raises(ParameterError):
read_commented_header(text, header_start=-1, data_start=-1,
parallel=parallel) # data_start cannot be negative
@pytest.mark.parametrize("parallel", [True, False])
def test_rdb(parallel, read_rdb):
"""
Make sure the FastRdb reader works as expected.
"""
text = """
A\tB\tC
1n\tS\t4N
1\t 9\t4.3
"""
table = read_rdb(text, parallel=parallel)
expected = Table([[1], [' 9'], [4.3]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
assert_equal(table['A'].dtype.kind, 'i')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'f')
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tS\tN\n4\tb\ta' # C column contains non-numeric data
read_rdb(text, parallel=parallel)
assert 'Column C failed to convert' in str(e.value)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\n1\t2\t3' # not enough types specified
read_rdb(text, parallel=parallel)
assert 'mismatch between number of column names and column types' in str(e.value)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\t5\n1\t2\t3' # invalid type for column C
read_rdb(text, parallel=parallel)
assert 'type definitions do not all match [num](N|S)' in str(e.value)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_start(parallel, read_basic):
"""
Make sure that data parsing begins at data_start (ignoring empty and
commented lines but not taking quoted values into account).
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = """
A B C
1 2 3
4 5 6
7 8 "9
1"
# comment
10 11 12
"""
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], ["6", "9\n1", "12"]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
table = read_basic(text, data_start=3, parallel=parallel)
# ignore empty line
expected = Table([[7, 10], [8, 11], ["9\n1", "12"]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
with pytest.raises(InconsistentTableError) as e:
# tries to begin in the middle of quoted field
read_basic(text, data_start=4, parallel=parallel)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e.value)
table = read_basic(text, data_start=5, parallel=parallel)
# ignore commented line
expected = Table([[10], [11], [12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
text = """
A B C
1 2 3
4 5 6
7 8 9
# comment
10 11 12
"""
# make sure reading works as expected in parallel
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_empty_values(parallel, read_basic):
"""
Quoted empty values spanning multiple lines should be treated correctly.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = 'a b c\n1 2 " \n "'
table = read_basic(text, parallel=parallel)
assert table['c'][0] == '\n' # surrounding spaces stripped; newline kept
@pytest.mark.parametrize("parallel", [True, False])
def test_csv_comment_default(parallel, read_csv):
"""
Unless the comment parameter is specified, the CSV reader should
not treat any lines as comments.
"""
text = 'a,b,c\n#1,2,3\n4,5,6'
table = read_csv(text, parallel=parallel)
expected = Table([['#1', '4'], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_whitespace_before_comment(parallel, read_tab):
"""
Readers that don't strip whitespace from data (Tab, RDB)
should still treat lines with leading whitespace and then
the comment char as comment lines.
"""
text = 'a\tb\tc\n # comment line\n1\t2\t3'
table = read_tab(text, parallel=parallel)
expected = Table([[1], [2], [3]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_strip_line_trailing_whitespace(parallel, read_basic):
"""
Readers that strip whitespace from lines should ignore
trailing whitespace after the last data value of each
row.
"""
text = 'a b c\n1 2 \n3 4 5'
with pytest.raises(InconsistentTableError) as e:
ascii.read(StringIO(text), format='fast_basic', guess=False)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e.value)
text = 'a b c\n 1 2 3 \t \n 4 5 6 '
table = read_basic(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_data(parallel, read_basic):
"""
As long as column names are supplied, the C reader
should return an empty table in the absence of data.
"""
table = read_basic('a b c', parallel=parallel)
expected = Table([[], [], []], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
table = read_basic('a b c\n1 2 3', data_start=2, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_line_endings(parallel, read_basic, read_commented_header, read_rdb):
"""
Make sure the fast reader accepts CR and CR+LF
as newlines.
"""
text = 'a b c\n1 2 3\n4 5 6\n7 8 9\n'
expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'))
for newline in ('\r\n', '\r'):
table = read_basic(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
# Make sure the splitlines() method of FileString
# works with CR/CR+LF line endings
text = '#' + text
for newline in ('\r\n', '\r'):
table = read_commented_header(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
expected = Table([MaskedColumn([1, 4, 7]), [2, 5, 8], MaskedColumn([3, 6, 9])],
names=('a', 'b', 'c'))
expected['a'][0] = np.ma.masked
expected['c'][0] = np.ma.masked
text = 'a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n'
for newline in ('\r\n', '\r'):
table = read_rdb(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
assert np.all(table == expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_store_comments(parallel, read_basic):
"""
Make sure that the output Table produced by the fast
reader stores any comment lines in its meta attribute.
"""
text = """
# header comment
a b c
# comment 2
# comment 3
1 2 3
4 5 6
"""
table = read_basic(text, parallel=parallel, check_meta=True)
assert_equal(table.meta['comments'],
['header comment', 'comment 2', 'comment 3'])
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_quotes(parallel, read_basic):
"""
Make sure the C reader doesn't segfault when the
input data contains empty quotes. [#3407]
"""
table = read_basic('a b\n1 ""\n2 ""', parallel=parallel)
expected = Table([[1, 2], [0, 0]], names=('a', 'b'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fast_tab_with_names(parallel, read_tab):
"""
Make sure the C reader doesn't segfault when the header for the
first column is missing [#3545]
"""
content = """#
\tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot
-3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t""" # noqa
head = [f'A{i}' for i in range(28)]
read_tab(content, data_start=1, parallel=parallel, names=head)
@pytest.mark.hugemem
def test_read_big_table(tmpdir):
"""Test reading of a huge file.
This test generates a huge CSV file (~2.3Gb) before reading it (see
https://github.com/astropy/astropy/pull/5319). The test is run only if the
``--run-hugemem`` cli option is given. Note that running the test requires
quite a lot of memory (~18Gb when reading the file) !!
"""
NB_ROWS = 250000
NB_COLS = 500
filename = str(tmpdir.join("big_table.csv"))
print(f"Creating a {NB_ROWS} rows table ({NB_COLS} columns).")
data = np.random.random(NB_ROWS)
t = Table(data=[data] * NB_COLS, names=[str(i) for i in range(NB_COLS)])
data = None
print(f"Saving the table to {filename}")
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print("Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header).")
with open(filename) as f:
assert sum(1 for line in f) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
@pytest.mark.hugemem
def test_read_big_table2(tmpdir):
"""Test reading of a file with a huge column.
"""
# (2**32 // 2) : max value for a signed 32-bit int
# // 10 : each row holds a 10-character value (1e9)
# + 5 : add a few lines so the length cannot be stored by an int
NB_ROWS = 2**32 // 2 // 10 + 5
filename = str(tmpdir.join("big_table.csv"))
print(f"Creating a {NB_ROWS} rows table.")
data = np.full(NB_ROWS, int(1e9), dtype=np.int32)
t = Table(data=[data], names=['a'], copy=False)
print(f"Saving the table to {filename}")
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print("Counting the number of lines in the csv, it should be {NB_ROWS} + 1 (header).")
with open(filename) as f:
assert sum(1 for line in f) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
# Test these both with guessing turned on and off
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize('fast_reader', [False, dict(use_fast_converter=False),
dict(use_fast_converter=True)])
@pytest.mark.parametrize("parallel", [False, True])
def test_data_out_of_range(parallel, fast_reader, guess):
"""
Numbers with exponents beyond float64 range (|~4.94e-324 to 1.7977e+308|)
shall be returned as 0 and +-inf respectively by the C parser, just like
the Python parser.
Test fast converter only to nominal accuracy.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader['parallel'] = parallel
if fast_reader.get('use_fast_converter'):
rtol = 1.e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
test_for_warnings = fast_reader and not parallel
if not parallel and not fast_reader:
ctx = nullcontext()
else:
ctx = pytest.warns()
fields = ['10.1E+199', '3.14e+313', '2048e+306', '0.6E-325', '-2.e345']
values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf])
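# Mapping checked below: 10.1E+199 -> 1.01e200 (in range); 3.14e+313 and
# 2048e+306 overflow to +inf; 0.6E-325 underflows to 0.0; -2.e345 -> -inf.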
# NOTE: Warning behavior varies for the parameters being passed in.
with ctx as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings: # Assert precision warnings for cols 2-5
assert len(w) == 4
for i in range(len(w)):
assert (f"OverflowError converting to FloatType in column col{i+2}"
in str(w[i].message))
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test some additional corner cases
fields = ['.0101E202', '0.000000314E+314', '1777E+305', '-1799E+305',
'0.2e-323', '5200e-327', ' 0.0000000000000000000001024E+330']
values = np.array([1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308])
with ctx as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings: # Assert precision warnings for cols 4-6
assert len(w) == 3
for i in range(len(w)):
assert (f"OverflowError converting to FloatType in column col{i+4}"
in str(w[i].message))
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test corner cases again with non-standard exponent_style (auto-detection)
if fast_reader and fast_reader.get('use_fast_converter'):
fast_reader.update({'exponent_style': 'A'})
else:
pytest.skip("Fortran exponent style only available in fast converter")
fields = ['.0101D202', '0.000000314d+314', '1777+305', '-1799E+305',
'0.2e-323', '2500-327', ' 0.0000000000000000000001024Q+330']
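# Hedged reading of these inputs: 'D'/'d' and 'Q' act as Fortran-style
# exponent markers, and forms like '1777+305' omit the marker entirely;
# exponent_style='A' is expected to auto-detect all of them.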
with ctx as w:
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
if test_for_warnings:
assert len(w) == 3
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
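
# Hedged, illustrative sketch (not collected by pytest, since the name does
# not start with 'test_'): Fortran-style exponents such as '1.0D4' require
# the fast converter plus an explicit 'exponent_style'; plain strtod would
# hand the token back as a string.
def _example_fortran_exponent_read():
    t = ascii.read(StringIO('a\n1.0D4'), format='basic',
                   fast_reader={'use_fast_converter': True,
                                'exponent_style': 'D'})
    assert t['a'][0] == 1.0e4
    return t
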
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize('fast_reader', [False, dict(use_fast_converter=False),
dict(use_fast_converter=True)])
@pytest.mark.parametrize("parallel", [False, True])
def test_data_at_range_limit(parallel, fast_reader, guess):
"""
Test parsing of fixed-format float64 numbers near range limits
(|~4.94e-324 to 1.7977e+308|) - within limit for full precision
(|~2.5e-307| for strtod C parser, factor 10 better for fast_converter)
    exact numbers shall be returned; beyond that, an Overflow warning is raised.
Input of exactly 0.0 must not raise an OverflowError.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.e-30
# Update fast_reader dict; adapt relative precision for fast_converter
if fast_reader:
fast_reader['parallel'] = parallel
if fast_reader.get('use_fast_converter'):
rtol = 1.e-15
elif np.iinfo(np.int_).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
# Test very long fixed-format strings (to strtod range limit w/o Overflow)
for D in 99, 202, 305:
t = ascii.read(StringIO(99 * '0' + '.' + D * '0' + '1'), format='no_header',
guess=guess, fast_reader=fast_reader)
assert_almost_equal(t['col1'][0], 10.**-(D + 1), rtol=rtol, atol=1.e-324)
for D in 99, 202, 308:
t = ascii.read(StringIO('1' + D * '0' + '.0'), format='no_header',
guess=guess, fast_reader=fast_reader)
assert_almost_equal(t['col1'][0], 10.**D, rtol=rtol, atol=1.e-324)
# 0.0 is always exact (no Overflow warning)!
for s in '0.0', '0.0e+0', 399 * '0' + '.' + 365 * '0':
t = ascii.read(StringIO(s), format='no_header',
guess=guess, fast_reader=fast_reader)
assert t['col1'][0] == 0.0
# Test OverflowError at precision limit with laxer rtol
if parallel:
pytest.skip("Catching warnings broken in parallel mode")
elif not fast_reader:
pytest.skip("Python/numpy reader does not raise on Overflow")
with pytest.warns() as warning_lines:
t = ascii.read(StringIO('0.' + 314 * '0' + '1'), format='no_header',
guess=guess, fast_reader=fast_reader)
n_warns = len(warning_lines)
assert n_warns in (0, 1), f'Expected 0 or 1 warning, found {n_warns}'
if n_warns == 1:
assert 'OverflowError converting to FloatType in column col1, possibly resulting in degraded precision' in str(warning_lines[0].message) # noqa
assert_almost_equal(t['col1'][0], 1.e-315, rtol=1.e-10, atol=1.e-324)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_int_out_of_range(parallel, guess):
"""
Integer numbers outside int range shall be returned as string columns
consistent with the standard (Python) parser (no 'upcasting' to float).
"""
imin = np.iinfo(int).min + 1
imax = np.iinfo(int).max - 1
huge = f'{imax+2:d}'
text = f'P M S\n {imax:d} {imin:d} {huge:s}'
expected = Table([[imax], [imin], [huge]], names=('P', 'M', 'S'))
# NOTE: Warning behavior varies for the parameters being passed in.
with pytest.warns() as w:
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
if not parallel:
assert len(w) == 1
assert ("OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message))
assert_table_equal(table, expected)
# Check with leading zeroes to make sure strtol does not read them as octal
text = f'P M S\n000{imax:d} -0{-imin:d} 00{huge:s}'
expected = Table([[imax], [imin], ['00' + huge]], names=('P', 'M', 'S'))
with pytest.warns() as w:
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
if not parallel:
assert len(w) == 1
assert ("OverflowError converting to IntType in column S, reverting to String"
in str(w[0].message))
assert_table_equal(table, expected)
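
# Hedged sketch (not collected by pytest): an integer just beyond the
# platform int range is kept as a string column rather than upcast to float,
# and the reader warns about the overflow.
def _example_int_overflow_to_string():
    huge = str(np.iinfo(int).max + 1)
    with pytest.warns():
        t = ascii.read(f'S\n{huge}', format='basic', fast_reader=True)
    assert t['S'].dtype.kind in ('S', 'U')
    return t
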
@pytest.mark.parametrize("guess", [True, False])
def test_int_out_of_order(guess):
"""
Mixed columns should be returned as float, but if the out-of-range integer
shows up first, it will produce a string column - with both readers.
Broken with the parallel fast_reader.
"""
imax = np.iinfo(int).max - 1
text = f'A B\n 12.3 {imax:d}0\n {imax:d}0 45.6e7'
expected = Table([[12.3, 10. * imax], [f'{imax:d}0', '45.6e7']],
names=('A', 'B'))
with pytest.warns(AstropyWarning, match=r'OverflowError converting to '
r'IntType in column B, reverting to String'):
table = ascii.read(text, format='basic', guess=guess, fast_reader=True)
assert_table_equal(table, expected)
with pytest.warns(AstropyWarning, match=r'OverflowError converting to '
r'IntType in column B, reverting to String'):
table = ascii.read(text, format='basic', guess=guess, fast_reader=False)
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_reader(parallel, guess):
"""
Make sure that ascii.read() can read Fortran-style exponential notation
using the fast_reader.
"""
# Check for nominal np.float64 precision
rtol = 1.e-15
atol = 0.0
text = 'A B C D\n100.01{:s}99 2.0 2.0{:s}-103 3\n' + \
' 4.2{:s}-1 5.0{:s}-1 0.6{:s}4 .017{:s}+309'
expc = Table([[1.0001e101, 0.42], [2, 0.5], [2.e-103, 6.e3], [3, 1.7e307]],
names=('A', 'B', 'C', 'D'))
    expstyles = {'e': 6 * ('E',),
'D': ('D', 'd', 'd', 'D', 'd', 'D'),
'Q': 3 * ('q', 'Q'),
'Fortran': ('E', '0', 'D', 'Q', 'd', '0')}
# C strtod (not-fast converter) can't handle Fortran exp
with pytest.raises(FastOptionsError) as e:
ascii.read(text.format(*(6 * ('D'))), format='basic', guess=guess,
fast_reader={'use_fast_converter': False,
'parallel': parallel, 'exponent_style': 'D'})
assert 'fast_reader: exponent_style requires use_fast_converter' in str(e.value)
    # Enable multiprocessing and the fast converter; iterate over
    # all style-exponent combinations, with auto-detection
for s, c in expstyles.items():
table = ascii.read(text.format(*c), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': s})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
# Additional corner-case checks including triple-exponents without
# any character and mixed whitespace separators
text = 'A B\t\t C D\n1.0001+101 2.0+000\t 0.0002-099 3\n ' + \
'0.42-000 \t 0.5 6.+003 0.000000000000000000000017+330'
table = ascii.read(text, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
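
# Hedged sketch (not collected by pytest): exponent_style='A' makes the fast
# converter auto-detect 'D'/'Q' exponent characters as well as bare
# triple-digit exponents like '2.0+103'.
def _example_fortran_autodetect():
    t = ascii.read(StringIO('x\n2.0+103'), format='basic',
                   fast_reader={'exponent_style': 'A'})
    assert_almost_equal(t['x'][0], 2.0e103, rtol=1.e-15, atol=1.e-324)
    return t
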
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [False, True])
def test_fortran_invalid_exp(parallel, guess):
"""
Test Fortran-style exponential notation in the fast_reader with invalid
exponent-like patterns (no triple-digits) to make sure they are returned
as strings instead, as with the standard C parser.
"""
if parallel and CI:
pytest.xfail("Multiprocessing can sometimes fail on CI")
formats = {'basic': ' ', 'tab': '\t', 'csv': ','}
header = ['S1', 'F2', 'S2', 'F3', 'S3', 'F4', 'F5', 'S4', 'I1', 'F6', 'F7']
# Tested entries and expected returns, first for auto-detect,
# then for different specified exponents
fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.',
'2', '4.56e-2.3', '8000', '4.2-022', '.00000145e314']
vals_e = ['1.0001+1', '.42d1', '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
vals_d = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', '.00000145e314']
vals_a = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, 4.2e-22, 1.45e308]
vals_v = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
# Iterate over supported format types and separators
for f, s in formats.items():
t1 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)),
format=f, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(t1, Table([[col] for col in vals_a], names=header))
# Non-basic separators require guessing enabled to be detected
if guess:
formats['bar'] = '|'
else:
formats = {'basic': ' '}
for s in formats.values():
t2 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'a'})
assert_table_equal(t2, Table([[col] for col in vals_a], names=header))
# Iterate for (default) expchar 'E'
for s in formats.values():
t3 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': True})
assert_table_equal(t3, Table([[col] for col in vals_e], names=header))
# Iterate for expchar 'D'
for s in formats.values():
t4 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'D'})
assert_table_equal(t4, Table([[col] for col in vals_d], names=header))
# Iterate for regular converter (strtod)
for s in formats.values():
t5 = ascii.read(StringIO(s.join(header) + '\n' + s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': False})
read_values = [col[0] for col in t5.itercols()]
if os.name == 'nt':
# Apparently C strtod() on (some?) MSVC recognizes 'd' exponents!
assert read_values == vals_v or read_values == vals_e
else:
assert read_values == vals_e
def test_fortran_reader_notbasic():
"""
Check if readers without a fast option raise a value error when a
fast_reader is asked for (implies the default 'guess=True').
"""
tabstr = dedent("""
a b
1 1.23D4
2 5.67D-8
""")[1:-1]
t1 = ascii.read(tabstr.split('\n'), fast_reader=dict(exponent_style='D'))
assert t1['b'].dtype.kind == 'f'
tabrdb = dedent("""
a\tb
# A simple RDB table
N\tN
1\t 1.23D4
2\t 5.67-008
""")[1:-1]
t2 = ascii.read(tabrdb.split('\n'), format='rdb',
fast_reader=dict(exponent_style='fortran'))
assert t2['b'].dtype.kind == 'f'
tabrst = dedent("""
= =======
a b
= =======
1 1.23E4
2 5.67E-8
= =======
""")[1:-1]
t3 = ascii.read(tabrst.split('\n'), format='rst')
assert t3['b'].dtype.kind == 'f'
t4 = ascii.read(tabrst.split('\n'), guess=True)
assert t4['b'].dtype.kind == 'f'
    # In the special case of fast_reader=True (the default),
    # incompatibility is ignored
t5 = ascii.read(tabrst.split('\n'), format='rst', fast_reader=True)
assert t5['b'].dtype.kind == 'f'
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader='force')
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(use_fast_converter=False))
tabrst = tabrst.replace('E', 'D')
with pytest.raises(ParameterError):
ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(exponent_style='D'))
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize('fast_reader', [dict(exponent_style='D'),
dict(exponent_style='A')])
def test_dict_kwarg_integrity(fast_reader, guess):
"""
Check if dictionaries passed as kwargs (fast_reader in this test) are
left intact by ascii.read()
"""
expstyle = fast_reader.get('exponent_style', 'E')
fields = ['10.1D+199', '3.14d+313', '2048d+306', '0.6D-325', '-2.d345']
ascii.read(StringIO(' '.join(fields)), guess=guess,
fast_reader=fast_reader)
assert fast_reader.get('exponent_style', None) == expstyle
@pytest.mark.parametrize('fast_reader', [False,
dict(parallel=True),
dict(parallel=False)])
def test_read_empty_basic_table_with_comments(fast_reader):
"""
Test for reading a "basic" format table that has no data but has comments.
Tests the fix for #8267.
"""
dat = """
# comment 1
# comment 2
col1 col2
"""
t = ascii.read(dat, fast_reader=fast_reader)
assert t.meta['comments'] == ['comment 1', 'comment 2']
assert len(t) == 0
assert t.colnames == ['col1', 'col2']
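
# Hedged sketch (not collected by pytest): comment lines are stripped of the
# leading '# ' and stored in the table metadata, whether or not data follow.
def _example_comments_in_meta():
    t = ascii.read(['# note', 'a b', '1 2'], fast_reader=True)
    assert t.meta['comments'] == ['note']
    return t
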
@pytest.mark.parametrize('fast_reader', [dict(use_fast_converter=True),
dict(exponent_style='A')])
def test_conversion_fast(fast_reader):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, i.e. on parsing
non-numeric input including isolated positive/negative signs, it should
fall back to strings.
"""
text = """
A B C D E F G H
1 a 3 4 5 6 7 8
2. 1 9 -.1e1 10.0 8.7 6 -5.3e4
4 2 -12 .4 +.e1 - + six
"""
table = ascii.read(text, fast_reader=fast_reader)
assert_equal(table['A'].dtype.kind, 'f')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'i')
assert_equal(table['D'].dtype.kind, 'f')
assert table['E'].dtype.kind in ('S', 'U')
assert table['F'].dtype.kind in ('S', 'U')
assert table['G'].dtype.kind in ('S', 'U')
assert table['H'].dtype.kind in ('S', 'U')
@pytest.mark.parametrize('delimiter', ['\n', '\r'])
@pytest.mark.parametrize('fast_reader', [False, True, 'force'])
def test_newline_as_delimiter(delimiter, fast_reader):
"""
Check that newline characters are correctly handled as delimiters.
Tests the fix for #9928.
"""
if delimiter == '\r':
eol = '\n'
else:
eol = '\r'
inp0 = ["a | b | c ", " 1 | '2' | 3.00000 "]
inp1 = "a {0:s} b {0:s}c{1:s} 1 {0:s}'2'{0:s} 3.0".format(delimiter, eol)
inp2 = [f"a {delimiter} b{delimiter} c",
f"1{delimiter} '2' {delimiter} 3.0"]
t0 = ascii.read(inp0, delimiter='|', fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
t2 = ascii.read(inp2, delimiter=delimiter, fast_reader=fast_reader)
assert t1.colnames == t2.colnames == ['a', 'b', 'c']
assert len(t1) == len(t2) == 1
assert t1['b'].dtype.kind in ('S', 'U')
assert t2['b'].dtype.kind in ('S', 'U')
assert_table_equal(t1, t0)
assert_table_equal(t2, t0)
inp0 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format('|', eol)
inp1 = 'a {0:s} b {0:s} c{1:s} 1 {0:s}"2"{0:s} 3.0'.format(delimiter, eol)
t0 = ascii.read(inp0, delimiter='|', fast_reader=fast_reader)
t1 = ascii.read(inp1, delimiter=delimiter, fast_reader=fast_reader)
if not fast_reader:
pytest.xfail("Quoted fields are not parsed correctly by BaseSplitter")
assert_equal(t1['b'].dtype.kind, 'i')
@pytest.mark.parametrize('delimiter', [' ', '|', '\n', '\r'])
@pytest.mark.parametrize('fast_reader', [False, True, 'force'])
def test_single_line_string(delimiter, fast_reader):
"""
    String input without a newline character is interpreted as a filename,
    unless it is an element of an iterable. Maybe not logical, but test that it is
at least treated consistently.
"""
expected = Table([[1], [2], [3.00]], names=('col1', 'col2', 'col3'))
text = "1{0:s}2{0:s}3.0".format(delimiter)
if delimiter in ('\r', '\n'):
t1 = ascii.read(text, format='no_header', delimiter=delimiter, fast_reader=fast_reader)
assert_table_equal(t1, expected)
else:
# Windows raises OSError, but not the other OSes.
with pytest.raises((FileNotFoundError, OSError)):
t1 = ascii.read(text, format='no_header', delimiter=delimiter, fast_reader=fast_reader)
t2 = ascii.read([text], format='no_header', delimiter=delimiter, fast_reader=fast_reader)
assert_table_equal(t2, expected)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``HTML``
reader/writer and aims to document its functionality.
Requires `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
to be installed.
"""
from io import StringIO
from astropy.io.ascii import html
from astropy.io.ascii import core
from astropy.table import Table
import pytest
import numpy as np
from .common import setup_function, teardown_function # noqa
from astropy.io import ascii
from astropy.utils.compat.optional_deps import HAS_BLEACH, HAS_BS4 # noqa
if HAS_BS4:
from bs4 import BeautifulSoup, FeatureNotFound
@pytest.mark.skipif('not HAS_BS4')
def test_soupstring():
"""
Test to make sure the class SoupString behaves properly.
"""
soup = BeautifulSoup('<html><head></head><body><p>foo</p></body></html>',
'html.parser')
soup_str = html.SoupString(soup)
assert isinstance(soup_str, str)
assert isinstance(soup_str, html.SoupString)
assert soup_str == '<html><head></head><body><p>foo</p></body></html>'
assert soup_str.soup is soup
def test_listwriter():
"""
Test to make sure the class ListWriter behaves properly.
"""
lst = []
writer = html.ListWriter(lst)
for i in range(5):
writer.write(i)
for ch in 'abcde':
writer.write(ch)
assert lst == [0, 1, 2, 3, 4, 'a', 'b', 'c', 'd', 'e']
@pytest.mark.skipif('not HAS_BS4')
def test_identify_table():
"""
Test to make sure that identify_table() returns whether the
given BeautifulSoup tag is the correct table to process.
"""
    # Should return False for non-<table> tags and for None input
soup = BeautifulSoup('<html><body></body></html>', 'html.parser')
assert html.identify_table(soup, {}, 0) is False
assert html.identify_table(None, {}, 0) is False
soup = BeautifulSoup('<table id="foo"><tr><th>A</th></tr><tr>'
'<td>B</td></tr></table>', 'html.parser').table
assert html.identify_table(soup, {}, 2) is False
assert html.identify_table(soup, {}, 1) is True # Default index of 1
# Same tests, but with explicit parameter
assert html.identify_table(soup, {'table_id': 2}, 1) is False
assert html.identify_table(soup, {'table_id': 1}, 1) is True
# Test identification by string ID
assert html.identify_table(soup, {'table_id': 'bar'}, 1) is False
assert html.identify_table(soup, {'table_id': 'foo'}, 1) is True
@pytest.mark.skipif('not HAS_BS4')
def test_missing_data():
"""
Test reading a table with missing data
"""
# First with default where blank => '0'
table_in = ['<table>',
'<tr><th>A</th></tr>',
'<tr><td></td></tr>',
'<tr><td>1</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert dat.masked is False
assert np.all(dat['A'].mask == [True, False])
assert dat['A'].dtype.kind == 'i'
# Now with a specific value '...' => missing
table_in = ['<table>',
'<tr><th>A</th></tr>',
'<tr><td>...</td></tr>',
'<tr><td>1</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html', fill_values=[('...', '0')])
assert dat.masked is False
assert np.all(dat['A'].mask == [True, False])
assert dat['A'].dtype.kind == 'i'
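
# Hedged sketch (not collected by pytest; needs BeautifulSoup like the tests
# above): fill_values pairs are (input_string, replacement) applied before
# type conversion, so '...' cells become masked integers here.
def _example_fill_values_read():
    rows = ['<table>', '<tr><th>A</th></tr>',
            '<tr><td>...</td></tr>', '<tr><td>7</td></tr>', '</table>']
    dat = Table.read(rows, format='ascii.html', fill_values=[('...', '0')])
    assert dat['A'].dtype.kind == 'i'
    return dat
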
@pytest.mark.skipif('not HAS_BS4')
def test_rename_cols():
"""
Test reading a table and renaming cols
"""
table_in = ['<table>',
'<tr><th>A</th> <th>B</th></tr>',
'<tr><td>1</td><td>2</td></tr>',
'</table>']
# Swap column names
dat = Table.read(table_in, format='ascii.html', names=['B', 'A'])
assert dat.colnames == ['B', 'A']
assert len(dat) == 1
# Swap column names and only include A (the renamed version)
dat = Table.read(table_in, format='ascii.html', names=['B', 'A'], include_names=['A'])
assert dat.colnames == ['A']
assert len(dat) == 1
assert np.all(dat['A'] == 2)
@pytest.mark.skipif('not HAS_BS4')
def test_no_names():
"""
Test reading a table with no column header
"""
table_in = ['<table>',
'<tr><td>1</td></tr>',
'<tr><td>2</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert dat.colnames == ['col1']
assert len(dat) == 2
dat = Table.read(table_in, format='ascii.html', names=['a'])
assert dat.colnames == ['a']
assert len(dat) == 2
@pytest.mark.skipif('not HAS_BS4')
def test_identify_table_fail():
"""
Raise an exception with an informative error message if table_id
is not found.
"""
table_in = ['<table id="foo"><tr><th>A</th></tr>',
'<tr><td>B</td></tr></table>']
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'},
guess=False)
assert err.match("ERROR: HTML table id 'bad_id' not found$")
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format='ascii.html', htmldict={'table_id': 3},
guess=False)
assert err.match("ERROR: HTML table number 3 not found$")
@pytest.mark.skipif('not HAS_BS4')
def test_backend_parsers():
"""
Make sure the user can specify which back-end parser to use
and that an error is raised if the parser is invalid.
"""
for parser in ('lxml', 'xml', 'html.parser', 'html5lib'):
try:
Table.read('data/html2.html', format='ascii.html',
htmldict={'parser': parser}, guess=False)
except FeatureNotFound:
if parser == 'html.parser':
raise
# otherwise ignore if the dependency isn't present
# reading should fail if the parser is invalid
with pytest.raises(FeatureNotFound):
Table.read('data/html2.html', format='ascii.html',
htmldict={'parser': 'foo'}, guess=False)
@pytest.mark.skipif('HAS_BS4')
def test_htmlinputter_no_bs4():
"""
    This should raise an OptionalTableImportError if BeautifulSoup
is not installed.
"""
inputter = html.HTMLInputter()
with pytest.raises(core.OptionalTableImportError):
inputter.process_lines([])
@pytest.mark.skipif('not HAS_BS4')
def test_htmlinputter():
"""
Test to ensure that HTMLInputter correctly converts input
into a list of SoupStrings representing table elements.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
# In absence of table_id, defaults to the first table
expected = ['<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>',
'<tr><td>1</td><td>a</td><td>1.05</td></tr>',
'<tr><td>2</td><td>b</td><td>2.75</td></tr>',
'<tr><td>3</td><td>c</td><td>-1.25</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
# Should raise an InconsistentTableError if the table is not found
inputter.html = {'table_id': 4}
with pytest.raises(core.InconsistentTableError):
inputter.get_lines(table)
# Identification by string ID
inputter.html['table_id'] = 'second'
expected = ['<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>',
'<tr><td>4</td><td>d</td><td>10.5</td></tr>',
'<tr><td>5</td><td>e</td><td>27.5</td></tr>',
'<tr><td>6</td><td>f</td><td>-12.5</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
# Identification by integer index
inputter.html['table_id'] = 3
expected = ['<tr><th>C1</th><th>C2</th><th>C3</th></tr>',
'<tr><td>7</td><td>g</td><td>105.0</td></tr>',
'<tr><td>8</td><td>h</td><td>275.0</td></tr>',
'<tr><td>9</td><td>i</td><td>-125.0</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
@pytest.mark.skipif('not HAS_BS4')
def test_htmlsplitter():
"""
Test to make sure that HTMLSplitter correctly inputs lines
of type SoupString to return a generator that gives all
header and data elements.
"""
splitter = html.HTMLSplitter()
lines = [html.SoupString(BeautifulSoup('<table><tr><th>Col 1</th><th>Col 2</th></tr></table>',
'html.parser').tr),
html.SoupString(BeautifulSoup('<table><tr><td>Data 1</td><td>Data 2</td></tr></table>',
'html.parser').tr)]
expected_data = [['Col 1', 'Col 2'], ['Data 1', 'Data 2']]
assert list(splitter(lines)) == expected_data
# Make sure the presence of a non-SoupString triggers a TypeError
lines.append('<tr><td>Data 3</td><td>Data 4</td></tr>')
with pytest.raises(TypeError):
list(splitter(lines))
# Make sure that passing an empty list triggers an error
with pytest.raises(core.InconsistentTableError):
list(splitter([]))
@pytest.mark.skipif('not HAS_BS4')
def test_htmlheader_start():
"""
Test to ensure that the start_line method of HTMLHeader
    returns the first line of header data. Uses data/html.html
    for sample input.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
header = html.HTMLHeader()
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>'
inputter.html['table_id'] = 'second'
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>'
inputter.html['table_id'] = 3
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>C1</th><th>C2</th><th>C3</th></tr>'
# start_line should return None if no valid header is found
lines = [html.SoupString(BeautifulSoup('<table><tr><td>Data</td></tr></table>',
'html.parser').tr),
html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)]
assert header.start_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append('<tr><th>Header</th></tr>')
with pytest.raises(TypeError):
header.start_line(lines)
@pytest.mark.skipif('not HAS_BS4')
def test_htmldata():
"""
    Test to ensure that the start_line and end_line methods
    of HTMLData return the correct bounds of the table data. Uses
    data/html.html for sample input.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
data = html.HTMLData()
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>1</td><td>a</td><td>1.05</td></tr>'
# end_line returns the index of the last data element + 1
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>3</td><td>c</td><td>-1.25</td></tr>'
inputter.html['table_id'] = 'second'
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>4</td><td>d</td><td>10.5</td></tr>'
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>6</td><td>f</td><td>-12.5</td></tr>'
inputter.html['table_id'] = 3
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>7</td><td>g</td><td>105.0</td></tr>'
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>9</td><td>i</td><td>-125.0</td></tr>'
# start_line should raise an error if no table data exists
lines = [html.SoupString(BeautifulSoup('<div></div>', 'html.parser').div),
html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)]
with pytest.raises(core.InconsistentTableError):
data.start_line(lines)
# end_line should return None if no table data exists
assert data.end_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append('<tr><td>Data</td></tr>')
with pytest.raises(TypeError):
data.start_line(lines)
with pytest.raises(TypeError):
data.end_line(lines)
def test_multicolumn_write():
"""
Test to make sure that the HTML writer writes multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td>a</td>
<td>a</td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td>b</td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
out = html.HTML().write(table)[0].strip()
assert out == expected.strip()
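
# Hedged sketch (not collected by pytest): the same markup can be produced
# through the unified Table.write() interface, which dispatches to the HTML
# writer exercised directly above.
def _example_html_write():
    t = Table([[1, 2]], names=['a'])
    out = StringIO()
    t.write(out, format='ascii.html')
    assert '<td>1</td>' in out.getvalue()
    return out.getvalue()
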
@pytest.mark.skipif('not HAS_BLEACH')
def test_multicolumn_write_escape():
"""
Test to make sure that the HTML writer writes multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('<a></a>', '<a></a>', 'a'), ('<b></b>', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td><a></a></td>
<td><a></a></td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td><b></b></td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
out = html.HTML(htmldict={'raw_html_cols': 'C3'}).write(table)[0].strip()
assert out == expected.strip()
def test_write_no_multicols():
"""
Test to make sure that the HTML writer will not use
multi-dimensional columns if the multicol parameter
is False.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th>C2</th>
<th>C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0 .. 1.0</td>
<td>a .. a</td>
</tr>
<tr>
<td>2</td>
<td>2.0 .. 2.0</td>
<td>b .. b</td>
</tr>
<tr>
<td>3</td>
<td>3.0 .. 3.0</td>
<td>c .. c</td>
</tr>
</table>
</body>
</html>
"""
assert html.HTML({'multicol': False}).write(table)[0].strip() == \
expected.strip()
@pytest.mark.skipif('not HAS_BS4')
def test_multicolumn_read():
"""
Test to make sure that the HTML reader inputs multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
Ensure that any string element within a multidimensional column
casts all elements to string prior to type conversion operations.
"""
table = Table.read('data/html2.html', format='ascii.html')
str_type = np.dtype((str, 21))
expected = Table(np.array([(['1', '2.5000000000000000001'], 3),
(['1a', '1'], 3.5)],
dtype=[('A', str_type, (2,)), ('B', '<f8')]))
assert np.all(table == expected)
@pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write():
"""
Test that columns can contain raw HTML which is not escaped.
"""
t = Table([['<em>x</em>'], ['<em>y</em>']], names=['a', 'b'])
# One column contains raw HTML (string input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'})
expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
# One column contains raw HTML (list input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']})
assert expected in out.getvalue()
# Two columns contains raw HTML (list input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']})
expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
@pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write_clean():
"""
Test that columns can contain raw HTML which is not escaped.
"""
import bleach # noqa
t = Table([['<script>x</script>'], ['<p>y</p>'], ['<em>y</em>']], names=['a', 'b', 'c'])
# Confirm that <script> and <p> get escaped but not <em>
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': t.colnames})
expected = """\
<tr>
<td><script>x</script></td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
# Confirm that we can whitelist <p>
out = StringIO()
t.write(out, format='ascii.html',
htmldict={'raw_html_cols': t.colnames,
'raw_html_clean_kwargs': {'tags': bleach.ALLOWED_TAGS + ['p']}})
expected = """\
<tr>
<td><script>x</script></td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
def test_write_table_html_fill_values():
"""
Test that passing fill_values should replace any matching row
"""
buffer_output = StringIO()
t = Table([[1], [2]], names=('a', 'b'))
ascii.write(t, buffer_output, fill_values=('1', 'Hello world'),
format='html')
t_expected = Table([['Hello world'], [2]], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_html_fill_values_optional_columns():
"""
Test that passing optional column in fill_values should only replace
matching columns
"""
buffer_output = StringIO()
t = Table([[1], [1]], names=('a', 'b'))
ascii.write(t, buffer_output, fill_values=('1', 'Hello world', 'b'),
format='html')
t_expected = Table([[1], ['Hello world']], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_html_fill_values_masked():
"""
Test that passing masked values in fill_values should only replace
masked columns or values
"""
buffer_output = StringIO()
t = Table([[1], [1]], names=('a', 'b'), masked=True, dtype=('i4', 'i8'))
t['a'] = np.ma.masked
ascii.write(t, buffer_output, fill_values=(ascii.masked, 'TEST'),
format='html')
t_expected = Table([['TEST'], [1]], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_multicolumn_table_html_fill_values():
"""
Test to make sure that the HTML writer writes multidimensional
columns with correctly replaced fill_values.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
buffer_output = StringIO()
t = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
ascii.write(t, buffer_output, fill_values=('a', 'z'),
format='html')
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('z', 'z', 'z'), ('b', 'b', 'b'), ('c', 'c', 'c')]
buffer_expected = StringIO()
t_expected = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_multi_column_write_table_html_fill_values_masked():
"""
Test that passing masked values in fill_values should only replace
masked columns or values for multidimensional tables
"""
buffer_output = StringIO()
t = Table([[1, 2, 3, 4], ['--', 'a', '--', 'b']], names=('a', 'b'), masked=True)
t['a'][0:2] = np.ma.masked
t['b'][0:2] = np.ma.masked
ascii.write(t, buffer_output, fill_values=[(ascii.masked, 'MASKED')],
format='html')
t_expected = Table([['MASKED', 'MASKED', 3, 4], [
'MASKED', 'MASKED', '--', 'b']], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
print(buffer_expected.getvalue())
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_formatted_columns():
"""
Test to make sure that the HTML writer writes out using the
supplied formatting.
"""
col1 = [1, 2]
col2 = [1.234567e-11, -9.876543e11]
formats = {"C1": "04d", "C2": ".2e"}
table = Table([col1, col2], names=formats.keys())
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th>C2</th>
</tr>
</thead>
<tr>
<td>0001</td>
<td>1.23e-11</td>
</tr>
<tr>
<td>0002</td>
<td>-9.88e+11</td>
</tr>
</table>
</body>
</html>
"""
with StringIO() as sp:
table.write(sp, format="html", formats=formats)
out = sp.getvalue().strip()
assert out == expected.strip()
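
# Hedged sketch (not collected by pytest): `formats` maps column names to
# format specifiers that are applied through Python's format() machinery
# before each cell is written.
def _example_formats_mapping():
    t = Table([[3.14159]], names=['x'])
    with StringIO() as sp:
        t.write(sp, format='html', formats={'x': '.1f'})
        assert '<td>3.1</td>' in sp.getvalue()
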
@pytest.mark.skipif('not HAS_BS4')
def test_read_html_unicode():
"""
Test reading an HTML table with unicode values
"""
table_in = ['<table>',
'<tr><td>Δ</td></tr>',
'<tr><td>Δ</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert np.all(dat['col1'] == ['Δ', 'Δ'])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``ECSV``
reader/writer.
"""
from astropy.table.column import MaskedColumn
import os
import copy
import sys
from io import StringIO
from contextlib import nullcontext
import pytest
import numpy as np
import yaml
from astropy.table import Table, Column, QTable
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units import QuantityInfo
from astropy.utils.compat import NUMPY_LT_1_19_1
from astropy.io.ascii.ecsv import DELIMITERS, InvalidEcsvDatatypeWarning
from astropy.io import ascii
from astropy import units as u
from astropy.io.tests.mixin_columns import mixin_cols, compare_attrs, serialized_names
from .common import TEST_DIR
DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
'uint64', 'float16', 'float32', 'float64', 'float128',
'str']
if not hasattr(np, 'float128') or os.name == 'nt' or sys.maxsize <= 2**32:
DTYPES.remove('float128')
T_DTYPES = Table()
for dtype in DTYPES:
if dtype == 'bool':
data = np.array([False, True, False])
elif dtype == 'str':
data = np.array(['ab 0', 'ab, 1', 'ab2'])
else:
data = np.arange(3, dtype=dtype)
c = Column(data, unit='m / s', description='descr_' + dtype,
meta={'meta ' + dtype: 1})
T_DTYPES[dtype] = c
T_DTYPES.meta['comments'] = ['comment1', 'comment2']
# Corresponds to simple_table()
SIMPLE_LINES = ['# %ECSV 1.0',
'# ---',
'# datatype:',
'# - {name: a, datatype: int64}',
'# - {name: b, datatype: float64}',
'# - {name: c, datatype: string}',
'# schema: astropy-2.0',
'a b c',
'1 1.0 c',
'2 2.0 d',
'3 3.0 e']
def test_write_simple():
"""
Write a simple table with common types. This shows the compact version
of serialization with one line per column.
"""
t = simple_table()
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == SIMPLE_LINES
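
# Hedged sketch (not collected by pytest): the ECSV header is plain YAML in
# '# ' comment lines, so a write/read cycle through a string buffer
# preserves the exact dtype.
def _example_ecsv_roundtrip():
    t = Table({'a': np.arange(3, dtype=np.int16)})
    out = StringIO()
    t.write(out, format='ascii.ecsv')
    t2 = Table.read(out.getvalue(), format='ascii.ecsv')
    assert t2['a'].dtype == np.int16
    return t2
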
def test_write_full():
"""
    Write a full-featured table with common types and explicitly check the output.
"""
t = T_DTYPES['bool', 'int64', 'float64', 'str']
lines = ['# %ECSV 1.0',
'# ---',
'# datatype:',
'# - name: bool',
'# unit: m / s',
'# datatype: bool',
'# description: descr_bool',
'# meta: {meta bool: 1}',
'# - name: int64',
'# unit: m / s',
'# datatype: int64',
'# description: descr_int64',
'# meta: {meta int64: 1}',
'# - name: float64',
'# unit: m / s',
'# datatype: float64',
'# description: descr_float64',
'# meta: {meta float64: 1}',
'# - name: str',
'# unit: m / s',
'# datatype: string',
'# description: descr_str',
'# meta: {meta str: 1}',
'# meta: !!omap',
'# - comments: [comment1, comment2]',
'# schema: astropy-2.0',
'bool int64 float64 str',
'False 0 0.0 "ab 0"',
'True 1 1.0 "ab, 1"',
'False 2 2.0 ab2']
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == lines
def test_write_read_roundtrip():
"""
Write a full-featured table with all types and see that it round-trips on
readback. Use both space and comma delimiters.
"""
t = T_DTYPES
for delimiter in DELIMITERS:
out = StringIO()
t.write(out, format='ascii.ecsv', delimiter=delimiter)
t2s = [Table.read(out.getvalue(), format='ascii.ecsv'),
Table.read(out.getvalue(), format='ascii'),
ascii.read(out.getvalue()),
ascii.read(out.getvalue(), format='ecsv', guess=False),
ascii.read(out.getvalue(), format='ecsv')]
for t2 in t2s:
assert t.meta == t2.meta
for name in t.colnames:
assert t[name].attrs_equal(t2[name])
assert np.all(t[name] == t2[name])
def test_bad_delimiter():
"""
Passing a delimiter other than space or comma gives an exception
"""
out = StringIO()
with pytest.raises(ValueError) as err:
T_DTYPES.write(out, format='ascii.ecsv', delimiter='|')
assert 'only space and comma are allowed' in str(err.value)
def test_bad_header_start():
"""
Bad header without initial # %ECSV x.x
"""
lines = copy.copy(SIMPLE_LINES)
lines[0] = '# %ECV 0.9'
with pytest.raises(ascii.InconsistentTableError):
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
def test_bad_delimiter_input():
"""
Illegal delimiter in input
"""
lines = copy.copy(SIMPLE_LINES)
lines.insert(2, '# delimiter: |')
with pytest.raises(ValueError) as err:
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
assert 'only space and comma are allowed' in str(err.value)
def test_multidim_input():
"""
Multi-dimensional column in input
"""
t = Table()
t['a'] = np.arange(24).reshape(2, 3, 4)
t['a'].info.description = 'description'
t['a'].info.meta = {1: 2}
t['b'] = [1, 2]
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert np.all(t2['a'] == t['a'])
assert t2['a'].shape == t['a'].shape
assert t2['a'].dtype == t['a'].dtype
assert t2['a'].info.description == t['a'].info.description
assert t2['a'].info.meta == t['a'].info.meta
assert np.all(t2['b'] == t['b'])
def test_structured_input():
"""
Structured column in input.
"""
t = Table()
    # Add unit, description and meta to make sure they round-trip as well.
t['a'] = Column([('B', (1., [2., 3.])),
('A', (9., [8., 7.]))],
dtype=[('s', 'U1'), ('v', [('p0', 'f8'), ('p1', '2f8')])],
description='description',
format='>', # Most formats do not work with structured!
unit='m', # Overall unit should round-trip.
meta={1: 2})
t['b'] = Column([[(1., 2.), (9., 8.)],
[(3., 4.), (7., 6.)]],
dtype='f8,f8',
unit=u.Unit('m,s') # Per part unit should round-trip too.
)
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
for col in t.colnames:
assert np.all(t2[col] == t[col])
assert t2[col].shape == t[col].shape
assert t2[col].dtype == t[col].dtype
assert t2[col].unit == t[col].unit
assert t2[col].format == t[col].format
assert t2[col].info.description == t[col].info.description
assert t2[col].info.meta == t[col].info.meta
def test_round_trip_empty_table():
"""Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)"""
t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c'])
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.dtype == t2.dtype
assert len(t2) == 0
def test_csv_ecsv_colnames_mismatch():
"""
Test that mismatch in column names from normal CSV header vs.
ECSV YAML header raises the expected exception.
"""
lines = copy.copy(SIMPLE_LINES)
header_index = lines.index('a b c')
lines[header_index] = 'a b d'
with pytest.raises(ValueError) as err:
ascii.read(lines, format='ecsv')
assert "column names from ECSV header ['a', 'b', 'c']" in str(err.value)
def test_regression_5604():
"""
See https://github.com/astropy/astropy/issues/5604 for more.
"""
t = Table()
t.meta = {"foo": 5 * u.km, "foo2": u.s}
t["bar"] = [7] * u.km
out = StringIO()
t.write(out, format="ascii.ecsv")
assert '!astropy.units.Unit' in out.getvalue()
assert '!astropy.units.Quantity' in out.getvalue()
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
assert obj1.shape == obj2.shape
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description',
'info.dtype']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-10)
else:
assert np.all(a1 == a2)
# For no attrs that means we just compare directly.
if not attrs:
if isinstance(obj1, np.ndarray) and obj1.dtype.kind == 'f':
assert quantity_allclose(obj1, obj2, rtol=1e-15)
else:
assert np.all(obj1 == obj2)
def test_ecsv_mixins_ascii_read_class():
"""Ensure that ascii.read(ecsv_file) returns the correct class
(QTable if any Quantity subclasses, Table otherwise).
"""
# Make a table with every mixin type except Quantities
t = QTable({name: col for name, col in mixin_cols.items()
if not isinstance(col.info, QuantityInfo)})
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is Table
# Add a single quantity column
t['lon'] = mixin_cols['lon']
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is QTable
def test_ecsv_mixins_qtable_to_table():
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.allclose(col.value, col2, rtol=1e-10)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_mixins_as_one(table_cls):
"""Test write/read all cols at once and validate intermediate column names"""
names = sorted(mixin_cols)
all_serialized_names = []
# ECSV stores times as value by default, so we just get the column back.
# One exception is tm3, which is set to serialize via jd1 and jd2.
for name in names:
s_names = serialized_names[name]
if not name.startswith('tm3'):
s_names = [s_name.replace('.jd1', '') for s_name in s_names
if not s_name.endswith('jd2')]
all_serialized_names.extend(s_names)
t = table_cls([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
# Read as a ascii.basic table (skip all the ECSV junk)
t3 = table_cls.read(out.getvalue(), format='ascii.basic')
assert t3.colnames == all_serialized_names
def make_multidim(col, ndim):
"""Take a col with length=2 and make it N-d by repeating elements.
For the special case of ndim==1 just return the original.
The output has shape [3] * ndim. By using 3 we can be sure that repeating
the two input elements gives an output that is sufficiently unique for
the multidim tests.
"""
if ndim > 1:
import itertools
idxs = [idx for idx, _ in zip(itertools.cycle([0, 1]), range(3 ** ndim))]
col = col[idxs].reshape([3] * ndim)
return col
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
@pytest.mark.parametrize('ndim', (1, 2, 3))
def test_ecsv_mixins_per_column(table_cls, name_col, ndim):
"""Test write/read one col at a time and do detailed validation.
This tests every input column type as 1-d, 2-d and 3-d.
"""
name, col = name_col
c = make_multidim(np.array([1.0, 2.0]), ndim)
col = make_multidim(col, ndim)
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'description'
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for colname in t.colnames:
assert len(t2[colname].shape) == ndim
if colname in ('c1', 'c2'):
compare = ['data']
else:
# Storing Longitude as Column loses wrap_angle.
compare = [attr for attr in compare_attrs[colname]
if not (attr == 'wrap_angle' and table_cls is Table)]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
def test_round_trip_masked_table_default(tmpdir):
"""Test (mostly) round-trip of MaskedColumn through ECSV using default serialization
that uses an empty string "" to mark NULL values. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = str(tmpdir.join('test.ecsv'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t.write(filename)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
        # From a formal perspective the round-trip columns are the "same"
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# But peeking under the mask shows that the underlying data are changed
# because by default ECSV uses "" to represent masked elements.
t[name].mask = False
t2[name].mask = False
assert not np.all(t2[name] == t[name]) # Expected diff
def test_round_trip_masked_table_serialize_mask(tmpdir):
"""Same as prev but set the serialize_method to 'data_mask' so mask is written out"""
filename = str(tmpdir.join('test.ecsv'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t['c'][0] = '' # This would come back as masked for default "" NULL marker
    # MaskedColumn with no masked elements. See the MaskedColumnInfo class
    # _represent_as_dict() method for info about how a column with no masked
    # elements is handled.
t['d'] = [1, 2, 3]
t.write(filename, serialize_method='data_mask')
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
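
# Hedged sketch (not collected by pytest): serialize_method='data_mask'
# serializes the mask alongside the data, so values hidden under the mask
# survive the round trip, unlike the default "" null marker.
def _example_serialize_mask():
    t = Table({'a': MaskedColumn([1, 2], mask=[True, False])})
    out = StringIO()
    t.write(out, format='ascii.ecsv', serialize_method='data_mask')
    t2 = Table.read(out.getvalue(), format='ascii.ecsv')
    assert bool(t2['a'].mask[0]) and t2['a'][1] == 2
    return t2
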
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_round_trip_user_defined_unit(table_cls, tmpdir):
"""Ensure that we can read-back enabled user-defined units."""
# Test adapted from #8897, where it was noted that this works
# but was not tested.
filename = str(tmpdir.join('test.ecsv'))
unit = u.def_unit('bandpass_sol_lum')
t = table_cls()
t['l'] = np.arange(5) * unit
t.write(filename)
# without the unit enabled, get UnrecognizedUnit
if table_cls is QTable:
ctx = pytest.warns(u.UnitsWarning, match=r"'bandpass_sol_lum' did not parse .*")
else:
ctx = nullcontext()
# Note: The read might also generate ResourceWarning, in addition to UnitsWarning
with ctx:
t2 = table_cls.read(filename)
assert isinstance(t2['l'].unit, u.UnrecognizedUnit)
assert str(t2['l'].unit) == 'bandpass_sol_lum'
if table_cls is QTable:
assert np.all(t2['l'].value == t['l'].value)
else:
assert np.all(t2['l'] == t['l'])
# But with it enabled, it works.
with u.add_enabled_units(unit):
t3 = table_cls.read(filename)
assert t3['l'].unit is unit
assert np.all(t3['l'] == t['l'])
        # Just to be sure, also try writing with the unit enabled.
filename2 = str(tmpdir.join('test2.ecsv'))
t3.write(filename2)
        t4 = table_cls.read(filename2)
assert t4['l'].unit is unit
assert np.all(t4['l'] == t['l'])
def test_read_masked_bool():
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: col0, datatype: bool}
# schema: astropy-2.0
col0
1
0
True
""
False
"""
dat = ascii.read(txt, format='ecsv')
col = dat['col0']
assert isinstance(col, MaskedColumn)
assert np.all(col.mask == [False, False, False, True, False])
assert np.all(col == [True, False, True, False, False])
@pytest.mark.parametrize('serialize_method', ['null_value', 'data_mask'])
@pytest.mark.parametrize('dtype', [np.int64, np.float64, bool, str])
@pytest.mark.parametrize('delimiter', [',', ' '])
def test_roundtrip_multidim_masked_array(serialize_method, dtype, delimiter):
# TODO also test empty string with null value
t = Table()
col = MaskedColumn(np.arange(12).reshape(2, 3, 2), dtype=dtype)
if dtype is str:
# np does something funny and gives a dtype of U21.
col = col.astype('U2')
col.mask[0, 0, 0] = True
col.mask[1, 1, 1] = True
t['a'] = col
t['b'] = ['x', 'y'] # Add another column for kicks
out = StringIO()
t.write(out, format='ascii.ecsv', serialize_method=serialize_method)
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
if hasattr(t[name], 'mask'):
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize('subtype', ['some-user-type', 'complex'])
def test_multidim_unknown_subtype(subtype):
"""Test an ECSV file with a string type but unknown subtype"""
txt = f"""\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: {subtype}
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.warns(InvalidEcsvDatatypeWarning,
match=rf"unexpected subtype '{subtype}' set for column 'a'"):
t = ascii.read(txt, format='ecsv')
assert t['a'].dtype.kind == 'U'
assert t['a'][0] == '[1,2]'
def test_multidim_bad_shape():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - name: a
# datatype: string
# subtype: int64[3]
# schema: astropy-2.0
a
[1,2]
[3,4]"""
with pytest.raises(ValueError, match="column 'a' failed to convert: shape mismatch"):
Table.read(txt, format='ascii.ecsv')
def test_write_not_json_serializable():
t = Table()
t['a'] = np.array([{1, 2}, 1], dtype=object)
match = "could not convert column 'a' to string: Object of type set is not JSON serializable"
out = StringIO()
with pytest.raises(TypeError, match=match):
t.write(out, format='ascii.ecsv')
def test_read_not_json_serializable():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: string, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: column value is not valid JSON"
with pytest.raises(ValueError, match=match):
Table.read(txt, format='ascii.ecsv')
def test_read_bad_datatype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: object}
# schema: astropy-2.0
a
fail
[3,4]"""
with pytest.warns(InvalidEcsvDatatypeWarning,
match="unexpected datatype 'object' of column 'a' is not in allowed"):
t = Table.read(txt, format='ascii.ecsv')
assert t['a'][0] == "fail"
assert type(t['a'][1]) is str
    assert t['a'].dtype == np.dtype("O")
@pytest.mark.skipif(NUMPY_LT_1_19_1,
reason="numpy cannot parse 'complex' as string until 1.19+")
def test_read_complex():
"""Test an ECSV v1.0 file with a complex column"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: complex}
# schema: astropy-2.0
a
1+1j
2+2j"""
with pytest.warns(InvalidEcsvDatatypeWarning,
match="unexpected datatype 'complex' of column 'a' is not in allowed"):
t = Table.read(txt, format='ascii.ecsv')
assert t['a'].dtype.type is np.complex128
def test_read_str():
"""Test an ECSV file with a 'str' instead of 'string' datatype """
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: str}
# schema: astropy-2.0
a
sometext
S""" # also testing single character text
with pytest.warns(InvalidEcsvDatatypeWarning,
match="unexpected datatype 'str' of column 'a' is not in allowed"):
t = Table.read(txt, format='ascii.ecsv')
assert isinstance(t['a'][1], str)
assert isinstance(t['a'][0], np.str_)
def test_read_bad_datatype_for_object_subtype():
"""Test a malformed ECSV file"""
txt = """\
# %ECSV 1.0
# ---
# datatype:
# - {name: a, datatype: int64, subtype: json}
# schema: astropy-2.0
a
fail
[3,4]"""
match = "column 'a' failed to convert: datatype of column 'a' must be \"string\""
with pytest.raises(ValueError, match=match):
Table.read(txt, format='ascii.ecsv')
def test_full_repr_roundtrip():
"""Test round-trip of float values to full precision even with format
specified"""
t = Table()
t['a'] = np.array([np.pi, 1/7], dtype=np.float64)
t['a'].info.format = '.2f'
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert np.all(t['a'] == t2['a'])
assert t2['a'].info.format == '.2f'
#############################################################################
# Define a number of specialized columns for testing and the expected values
# of `datatype` for each column.
#############################################################################
# First here is some helper code used to make the expected outputs code.
def _get_ecsv_header_dict(text):
lines = [line.strip() for line in text.splitlines()]
lines = [line[2:] for line in lines if line.startswith('#')]
    lines = lines[2:]  # Drop the '%ECSV 1.0' and '---' marker lines
out = yaml.safe_load('\n'.join(lines))
return out
def _make_expected_values(cols):
from pprint import pformat
for name, col in cols.items():
t = Table()
t[name] = col
out = StringIO()
t.write(out, format='ascii.ecsv')
hdr = _get_ecsv_header_dict(out.getvalue())
fmt_hdr = pformat(hdr['datatype'])
print(f'exps[{name!r}] =', fmt_hdr[:1])
print(fmt_hdr[1:])
print()
# Expected values of `datatype` for each column
exps = {}
cols = {}
# Run of the mill scalar for completeness
cols['scalar'] = np.array([1, 2], dtype=np.int16)
exps['scalar'] = [
{'datatype': 'int16', 'name': 'scalar'}]
# Array of lists that works as a 2-d variable array. This is just treated
# as an object.
cols['2-d variable array lists'] = c = np.empty(shape=(2,), dtype=object)
c[0] = [[1, 2], ["a", 4]]
c[1] = [[1, 2, 3], [4, 5.25, 6]]
exps['2-d variable array lists'] = [
{'datatype': 'string',
'name': '2-d variable array lists',
'subtype': 'json'}]
# Array of numpy arrays that is a 2-d variable array
cols['2-d variable array numpy'] = c = np.empty(shape=(2,), dtype=object)
c[0] = np.array([[1, 2], [3, 4]], dtype=np.float32)
c[1] = np.array([[1, 2, 3], [4, 5.5, 6]], dtype=np.float32)
exps['2-d variable array numpy'] = [
{'datatype': 'string',
'name': '2-d variable array numpy',
'subtype': 'float32[2,null]'}]
cols['1-d variable array lists'] = np.array([[1, 2], [3, 4, 5]], dtype=object)
exps['1-d variable array lists'] = [
{'datatype': 'string',
'name': '1-d variable array lists',
'subtype': 'json'}]
# Variable-length array
cols['1-d variable array numpy'] = np.array(
[np.array([1, 2], dtype=np.uint8),
np.array([3, 4, 5], dtype=np.uint8)], dtype=object)
exps['1-d variable array numpy'] = [
{'datatype': 'string',
'name': '1-d variable array numpy',
'subtype': 'uint8[null]'}]
cols['1-d variable array numpy str'] = np.array(
[np.array(['a', 'b']),
np.array(['c', 'd', 'e'])], dtype=object)
exps['1-d variable array numpy str'] = [
{'datatype': 'string',
'name': '1-d variable array numpy str',
'subtype': 'string[null]'}]
cols['1-d variable array numpy bool'] = np.array(
[np.array([True, False]),
np.array([True, False, True])], dtype=object)
exps['1-d variable array numpy bool'] = [
{'datatype': 'string',
'name': '1-d variable array numpy bool',
'subtype': 'bool[null]'}]
cols['1-d regular array'] = np.array([[1, 2], [3, 4]], dtype=np.int8)
exps['1-d regular array'] = [
{'datatype': 'string',
'name': '1-d regular array',
'subtype': 'int8[2]'}]
cols['2-d regular array'] = np.arange(8, dtype=np.float16).reshape(2, 2, 2)
exps['2-d regular array'] = [
{'datatype': 'string',
'name': '2-d regular array',
'subtype': 'float16[2,2]'}]
cols['scalar object'] = np.array([{'a': 1}, {'b': 2}], dtype=object)
exps['scalar object'] = [
{'datatype': 'string', 'name': 'scalar object', 'subtype': 'json'}]
cols['1-d object'] = np.array(
[[{'a': 1}, {'b': 2}],
[{'a': 1}, {'b': 2}]], dtype=object)
exps['1-d object'] = [
{'datatype': 'string',
'name': '1-d object',
'subtype': 'json[2]'}]
@pytest.mark.parametrize('name,col,exp',
list(zip(cols, cols.values(), exps.values())))
def test_specialized_columns(name, col, exp):
"""Test variable length lists, multidim columns, object columns.
"""
t = Table()
t[name] = col
out = StringIO()
t.write(out, format='ascii.ecsv')
hdr = _get_ecsv_header_dict(out.getvalue())
assert hdr['datatype'] == exp
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
assert np.all(val1 == val2)
def test_full_subtypes():
"""Read ECSV file created by M. Taylor that includes scalar, fixed array,
variable array for all datatypes. This file has missing values for all
columns as both per-value null and blank entries for the entire column
value.
Note: original file was modified to include blank values in f_float and
f_double columns.
"""
t = Table.read(os.path.join(TEST_DIR, 'data', 'subtypes.ecsv'))
colnames = ('i_index,'
's_byte,s_short,s_int,s_long,s_float,s_double,s_string,s_boolean,'
'f_byte,f_short,f_int,f_long,f_float,f_double,f_string,f_boolean,'
'v_byte,v_short,v_int,v_long,v_float,v_double,v_string,v_boolean,'
'm_int,m_double').split(',')
assert t.colnames == colnames
type_map = {'byte': 'int8',
'short': 'int16',
'int': 'int32',
'long': 'int64',
'float': 'float32',
'double': 'float64',
'string': 'str',
'boolean': 'bool'}
for col in t.itercols():
info = col.info
if info.name == 'i_index':
continue
assert isinstance(col, MaskedColumn)
type_name = info.name[2:] # short, int, etc
subtype = info.name[:1]
if subtype == 's': # Scalar
assert col.shape == (16,)
if subtype == 'f': # Fixed array
assert col.shape == (16, 3)
if subtype == 'v': # Variable array
assert col.shape == (16,)
assert info.dtype.name == 'object'
for val in col:
assert isinstance(val, np.ndarray)
assert val.dtype.name.startswith(type_map[type_name])
assert len(val) in [0, 1, 2, 3]
else:
assert info.dtype.name.startswith(type_map[type_name])
def test_masked_empty_subtypes():
"""Test blank field in subtypes. Similar to previous test but with explicit
checks of values"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: o, datatype: string, subtype: json}
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
o f v
null [0,1] [1]
"" "" ""
[1,2] [2,3] [2,3]
"""
t = Table.read(txt, format='ascii.ecsv')
assert np.all(t['o'] == np.array([None, -1, [1, 2]], dtype=object))
assert np.all(t['o'].mask == [False, True, False])
exp = np.ma.array([[0, 1], [-1, -1], [2, 3]], mask=[[0, 0], [1, 1], [0, 0]])
assert np.all(t['f'] == exp)
assert np.all(t['f'].mask == exp.mask)
assert np.all(t['v'][0] == [1])
assert np.all(t['v'][2] == [2, 3])
assert np.all(t['v'].mask == [False, True, False])
def test_masked_vals_in_array_subtypes():
"""Test null values in fixed and variable array subtypes."""
t = Table()
t['f'] = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]], dtype=np.int64)
t['v'] = np.empty(2, dtype=object)
t['v'][0] = np.ma.array([1, 2], mask=[0, 1], dtype=np.int64)
t['v'][1] = np.ma.array([3, 4, 5], mask=[1, 0, 0], dtype=np.int64)
out = StringIO()
t.write(out, format='ascii.ecsv')
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: f, datatype: string, subtype: 'int64[2]'}
# - {name: v, datatype: string, subtype: 'int64[null]'}
# schema: astropy-2.0
f v
[1,null] [1,null]
[null,4] [null,4,5]
"""
hdr = _get_ecsv_header_dict(out.getvalue())
hdr_exp = _get_ecsv_header_dict(txt)
assert hdr == hdr_exp
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t2.colnames == t.colnames
for name in t2.colnames:
assert t2[name].dtype == t[name].dtype
assert type(t2[name]) is type(t[name]) # noqa
for val1, val2 in zip(t2[name], t[name]):
if isinstance(val1, np.ndarray):
assert val1.dtype == val2.dtype
if isinstance(val1, np.ma.MaskedArray):
assert np.all(val1.mask == val2.mask)
assert np.all(val1 == val2)
def test_guess_ecsv_with_one_column():
"""Except for ECSV, guessing always requires at least 2 columns"""
txt = """
# %ECSV 1.0
# ---
# datatype:
# - {name: col, datatype: string, description: hello}
# schema: astropy-2.0
col
1
2
"""
t = ascii.read(txt)
assert t['col'].dtype.kind == 'U' # would be int with basic format
assert t['col'].description == 'hello'
|
abc0d2a1de389e113d69239aa5ec4152c014e4242f2cb13aafd36cede9c166a1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import locale
import platform
import pathlib
from io import BytesIO, StringIO
from collections import OrderedDict
from astropy.io.ascii.core import convert_numpy
import pytest
import numpy as np
from astropy.io import ascii
from astropy.table import Table, MaskedColumn
from astropy import table
from astropy.units import Unit
from astropy.table.table_helpers import simple_table
from .common import (assert_equal, assert_almost_equal,
assert_true)
from astropy.io.ascii import core
from astropy.io.ascii.ui import _probably_html, get_read_trace
from astropy.utils.data import get_pkg_data_path
from astropy.utils.exceptions import AstropyWarning
# NOTE: Python can be built without bz2.
from astropy.utils.compat.optional_deps import HAS_BZ2 # noqa
# setup/teardown function to have the tests run in the correct directory
from .common import setup_function, teardown_function # noqa
def asciiIO(x):
return BytesIO(x.encode('ascii'))
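# asciiIO wraps a str as a binary file-like object, letting the tests below
# exercise ascii.read() on byte streams as well as on plain strings and lists.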
@pytest.fixture
def home_is_data(monkeypatch, request):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the data directory.
"""
path = get_pkg_data_path('data')
# For Unix
monkeypatch.setenv('HOME', path)
# For Windows
monkeypatch.setenv('USERPROFILE', path)
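# Sketch of the effect (assuming tilde paths go through os.path.expanduser):
# with HOME/USERPROFILE pointing at the package data directory, a path such
# as '~/ipac.dat' resolves to '<pkg-data>/ipac.dat', which is what the
# tilde-path variants of the read tests below rely on.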
@pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False},
{'use_fast_converter': True}, 'force'])
def test_convert_overflow(fast_reader):
"""
Test reading an extremely large integer, which falls through to
string due to an overflow error (#2234). The C parsers used to
return inf (kind 'f') for this.
"""
expected_kind = 'U'
with pytest.warns(AstropyWarning, match="OverflowError converting to IntType in column a"):
dat = ascii.read(['a', '1' * 10000], format='basic',
fast_reader=fast_reader, guess=False)
assert dat['a'].dtype.kind == expected_kind
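# Background note: Python itself handles a 10000-digit integer, but it cannot
# be stored in a fixed-width numpy integer. The resulting OverflowError (see
# the warning matched above) makes the reader fall back to a string column,
# hence dtype kind 'U' instead of the inf-valued float the C parsers returned.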
def test_read_specify_converters_with_names():
"""
Exact example from #9701: When using ascii.read with both the names and
converters arguments, the converters dictionary ignores the user-supplied
names and requires that you know the guessed names.
"""
csv_text = ['a,b,c', '1,2,3', '4,5,6']
names = ['A', 'B', 'C']
converters = {
'A': [ascii.convert_numpy(float)],
'B': [ascii.convert_numpy(int)],
'C': [ascii.convert_numpy(str)]
}
t = ascii.read(csv_text, format='csv', names=names, converters=converters)
assert t['A'].dtype.kind == 'f'
assert t['B'].dtype.kind == 'i'
assert t['C'].dtype.kind == 'U'
def test_read_remove_and_rename_columns():
csv_text = ['a,b,c', '1,2,3', '4,5,6']
reader = ascii.get_reader(Reader=ascii.Csv)
reader.read(csv_text)
header = reader.header
with pytest.raises(KeyError, match='Column NOT-EXIST does not exist'):
header.remove_columns(['NOT-EXIST'])
header.remove_columns(['c'])
assert header.colnames == ('a', 'b')
header.rename_column('a', 'aa')
assert header.colnames == ('aa', 'b')
with pytest.raises(KeyError, match='Column NOT-EXIST does not exist'):
header.rename_column('NOT-EXIST', 'aa')
def test_guess_with_names_arg():
"""
Make sure reading a table with guess=True gives the expected result when
the names arg is specified.
"""
# This is a NoHeader format table and so `names` should replace
# the default col0, col1 names. It fails as a Basic format
# table when guessing because the column names would be '1', '2'.
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'))
assert len(dat) == 2
assert dat.colnames == ['a', 'b']
# This is a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(['c,d', '3,4'], names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
# This is also a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(['c d', 'e f'], names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
def test_guess_with_format_arg():
"""
When the format or Reader is explicitly given then disable the
strict column name checking in guessing.
"""
dat = ascii.read(['1,2', '3,4'], format='basic')
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic')
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
# For good measure check the same in the unified I/O interface
dat = Table.read(['1,2', '3,4'], format='ascii.basic')
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
def test_guess_with_delimiter_arg():
"""
When the delimiter is explicitly given then do not try others in guessing.
"""
fields = ['10.1E+19', '3.14', '2048', '-23']
values = [1.01e20, 3.14, 2048, -23]
# Default guess should recognise CSV with optional spaces
t0 = ascii.read(asciiIO(', '.join(fields)), guess=True)
for n, v in zip(t0.colnames, values):
assert t0[n][0] == v
# Forcing space as delimiter produces type str columns ('10.1E+19,')
t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ')
for n, v in zip(t1.colnames[:-1], fields[:-1]):
assert t1[n][0] == v + ','
def test_reading_mixed_delimiter_tabs_spaces():
# Regression test for https://github.com/astropy/astropy/issues/6770
dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc'))
assert len(dat) == 2
    dat = Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header',
                     names=['a', 'b', 'c'])
    assert len(dat) == 2
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_with_names_arg(fast_reader):
"""
Test that a bad value of `names` raises an exception.
"""
# CParser only uses columns in `names` and thus reports mismatch in num_col
with pytest.raises(ascii.InconsistentTableError):
ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
@pytest.mark.parametrize('path_format', ['plain', 'tilde-str', 'tilde-pathlib'])
def test_read_all_files(fast_reader, path_format, home_is_data):
for testfile in get_testfiles():
if testfile.get('skip'):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
if 'tilde' in path_format:
if 'str' in path_format:
testfile['name'] = '~/' + testfile['name'][5:]
else:
testfile['name'] = pathlib.Path('~/', testfile['name'][5:])
print(f"\n\n******** READING {testfile['name']}")
for guess in (True, False):
test_opts = testfile['opts'].copy()
if 'guess' not in test_opts:
test_opts['guess'] = guess
if ('Reader' in test_opts and f"fast_{test_opts['Reader']._format_name}"
in core.FAST_CLASSES): # has fast version
if 'Inputter' not in test_opts: # fast reader doesn't allow this
test_opts['fast_reader'] = fast_reader
table = ascii.read(testfile['name'], **test_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
@pytest.mark.parametrize('path_format', ['plain', 'tilde-str', 'tilde-pathlib'])
def test_read_all_files_via_table(fast_reader, path_format, home_is_data):
for testfile in get_testfiles():
if testfile.get('skip'):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
if 'tilde' in path_format:
if 'str' in path_format:
testfile['name'] = '~/' + testfile['name'][5:]
else:
testfile['name'] = pathlib.Path('~/', testfile['name'][5:])
print(f"\n\n******** READING {testfile['name']}")
for guess in (True, False):
test_opts = testfile['opts'].copy()
if 'guess' not in test_opts:
test_opts['guess'] = guess
if 'Reader' in test_opts:
format = f"ascii.{test_opts['Reader']._format_name}"
del test_opts['Reader']
else:
format = 'ascii'
if f'fast_{format}' in core.FAST_CLASSES:
test_opts['fast_reader'] = fast_reader
table = Table.read(testfile['name'], format=format, **test_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
def test_guess_all_files():
for testfile in get_testfiles():
if testfile.get('skip'):
print(f"\n\n******** SKIPPING {testfile['name']}")
continue
if not testfile['opts'].get('guess', True):
continue
print(f"\n\n******** READING {testfile['name']}")
for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []):
# Copy read options except for those in filter_read_opts
guess_opts = {k: v for k, v in testfile['opts'].items()
if k not in filter_read_opts}
table = ascii.read(testfile['name'], guess=True, **guess_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
def test_validate_read_kwargs():
lines = ['a b', '1 2', '3 4']
# Check that numpy integers are allowed
out = ascii.read(lines, data_start=np.int16(2))
assert np.all(out['a'] == [3])
with pytest.raises(TypeError, match=r"read\(\) argument 'data_end' must be a "
r"<class 'int'> object, "
r"got <class 'str'> instead"):
ascii.read(lines, data_end='needs integer')
with pytest.raises(TypeError, match=r"read\(\) argument 'fill_include_names' must "
r"be a list-like object, got <class 'str'> instead"):
ascii.read(lines, fill_include_names='ID')
def test_daophot_indef():
"""Test that INDEF is correctly interpreted as a missing value"""
table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot)
for col in table.itercols():
# Four columns have all INDEF values and are masked, rest are normal Column
if col.name in ('OTIME', 'MAG', 'MERR', 'XAIRMASS'):
assert np.all(col.mask)
else:
assert not hasattr(col, 'mask')
def test_daophot_types():
"""
Test specific data types which are different from what would be
    inferred automatically based only on data values. The DAOphot reader uses
the header information to assign types.
"""
table = ascii.read('data/daophot2.dat', Reader=ascii.Daophot)
assert table['LID'].dtype.char in 'fd' # float or double
assert table['MAG'].dtype.char in 'fd' # even without any data values
assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int)
assert table['ID'].dtype.char in 'il' # int or long
def test_daophot_header_keywords():
table = ascii.read('data/daophot.dat', Reader=ascii.Daophot)
expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'),
('REJFILE', '"hello world"', 'filename', '%-23s'),
('SCALE', '1.', 'units/pix', '%-23.7g'),)
keywords = table.meta['keywords'] # Ordered dict of keyword structures
for name, value, units, format_ in expected_keywords:
keyword = keywords[name]
assert_equal(keyword['value'], value)
assert_equal(keyword['units'], units)
assert_equal(keyword['format'], format_)
def test_daophot_multiple_aperture():
table = ascii.read('data/daophot3.dat', Reader=ascii.Daophot)
assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names
assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file
assert table['MERR2'][0] == 1.171
assert np.all(table['RAPERT5'] == 23.3) # assert all the 5th apertures are same 23.3
def test_daophot_multiple_aperture2():
table = ascii.read('data/daophot4.dat', Reader=ascii.Daophot)
    assert 'MAG15' in table.colnames  # MAG15 is one of the newly created column names
assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file
assert table['MERR2'][0] == 0.049
assert np.all(table['RAPERT5'] == 5.) # assert all the 5th apertures are same 5.0
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_empty_table_no_header(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/no_data_without_header.dat', Reader=ascii.NoHeader,
guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_wrong_quote(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/simple.txt', guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/bad.txt', fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col2(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('data/simple5.txt', delimiter='|', fast_reader=fast_reader)
def test_missing_file():
with pytest.raises(OSError):
ascii.read('does_not_exist')
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_names(fast_reader):
names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
data = ascii.read('data/simple3.txt', names=names, delimiter='|',
fast_reader=fast_reader)
assert_equal(data.dtype.names, names)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_include_names(fast_reader):
names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
include_names = ('c1', 'c3')
data = ascii.read('data/simple3.txt', names=names, include_names=include_names,
delimiter='|', fast_reader=fast_reader)
assert_equal(data.dtype.names, include_names)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_exclude_names(fast_reader):
exclude_names = ('Y', 'object')
data = ascii.read('data/simple3.txt', exclude_names=exclude_names, delimiter='|',
fast_reader=fast_reader)
assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad'))
def test_include_names_daophot():
include_names = ('ID', 'MAG', 'PIER')
data = ascii.read('data/daophot.dat', include_names=include_names)
assert_equal(data.dtype.names, include_names)
def test_exclude_names_daophot():
exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR')
data = ascii.read('data/daophot.dat', exclude_names=exclude_names)
assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER'))
def test_custom_process_lines():
def process_lines(lines):
bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE)
striplines = (x.strip() for x in lines)
return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0]
reader = ascii.get_reader(delimiter='|')
reader.inputter.process_lines = process_lines
data = reader.read('data/bars_at_ends.txt')
assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'))
assert_equal(len(data), 3)
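# Note on the hook exercised above: inputter.process_lines receives the full
# list of raw input lines once, before any header or data parsing, so it is
# the natural place to strip decorations such as the leading/trailing bars
# that would otherwise be misread as extra '|'-delimited fields.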
def test_custom_process_line():
def process_line(line):
line_out = re.sub(r'^\|\s*', '', line.strip())
return line_out
reader = ascii.get_reader(data_start=2, delimiter='|')
reader.header.splitter.process_line = process_line
reader.data.splitter.process_line = process_line
data = reader.read('data/nls1_stackinfo.dbout')
cols = get_testfiles('data/nls1_stackinfo.dbout')['cols']
assert_equal(data.dtype.names, cols[1:])
def test_custom_splitters():
reader = ascii.get_reader()
reader.header.splitter = ascii.BaseSplitter()
reader.data.splitter = ascii.BaseSplitter()
f = 'data/test4.dat'
data = reader.read(f)
testfile = get_testfiles(f)
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091)
assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704)
assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148)
assert_equal(data.field('statname')[2], 'chi2modvar')
assert_almost_equal(data.field('statval')[2], 497.56468441)
def test_start_end():
data = ascii.read('data/test5.dat', header_start=1, data_start=3, data_end=-5)
assert_equal(len(data), 13)
assert_equal(data.field('statname')[0], 'chi2xspecvar')
assert_equal(data.field('statname')[-1], 'chi2gehrels')
def test_set_converters():
converters = {'zabs1.nh': [ascii.convert_numpy('int32'),
ascii.convert_numpy('float32')],
'p1.gamma': [ascii.convert_numpy('str')]
}
data = ascii.read('data/test4.dat', converters=converters)
assert_equal(str(data['zabs1.nh'].dtype), 'float32')
assert_equal(data['p1.gamma'][0], '1.26764500000')
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_string(fast_reader):
f = 'data/simple.txt'
with open(f) as fd:
table = fd.read()
testfile = get_testfiles(f)[0]
data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_filelike(fast_reader):
f = 'data/simple.txt'
testfile = get_testfiles(f)[0]
with open(f, 'rb') as fd:
data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_lines(fast_reader):
f = 'data/simple.txt'
with open(f) as fd:
table = fd.readlines()
testfile = get_testfiles(f)[0]
data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
def test_comment_lines():
table = ascii.get_reader(Reader=ascii.Rdb)
data = table.read('data/apostrophe.rdb')
assert_equal(table.comment_lines, ['# first comment', ' # second comment'])
assert_equal(data.meta['comments'], ['first comment', 'second comment'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
**testfile['opts'])
assert_true((data['a'].mask == [False, True]).all())
assert_true((data['a'] == [1, 1]).all())
assert_true((data['b'].mask == [False, True]).all())
assert_true((data['b'] == [2, 1]).all())
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_col(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader,
**testfile['opts'])
check_fill_values(data)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_include_names(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
fill_include_names=['b'], **testfile['opts'])
check_fill_values(data)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_exclude_names(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
fill_exclude_names=['a'], **testfile['opts'])
check_fill_values(data)
def check_fill_values(data):
"""compare array column by column with expectation """
assert not hasattr(data['a'], 'mask')
assert_true((data['a'] == ['1', 'a']).all())
assert_true((data['b'].mask == [False, True]).all())
# Check that masked value is "do not care" in comparison
assert_true((data['b'] == [2, -999]).all())
data['b'].mask = False # explicitly unmask for comparison
assert_true((data['b'] == [2, 1]).all())
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_list(fast_reader):
f = 'data/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')],
fast_reader=fast_reader, **testfile['opts'])
data['a'].mask = False # explicitly unmask for comparison
assert_true((data['a'] == [42, 42]).all())
def test_masking_Cds_Mrt():
f = 'data/cds.dat' # Tested for CDS and MRT
for testfile in get_testfiles(f):
data = ascii.read(f,
**testfile['opts'])
assert_true(data['AK'].mask[0])
assert not hasattr(data['Fit'], 'mask')
def test_null_Ipac():
f = 'data/ipac.dat'
testfile = get_testfiles(f)[0]
data = ascii.read(f, **testfile['opts'])
mask = np.array([(True, False, True, False, True),
(False, False, False, False, False)],
dtype=[('ra', '|b1'),
('dec', '|b1'),
('sai', '|b1'),
('v2', '|b1'),
('sptype', '|b1')])
assert np.all(data.mask == mask)
def test_Ipac_meta():
keywords = OrderedDict((('intval', 1),
('floatval', 2.3e3),
('date', "Wed Sp 20 09:48:36 1995"),
('key_continue', 'IPAC keywords can continue across lines')))
comments = ['This is an example of a valid comment']
f = 'data/ipac.dat'
testfile = get_testfiles(f)[0]
data = ascii.read(f, **testfile['opts'])
assert data.meta['keywords'].keys() == keywords.keys()
for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()):
assert data_kv['value'] == kv
assert data.meta['comments'] == comments
def test_set_guess_kwarg():
"""Read a file using guess with one of the typical guess_kwargs explicitly set."""
data = ascii.read('data/space_delim_no_header.dat',
delimiter=',', guess=True)
    assert data.dtype.names == ('1 3.4 hello',)
    assert len(data) == 1
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_rdb_wrong_type(fast_reader):
"""Read RDB data with inconsistent data type (except failure)"""
table = """col1\tcol2
N\tN
1\tHello"""
with pytest.raises(ValueError):
ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_default_missing(fast_reader):
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = '\n'.join(['a,b,c,d',
'1,3,,',
'2, , 4.0 , ss '])
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 -- --',
' 2 -- 4.0 ss']
# Single row table with a single missing element
table = """ a \n "" """
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.pformat() == [' a ',
'---',
' --']
assert dat['a'].dtype.kind == 'i'
# Same test with a fixed width reader
table = '\n'.join([' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss'])
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 -- --',
' 2 -- 4.0 ss']
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss']
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[])
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss']
def get_testfiles(name=None):
"""Set up information about the columns, number of rows, and reader params to
read a bunch of test files and verify columns and number of rows."""
testfiles = [
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/apostrophe.rdb',
'nrows': 2,
'opts': {'Reader': ascii.Rdb}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/apostrophe.tab',
'nrows': 2,
'opts': {'Reader': ascii.Tab}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/cds.dat',
'nrows': 1,
'opts': {'Reader': ascii.Cds}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/cds.dat',
'nrows': 1,
'opts': {'Reader': ascii.Mrt}},
# Test malformed CDS file (issues #2241 #467)
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/cds_malformed.dat',
'nrows': 1,
'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}},
{'cols': ('a', 'b', 'c'),
'name': 'data/commented_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.CommentedHeader}},
{'cols': ('a', 'b', 'c'),
'name': 'data/commented_header2.dat',
'nrows': 2,
'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}},
{'cols': ('col1', 'col2', 'col3', 'col4', 'col5'),
'name': 'data/continuation.dat',
'nrows': 2,
'opts': {'Inputter': ascii.ContinuationLinesInputter,
'Reader': ascii.NoHeader}},
{'cols': ('ID',
'XCENTER',
'YCENTER',
'MAG',
'MERR',
'MSKY',
'NITER',
'SHARPNESS',
'CHI',
'PIER',
'PERROR'),
'name': 'data/daophot.dat',
'nrows': 2,
'opts': {'Reader': ascii.Daophot}},
{'cols': ('NUMBER',
'FLUX_ISO',
'FLUXERR_ISO',
'VALU-ES',
'VALU-ES_1',
'FLAG'),
'name': 'data/sextractor.dat',
'nrows': 3,
'opts': {'Reader': ascii.SExtractor}},
{'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
'name': 'data/ipac.dat',
'nrows': 2,
'opts': {'Reader': ascii.Ipac}},
{'cols': ('col0',
'objID',
'osrcid',
'xsrcid',
'SpecObjID',
'ra',
'dec',
'obsid',
'ccdid',
'z',
'modelMag_i',
'modelMagErr_i',
'modelMag_r',
'modelMagErr_r',
'expo',
'theta',
'rad_ecf_39',
'detlim90',
'fBlim90'),
'name': 'data/nls1_stackinfo.dbout',
'nrows': 58,
'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/no_data_cds.dat',
'nrows': 0,
'opts': {'Reader': ascii.Cds}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 'data/no_data_cds.dat',
'nrows': 0,
'opts': {'Reader': ascii.Mrt}},
{'cols': ('ID',
'XCENTER',
'YCENTER',
'MAG',
'MERR',
'MSKY',
'NITER',
'SHARPNESS',
'CHI',
'PIER',
'PERROR'),
'name': 'data/no_data_daophot.dat',
'nrows': 0,
'opts': {'Reader': ascii.Daophot}},
{'cols': ('NUMBER',
'FLUX_ISO',
'FLUXERR_ISO',
'VALUES',
'VALUES_1',
'FLAG'),
'name': 'data/no_data_sextractor.dat',
'nrows': 0,
'opts': {'Reader': ascii.SExtractor}},
{'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
'name': 'data/no_data_ipac.dat',
'nrows': 0,
'opts': {'Reader': ascii.Ipac}},
{'cols': ('ra', 'v2'),
'name': 'data/ipac.dat',
'nrows': 2,
'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}},
{'cols': ('a', 'b', 'c'),
'name': 'data/no_data_with_header.dat',
'nrows': 0,
'opts': {}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/short.rdb',
'nrows': 7,
'opts': {'Reader': ascii.Rdb}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 'data/short.tab',
'nrows': 7,
'opts': {'Reader': ascii.Tab}},
{'cols': ('test 1a', 'test2', 'test3', 'test4'),
'name': 'data/simple.txt',
'nrows': 2,
'opts': {'quotechar': "'"}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 'data/simple.txt',
'nrows': 1,
'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 'data/simple.txt',
'nrows': 1,
'opts': {'quotechar': "'", 'header_start': 1}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 'data/simple.txt',
'nrows': 2,
'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}},
{'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
'name': 'data/simple2.txt',
'nrows': 3,
'opts': {'delimiter': '|'}},
{'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
'name': 'data/simple3.txt',
'nrows': 2,
'opts': {'delimiter': '|'}},
{'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'),
'name': 'data/simple4.txt',
'nrows': 3,
'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}},
{'cols': ('col1', 'col2', 'col3'),
'name': 'data/space_delim_no_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.NoHeader}},
{'cols': ('col1', 'col2', 'col3'),
'name': 'data/space_delim_no_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.NoHeader, 'header_start': None}},
{'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'),
'name': 'data/space_delim_blank_lines.txt',
'nrows': 3,
'opts': {}},
{'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'),
'name': 'data/test4.dat',
'nrows': 9,
'opts': {}},
{'cols': ('a', 'b', 'c'),
'name': 'data/fill_values.txt',
'nrows': 2,
'opts': {'delimiter': ','}},
{'name': 'data/whitespace.dat',
'cols': ('quoted colname with tab\tinside', 'col2', 'col3'),
'nrows': 2,
'opts': {'delimiter': r'\s'}},
{'name': 'data/simple_csv.csv',
'cols': ('a', 'b', 'c'),
'nrows': 2,
'opts': {'Reader': ascii.Csv}},
{'name': 'data/simple_csv_missing.csv',
'cols': ('a', 'b', 'c'),
'nrows': 2,
'skip': True,
'opts': {'Reader': ascii.Csv}},
{'cols': ('cola', 'colb', 'colc'),
'name': 'data/latex1.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Facility', 'Id', 'exposure', 'date'),
'name': 'data/latex2.tex',
'nrows': 3,
'opts': {'Reader': ascii.AASTex}},
{'cols': ('cola', 'colb', 'colc'),
'name': 'data/latex3.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Col1', 'Col2', 'Col3', 'Col4'),
'name': 'data/fixed_width_2_line.txt',
'nrows': 2,
'opts': {'Reader': ascii.FixedWidthTwoLine}},
]
try:
import bs4 # noqa
testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'),
'name': 'data/html.html',
'nrows': 3,
'opts': {'Reader': ascii.HTML}})
except ImportError:
pass
if name is not None:
# If there are multiple matches then return a list, else return just
# the one match.
out = [x for x in testfiles if x['name'] == name]
if len(out) == 1:
out = out[0]
else:
out = testfiles
return out
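# Illustrative consumer of get_testfiles, sketching the pattern that the
# test_read_all_files* tests above follow:
#
#     for testfile in get_testfiles():
#         if testfile.get('skip'):
#             continue
#         dat = ascii.read(testfile['name'], **testfile['opts'])
#         assert dat.dtype.names == testfile['cols']
#         assert len(dat) == testfile['nrows']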
def test_header_start_exception():
    '''Check that certain Readers throw an exception if ``header_start`` is set.
    For certain Readers it does not make sense to set ``header_start``; they
throw an exception if you try.
This was implemented in response to issue #885.
'''
for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac,
ascii.BaseReader, ascii.FixedWidthNoHeader,
ascii.Cds, ascii.Mrt, ascii.Daophot]:
with pytest.raises(ValueError):
ascii.core._get_reader(readerclass, header_start=5)
def test_csv_table_read():
"""
Check for a regression introduced by #1935. Pseudo-CSV file with
commented header line.
"""
lines = ['# a, b',
'1, 2',
'3, 4']
t = ascii.read(lines)
assert t.colnames == ['a', 'b']
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_overlapping_names(fast_reader):
"""
Check that the names argument list can overlap with the existing column names.
This tests the issue in #1991.
"""
t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader)
assert t.colnames == ['b', 'a']
def test_sextractor_units():
"""
Make sure that the SExtractor reader correctly inputs descriptions and units.
"""
table = ascii.read('data/sextractor2.dat', Reader=ascii.SExtractor, guess=False)
expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'),
Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'),
Unit('mag * arcsec**(-2)')]
expected_descrs = ['Running object number',
'Windowed position estimate along x',
'Windowed position estimate along y',
'Kron-like elliptical aperture magnitude',
'RMS error for AUTO magnitude',
'Extraction flags',
None,
'Barycenter position along MAMA x axis',
'Peak surface brightness above background']
for i, colname in enumerate(table.colnames):
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_sextractor_last_column_array():
"""
Make sure that the SExtractor reader handles the last column correctly when it is array-like.
"""
table = ascii.read('data/sextractor3.dat', Reader=ascii.SExtractor, guess=False)
expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000',
'MAG_AUTO', 'MAGERR_AUTO',
'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3',
'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6',
'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3',
'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6']
expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'),
Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag')]
expected_descrs = ['Object position along x', None,
'Right ascension of barycenter (J2000)',
'Declination of barycenter (J2000)',
'Kron-like elliptical aperture magnitude',
'RMS error for AUTO magnitude', ] + [
'Fixed aperture magnitude vector'] * 7 + [
'RMS error vector for fixed aperture mag.'] * 7
for i, colname in enumerate(table.colnames):
assert table[colname].name == expected_columns[i]
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_list_with_newlines():
"""
Check that lists of strings where some strings consist of just a newline
("\n") are parsed correctly.
"""
t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"])
assert t.colnames == ['abc']
assert len(t) == 2
assert t[0][0] == 123
assert t[1][0] == 456
def test_commented_csv():
"""
    Check that the Csv reader does not ignore lines starting with the #
    comment character, which is defined for most Basic readers.
"""
t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv')
assert t.colnames == ['#a', 'b']
assert len(t) == 2
assert t['#a'][1] == '#3'
def test_meta_comments():
"""
Make sure that line comments are included in the ``meta`` attribute
of the output Table.
"""
t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3'])
assert t.colnames == ['a', 'b', 'c']
assert t.meta['comments'] == ['comment1', 'comment2']
def test_guess_fail():
"""
Check the error message when guess fails
"""
with pytest.raises(ascii.InconsistentTableError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic')
assert "** To figure out why the table did not read, use guess=False and" in str(err.value)
# Test the case with guessing enabled but for a format that has no free params
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='ipac')
assert 'At least one header line beginning and ending with delimiter required' in str(err.value)
# Test the case with guessing enabled but with all params specified
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic',
quotechar='"', delimiter=' ', fast_reader=False)
assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value)
@pytest.mark.xfail('not HAS_BZ2')
def test_guessing_file_object():
"""
Test guessing a file object. Fixes #3013 and similar issue noted in #3019.
"""
with open('data/ipac.dat.bz2', 'rb') as fd:
t = ascii.read(fd)
assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype']
def test_pformat_roundtrip():
"""Check that the screen output of ``print tab`` can be read. See #3025."""
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = '\n'.join(['a,b,c,d',
'1,3,1.11,1',
'2, 2, 4.0 , ss '])
dat = ascii.read(table)
out = ascii.read(dat.pformat())
assert len(dat) == len(out)
assert dat.colnames == out.colnames
for c in dat.colnames:
assert np.all(dat[c] == out[c])
def test_ipac_abbrev():
lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|',
'| r | rE | rea | real | D | do | dou | f | i | l | da| c |',
' 1 2 3 4 5 6 7 8 9 10 11 12 ']
dat = ascii.read(lines, format='ipac')
for name in dat.columns[0:8]:
assert dat[name].dtype.kind == 'f'
for name in dat.columns[8:10]:
assert dat[name].dtype.kind == 'i'
for name in dat.columns[10:12]:
assert dat[name].dtype.kind in ('U', 'S')
def test_almost_but_not_quite_daophot():
'''Regression test for #3319.
    This table looks so close to a daophot table that the daophot reader gets
    quite far before it fails with an AttributeError.
    Note that this table will actually be read as a CommentedHeader table with
the columns ['some', 'header', 'info'].
'''
lines = ["# some header info",
"#F header info beginning with 'F'",
"1 2 3",
"4 5 6",
"7 8 9"]
dat = ascii.read(lines)
assert len(dat) == 3
@pytest.mark.parametrize('fast', [False, 'force'])
def test_commented_header_comments(fast):
"""
Test that comments in commented_header are as expected with header_start
at different positions, and that the table round-trips.
"""
comments = ['comment 1', 'comment 2', 'comment 3']
lines = ['# a b',
'# comment 1',
'# comment 2',
'# comment 3',
'1 2',
'3 4']
dat = ascii.read(lines, format='commented_header', fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
out = StringIO()
ascii.write(dat, out, format='commented_header', fast_writer=fast)
assert out.getvalue().splitlines() == lines
lines.insert(1, lines.pop(0))
dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines.insert(2, lines.pop(1))
dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines.insert(3, lines.pop(2))
dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines = ['# a b',
'1 2',
'3 4']
dat = ascii.read(lines, format='commented_header', fast_reader=fast)
assert 'comments' not in dat.meta
assert dat.colnames == ['a', 'b']
def test_probably_html(home_is_data):
"""
Test the routine for guessing if a table input to ascii.read is probably HTML
"""
for tabl0 in ('data/html.html',
'~/html.html',
'http://blah.com/table.html',
'https://blah.com/table.html',
'file://blah/table.htm',
'ftp://blah.com/table.html',
'file://blah.com/table.htm',
' <! doctype html > hello world',
'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk',
['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],
(' <! doctype html > ', ' hello world'),
):
assert _probably_html(tabl0) is True
for tabl0 in ('data/html.htms',
'Xhttp://blah.com/table.html',
' https://blah.com/table.htm',
'fole://blah/table.htm',
' < doctype html > hello world',
'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk',
['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],
(' <! doctype htm > ', ' hello world'),
[[1, 2, 3]],
):
assert _probably_html(tabl0) is False
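# The heuristic tested above is intentionally rough: input counts as
# "probably HTML" if it looks like an .htm/.html path or URL, contains an
# HTML doctype, or contains the full <table>/<tr>/<td> nesting; anything
# less (e.g. '<tble' or a bare '<t ') is rejected, as the negative cases show.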
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_data_header_start(fast_reader):
tests = [(['# comment',
'',
' ',
'skip this line', # line 0
'a b', # line 1
'1 2'], # line 2
[{'header_start': 1},
{'header_start': 1, 'data_start': 2}
]
),
(['# comment',
'',
' \t',
'skip this line', # line 0
'a b', # line 1
'',
' \t',
'skip this line', # line 2
'1 2'], # line 3
[{'header_start': 1, 'data_start': 3}]),
(['# comment',
'',
' ',
'a b', # line 0
'',
' ',
'skip this line', # line 1
'1 2'], # line 2
[{'header_start': 0, 'data_start': 2},
{'data_start': 2}])]
for lines, kwargs_list in tests:
for kwargs in kwargs_list:
t = ascii.read(lines, format='basic', fast_reader=fast_reader,
guess=True, **kwargs)
assert t.colnames == ['a', 'b']
assert len(t) == 1
assert np.all(t['a'] == [1])
# Sanity check that the expected Reader is being used
assert get_read_trace()[-1]['kwargs']['Reader'] is (
ascii.Basic if (fast_reader is False) else ascii.FastBasic)
def test_table_with_no_newline():
"""
Test that an input file which is completely empty fails in the expected way.
Test that an input file with one line but no newline succeeds.
"""
# With guessing
table = BytesIO()
with pytest.raises(ascii.InconsistentTableError):
ascii.read(table)
# Without guessing
table = BytesIO()
with pytest.raises(ValueError) as err:
ascii.read(table, guess=False, fast_reader=False, format='basic')
assert 'No header line found' in str(err.value)
table = BytesIO()
t = ascii.read(table, guess=False, fast_reader=True, format='fast_basic')
assert not t and t.as_array().size == 0
# Put a single line of column names but with no newline
for kwargs in [dict(),
dict(guess=False, fast_reader=False, format='basic'),
dict(guess=False, fast_reader=True, format='fast_basic')]:
table = BytesIO()
table.write(b'a b')
t = ascii.read(table, **kwargs)
assert t.colnames == ['a', 'b']
assert len(t) == 0
def test_path_object():
fpath = pathlib.Path('data/simple.txt')
data = ascii.read(fpath)
assert len(data) == 2
assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4']
assert data['test2'][1] == 'hat2'
def test_column_conversion_error():
"""
Test that context information (upstream exception message) from column
conversion error is provided.
"""
ipac = """\
| col0 |
| double |
1 2
"""
with pytest.raises(ValueError) as err:
ascii.read(ipac, guess=False, format='ipac')
assert 'Column col0 failed to convert:' in str(err.value)
with pytest.raises(ValueError) as err:
ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []})
assert 'no converters' in str(err.value)
def test_non_C_locale_with_fast_reader():
"""Test code that forces "C" locale while calling fast reader (#4364)"""
current = locale.setlocale(locale.LC_ALL)
try:
if platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, 'fr_FR')
else:
locale.setlocale(locale.LC_ALL, 'fr_FR.utf8')
for fast_reader in (True,
False,
{'use_fast_converter': False},
{'use_fast_converter': True}):
t = ascii.read(['a b', '1.5 2'], format='basic', guess=False,
fast_reader=fast_reader)
assert t['a'].dtype.kind == 'f'
except locale.Error as e:
pytest.skip(f'Locale error: {e}')
finally:
locale.setlocale(locale.LC_ALL, current)
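# Why this matters: fr_FR uses ',' as the decimal separator, so a fast
# converter honoring LC_NUMERIC could misparse '1.5'. The reader is expected
# to force the "C" numeric locale internally while converting, which the
# dtype kind 'f' assertion above verifies for every fast_reader variant.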
def test_no_units_for_char_columns():
'''Test that a char column of a Table is assigned no unit and not
a dimensionless unit.'''
t1 = Table([["A"]], names="B")
out = StringIO()
ascii.write(t1, out, format="ipac")
t2 = ascii.read(out.getvalue(), format="ipac", guess=False)
assert t2["B"].unit is None
def test_initial_column_fill_values():
"""Regression test for #5336, #5338."""
class TestHeader(ascii.BasicHeader):
def _set_cols_from_names(self):
self.cols = [ascii.Column(name=x) for x in self.names]
# Set some initial fill values
for col in self.cols:
col.fill_values = {'--': '0'}
class Tester(ascii.Basic):
header_class = TestHeader
reader = ascii.get_reader(Reader=Tester)
assert reader.read("""# Column definition is the first uncommented line
# Default delimiter is the space character.
a b c
# Data starts after the header column definition, blank lines ignored
-- 2 3
4 5 6 """)['a'][0] is np.ma.masked
def test_latex_no_trailing_backslash():
"""
Test that latex/aastex file with no trailing backslash can be read.
"""
lines = r"""
\begin{table}
\begin{tabular}{ccc}
a & b & c \\
1 & 1.0 & c \\ % comment
3\% & 3.0 & e % comment
\end{tabular}
\end{table}
"""
dat = ascii.read(lines, format='latex')
assert dat.colnames == ['a', 'b', 'c']
assert np.all(dat['a'] == ['1', r'3\%'])
assert np.all(dat['c'] == ['c', 'e'])
def test_aastex_no_trailing_backslash():
lines = r"""
\begin{deluxetable}{ccc}
\tablehead{\colhead{a} & \colhead{b} & \colhead{c}}
\startdata
1 & 1.0 & c \\
2 & 2.0 & d \\ % comment
3\% & 3.0 & e % comment
\enddata
\end{deluxetable}
"""
dat = ascii.read(lines, format='aastex')
assert dat.colnames == ['a', 'b', 'c']
    assert np.all(dat['a'] == ['1', '2', r'3\%'])
    assert np.all(dat['c'] == ['c', 'd', 'e'])
@pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252'])
def test_read_with_encoding(tmpdir, encoding):
data = {
'commented_header': '# à b è \n 1 2 héllo',
'csv': 'à,b,è\n1,2,héllo'
}
testfile = str(tmpdir.join('test.txt'))
for fmt, content in data.items():
with open(testfile, 'w', encoding=encoding) as f:
f.write(content)
table = ascii.read(testfile, encoding=encoding)
assert table.pformat() == [' à b è ',
'--- --- -----',
' 1 2 héllo']
for guess in (True, False):
table = ascii.read(testfile, format=fmt, fast_reader=False,
encoding=encoding, guess=guess)
assert table['è'].dtype.kind == 'U'
assert table.pformat() == [' à b è ',
'--- --- -----',
' 1 2 héllo']
def test_unsupported_read_with_encoding(tmpdir):
# Fast reader is not supported, make sure it raises an exception
with pytest.raises(ascii.ParameterError):
ascii.read('data/simple3.txt', guess=False, fast_reader='force',
encoding='latin1', format='fast_csv')
def test_read_chunks_input_types():
"""
Test chunked reading for different input types: file path, file object,
and string input.
"""
fpath = 'data/test5.dat'
t1 = ascii.read(fpath, header_start=1, data_start=3, )
with open(fpath) as fd1, open(fpath) as fd2:
for fp in (fpath, fd1, fd2.read()):
t_gen = ascii.read(fp, header_start=1, data_start=3,
guess=False, format='fast_basic',
fast_reader={'chunk_size': 400, 'chunk_generator': True})
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) == 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
with open(fpath) as fd1, open(fpath) as fd2:
for fp in (fpath, fd1, fd2.read()):
# Now read the full table in chunks
t3 = ascii.read(fp, header_start=1, data_start=3,
fast_reader={'chunk_size': 300})
assert np.all(t1 == t3)
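# Minimal chunked-read sketch using the same API as above ('data/test5.dat'
# is the file exercised in this module; 'process' is a hypothetical consumer):
#
#     t_gen = ascii.read('data/test5.dat', header_start=1, data_start=3,
#                        format='fast_basic', guess=False,
#                        fast_reader={'chunk_size': 400,
#                                     'chunk_generator': True})
#     for chunk in t_gen:   # each chunk is an astropy Table
#         process(chunk)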
@pytest.mark.parametrize('masked', [True, False])
def test_read_chunks_formats(masked):
"""
Test different supported formats for chunked reading.
"""
t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked)
for i, name in enumerate(t1.colnames):
t1.rename_column(name, f'col{i + 1}')
    # TODO: commented_header does not currently work due to the special-cased
    # implementation of header parsing.
for format in 'tab', 'csv', 'no_header', 'rdb', 'basic':
out = StringIO()
ascii.write(t1, out, format=format)
t_gen = ascii.read(out.getvalue(), format=format,
fast_reader={'chunk_size': 400, 'chunk_generator': True})
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) > 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
# Now read the full table in chunks
t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400})
assert np.all(t1 == t3)
def test_read_chunks_chunk_size_too_small():
fpath = 'data/test5.dat'
with pytest.raises(ValueError) as err:
ascii.read(fpath, header_start=1, data_start=3,
fast_reader={'chunk_size': 10})
assert 'no newline found in chunk (chunk_size too small?)' in str(err.value)
def test_read_chunks_table_changes():
"""Column changes type or size between chunks. This also tests the case with
no final newline.
"""
col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50
table = '\n'.join(col)
t1 = ascii.read(table, guess=False)
t2 = ascii.read(table, fast_reader={'chunk_size': 100})
# This also confirms that the dtypes are exactly the same, i.e.
# the string itemsizes are the same.
assert np.all(t1 == t2)
def test_read_non_ascii():
"""Test that pure-Python reader is used in case the file contains non-ASCII characters
in it.
"""
table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv')
assert np.all(table['col1'] == ['\u2119', '1'])
assert np.all(table['col2'] == ['\u01b4', '2'])
@pytest.mark.parametrize('enable', [True, False, 'force'])
def test_kwargs_dict_guess(enable):
"""Test that fast_reader dictionary is preserved through guessing sequence.
"""
# Fails for enable=(True, 'force') - #5578
ascii.read('a\tb\n 1\t2\n3\t 4.0', fast_reader=dict(enable=enable))
assert get_read_trace()[-1]['kwargs']['Reader'] is (
ascii.Tab if (enable is False) else ascii.FastTab)
for k in get_read_trace():
if not k.get('status', 'Disabled').startswith('Disabled'):
assert k.get('kwargs').get('fast_reader').get('enable') is enable
def _get_lines(rdb):
lines = ['a a_2 a_1 a a']
if rdb:
lines += ['N N N N N']
lines += ['1 2 3 4 5', '10 20 30 40 50']
if rdb:
lines = ['\t'.join(line.split()) for line in lines]
return lines
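# For reference, _get_lines(False) yields
#     ['a a_2 a_1 a a', '1 2 3 4 5', '10 20 30 40 50']
# while _get_lines(True) also inserts the RDB type row 'N N N N N' and joins
# all fields with tabs, as the RDB format requires.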
@pytest.mark.parametrize('rdb', [False, True])
@pytest.mark.parametrize('fast_reader', [False, 'force'])
def test_deduplicate_names_basic(rdb, fast_reader):
"""Test that duplicate column names are successfully de-duplicated for the
basic format. Skip the case of rdb=True and fast_reader='force' when selecting
include_names, since that fails and is tested below.
"""
lines = _get_lines(rdb)
dat = ascii.read(lines, fast_reader=fast_reader)
assert dat.colnames == ['a', 'a_2', 'a_1', 'a_3', 'a_4']
assert len(dat) == 2
dat = ascii.read(lines, fast_reader=fast_reader, include_names=['a', 'a_2', 'a_3'])
assert len(dat) == 2
assert dat.colnames == ['a', 'a_2', 'a_3']
assert np.all(dat['a'] == [1, 10])
assert np.all(dat['a_2'] == [2, 20])
assert np.all(dat['a_3'] == [4, 40])
dat = ascii.read(lines, fast_reader=fast_reader,
names=['b1', 'b2', 'b3', 'b4', 'b5'],
include_names=['b1', 'b2', 'a_4', 'b4'])
assert len(dat) == 2
assert dat.colnames == ['b1', 'b2', 'b4']
assert np.all(dat['b1'] == [1, 10])
assert np.all(dat['b2'] == [2, 20])
assert np.all(dat['b4'] == [4, 40])
dat = ascii.read(lines, fast_reader=fast_reader,
names=['b1', 'b2', 'b3', 'b4', 'b5'],
exclude_names=['b3', 'b5', 'a_3', 'a_4'])
assert len(dat) == 2
assert dat.colnames == ['b1', 'b2', 'b4']
assert np.all(dat['b1'] == [1, 10])
assert np.all(dat['b2'] == [2, 20])
assert np.all(dat['b4'] == [4, 40])
def test_include_names_rdb_fast():
"""Test that selecting column names via `include_names` works for the RDB format
with fast reader. This is testing the fix for a bug identified in #9939.
"""
lines = _get_lines(True)
lines[0] = 'a\ta_2\ta_1\ta_3\ta_4'
dat = ascii.read(lines, fast_reader='force', include_names=['a', 'a_2', 'a_3'])
assert len(dat) == 2
assert dat['a'].dtype == int
assert dat['a_2'].dtype == int
@pytest.mark.parametrize('fast_reader', [False, 'force'])
def test_deduplicate_names_with_types(fast_reader):
"""Test that on selecting column names via `include_names` in the RDB format with
different types and duplicate column names type assignment is correctly preserved.
"""
lines = _get_lines(True)
lines[1] = 'N\tN\tN\tS\tS'
dat = ascii.read(lines, fast_reader=fast_reader, include_names=['a', 'a_2', 'a_3'])
assert len(dat) == 2
assert dat['a_2'].dtype.kind == 'i'
assert dat['a_3'].dtype.kind == 'U'
dat = ascii.read(lines, fast_reader=fast_reader, names=['b1', 'b2', 'b3', 'b4', 'b5'],
include_names=['a1', 'a_2', 'b1', 'b2', 'b4'])
assert len(dat) == 2
assert dat.colnames == ['b1', 'b2', 'b4']
assert dat['b2'].dtype.kind == 'i'
assert dat['b4'].dtype.kind == 'U'
@pytest.mark.parametrize('rdb', [False, True])
@pytest.mark.parametrize('fast_reader', [False, 'force'])
def test_set_invalid_names(rdb, fast_reader):
"""Test exceptions for invalid (duplicate or `None`) names specified via argument."""
lines = _get_lines(rdb)
if rdb:
fmt = 'rdb'
else:
fmt = 'basic'
with pytest.raises(ValueError) as err:
ascii.read(lines, fast_reader=fast_reader, format=fmt, guess=rdb,
names=['b1', 'b2', 'b1', 'b4', 'b5'])
assert 'Duplicate column names' in str(err.value)
with pytest.raises(TypeError) as err:
ascii.read(lines, fast_reader=fast_reader, format=fmt, guess=rdb,
names=['b1', 'b2', 'b1', None, None])
assert 'Cannot have None for column name' in str(err.value)
def test_read_masked_bool():
txt = """\
col0 col1
1 1
0 2
True 3
"" 4
False 5
"""
# Reading without converters returns col0 as a string
dat = ascii.read(txt, format='basic')
col = dat['col0']
assert isinstance(col, MaskedColumn)
assert col.dtype.kind == 'U'
assert col[0] == "1"
# Force col0 to be read as bool
converters = {'col0': [convert_numpy(bool)]}
dat = ascii.read(txt, format='basic', converters=converters)
col = dat['col0']
assert isinstance(col, MaskedColumn)
assert col.dtype.kind == 'b'
assert np.all(col.mask == [False, False, False, True, False])
assert np.all(col == [True, False, True, False, False])
def test_read_converters_wildcard():
'''Test converters where the column name is specified with
a wildcard.
'''
converters = {'F*': [ascii.convert_numpy(np.float32)]}
t = ascii.read(['Fabc Iabc', '1 2'], converters=converters)
assert np.issubdtype(t['Fabc'].dtype, np.float32)
assert not np.issubdtype(t['Iabc'].dtype, np.float32)
def test_read_converters_simplified():
"""Test providing io.ascii read converters as type or dtypes instead of
convert_numpy(type) outputs"""
t = Table()
t['a'] = [1, 2]
t['b'] = [3.5, 4]
t['c'] = ['True', 'False']
    t['d'] = ['true', 'false']  # Looks kind of like boolean but is actually a string
t['e'] = [5, 6]
out = StringIO()
t.write(out, format='ascii.basic')
converters = {'a': str, 'e': np.float32}
t2 = Table.read(out.getvalue(), format='ascii.basic', converters=converters)
assert t2.pformat(show_dtype=True) == [
' a b c d e ',
'str1 float64 str5 str5 float32',
'---- ------- ----- ----- -------',
' 1 3.5 True true 5.0',
' 2 4.0 False false 6.0'
]
converters = {'a': float, '*': [np.int64, float, bool, str]}
t2 = Table.read(out.getvalue(), format='ascii.basic', converters=converters)
assert t2.pformat_all(show_dtype=True) == [
' a b c d e ',
'float64 float64 bool str5 int64',
'------- ------- ----- ----- -----',
' 1.0 3.5 True true 5',
' 2.0 4.0 False false 6'
]
# Test failures
for converters in ({'*': [int, 1, bool, str]}, # bad converter type
# Tuple converter where 2nd element is not a subclass of NoType
{'a': [(int, int)]},
# Tuple converter with 3 elements not 2
{'a': [(int, int, int)]}):
with pytest.raises(ValueError, match='Error: invalid format for converters'):
t2 = Table.read(out.getvalue(), format='ascii.basic',
converters=converters, guess=False)
|
50c86a33f89156ca94e633de15ba99207d6226e5a8845385b828a68e74113a99 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import copy
from contextlib import nullcontext
from io import StringIO
from itertools import chain
import pathlib
import pytest
import numpy as np
from astropy.io import ascii
from astropy import table
from astropy.table.table_helpers import simple_table
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.compat.optional_deps import HAS_BS4
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy import units as u
from .common import setup_function, teardown_function # noqa
if HAS_BS4:
from bs4 import BeautifulSoup, FeatureNotFound # noqa
test_defs = [
dict(kwargs=dict(),
out="""\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(delimiter=None),
out="""\
ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(formats={'XCENTER': '%12.1f',
'YCENTER': '{0:.1f}'},
include_names=['XCENTER', 'YCENTER'],
strip_whitespace=False),
out="""\
XCENTER YCENTER
" 138.5" 256.4
" 18.1" 280.2
"""
),
dict(kwargs=dict(Writer=ascii.Rdb, exclude_names=['CHI']),
out="""\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tPIER\tPERROR
N\tN\tN\tN\tN\tN\tN\tN\tN\tS
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t0\tNo_error
"""
),
dict(kwargs=dict(Writer=ascii.Tab),
out="""\
ID\tXCENTER\tYCENTER\tMAG\tMERR\tMSKY\tNITER\tSHARPNESS\tCHI\tPIER\tPERROR
14\t138.538\t256.405\t15.461\t0.003\t34.85955\t4\t-0.032\t0.802\t0\tNo_error
18\t18.114\t280.170\t22.329\t0.206\t30.12784\t4\t-2.544\t1.104\t0\tNo_error
"""
),
dict(kwargs=dict(Writer=ascii.Csv),
out="""\
ID,XCENTER,YCENTER,MAG,MERR,MSKY,NITER,SHARPNESS,CHI,PIER,PERROR
14,138.538,256.405,15.461,0.003,34.85955,4,-0.032,0.802,0,No_error
18,18.114,280.170,22.329,0.206,30.12784,4,-2.544,1.104,0,No_error
"""
),
dict(kwargs=dict(Writer=ascii.NoHeader),
out="""\
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.CommentedHeader),
out="""\
# ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.CommentedHeader, comment='&'),
out="""\
&ID XCENTER YCENTER MAG MERR MSKY NITER SHARPNESS CHI PIER PERROR
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
"""
),
dict(kwargs=dict(Writer=ascii.Latex),
out="""\
\\begin{table}
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
\\end{table}
"""
),
dict(kwargs=dict(Writer=ascii.AASTex),
out="""\
\\begin{deluxetable}{ccccccccccc}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{pixels} & \\colhead{pixels} & \\colhead{magnitudes} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable}
""" # noqa
),
dict(
kwargs=dict(Writer=ascii.AASTex, caption='Mag values \\label{tab1}', latexdict={
'units': {'MAG': '[mag]', 'XCENTER': '[pixel]'}, 'tabletype': 'deluxetable*',
'tablealign': 'htpb'}),
out="""\
\\begin{deluxetable*}{ccccccccccc}[htpb]
\\tablecaption{Mag values \\label{tab1}}
\\tablehead{\\colhead{ID} & \\colhead{XCENTER} & \\colhead{YCENTER} & \\colhead{MAG} & \\colhead{MERR} & \\colhead{MSKY} & \\colhead{NITER} & \\colhead{SHARPNESS} & \\colhead{CHI} & \\colhead{PIER} & \\colhead{PERROR}\\\\ \\colhead{ } & \\colhead{[pixel]} & \\colhead{pixels} & \\colhead{[mag]} & \\colhead{magnitudes} & \\colhead{counts} & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{ } & \\colhead{perrors}}
\\startdata
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error
\\enddata
\\end{deluxetable*}
""" # noqa
),
dict(
kwargs=dict(Writer=ascii.Latex, caption='Mag values \\label{tab1}',
latexdict={'preamble': '\\begin{center}', 'tablefoot': '\\end{center}',
'data_end': ['\\hline', '\\hline'],
'units':{'MAG': '[mag]', 'XCENTER': '[pixel]'},
'tabletype': 'table*',
'tablealign': 'h'},
col_align='|lcccccccccc|'),
out="""\
\\begin{table*}[h]
\\begin{center}
\\caption{Mag values \\label{tab1}}
\\begin{tabular}{|lcccccccccc|}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& [pixel] & pixels & [mag] & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\hline
\\hline
\\end{tabular}
\\end{center}
\\end{table*}
"""
),
dict(kwargs=dict(Writer=ascii.Latex, latexdict=ascii.latexdicts['template']),
out="""\
\\begin{tabletype}[tablealign]
preamble
\\caption{caption}
\\begin{tabular}{col_align}
header_start
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
header_end
data_start
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
data_end
\\end{tabular}
tablefoot
\\end{tabletype}
"""
),
dict(kwargs=dict(Writer=ascii.Latex, latexdict={'tabletype': None}),
out="""\
\\begin{tabular}{ccccccccccc}
ID & XCENTER & YCENTER & MAG & MERR & MSKY & NITER & SHARPNESS & CHI & PIER & PERROR \\\\
& pixels & pixels & magnitudes & magnitudes & counts & & & & & perrors \\\\
14 & 138.538 & 256.405 & 15.461 & 0.003 & 34.85955 & 4 & -0.032 & 0.802 & 0 & No_error \\\\
18 & 18.114 & 280.170 & 22.329 & 0.206 & 30.12784 & 4 & -2.544 & 1.104 & 0 & No_error \\\\
\\end{tabular}
"""
),
dict(kwargs=dict(Writer=ascii.HTML, htmldict={'css': 'table,th,td{border:1px solid black;'}),
out="""\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
<style>
table,th,td{border:1px solid black; </style>
</head>
<body>
<table>
<thead>
<tr>
<th>ID</th>
<th>XCENTER</th>
<th>YCENTER</th>
<th>MAG</th>
<th>MERR</th>
<th>MSKY</th>
<th>NITER</th>
<th>SHARPNESS</th>
<th>CHI</th>
<th>PIER</th>
<th>PERROR</th>
</tr>
</thead>
<tr>
<td>14</td>
<td>138.538</td>
<td>256.405</td>
<td>15.461</td>
<td>0.003</td>
<td>34.85955</td>
<td>4</td>
<td>-0.032</td>
<td>0.802</td>
<td>0</td>
<td>No_error</td>
</tr>
<tr>
<td>18</td>
<td>18.114</td>
<td>280.170</td>
<td>22.329</td>
<td>0.206</td>
<td>30.12784</td>
<td>4</td>
<td>-2.544</td>
<td>1.104</td>
<td>0</td>
<td>No_error</td>
</tr>
</table>
</body>
</html>
"""
),
dict(kwargs=dict(Writer=ascii.Ipac),
out="""\
\\MERGERAD='INDEF'
\\IRAF='NOAO/IRAFV2.10EXPORT'
\\USER=''
\\HOST='tucana'
\\DATE='05-28-93'
\\TIME='14:46:13'
\\PACKAGE='daophot'
\\TASK='nstar'
\\IMAGE='test'
\\GRPFILE='test.psg.1'
\\PSFIMAGE='test.psf.1'
\\NSTARFILE='test.nst.1'
\\REJFILE='"hello world"'
\\SCALE='1.'
\\DATAMIN='50.'
\\DATAMAX='24500.'
\\GAIN='1.'
\\READNOISE='0.'
\\OTIME='00:07:59.0'
\\XAIRMASS='1.238106'
\\IFILTER='V'
\\RECENTER='yes'
\\FITSKY='no'
\\PSFMAG='16.594'
\\PSFRAD='5.'
\\FITRAD='3.'
\\MAXITER='50'
\\MAXGROUP='60'
\\FLATERROR='0.75'
\\PROFERROR='5.'
\\CLIPEXP='6'
\\CLIPRANGE='2.5'
| ID| XCENTER| YCENTER| MAG| MERR| MSKY| NITER| SHARPNESS| CHI| PIER| PERROR|
| long| double| double| double| double| double| long| double| double| long| char|
| | pixels| pixels| magnitudes| magnitudes| counts| | | | | perrors|
| null| null| null| null| null| null| null| null| null| null| null|
14 138.538 256.405 15.461 0.003 34.85955 4 -0.032 0.802 0 No_error
18 18.114 280.170 22.329 0.206 30.12784 4 -2.544 1.104 0 No_error
""" # noqa
),
]
test_defs_no_data = [
dict(kwargs=dict(Writer=ascii.Ipac),
out="""\
\\ This is an example of a valid comment.
\\ The 2nd data line is used to verify the exact column parsing
\\ (unclear if this is a valid for the IPAC format)
\\catalog='sao'
\\date='Wed Sp 20 09:48:36 1995'
\\mykeyword='Another way for defining keyvalue string'
| ra| dec| sai| v2|sptype|
|double|double|long|double| char|
| unit| unit|unit| unit| ergs|
| null| null|null| null| null|
"""
),
]
tab_to_fill = ['a b c', '1 2 3', '1 1 3']
test_defs_fill_value = [
dict(kwargs=dict(),
out="""\
a b c
1 2 3
1 1 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w')),
out="""\
a b c
w 2 3
w w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w', 'b')),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w'),
fill_include_names=['b']),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w'),
fill_exclude_names=['a']),
out="""\
a b c
1 2 3
1 w 3
"""
),
dict(kwargs=dict(fill_values=('1', 'w'),
fill_include_names=['a'],
fill_exclude_names=['a', 'b']),
out="""\
a b c
1 2 3
1 1 3
"""
),
dict(kwargs=dict(fill_values=[('1', 'w')],
formats={'a': '%4.2f'}),
out="""\
a b c
1.00 2 3
1.00 w 3
"""
),
]
test_def_masked_fill_value = [
dict(kwargs=dict(),
out="""\
a b c
"" 2 3
1 1 ""
"""
),
dict(kwargs=dict(fill_values=[('1', 'w'), (ascii.masked, 'X')]),
out="""\
a b c
X 2 3
w w X
"""
),
dict(kwargs=dict(fill_values=[('1', 'w'), (ascii.masked, 'XXX')],
formats={'a': '%4.1f'}),
out="""\
a b c
XXX 2 3
1.0 w XXX
"""
),
dict(kwargs=dict(Writer=ascii.Csv),
out="""\
a,b,c
,2,3
1,1,
"""
),
]
@pytest.fixture
def home_is_tmpdir(monkeypatch, tmpdir):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv('HOME', str(tmpdir))
# For Windows
monkeypatch.setenv('USERPROFILE', str(tmpdir))
def check_write_table(test_def, table, fast_writer, out=None):
if out is None:
out = StringIO()
try:
ascii.write(table, out, fast_writer=fast_writer, **test_def['kwargs'])
except ValueError as e: # if format doesn't have a fast writer, ignore
        if 'not in the list of formats with fast writers' not in str(e):
            raise
return
if isinstance(out, StringIO):
# Output went to a buffer
actual = out.getvalue()
else:
# Output went to a file
if str(out).startswith('~'):
# Ensure a file hasn't been accidentally written to a literal tilde
# path
assert not os.path.exists(out)
out = os.path.expanduser(out)
assert os.path.exists(out)
with open(out) as f:
actual = f.read()
os.remove(out)
print(f"Expected:\n{test_def['out']}")
print(f'Actual:\n{actual}')
assert [x.strip() for x in actual.strip().splitlines()] == [
x.strip() for x in test_def['out'].strip().splitlines()]
def check_write_table_via_table(test_def, table, fast_writer, out=None):
if out is None:
out = StringIO()
test_def = copy.deepcopy(test_def)
if 'Writer' in test_def['kwargs']:
format = f"ascii.{test_def['kwargs']['Writer']._format_name}"
del test_def['kwargs']['Writer']
else:
format = 'ascii'
try:
table.write(out, format=format, fast_writer=fast_writer, **test_def['kwargs'])
except ValueError as e: # if format doesn't have a fast writer, ignore
        if 'not in the list of formats with fast writers' not in str(e):
            raise
return
if isinstance(out, StringIO):
# Output went to a buffer
actual = out.getvalue()
else:
# Output went to a file
if str(out).startswith('~'):
# Ensure a file hasn't been accidentally written to a literal tilde
# path
assert not os.path.exists(out)
out = os.path.expanduser(out)
assert os.path.exists(out)
with open(out) as f:
actual = f.read()
os.remove(out)
print(f"Expected:\n{test_def['out']}")
print(f'Actual:\n{actual}')
assert [x.strip() for x in actual.strip().splitlines()] == [
x.strip() for x in test_def['out'].strip().splitlines()]
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize('path_format',
['buffer', 'plain', 'tilde-str', 'tilde-pathlib'])
def test_write_table(
fast_writer, tmpdir, home_is_tmpdir, path_format):
table = ascii.get_reader(Reader=ascii.Daophot)
data = table.read('data/daophot.dat')
if path_format == 'buffer':
out_name = None
elif path_format == 'plain':
out_name = os.path.join(tmpdir, 'table')
elif path_format == 'tilde-str':
out_name = os.path.join('~', 'table')
else:
out_name = pathlib.Path('~', 'table')
for test_def in test_defs:
check_write_table(test_def, data, fast_writer, out=out_name)
check_write_table_via_table(
test_def, data, fast_writer, out=out_name)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_values(fast_writer):
data = ascii.read(tab_to_fill)
for test_def in test_defs_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_fill_masked_different(fast_writer):
'''see discussion in #2255'''
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data['a'].mask = [True, False]
data['c'].mask = [False, True]
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_no_data_ipac(fast_writer):
"""Write an IPAC table that contains no data."""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
for test_def in test_defs_no_data:
check_write_table(test_def, data, fast_writer)
check_write_table_via_table(test_def, data, fast_writer)
def test_write_invalid_toplevel_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored in the top-level metadata and therefore should
raise a warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
data.meta['blah'] = 'extra'
out = StringIO()
with pytest.warns(AstropyWarning, match=r'.*were not written.*') as warn:
data.write(out, format='ascii.ipac')
assert len(warn) == 1
def test_write_invalid_keyword_meta_ipac():
"""Write an IPAC table that contains no data but has invalid (incorrectly
specified) metadata stored appropriately in the ``keywords`` section
of the metadata but with invalid format and therefore should raise a
warning, and check that the warning has been raised"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
data.meta['keywords']['blah'] = 'invalid'
out = StringIO()
with pytest.warns(AstropyWarning, match=r'.*has been skipped.*') as warn:
data.write(out, format='ascii.ipac')
assert len(warn) == 1
def test_write_valid_meta_ipac():
"""Write an IPAC table that contains no data and has *correctly* specified
metadata. No warnings should be issued"""
table = ascii.get_reader(Reader=ascii.Ipac)
data = table.read('data/no_data_ipac.dat')
data.meta['keywords']['blah'] = {'value': 'invalid'}
out = StringIO()
data.write(out, format='ascii.ipac')
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_comments(fast_writer):
"""Write comments in output originally read by io.ascii."""
data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1,2,3')
out = StringIO()
ascii.write(data, out, format='basic', fast_writer=fast_writer)
expected = ['# c1', '# c2', '# c3', 'a b c', '1 2 3']
assert out.getvalue().splitlines() == expected
# header comes before comments for commented-header
out = StringIO()
ascii.write(data, out, format='commented_header', fast_writer=fast_writer)
expected = ['# a b c', '# c1', '# c2', '# c3', '1 2 3']
assert out.getvalue().splitlines() == expected
# setting comment=False should disable comment writing
out = StringIO()
ascii.write(data, out, format='basic', comment=False, fast_writer=fast_writer)
expected = ['a b c', '1 2 3']
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize("fmt", ['%0.1f', '.1f', '0.1f', '{0:0.1f}'])
def test_write_format(fast_writer, fmt):
"""Check different formats for a column."""
data = ascii.read('#c1\n # c2\t\na,b,c\n# c3\n1.11,2.22,3.33')
out = StringIO()
expected = ['# c1', '# c2', '# c3', 'a b c', '1.1 2.22 3.33']
data['a'].format = fmt
ascii.write(data, out, format='basic', fast_writer=fast_writer)
assert out.getvalue().splitlines() == expected
@pytest.mark.parametrize("fast_writer", [True, False])
def test_strip_names(fast_writer):
"""Names should be stripped of whitespace by default."""
data = table.Table([[1], [2], [3]], names=(' A', 'B ', ' C '))
out = StringIO()
ascii.write(data, out, format='csv', fast_writer=fast_writer)
assert out.getvalue().splitlines()[0] == 'A,B,C'
def test_latex_units():
"""
Check to make sure that Latex and AASTex writers attempt to fall
back on the **unit** attribute of **Column** if the supplied
**latexdict** does not specify units.
"""
t = table.Table([table.Column(name='date', data=['a', 'b']),
table.Column(name='NUV exp.time', data=[1, 2])])
latexdict = copy.deepcopy(ascii.latexdicts['AA'])
latexdict['units'] = {'NUV exp.time': 's'}
out = StringIO()
expected = '''\
\\begin{table}{cc}
\\tablehead{\\colhead{date} & \\colhead{NUV exp.time}\\\\ \\colhead{ } & \\colhead{s}}
\\startdata
a & 1 \\\\
b & 2
\\enddata
\\end{table}
'''.replace('\n', os.linesep)
ascii.write(t, out, format='aastex', latexdict=latexdict)
assert out.getvalue() == expected
# use unit attribute instead
t['NUV exp.time'].unit = u.s
t['date'].unit = u.yr
out = StringIO()
ascii.write(t, out, format='aastex', latexdict=ascii.latexdicts['AA'])
assert out.getvalue() == expected.replace(
'colhead{s}', r'colhead{$\mathrm{s}$}').replace(
'colhead{ }', r'colhead{$\mathrm{yr}$}')
@pytest.mark.parametrize("fast_writer", [True, False])
def test_commented_header_comments(fast_writer):
"""
Test the fix for #3562 with confusing exception using comment=False
for the commented_header writer.
"""
t = table.Table([[1, 2]])
with pytest.raises(ValueError) as err:
out = StringIO()
ascii.write(t, out, format='commented_header', comment=False,
fast_writer=fast_writer)
assert "for the commented_header writer you must supply a string" in str(err.value)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_byte_string_output(fast_writer):
"""
Test the fix for #4350 where byte strings were output with a
leading `b` on Py3.
"""
t = table.Table([['Hello', 'World']], dtype=['S10'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0', 'Hello', 'World']
@pytest.mark.parametrize('names, include_names, exclude_names, formats, issues_warning', [
(['x', 'y'], ['x', 'y'], ['x'], {'x': '%d', 'y': '%f'}, True),
(['x', 'y'], ['x', 'y'], ['y'], {'x': '%d'}, False),
(['x', 'y'], ['x', 'y'], [], {'p': '%d', 'q': '%f'}, True),
(['x', 'y'], ['x', 'y'], [], {'z': '%f'}, True),
(['x', 'y'], ['x', 'y'], [], {'x': '%d'}, False),
(['x', 'y'], ['x', 'y'], [], {'p': '%d', 'y': '%f'}, True),
(['x', 'y'], ['x', 'y'], [], {}, False)
])
def test_names_with_formats(names, include_names, exclude_names, formats, issues_warning):
"""Test for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(t, out, names=names, include_names=include_names,
exclude_names=exclude_names, formats=formats)
if issues_warning:
assert len(warn) == 1
@pytest.mark.parametrize('formats, issues_warning', [
({'p': '%d', 'y': '%f'}, True),
({'x': '%d', 'y': '%f'}, True),
({'z': '%f'}, True),
({}, False)
])
def test_columns_names_with_formats(formats, issues_warning):
"""Test the fix for #4508."""
t = table.Table([[1, 2, 3], [4.1, 5.2, 6.3]])
out = StringIO()
if issues_warning:
ctx = pytest.warns(AstropyWarning)
else:
ctx = nullcontext()
with ctx as warn:
ascii.write(t, out, formats=formats)
if issues_warning:
assert len(warn) == 1
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_quoted_empty_field(fast_writer):
"""
    Test that empty fields are quoted in the output so that they survive a
    round trip, for both the default space delimiter and an explicit comma.
"""
t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0 col1', 'Hello ""', '"" ""']
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer, delimiter=',')
assert out.getvalue().splitlines() == ['col0,col1', 'Hello,', ',']
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_empty_table(fast_writer):
"""Test writing empty table #8275."""
t = table.Table([[]], dtype=['S2'])
out = StringIO()
ascii.write(t, out, fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['col0']
@pytest.mark.parametrize("format", ['ascii', 'csv', 'html', 'latex',
'ascii.fixed_width', 'html'])
@pytest.mark.parametrize("fast_writer", [True, False])
@pytest.mark.parametrize('path_format', ['plain', 'tilde-str', 'tilde-pathlib'])
def test_write_overwrite_ascii(format, fast_writer, tmpdir, home_is_tmpdir,
path_format):
"""Test overwrite argument for various ASCII writers"""
true_filename = tmpdir.join("table-tmp.dat").strpath
if path_format == 'plain':
filename = true_filename
elif path_format == 'tilde-str':
filename = os.path.join('~', 'table-tmp.dat')
else:
filename = pathlib.Path('~', 'table-tmp.dat')
with open(true_filename, 'w'):
# create empty file
pass
t = table.Table([['Hello', ''], ['', '']], dtype=['S10', 'S10'])
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format=format, fast_writer=fast_writer)
t.write(filename, overwrite=True, format=format,
fast_writer=fast_writer)
# If the output is a file object, overwrite is ignored
with open(true_filename, 'w') as fp:
t.write(fp, overwrite=False, format=format,
fast_writer=fast_writer)
t.write(fp, overwrite=True, format=format,
fast_writer=fast_writer)
if 'tilde' in path_format:
# Ensure no files have been accidentally written to a literal tilde path
assert not os.path.exists(filename)
fmt_name_classes = list(chain(ascii.core.FAST_CLASSES.items(),
ascii.core.FORMAT_CLASSES.items()))
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_roundtrip_masked(fmt_name_class):
"""
Round trip a simple masked table through every writable format and confirm
that reading back gives the same result.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, '_io_registry_can_write', True):
return
# Skip tests for fixed_width or HTML without bs4
if ((fmt_name == 'html' and not HAS_BS4)
or fmt_name == 'fixed_width'):
return
if 'qdp' in fmt_name:
# QDP tables are for numeric values only
t = simple_table(masked=True, kinds=['f', 'i'])
else:
t = simple_table(masked=True)
out = StringIO()
fast = fmt_name in ascii.core.FAST_CLASSES
try:
ascii.write(t, out, format=fmt_name, fast_writer=fast)
except ImportError: # Some failed dependency, skip test
return
# No-header formats need to be told the column names
kwargs = {'names': t.colnames} if 'no_header' in fmt_name else {}
if 'qdp' in fmt_name:
kwargs.update({'table_id': 0, 'names': t.colnames})
t2 = ascii.read(out.getvalue(), format=fmt_name, fast_reader=fast, guess=False, **kwargs)
assert t.colnames == t2.colnames
for col, col2 in zip(t.itercols(), t2.itercols()):
assert col.dtype.kind == col2.dtype.kind
assert np.all(col == col2)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_newlines(fast_writer, tmpdir):
# Regression test for https://github.com/astropy/astropy/issues/5126
# On windows, when writing to a filename (not e.g. StringIO), newlines were
# \r\r\n instead of \r\n.
filename = tmpdir.join('test').strpath
t = table.Table([['a', 'b', 'c']], names=['col'])
ascii.write(t, filename, fast_writer=fast_writer)
with open(filename, newline='') as f:
content = f.read()
assert content == os.linesep.join(['col', 'a', 'b', 'c']) + os.linesep
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_csv_with_comments(fast_writer):
"""
Test fix for #7357 where writing a Table with comments to 'csv' fails with
a cryptic message. The comments are dropped by default, but when comment='#'
is supplied they are still written.
"""
out = StringIO()
t = table.Table([[1, 2], [3, 4]], names=['a', 'b'])
t.meta['comments'] = ['hello']
ascii.write(t, out, format='csv', fast_writer=fast_writer)
assert out.getvalue().splitlines() == ['a,b', '1,3', '2,4']
out = StringIO()
ascii.write(t, out, format='csv', fast_writer=fast_writer, comment='#')
assert out.getvalue().splitlines() == ['#hello', 'a,b', '1,3', '2,4']
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_formatted_mixin(fast_writer):
"""
Test fix for #8680 where writing a QTable with a quantity mixin generates
an exception if a format is specified.
"""
out = StringIO()
t = table.QTable([[1, 2], [1, 2] * u.m], names=['a', 'b'])
ascii.write(t, out, fast_writer=fast_writer, formats={'a': '%02d', 'b': '%.2f'})
assert out.getvalue().splitlines() == ['a b',
'01 1.00',
'02 2.00']
def test_validate_write_kwargs():
out = StringIO()
t = table.QTable([[1, 2], [1, 2]], names=['a', 'b'])
with pytest.raises(TypeError, match=r"write\(\) argument 'fast_writer' must be a "
r"\(<class 'bool'>, <class 'str'>\) object, "
r"got <class 'int'> instead"):
ascii.write(t, out, fast_writer=12)
@pytest.mark.parametrize("fmt_name_class", fmt_name_classes)
def test_multidim_column_error(fmt_name_class):
"""
Test that trying to write a multidim column fails in every format except
ECSV.
"""
fmt_name, fmt_cls = fmt_name_class
if not getattr(fmt_cls, '_io_registry_can_write', True):
return
# Skip tests for ecsv or HTML without bs4. See the comment in latex.py
# Latex class where max_ndim = None is defined regarding latex and aastex.
if ((fmt_name == 'html' and not HAS_BS4)
or fmt_name in ('ecsv', 'latex', 'aastex')):
return
out = StringIO()
t = table.Table()
t['a'] = np.arange(16).reshape(2, 2, 2, 2)
t['b'] = [1, 2]
fast = fmt_name in ascii.core.FAST_CLASSES
with pytest.raises(ValueError, match=r'column\(s\) with dimension'):
ascii.write(t, out, format=fmt_name, fast_writer=fast)
@pytest.mark.parametrize("fast_writer", [True, False])
def test_write_as_columns(fast_writer):
"""
Test that writing a set of columns also roundtrips (as long as the
table does not have metadata, etc.)
"""
# Use masked in case that makes it more difficult.
data = ascii.read(tab_to_fill)
data = table.Table(data, masked=True)
data['a'].mask = [True, False]
data['c'].mask = [False, True]
data = list(data.columns.values())
for test_def in test_def_masked_fill_value:
check_write_table(test_def, data, fast_writer)
|
5c6eec5a823f06867621373625ed62d7c5e5c7c12409fbcf78c4e45aba0a830c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file connects ASDF to the astropy.table.Table class
import warnings
from astropy.io import registry as io_registry
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
from astropy.table import Table
from astropy.utils.compat import optional_deps
def read_table(filename, data_key=None, find_table=None, **kwargs):
"""
Read a `~astropy.table.Table` object from an ASDF file
This requires `asdf <https://pypi.org/project/asdf/>`_ to be installed.
By default, this function will look for a Table object with the key of
``data`` in the top-level ASDF tree. The parameters ``data_key`` and
    ``find_table`` can be used to override the default behavior.
This function is registered as the Table reader for ASDF files with the
unified I/O interface.
Parameters
----------
    filename : str or :class:`py.path:local`
Name of the file to be read
data_key : str
Optional top-level key to use for finding the Table in the tree. If not
provided, uses ``data`` by default. Use of this parameter is not
compatible with ``find_table``.
find_table : function
Optional function to be used for locating the Table in the tree. The
function takes a single parameter, which is a dictionary representing
the top of the ASDF tree. The function must return a
`~astropy.table.Table` instance.
Returns
-------
table : `~astropy.table.Table`
`~astropy.table.Table` instance
"""
warnings.warn(create_asdf_deprecation_warning())
try:
import asdf
except ImportError:
raise Exception(
"The asdf module is required to read and write ASDF files")
if data_key and find_table:
raise ValueError("Options 'data_key' and 'find_table' are not compatible")
with asdf.open(filename, **kwargs) as af:
if find_table:
return find_table(af.tree)
else:
return af[data_key or 'data']
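# Illustrative usage sketch (not part of the module; the file name and keys
# below are hypothetical). Reading goes through the unified I/O interface
# registered at the bottom of this file:
#
#     from astropy.table import Table
#     t = Table.read('observations.asdf')                    # default 'data' key
#     t = Table.read('observations.asdf', data_key='obs')    # custom top-level key
#     t = Table.read('observations.asdf',
#                    find_table=lambda tree: tree['products']['catalog'])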
def write_table(table, filename, data_key=None, make_tree=None, **kwargs):
"""
Write a `~astropy.table.Table` object to an ASDF file.
This requires `asdf <https://pypi.org/project/asdf/>`_ to be installed.
By default, this function will write a Table object in the top-level ASDF
tree using the key of ``data``. The parameters ``data_key`` and
``make_tree`` can be used to override the default behavior.
This function is registered as the Table writer for ASDF files with the
unified I/O interface.
Parameters
----------
table : `~astropy.table.Table`
`~astropy.table.Table` instance to be written
filename : str or :class:`py.path:local`
Name of the new ASDF file to be created
data_key : str
Optional top-level key in the ASDF tree to use when writing the Table.
If not provided, uses ``data`` by default. Use of this parameter is not
compatible with ``make_tree``.
make_tree : function
Optional function to be used for creating the ASDF tree. The function
takes a single parameter, which is the `~astropy.table.Table` instance
to be written. The function must return a `dict` representing the ASDF
tree to be created.
"""
warnings.warn(create_asdf_deprecation_warning())
try:
import asdf
except ImportError:
raise Exception(
"The asdf module is required to read and write ASDF files")
if data_key and make_tree:
raise ValueError("Options 'data_key' and 'make_tree' are not compatible")
if make_tree:
tree = make_tree(table)
else:
        tree = {data_key or 'data': table}
with asdf.AsdfFile(tree) as af:
af.write_to(filename, **kwargs)
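# Illustrative usage sketch (hypothetical names, mirroring read_table above):
#
#     t.write('observations.asdf')                  # stored under the 'data' key
#     t.write('observations.asdf', data_key='obs')  # custom top-level key
#     t.write('observations.asdf',
#             make_tree=lambda tab: {'products': {'catalog': tab}})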
def asdf_identify(origin, filepath, fileobj, *args, **kwargs):
try:
import asdf
except ImportError:
return False
return filepath is not None and filepath.endswith('.asdf')
if not optional_deps.HAS_ASDF_ASTROPY:
io_registry.register_reader('asdf', Table, read_table)
io_registry.register_writer('asdf', Table, write_table)
io_registry.register_identifier('asdf', Table, asdf_identify)
|
96e047ee2294c32f08c5251eebb68f89ced89a503acb184914f704d04154e3c8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from asdf.types import CustomType, ExtensionTypeMeta
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
__all__ = ['AstropyType', 'AstropyAsdfType']
# Names of AstropyType or AstropyAsdfType subclasses that are base classes
# and aren't used directly for serialization.
_TYPE_BASE_CLASS_NAMES = {'PolynomialTypeBase'}
_astropy_types = set()
_astropy_asdf_types = set()
class AstropyTypeMeta(ExtensionTypeMeta):
"""
Keeps track of `AstropyType` subclasses that are created so that they can
be stored automatically by astropy extensions for ASDF.
"""
def __new__(mcls, name, bases, attrs):
cls = super().__new__(mcls, name, bases, attrs)
# Classes using this metaclass are automatically added to the list of
# astropy extensions
if cls.__name__ not in _TYPE_BASE_CLASS_NAMES:
if cls.organization == 'astropy.org' and cls.standard == 'astropy':
_astropy_types.add(cls)
elif cls.organization == 'stsci.edu' and cls.standard == 'asdf':
_astropy_asdf_types.add(cls)
return cls
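# Illustrative sketch of the registration performed above (the class below is
# hypothetical; any concrete subclass is collected automatically when it is
# defined):
#
#     class FrobType(CustomType, metaclass=AstropyTypeMeta):
#         organization = 'astropy.org'
#         standard = 'astropy'
#     # -> FrobType is now a member of _astropy_types
#
# A subclass declaring organization='stsci.edu' and standard='asdf' would
# instead be added to _astropy_asdf_types.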
class AstropyType(CustomType, metaclass=AstropyTypeMeta):
"""
This class represents types that have schemas and tags that are defined by
Astropy.
IMPORTANT: This parent class should **not** be used for types that have
schemas that are defined by the ASDF standard.
"""
organization = 'astropy.org'
standard = 'astropy'
@classmethod
def to_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().to_tree_tagged(node, ctx)
@classmethod
def from_tree_tagged(cls, tree, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().from_tree_tagged(tree, ctx)
class AstropyAsdfType(CustomType, metaclass=AstropyTypeMeta):
"""
This class represents types that have schemas that are defined in the ASDF
standard, but have tags that are implemented within astropy.
IMPORTANT: This parent class should **not** be used for types that also
have schemas that are defined by astropy.
"""
organization = 'stsci.edu'
standard = 'asdf'
@classmethod
def to_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().to_tree_tagged(node, ctx)
@classmethod
def from_tree_tagged(cls, tree, ctx):
warnings.warn(create_asdf_deprecation_warning())
return super().from_tree_tagged(tree, ctx)
|
51b1c0bc2598f1eb6b5c56c5df580b539d4d65d9e1340103e7639920e1686cad | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from asdf.extension import AsdfExtension, BuiltinExtension
from asdf.util import filepath_to_url
# Make sure that all tag implementations are imported by the time we create
# the extension class so that _astropy_asdf_types is populated correctly. We
# could do this using __init__ files, except it causes pytest import errors in
# the case that asdf is not installed.
from .tags.coordinates.angle import * # noqa
from .tags.coordinates.frames import * # noqa
from .tags.coordinates.earthlocation import * # noqa
from .tags.coordinates.skycoord import * # noqa
from .tags.coordinates.representation import * # noqa
from .tags.coordinates.spectralcoord import * # noqa
from .tags.fits.fits import * # noqa
from .tags.table.table import * # noqa
from .tags.time.time import * # noqa
from .tags.time.timedelta import * # noqa
from .tags.transform.basic import * # noqa
from .tags.transform.compound import * # noqa
from .tags.transform.functional_models import * # noqa
from .tags.transform.physical_models import * # noqa
from .tags.transform.math import * # noqa
from .tags.transform.polynomial import * # noqa
from .tags.transform.powerlaws import * # noqa
from .tags.transform.projections import * # noqa
from .tags.transform.spline import * # noqa
from .tags.transform.tabular import * # noqa
from .tags.unit.quantity import * # noqa
from .tags.unit.unit import * # noqa
from .tags.unit.equivalency import * # noqa
from .types import _astropy_types, _astropy_asdf_types
__all__ = ['AstropyExtension', 'AstropyAsdfExtension']
ASTROPY_SCHEMA_URI_BASE = 'http://astropy.org/schemas/'
SCHEMA_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'data', 'schemas'))
ASTROPY_URL_MAPPING = [
(ASTROPY_SCHEMA_URI_BASE,
filepath_to_url(
os.path.join(SCHEMA_PATH, 'astropy.org')) +
'/{url_suffix}.yaml')]
# This extension is used to register custom types that have both tags and
# schemas defined by Astropy.
class AstropyExtension(AsdfExtension):
@property
def types(self):
return _astropy_types
@property
def tag_mapping(self):
return [('tag:astropy.org:astropy',
ASTROPY_SCHEMA_URI_BASE + 'astropy{tag_suffix}')]
@property
def url_mapping(self):
return ASTROPY_URL_MAPPING
# This extension is used to register custom tag types that have schemas defined
# by ASDF, but have tag implementations defined in astropy.
class AstropyAsdfExtension(BuiltinExtension):
@property
def types(self):
return _astropy_asdf_types
|
a2dbf72c4589448a97042d9e62aec523e3b780a639941df932fb2b0955d2c398 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# Define a constant to know if the entry points are installed, since this impacts
# whether we can run the tests.
from importlib.metadata import entry_points
import pytest
# TODO: Exclusively use select when Python minversion is 3.10
eps = entry_points()
if hasattr(eps, 'select'):
ep = [entry.name for entry in eps.select(group='asdf_extensions')]
else:
ep = [entry.name for entry in eps.get('asdf_extensions', [])]
ASDF_ENTRY_INSTALLED = 'astropy' in ep and 'astropy-asdf' in ep
del entry_points, eps, ep
if not ASDF_ENTRY_INSTALLED:
pytest.skip('The astropy asdf entry points are not installed',
allow_module_level=True)
|
689594fd111347106d6e3ac575ec74c37def58b73f0e8cbcd46ae0f9faa347ef | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
from astropy.table import Table
def make_table():
a = [1, 4, 5]
b = [2.0, 5.0, 8.2]
c = ['x', 'y', 'z']
return Table([a, b, c], names=('a', 'b', 'c'), meta={'name': 'first table'})
def test_table_io(tmpdir):
tmpfile = str(tmpdir.join('table.asdf'))
table = make_table()
table.write(tmpfile)
# Simple sanity check using ASDF directly
with asdf.open(tmpfile) as af:
assert 'data' in af.keys()
assert isinstance(af['data'], Table)
assert all(af['data'] == table)
# Now test using the table reader
new_t = Table.read(tmpfile)
assert all(new_t == table)
def test_table_io_custom_key(tmpdir):
tmpfile = str(tmpdir.join('table.asdf'))
table = make_table()
table.write(tmpfile, data_key='something')
# Simple sanity check using ASDF directly
with asdf.open(tmpfile) as af:
assert 'something' in af.keys()
assert 'data' not in af.keys()
assert isinstance(af['something'], Table)
assert all(af['something'] == table)
# Now test using the table reader
with pytest.raises(KeyError):
new_t = Table.read(tmpfile)
new_t = Table.read(tmpfile, data_key='something')
assert all(new_t == table)
def test_table_io_custom_tree(tmpdir):
tmpfile = str(tmpdir.join('table.asdf'))
table = make_table()
def make_custom_tree(tab):
return dict(foo=dict(bar=tab))
table.write(tmpfile, make_tree=make_custom_tree)
# Simple sanity check using ASDF directly
with asdf.open(tmpfile) as af:
assert 'foo' in af.keys()
assert 'bar' in af['foo']
assert 'data' not in af.keys()
assert all(af['foo']['bar'] == table)
# Now test using table reader
with pytest.raises(KeyError):
new_t = Table.read(tmpfile)
def find_table(asdffile):
return asdffile['foo']['bar']
new_t = Table.read(tmpfile, find_table=find_table)
assert all(new_t == table)
|
3811925cf287c3eeb7881dea21569759ef5b31c254ab01114c7a8dc2c26a1e03 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates.tests.helper import skycoord_equal as _skycoord_equal
from astropy.utils.decorators import deprecated
__all__ = ['skycoord_equal']
@deprecated("5.1", alternative="astropy.coordinates.tests.helper.skycoord_equal")
def skycoord_equal(sc1, sc2):
return _skycoord_equal(sc1, sc2)
|
a559311bd17121ecd678d7c205a62f52514db60fb499aba5010fed39f02113a3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from asdf.tags.core.ndarray import NDArrayType
from astropy import table
from astropy.io.misc.asdf.types import AstropyType, AstropyAsdfType
class TableType:
"""
This class defines to_tree and from_tree methods that are used by both the
AstropyTableType and the AsdfTableType defined below. The behavior is
differentiated by the ``_compat`` class attribute. When ``_compat==True``,
the behavior will conform to the table schema defined by the ASDF Standard.
Otherwise, the behavior will conform to the custom table schema defined by
Astropy.
"""
_compat = False
@classmethod
def from_tree(cls, node, ctx):
# This is getting meta, guys
meta = node.get('meta', {})
# This enables us to support files that use the table definition from
# the ASDF Standard, rather than the custom one that Astropy defines.
if cls._compat:
return table.Table(node['columns'], meta=meta)
        if node.get('qtable', False):
            t = table.QTable(meta=meta)
        else:
            t = table.Table(meta=meta)
for name, col in zip(node['colnames'], node['columns']):
t[name] = col
return t
@classmethod
def to_tree(cls, data, ctx):
columns = [data[name] for name in data.colnames]
node = dict(columns=columns)
# Files that use the table definition from the ASDF Standard (instead
# of the one defined by Astropy) will not contain these fields
if not cls._compat:
node['colnames'] = data.colnames
node['qtable'] = isinstance(data, table.QTable)
if data.meta:
node['meta'] = data.meta
return node
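    # Illustrative sketch (not normative): for a plain two-column Table with
    # empty meta and ``_compat == False``, the node built above looks roughly
    # like
    #
    #     {'columns': [<Column 'a'>, <Column 'b'>],
    #      'colnames': ['a', 'b'],
    #      'qtable': False}
    #
    # With ``_compat == True`` only 'columns' (plus 'meta', when present) is
    # written, matching the table schema from the ASDF Standard.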
@classmethod
def assert_equal(cls, old, new):
assert old.meta == new.meta
try:
NDArrayType.assert_equal(np.array(old), np.array(new))
except (AttributeError, TypeError, ValueError):
for col0, col1 in zip(old, new):
try:
NDArrayType.assert_equal(np.array(col0), np.array(col1))
except (AttributeError, TypeError, ValueError):
assert col0 == col1
class AstropyTableType(TableType, AstropyType):
"""
This tag class reads and writes tables that conform to the custom schema
that is defined by Astropy (in contrast to the one that is defined by the
ASDF Standard). The primary reason for differentiating is to enable the
support of Astropy mixin columns, which are not supported by the ASDF
Standard.
"""
name = 'table/table'
types = ['astropy.table.Table']
requires = ['astropy']
class AsdfTableType(TableType, AstropyAsdfType):
"""
This tag class allows Astropy to read (and write) ASDF files that use the
table definition that is provided by the ASDF Standard (instead of the
custom one defined by Astropy). This is important to maintain for
cross-compatibility.
"""
name = 'core/table'
types = ['astropy.table.Table']
requires = ['astropy']
_compat = True
class ColumnType(AstropyAsdfType):
name = 'core/column'
types = ['astropy.table.Column', 'astropy.table.MaskedColumn']
requires = ['astropy']
handle_dynamic_subclasses = True
@classmethod
def from_tree(cls, node, ctx):
data = node['data']
name = node['name']
description = node.get('description')
unit = node.get('unit')
meta = node.get('meta', None)
return table.Column(
data=data._make_array(), name=name, description=description,
unit=unit, meta=meta)
@classmethod
def to_tree(cls, data, ctx):
node = {
'data': data.data,
'name': data.name
}
if data.description:
node['description'] = data.description
if data.unit:
node['unit'] = data.unit
if data.meta:
node['meta'] = data.meta
return node
@classmethod
def assert_equal(cls, old, new):
assert old.meta == new.meta
assert old.description == new.description
assert old.unit == new.unit
NDArrayType.assert_equal(np.array(old), np.array(new))
|
681d4ebd2b88471ac0dfd4795d81c35f48645d47d2da32eb2a8596893eb64f4a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_array_equal
from astropy import table
from astropy.io import fits
from astropy.io.misc.asdf.types import AstropyType, AstropyAsdfType
class FitsType:
name = 'fits/fits'
types = ['astropy.io.fits.HDUList']
requires = ['astropy']
@classmethod
def from_tree(cls, data, ctx):
hdus = []
first = True
for hdu_entry in data:
header = fits.Header([fits.Card(*x) for x in hdu_entry['header']])
data = hdu_entry.get('data')
if data is not None:
try:
data = data.__array__()
except ValueError:
data = None
if first:
hdu = fits.PrimaryHDU(data=data, header=header)
first = False
            elif data is not None and data.dtype.names is not None:
hdu = fits.BinTableHDU(data=data, header=header)
else:
hdu = fits.ImageHDU(data=data, header=header)
hdus.append(hdu)
hdulist = fits.HDUList(hdus)
return hdulist
@classmethod
def to_tree(cls, hdulist, ctx):
units = []
for hdu in hdulist:
header_list = []
for card in hdu.header.cards:
if card.comment:
new_card = [card.keyword, card.value, card.comment]
else:
if card.value:
new_card = [card.keyword, card.value]
else:
if card.keyword:
new_card = [card.keyword]
else:
new_card = []
header_list.append(new_card)
hdu_dict = {}
hdu_dict['header'] = header_list
if hdu.data is not None:
if hdu.data.dtype.names is not None:
data = table.Table(hdu.data)
else:
data = hdu.data
hdu_dict['data'] = data
units.append(hdu_dict)
return units
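    # Illustrative sketch (not normative): each HDU becomes one dict, so a
    # minimal two-HDU list serializes roughly as
    #
    #     [{'header': [['SIMPLE', True, 'conforms to FITS standard'],
    #                  ['BITPIX', 8], ['NAXIS', 0]]},
    #      {'header': [...], 'data': <ndarray or Table>}]
    #
    # Header cards are stored as [keyword, value, comment] lists, with
    # trailing empty elements dropped by the card handling above.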
@classmethod
def reserve_blocks(cls, data, ctx):
for hdu in data:
if hdu.data is not None:
yield ctx.blocks.find_or_create_block_for_array(hdu.data, ctx)
@classmethod
def assert_equal(cls, old, new):
for hdua, hdub in zip(old, new):
assert_array_equal(hdua.data, hdub.data)
for carda, cardb in zip(hdua.header.cards, hdub.header.cards):
assert tuple(carda) == tuple(cardb)
class AstropyFitsType(FitsType, AstropyType):
"""
This class implements ASDF serialization/deserialization that corresponds
to the FITS schema defined by Astropy. It will be used by default when
writing new HDUs to ASDF files.
"""
class AsdfFitsType(FitsType, AstropyAsdfType):
"""
This class implements ASDF serialization/deserialization that corresponds
to the FITS schema defined by the ASDF Standard. It will not be used by
default, except when reading files that use the ASDF Standard definition
rather than the one defined in Astropy. It will primarily be used for
backwards compatibility for reading older files. In the unlikely case that
another ASDF implementation uses the FITS schema from the ASDF Standard,
this tag could also be used to read a file it generated.
"""
|
6206ea36f53706f163af0878ba6c88d7a5f006938a767381de1d876e8cb9d16d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from asdf.versioning import AsdfVersion
from astropy.modeling.bounding_box import ModelBoundingBox, CompoundBoundingBox
from astropy.modeling import mappings
from astropy.modeling import functional_models
from astropy.modeling.core import CompoundModel
from astropy.io.misc.asdf.types import AstropyAsdfType, AstropyType
from . import _parameter_to_value
__all__ = ['TransformType', 'IdentityType', 'ConstantType']
class TransformType(AstropyAsdfType):
version = '1.2.0'
requires = ['astropy']
@classmethod
def _from_tree_base_transform_members(cls, model, node, ctx):
if 'name' in node:
model.name = node['name']
if "inputs" in node:
model.inputs = tuple(node["inputs"])
if "outputs" in node:
model.outputs = tuple(node["outputs"])
if 'bounding_box' in node:
model.bounding_box = node['bounding_box']
elif 'selector_args' in node:
cbbox_keys = [tuple(key) for key in node['cbbox_keys']]
bbox_dict = dict(zip(cbbox_keys, node['cbbox_values']))
selector_args = node['selector_args']
model.bounding_box = CompoundBoundingBox.validate(model, bbox_dict, selector_args)
param_and_model_constraints = {}
for constraint in ['fixed', 'bounds']:
if constraint in node:
param_and_model_constraints[constraint] = node[constraint]
model._initialize_constraints(param_and_model_constraints)
if "input_units_equivalencies" in node:
            # this still writes equivalencies for compound models, but
            # operates on each sub-model
if not isinstance(model, CompoundModel):
model.input_units_equivalencies = node['input_units_equivalencies']
yield model
if 'inverse' in node:
model.inverse = node['inverse']
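    # Note: the method above is a generator, which asdf supports during
    # deserialization: the first yield supplies the model object, and the
    # remainder of the generator runs after the rest of the tree has been
    # built, so that 'inverse' (possibly a reference to another transform in
    # the tree) can be attached once it has been resolved.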
@classmethod
def from_tree_transform(cls, node, ctx):
raise NotImplementedError(
"Must be implemented in TransformType subclasses")
@classmethod
def from_tree(cls, node, ctx):
model = cls.from_tree_transform(node, ctx)
return cls._from_tree_base_transform_members(model, node, ctx)
@classmethod
def _to_tree_base_transform_members(cls, model, node, ctx):
if getattr(model, '_user_inverse', None) is not None:
node['inverse'] = model._user_inverse
if model.name is not None:
node['name'] = model.name
node['inputs'] = list(model.inputs)
node['outputs'] = list(model.outputs)
try:
bb = model.bounding_box
except NotImplementedError:
bb = None
if isinstance(bb, ModelBoundingBox):
bb = bb.bounding_box(order='C')
if model.n_inputs == 1:
bb = list(bb)
else:
bb = [list(item) for item in bb]
node['bounding_box'] = bb
elif isinstance(bb, CompoundBoundingBox):
selector_args = [[sa.index, sa.ignore] for sa in bb.selector_args]
node['selector_args'] = selector_args
node['cbbox_keys'] = list(bb.bounding_boxes.keys())
bounding_boxes = list(bb.bounding_boxes.values())
if len(model.inputs) - len(selector_args) == 1:
node['cbbox_values'] = [list(sbbox.bounding_box()) for sbbox in bounding_boxes]
else:
node['cbbox_values'] = [[list(item) for item in sbbox.bounding_box()
if np.isfinite(item[0])] for sbbox in bounding_boxes]
# model / parameter constraints
if not isinstance(model, CompoundModel):
fixed_nondefaults = {k: f for k, f in model.fixed.items() if f}
if fixed_nondefaults:
node['fixed'] = fixed_nondefaults
bounds_nondefaults = {k: b for k, b in model.bounds.items() if any(b)}
if bounds_nondefaults:
node['bounds'] = bounds_nondefaults
if not isinstance(model, CompoundModel):
if model.input_units_equivalencies:
node['input_units_equivalencies'] = model.input_units_equivalencies
return node
@classmethod
def to_tree_transform(cls, model, ctx):
raise NotImplementedError("Must be implemented in TransformType subclasses")
@classmethod
def to_tree(cls, model, ctx):
node = cls.to_tree_transform(model, ctx)
return cls._to_tree_base_transform_members(model, node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
assert a.name == b.name
# TODO: Assert inverses are the same
# assert the bounding_boxes are the same
assert a.get_bounding_box() == b.get_bounding_box()
assert a.inputs == b.inputs
assert a.outputs == b.outputs
assert a.input_units_equivalencies == b.input_units_equivalencies
class IdentityType(TransformType):
name = "transform/identity"
types = ['astropy.modeling.mappings.Identity']
@classmethod
def from_tree_transform(cls, node, ctx):
return mappings.Identity(node.get('n_dims', 1))
@classmethod
def to_tree_transform(cls, data, ctx):
node = {}
if data.n_inputs != 1:
node['n_dims'] = data.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, mappings.Identity) and
isinstance(b, mappings.Identity) and
a.n_inputs == b.n_inputs)
class ConstantType(TransformType):
name = "transform/constant"
version = '1.4.0'
supported_versions = ['1.0.0', '1.1.0', '1.2.0', '1.3.0', '1.4.0']
types = ['astropy.modeling.functional_models.Const1D',
'astropy.modeling.functional_models.Const2D']
@classmethod
def from_tree_transform(cls, node, ctx):
if cls.version < AsdfVersion('1.4.0'):
# The 'dimensions' property was added in 1.4.0,
# previously all values were 1D.
return functional_models.Const1D(node['value'])
elif node['dimensions'] == 1:
return functional_models.Const1D(node['value'])
elif node['dimensions'] == 2:
return functional_models.Const2D(node['value'])
else:
raise TypeError('Only 1D and 2D constant models are supported.')
@classmethod
def to_tree_transform(cls, data, ctx):
if cls.version < AsdfVersion('1.4.0'):
if not isinstance(data, functional_models.Const1D):
raise ValueError(
f'constant-{cls.version} does not support models with > 1 dimension')
return {
'value': _parameter_to_value(data.amplitude)
}
else:
            if isinstance(data, functional_models.Const1D):
                dimension = 1
            elif isinstance(data, functional_models.Const2D):
                dimension = 2
            else:
                raise TypeError('Only 1D and 2D constant models are supported.')
            return {
                'value': _parameter_to_value(data.amplitude),
                'dimensions': dimension
            }
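# Illustrative sketch (not normative): under schema version 1.4.0 and later,
# Const2D(amplitude=3.0) serializes as {'value': 3.0, 'dimensions': 2}, while
# pre-1.4.0 trees carry only {'value': ...} and always deserialize to Const1D.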
class GenericModel(mappings.Mapping):
def __init__(self, n_inputs, n_outputs):
mapping = tuple(range(n_inputs))
super().__init__(mapping)
self._n_outputs = n_outputs
self._outputs = tuple('x' + str(idx) for idx in range(n_outputs))
@property
def inverse(self):
raise NotImplementedError()
class GenericType(TransformType):
name = "transform/generic"
types = [GenericModel]
@classmethod
def from_tree_transform(cls, node, ctx):
return GenericModel(
node['n_inputs'], node['n_outputs'])
@classmethod
def to_tree_transform(cls, data, ctx):
return {
'n_inputs': data.n_inputs,
'n_outputs': data.n_outputs
}
class UnitsMappingType(AstropyType):
name = "transform/units_mapping"
version = "1.0.0"
types = [mappings.UnitsMapping]
@classmethod
def to_tree(cls, node, ctx):
tree = {}
if node.name is not None:
tree["name"] = node.name
inputs = []
outputs = []
for i, o, m in zip(node.inputs, node.outputs, node.mapping):
input = {
"name": i,
"allow_dimensionless": node.input_units_allow_dimensionless[i],
}
if m[0] is not None:
input["unit"] = m[0]
if node.input_units_equivalencies is not None and i in node.input_units_equivalencies:
input["equivalencies"] = node.input_units_equivalencies[i]
inputs.append(input)
output = {
"name": o,
}
if m[-1] is not None:
output["unit"] = m[-1]
outputs.append(output)
tree["unit_inputs"] = inputs
tree["unit_outputs"] = outputs
return tree
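    # Illustrative sketch (hypothetical values): a UnitsMapping converting a
    # single input 'x' from metres, with no output unit recorded, serializes
    # roughly as
    #
    #     {'unit_inputs': [{'name': 'x', 'allow_dimensionless': False,
    #                       'unit': Unit('m')}],
    #      'unit_outputs': [{'name': 'x'}]}
    #
    # 'equivalencies' entries and the top-level 'name' appear only when set.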
@classmethod
def from_tree(cls, tree, ctx):
mapping = tuple((i.get("unit"), o.get("unit"))
for i, o in zip(tree["unit_inputs"], tree["unit_outputs"]))
equivalencies = None
for i in tree["unit_inputs"]:
if "equivalencies" in i:
if equivalencies is None:
equivalencies = {}
equivalencies[i["name"]] = i["equivalencies"]
kwargs = {
"input_units_equivalencies": equivalencies,
"input_units_allow_dimensionless": {
i["name"]: i.get("allow_dimensionless", False) for i in tree["unit_inputs"]},
}
if "name" in tree:
kwargs["name"] = tree["name"]
return mappings.UnitsMapping(mapping, **kwargs)
|
77e9c42d294ce53b94f443ef8271021e4a86d0a10ccf9c8ab0078cd5c4526215 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import astropy.units as u
def _parameter_to_value(param):
if param.unit is not None:
return u.Quantity(param)
else:
return param.value
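# Illustrative behaviour (hypothetical parameter): for a parameter with
# unit 'm' and value 3.0 this returns Quantity(3.0, 'm'); for a unitless
# parameter it returns the bare float 3.0.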
|
d59d84d4a37fd8473d5193df3303444f118c04e6e4b49667c5baec57cc4ca995 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.modeling.math_functions import __all__ as math_classes
from astropy.modeling.math_functions import *
from astropy.modeling import math_functions
from astropy.io.misc.asdf.tags.transform.basic import TransformType
__all__ = ['NpUfuncType']
class NpUfuncType(TransformType):
name = "transform/math_functions"
version = '1.0.0'
    types = ['astropy.modeling.math_functions.' + kl for kl in math_classes]
@classmethod
def from_tree_transform(cls, node, ctx):
klass_name = math_functions._make_class_name(node['func_name'])
klass = getattr(math_functions, klass_name)
return klass()
@classmethod
def to_tree_transform(cls, model, ctx):
return {'func_name': model.func.__name__}
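# Illustrative round trip (not normative): to_tree_transform stores only the
# underlying NumPy ufunc name, e.g. {'func_name': 'add'}, and
# from_tree_transform maps that name back to the corresponding model class
# via math_functions._make_class_name.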
|
a05646dd73f8ca921743d9cdccde0a3c64cd20d126bb4d3eb01a08858bdb0532 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_array_equal
from astropy.modeling import functional_models
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['AiryDisk2DType', 'Box1DType', 'Box2DType',
'Disk2DType', 'Ellipse2DType', 'Exponential1DType',
'Gaussian1DType', 'Gaussian2DType', 'KingProjectedAnalytic1DType',
'Logarithmic1DType', 'Lorentz1DType', 'Moffat1DType',
'Moffat2DType', 'Planar2D', 'RedshiftScaleFactorType',
'RickerWavelet1DType', 'RickerWavelet2DType', 'Ring2DType',
'Sersic1DType', 'Sersic2DType',
'Sine1DType', 'Cosine1DType', 'Tangent1DType',
'ArcSine1DType', 'ArcCosine1DType', 'ArcTangent1DType',
'Trapezoid1DType', 'TrapezoidDisk2DType', 'Voigt1DType']
class AiryDisk2DType(TransformType):
name = 'transform/airy_disk2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.AiryDisk2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.AiryDisk2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
radius=node['radius'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'radius': _parameter_to_value(model.radius)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.AiryDisk2D) and
isinstance(b, functional_models.AiryDisk2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.radius, b.radius)
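# Illustrative sketch (assumption, not part of the original file): every tag
# in this module follows the same pattern -- to_tree_transform dumps each
# parameter through _parameter_to_value, and from_tree_transform feeds the
# node back in as keyword arguments.
#
#     m = functional_models.AiryDisk2D(amplitude=10, x_0=0.5, y_0=1.5,
#                                      radius=2.0)
#     node = AiryDisk2DType.to_tree_transform(m, ctx=None)
#     # node == {'amplitude': 10.0, 'x_0': 0.5, 'y_0': 1.5, 'radius': 2.0}
#     m2 = AiryDisk2DType.from_tree_transform(node, ctx=None)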
class Box1DType(TransformType):
name = 'transform/box1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Box1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Box1D(amplitude=node['amplitude'],
x_0=node['x_0'],
width=node['width'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'width': _parameter_to_value(model.width)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Box1D) and
isinstance(b, functional_models.Box1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.width, b.width)
class Box2DType(TransformType):
name = 'transform/box2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Box2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Box2D(amplitude=node['amplitude'],
x_0=node['x_0'],
x_width=node['x_width'],
y_0=node['y_0'],
y_width=node['y_width'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'x_width': _parameter_to_value(model.x_width),
'y_0': _parameter_to_value(model.y_0),
'y_width': _parameter_to_value(model.y_width)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Box2D) and
isinstance(b, functional_models.Box2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.x_width, b.x_width)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.y_width, b.y_width)
class Disk2DType(TransformType):
name = 'transform/disk2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Disk2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Disk2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
R_0=node['R_0'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'R_0': _parameter_to_value(model.R_0)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Disk2D) and
isinstance(b, functional_models.Disk2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.R_0, b.R_0)
class Ellipse2DType(TransformType):
name = 'transform/ellipse2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Ellipse2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Ellipse2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
a=node['a'],
b=node['b'],
theta=node['theta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'a': _parameter_to_value(model.a),
'b': _parameter_to_value(model.b),
'theta': _parameter_to_value(model.theta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Ellipse2D) and
isinstance(b, functional_models.Ellipse2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.a, b.a)
assert_array_equal(a.b, b.b)
assert_array_equal(a.theta, b.theta)
class Exponential1DType(TransformType):
name = 'transform/exponential1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Exponential1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Exponential1D(amplitude=node['amplitude'],
tau=node['tau'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'tau': _parameter_to_value(model.tau)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Exponential1D) and
isinstance(b, functional_models.Exponential1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.tau, b.tau)
class Gaussian1DType(TransformType):
name = 'transform/gaussian1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Gaussian1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Gaussian1D(amplitude=node['amplitude'],
mean=node['mean'],
stddev=node['stddev'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'mean': _parameter_to_value(model.mean),
'stddev': _parameter_to_value(model.stddev)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Gaussian1D) and
isinstance(b, functional_models.Gaussian1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.mean, b.mean)
assert_array_equal(a.stddev, b.stddev)
class Gaussian2DType(TransformType):
name = 'transform/gaussian2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Gaussian2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Gaussian2D(amplitude=node['amplitude'],
x_mean=node['x_mean'],
y_mean=node['y_mean'],
x_stddev=node['x_stddev'],
y_stddev=node['y_stddev'],
theta=node['theta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_mean': _parameter_to_value(model.x_mean),
'y_mean': _parameter_to_value(model.y_mean),
'x_stddev': _parameter_to_value(model.x_stddev),
'y_stddev': _parameter_to_value(model.y_stddev),
'theta': _parameter_to_value(model.theta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Gaussian2D) and
isinstance(b, functional_models.Gaussian2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_mean, b.x_mean)
assert_array_equal(a.y_mean, b.y_mean)
assert_array_equal(a.x_stddev, b.x_stddev)
assert_array_equal(a.y_stddev, b.y_stddev)
assert_array_equal(a.theta, b.theta)
class KingProjectedAnalytic1DType(TransformType):
name = 'transform/king_projected_analytic1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.KingProjectedAnalytic1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.KingProjectedAnalytic1D(
amplitude=node['amplitude'],
r_core=node['r_core'],
r_tide=node['r_tide'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'r_core': _parameter_to_value(model.r_core),
'r_tide': _parameter_to_value(model.r_tide)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.KingProjectedAnalytic1D) and
isinstance(b, functional_models.KingProjectedAnalytic1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.r_core, b.r_core)
assert_array_equal(a.r_tide, b.r_tide)
class Logarithmic1DType(TransformType):
name = 'transform/logarithmic1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Logarithmic1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Logarithmic1D(amplitude=node['amplitude'],
tau=node['tau'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'tau': _parameter_to_value(model.tau)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Logarithmic1D) and
isinstance(b, functional_models.Logarithmic1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.tau, b.tau)
class Lorentz1DType(TransformType):
name = 'transform/lorentz1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Lorentz1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Lorentz1D(amplitude=node['amplitude'],
x_0=node['x_0'],
fwhm=node['fwhm'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'fwhm': _parameter_to_value(model.fwhm)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Lorentz1D) and
isinstance(b, functional_models.Lorentz1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.fwhm, b.fwhm)
class Moffat1DType(TransformType):
name = 'transform/moffat1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Moffat1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Moffat1D(amplitude=node['amplitude'],
x_0=node['x_0'],
gamma=node['gamma'],
alpha=node['alpha'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'gamma': _parameter_to_value(model.gamma),
'alpha': _parameter_to_value(model.alpha)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Moffat1D) and
isinstance(b, functional_models.Moffat1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.gamma, b.gamma)
assert_array_equal(a.alpha, b.alpha)
class Moffat2DType(TransformType):
name = 'transform/moffat2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Moffat2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Moffat2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
gamma=node['gamma'],
alpha=node['alpha'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'gamma': _parameter_to_value(model.gamma),
'alpha': _parameter_to_value(model.alpha)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Moffat2D) and
isinstance(b, functional_models.Moffat2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.gamma, b.gamma)
assert_array_equal(a.alpha, b.alpha)
class Planar2D(TransformType):
name = 'transform/planar2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Planar2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Planar2D(slope_x=node['slope_x'],
slope_y=node['slope_y'],
intercept=node['intercept'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'slope_x': _parameter_to_value(model.slope_x),
'slope_y': _parameter_to_value(model.slope_y),
'intercept': _parameter_to_value(model.intercept)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Planar2D) and
isinstance(b, functional_models.Planar2D))
assert_array_equal(a.slope_x, b.slope_x)
assert_array_equal(a.slope_y, b.slope_y)
assert_array_equal(a.intercept, b.intercept)
class RedshiftScaleFactorType(TransformType):
name = 'transform/redshift_scale_factor'
version = '1.0.0'
types = ['astropy.modeling.functional_models.RedshiftScaleFactor']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.RedshiftScaleFactor(z=node['z'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'z': _parameter_to_value(model.z)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.RedshiftScaleFactor) and
isinstance(b, functional_models.RedshiftScaleFactor))
assert_array_equal(a.z, b.z)
class RickerWavelet1DType(TransformType):
name = 'transform/ricker_wavelet1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.RickerWavelet1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.RickerWavelet1D(amplitude=node['amplitude'],
x_0=node['x_0'],
sigma=node['sigma'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'sigma': _parameter_to_value(model.sigma)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.RickerWavelet1D) and
isinstance(b, functional_models.RickerWavelet1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.sigma, b.sigma)
class RickerWavelet2DType(TransformType):
name = 'transform/ricker_wavelet2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.RickerWavelet2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.RickerWavelet2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
sigma=node['sigma'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'sigma': _parameter_to_value(model.sigma)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.RickerWavelet2D) and
isinstance(b, functional_models.RickerWavelet2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.sigma, b.sigma)
class Ring2DType(TransformType):
name = 'transform/ring2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Ring2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Ring2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
r_in=node['r_in'],
width=node['width'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'r_in': _parameter_to_value(model.r_in),
'width': _parameter_to_value(model.width)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Ring2D) and
isinstance(b, functional_models.Ring2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.r_in, b.r_in)
assert_array_equal(a.width, b.width)
class Sersic1DType(TransformType):
name = 'transform/sersic1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Sersic1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Sersic1D(amplitude=node['amplitude'],
r_eff=node['r_eff'],
n=node['n'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'r_eff': _parameter_to_value(model.r_eff),
'n': _parameter_to_value(model.n)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Sersic1D) and
isinstance(b, functional_models.Sersic1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.r_eff, b.r_eff)
assert_array_equal(a.n, b.n)
class Sersic2DType(TransformType):
name = 'transform/sersic2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Sersic2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Sersic2D(amplitude=node['amplitude'],
r_eff=node['r_eff'],
n=node['n'],
x_0=node['x_0'],
y_0=node['y_0'],
ellip=node['ellip'],
theta=node['theta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'r_eff': _parameter_to_value(model.r_eff),
'n': _parameter_to_value(model.n),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'ellip': _parameter_to_value(model.ellip),
'theta': _parameter_to_value(model.theta)
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Sersic2D) and
isinstance(b, functional_models.Sersic2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.r_eff, b.r_eff)
assert_array_equal(a.n, b.n)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.ellip, b.ellip)
assert_array_equal(a.theta, b.theta)
class Trigonometric1DType(TransformType):
_model = None
@classmethod
def from_tree_transform(cls, node, ctx):
return cls._model(amplitude=node['amplitude'],
frequency=node['frequency'],
phase=node['phase'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'frequency': _parameter_to_value(model.frequency),
'phase': _parameter_to_value(model.phase)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, cls._model) and
isinstance(b, cls._model))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.frequency, b.frequency)
assert_array_equal(a.phase, b.phase)
class Sine1DType(Trigonometric1DType):
name = 'transform/sine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Sine1D']
_model = functional_models.Sine1D
class Cosine1DType(Trigonometric1DType):
name = 'transform/cosine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Cosine1D']
_model = functional_models.Cosine1D
class Tangent1DType(Trigonometric1DType):
name = 'transform/tangent1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Tangent1D']
_model = functional_models.Tangent1D
class ArcSine1DType(Trigonometric1DType):
name = 'transform/arcsine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.ArcSine1D']
_model = functional_models.ArcSine1D
class ArcCosine1DType(Trigonometric1DType):
name = 'transform/arccosine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.ArcCosine1D']
_model = functional_models.ArcCosine1D
class ArcTangent1DType(Trigonometric1DType):
name = 'transform/arctangent1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.ArcTangent1D']
_model = functional_models.ArcTangent1D
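# Illustrative note (assumption, not part of the original file): the six
# concrete trigonometric tags above only set `_model`; all (de)serialization
# is inherited from Trigonometric1DType.
#
#     node = Sine1DType.to_tree_transform(
#         functional_models.Sine1D(amplitude=2, frequency=0.5, phase=0.1),
#         ctx=None)
#     # node == {'amplitude': 2.0, 'frequency': 0.5, 'phase': 0.1}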
class Trapezoid1DType(TransformType):
name = 'transform/trapezoid1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Trapezoid1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Trapezoid1D(amplitude=node['amplitude'],
x_0=node['x_0'],
width=node['width'],
slope=node['slope'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'width': _parameter_to_value(model.width),
'slope': _parameter_to_value(model.slope)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Trapezoid1D) and
isinstance(b, functional_models.Trapezoid1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.width, b.width)
assert_array_equal(a.slope, b.slope)
class TrapezoidDisk2DType(TransformType):
name = 'transform/trapezoid_disk2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.TrapezoidDisk2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.TrapezoidDisk2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
R_0=node['R_0'],
slope=node['slope'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'R_0': _parameter_to_value(model.R_0),
'slope': _parameter_to_value(model.slope)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.TrapezoidDisk2D) and
isinstance(b, functional_models.TrapezoidDisk2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.R_0, b.R_0)
assert_array_equal(a.slope, b.slope)
class Voigt1DType(TransformType):
name = 'transform/voigt1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Voigt1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Voigt1D(x_0=node['x_0'],
amplitude_L=node['amplitude_L'],
fwhm_L=node['fwhm_L'],
fwhm_G=node['fwhm_G'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'x_0': _parameter_to_value(model.x_0),
'amplitude_L': _parameter_to_value(model.amplitude_L),
'fwhm_L': _parameter_to_value(model.fwhm_L),
'fwhm_G': _parameter_to_value(model.fwhm_G)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Voigt1D) and
isinstance(b, functional_models.Voigt1D))
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.amplitude_L, b.amplitude_L)
assert_array_equal(a.fwhm_L, b.fwhm_L)
assert_array_equal(a.fwhm_G, b.fwhm_G)
|
71c5da54e55c037b69caed1c8d706649f0a9ffb30ef0c55902e3d9513e862a58 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_array_equal
from astropy import modeling
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['AffineType', 'Rotate2DType', 'Rotate3DType',
'RotationSequenceType']
class AffineType(TransformType):
name = "transform/affine"
version = '1.3.0'
types = ['astropy.modeling.projections.AffineTransformation2D']
@classmethod
def from_tree_transform(cls, node, ctx):
matrix = node['matrix']
translation = node['translation']
if matrix.shape != (2, 2):
            raise NotImplementedError(
                "asdf currently only supports 2x2 (2D) affine transformation "
                "matrices")
if translation.shape != (2,):
raise NotImplementedError(
"asdf currently only supports 2D translation transformations.")
return modeling.projections.AffineTransformation2D(
matrix=matrix, translation=translation)
@classmethod
def to_tree_transform(cls, model, ctx):
return {'matrix': _parameter_to_value(model.matrix),
'translation': _parameter_to_value(model.translation)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (a.__class__ == b.__class__)
assert_array_equal(a.matrix, b.matrix)
assert_array_equal(a.translation, b.translation)
class Rotate2DType(TransformType):
name = "transform/rotate2d"
version = '1.3.0'
types = ['astropy.modeling.rotations.Rotation2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return modeling.rotations.Rotation2D(node['angle'])
@classmethod
def to_tree_transform(cls, model, ctx):
return {'angle': _parameter_to_value(model.angle)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.rotations.Rotation2D) and
isinstance(b, modeling.rotations.Rotation2D))
assert_array_equal(a.angle, b.angle)
class Rotate3DType(TransformType):
name = "transform/rotate3d"
version = '1.3.0'
types = ['astropy.modeling.rotations.RotateNative2Celestial',
'astropy.modeling.rotations.RotateCelestial2Native',
'astropy.modeling.rotations.EulerAngleRotation']
@classmethod
def from_tree_transform(cls, node, ctx):
if node['direction'] == 'native2celestial':
return modeling.rotations.RotateNative2Celestial(node["phi"],
node["theta"],
node["psi"])
elif node['direction'] == 'celestial2native':
return modeling.rotations.RotateCelestial2Native(node["phi"],
node["theta"],
node["psi"])
else:
return modeling.rotations.EulerAngleRotation(node["phi"],
node["theta"],
node["psi"],
axes_order=node["direction"])
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, modeling.rotations.RotateNative2Celestial):
try:
node = {"phi": _parameter_to_value(model.lon),
"theta": _parameter_to_value(model.lat),
"psi": _parameter_to_value(model.lon_pole),
"direction": "native2celestial"
}
except AttributeError:
node = {"phi": model.lon,
"theta": model.lat,
"psi": model.lon_pole,
"direction": "native2celestial"
}
elif isinstance(model, modeling.rotations.RotateCelestial2Native):
try:
node = {"phi": _parameter_to_value(model.lon),
"theta": _parameter_to_value(model.lat),
"psi": _parameter_to_value(model.lon_pole),
"direction": "celestial2native"
}
except AttributeError:
node = {"phi": model.lon,
"theta": model.lat,
"psi": model.lon_pole,
"direction": "celestial2native"
}
else:
node = {"phi": _parameter_to_value(model.phi),
"theta": _parameter_to_value(model.theta),
"psi": _parameter_to_value(model.psi),
"direction": model.axes_order
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert a.__class__ == b.__class__
if a.__class__.__name__ == "EulerAngleRotation":
assert_array_equal(a.phi, b.phi)
assert_array_equal(a.psi, b.psi)
assert_array_equal(a.theta, b.theta)
else:
assert_array_equal(a.lon, b.lon)
assert_array_equal(a.lat, b.lat)
assert_array_equal(a.lon_pole, b.lon_pole)
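# Illustrative sketch (assumption, not part of the original file): the
# 'direction' key doubles as a discriminator -- the two fixed strings select
# the sky rotation classes, and anything else is interpreted as an
# axes_order for EulerAngleRotation.
#
#     node = {'phi': 10., 'theta': 20., 'psi': 30., 'direction': 'zxz'}
#     model = Rotate3DType.from_tree_transform(node, ctx=None)
#     # isinstance(model, modeling.rotations.EulerAngleRotation) -> True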
class RotationSequenceType(TransformType):
name = "transform/rotate_sequence_3d"
types = ['astropy.modeling.rotations.RotationSequence3D',
'astropy.modeling.rotations.SphericalRotationSequence']
version = "1.0.0"
@classmethod
def from_tree_transform(cls, node, ctx):
angles = node['angles']
axes_order = node['axes_order']
rotation_type = node['rotation_type']
if rotation_type == 'cartesian':
return modeling.rotations.RotationSequence3D(angles, axes_order=axes_order)
elif rotation_type == 'spherical':
return modeling.rotations.SphericalRotationSequence(angles, axes_order=axes_order)
else:
raise ValueError(f"Unrecognized rotation_type: {rotation_type}")
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'angles': list(model.angles.value)}
node['axes_order'] = model.axes_order
if isinstance(model, modeling.rotations.SphericalRotationSequence):
node['rotation_type'] = "spherical"
elif isinstance(model, modeling.rotations.RotationSequence3D):
node['rotation_type'] = "cartesian"
else:
raise ValueError(f"Cannot serialize model of type {type(model)}")
return node
@classmethod
def assert_equal(cls, a, b):
TransformType.assert_equal(a, b)
assert a.__class__.__name__ == b.__class__.__name__
assert_array_equal(a.angles, b.angles)
assert a.axes_order == b.axes_order
class GenericProjectionType(TransformType):
@classmethod
def from_tree_transform(cls, node, ctx):
args = []
for param_name, default in cls.params:
args.append(node.get(param_name, default))
if node['direction'] == 'pix2sky':
return cls.types[0](*args)
else:
return cls.types[1](*args)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
if isinstance(model, cls.types[0]):
node['direction'] = 'pix2sky'
else:
node['direction'] = 'sky2pix'
for param_name, default in cls.params:
val = getattr(model, param_name).value
if val != default:
node[param_name] = val
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert a.__class__ == b.__class__
_generic_projections = {
'zenithal_perspective': ('ZenithalPerspective', (('mu', 0.0), ('gamma', 0.0)), '1.3.0'),
'gnomonic': ('Gnomonic', (), None),
'stereographic': ('Stereographic', (), None),
'slant_orthographic': ('SlantOrthographic', (('xi', 0.0), ('eta', 0.0)), None),
'zenithal_equidistant': ('ZenithalEquidistant', (), None),
'zenithal_equal_area': ('ZenithalEqualArea', (), None),
'airy': ('Airy', (('theta_b', 90.0),), '1.2.0'),
'cylindrical_perspective': ('CylindricalPerspective', (('mu', 0.0), ('lam', 0.0)), '1.3.0'),
'cylindrical_equal_area': ('CylindricalEqualArea', (('lam', 0.0),), '1.3.0'),
'plate_carree': ('PlateCarree', (), None),
'mercator': ('Mercator', (), None),
'sanson_flamsteed': ('SansonFlamsteed', (), None),
'parabolic': ('Parabolic', (), None),
'molleweide': ('Molleweide', (), None),
'hammer_aitoff': ('HammerAitoff', (), None),
'conic_perspective': ('ConicPerspective', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'conic_equal_area': ('ConicEqualArea', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'conic_equidistant': ('ConicEquidistant', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'conic_orthomorphic': ('ConicOrthomorphic', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'bonne_equal_area': ('BonneEqualArea', (('theta1', 0.0),), '1.3.0'),
'polyconic': ('Polyconic', (), None),
'tangential_spherical_cube': ('TangentialSphericalCube', (), None),
'cobe_quad_spherical_cube': ('COBEQuadSphericalCube', (), None),
'quad_spherical_cube': ('QuadSphericalCube', (), None),
'healpix': ('HEALPix', (('H', 4.0), ('X', 3.0)), None),
'healpix_polar': ('HEALPixPolar', (), None)
}
def make_projection_types():
for tag_name, (name, params, version) in _generic_projections.items():
class_name = f'{name}Type'
types = [f'astropy.modeling.projections.Pix2Sky_{name}',
f'astropy.modeling.projections.Sky2Pix_{name}']
members = {'name': f'transform/{tag_name}',
'types': types,
'params': params}
if version:
members['version'] = version
globals()[class_name] = type(
str(class_name),
(GenericProjectionType,),
members)
__all__.append(class_name)
make_projection_types()
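# Illustrative note (assumption, not part of the original file):
# make_projection_types() injects one tag class per entry of
# _generic_projections into this module's namespace, e.g.:
#
#     GnomonicType.name    # -> 'transform/gnomonic'
#     GnomonicType.types   # -> ['astropy.modeling.projections.Pix2Sky_Gnomonic',
#                          #     'astropy.modeling.projections.Sky2Pix_Gnomonic']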
|
d37f83d26d2be072cb865d4bd160bd1631bf75da0f94dd6443158a60664658b2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_array_equal
from asdf.versioning import AsdfVersion
import astropy.units as u
from astropy import modeling
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['ShiftType', 'ScaleType', 'Linear1DType']
class ShiftType(TransformType):
name = "transform/shift"
version = '1.2.0'
types = ['astropy.modeling.models.Shift']
@classmethod
def from_tree_transform(cls, node, ctx):
offset = node['offset']
if not isinstance(offset, u.Quantity) and not np.isscalar(offset):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Shift transform.")
return modeling.models.Shift(offset)
@classmethod
def to_tree_transform(cls, model, ctx):
offset = model.offset
return {'offset': _parameter_to_value(offset)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Shift) and
isinstance(b, modeling.models.Shift))
assert_array_equal(a.offset.value, b.offset.value)
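# Illustrative sketch (assumption, not part of the original file): Quantity
# offsets survive the round trip because _parameter_to_value keeps the unit
# attached.
#
#     node = ShiftType.to_tree_transform(modeling.models.Shift(1 * u.pix),
#                                        ctx=None)
#     # node == {'offset': <Quantity 1. pix>}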
class ScaleType(TransformType):
name = "transform/scale"
version = '1.2.0'
types = ['astropy.modeling.models.Scale']
@classmethod
def from_tree_transform(cls, node, ctx):
factor = node['factor']
if not isinstance(factor, u.Quantity) and not np.isscalar(factor):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Scale transform.")
return modeling.models.Scale(factor)
@classmethod
def to_tree_transform(cls, model, ctx):
factor = model.factor
return {'factor': _parameter_to_value(factor)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Scale) and
isinstance(b, modeling.models.Scale))
assert_array_equal(a.factor, b.factor)
class MultiplyType(TransformType):
name = "transform/multiplyscale"
version = '1.0.0'
types = ['astropy.modeling.models.Multiply']
@classmethod
def from_tree_transform(cls, node, ctx):
factor = node['factor']
return modeling.models.Multiply(factor)
@classmethod
def to_tree_transform(cls, model, ctx):
factor = model.factor
return {'factor': _parameter_to_value(factor)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Multiply) and
isinstance(b, modeling.models.Multiply))
assert_array_equal(a.factor, b.factor)
class PolynomialTypeBase(TransformType):
DOMAIN_WINDOW_MIN_VERSION = AsdfVersion("1.2.0")
name = "transform/polynomial"
types = ['astropy.modeling.models.Polynomial1D',
'astropy.modeling.models.Polynomial2D']
@classmethod
def from_tree_transform(cls, node, ctx):
coefficients = np.asarray(node['coefficients'])
n_dim = coefficients.ndim
if n_dim == 1:
domain = node.get('domain', None)
window = node.get('window', None)
model = modeling.models.Polynomial1D(coefficients.size - 1,
domain=domain, window=window)
model.parameters = coefficients
elif n_dim == 2:
x_domain, y_domain = tuple(node.get('domain', (None, None)))
x_window, y_window = tuple(node.get('window', (None, None)))
shape = coefficients.shape
degree = shape[0] - 1
if shape[0] != shape[1]:
raise TypeError("Coefficients must be an (n+1, n+1) matrix")
coeffs = {}
for i in range(shape[0]):
for j in range(shape[0]):
if i + j < degree + 1:
name = 'c' + str(i) + '_' + str(j)
coeffs[name] = coefficients[i, j]
model = modeling.models.Polynomial2D(degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
**coeffs)
else:
raise NotImplementedError(
"Asdf currently only supports 1D or 2D polynomial transform.")
return model
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, modeling.models.Polynomial1D):
coefficients = np.array(model.parameters)
elif isinstance(model, modeling.models.Polynomial2D):
degree = model.degree
coefficients = np.zeros((degree + 1, degree + 1))
for i in range(degree + 1):
for j in range(degree + 1):
if i + j < degree + 1:
name = 'c' + str(i) + '_' + str(j)
coefficients[i, j] = getattr(model, name).value
node = {'coefficients': coefficients}
typeindex = cls.types.index(model.__class__)
ndim = (typeindex % 2) + 1
if cls.version >= PolynomialTypeBase.DOMAIN_WINDOW_MIN_VERSION:
# Schema versions prior to 1.2 included an unrelated "domain"
# property. We can't serialize the new domain values with those
# versions because they don't validate.
if ndim == 1:
if model.domain is not None:
node['domain'] = model.domain
if model.window is not None:
node['window'] = model.window
else:
                if model.x_domain is not None or model.y_domain is not None:
                    node['domain'] = (model.x_domain, model.y_domain)
                if model.x_window is not None or model.y_window is not None:
                    node['window'] = (model.x_window, model.y_window)
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)) and
isinstance(b, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)))
assert_array_equal(a.parameters, b.parameters)
        if cls.version >= PolynomialTypeBase.DOMAIN_WINDOW_MIN_VERSION:
# Schema versions prior to 1.2 are known not to serialize
# domain or window.
if isinstance(a, modeling.models.Polynomial1D):
assert a.domain == b.domain
assert a.window == b.window
else:
assert a.x_domain == b.x_domain
assert a.x_window == b.x_window
assert a.y_domain == b.y_domain
assert a.y_window == b.y_window
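# Illustrative note (assumption, not part of the original file): for
# Polynomial2D the tree stores a square (degree + 1, degree + 1) array whose
# upper-left triangle (i + j <= degree) holds the c{i}_{j} coefficients; the
# remaining entries stay zero. For degree 2:
#
#     [[c0_0, c0_1, c0_2],
#      [c1_0, c1_1, 0.  ],
#      [c2_0, 0.,   0.  ]]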
class PolynomialType1_0(PolynomialTypeBase):
version = "1.0.0"
class PolynomialType1_1(PolynomialTypeBase):
version = "1.1.0"
class PolynomialType1_2(PolynomialTypeBase):
version = "1.2.0"
class OrthoPolynomialType(TransformType):
name = "transform/ortho_polynomial"
types = ['astropy.modeling.models.Legendre1D',
'astropy.modeling.models.Legendre2D',
'astropy.modeling.models.Chebyshev1D',
'astropy.modeling.models.Chebyshev2D',
'astropy.modeling.models.Hermite1D',
'astropy.modeling.models.Hermite2D']
typemap = {
'legendre': 0,
'chebyshev': 2,
'hermite': 4,
}
invtypemap = {v: k for k, v in typemap.items()}
version = "1.0.0"
@classmethod
def from_tree_transform(cls, node, ctx):
coefficients = np.asarray(node['coefficients'])
n_dim = coefficients.ndim
poly_type = node['polynomial_type']
if n_dim == 1:
domain = node.get('domain', None)
window = node.get('window', None)
model = cls.types[cls.typemap[poly_type]](coefficients.size - 1,
domain=domain, window=window)
model.parameters = coefficients
elif n_dim == 2:
x_domain, y_domain = tuple(node.get('domain', (None, None)))
x_window, y_window = tuple(node.get('window', (None, None)))
coeffs = {}
shape = coefficients.shape
x_degree = shape[0] - 1
y_degree = shape[1] - 1
for i in range(x_degree + 1):
for j in range(y_degree + 1):
name = f'c{i}_{j}'
coeffs[name] = coefficients[i, j]
model = cls.types[cls.typemap[poly_type]+1](x_degree, y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
**coeffs)
else:
raise NotImplementedError(
"Asdf currently only supports 1D or 2D polynomial transforms.")
return model
@classmethod
def to_tree_transform(cls, model, ctx):
typeindex = cls.types.index(model.__class__)
        poly_type = cls.invtypemap[(typeindex // 2) * 2]
ndim = (typeindex % 2) + 1
if ndim == 1:
coefficients = np.array(model.parameters)
else:
coefficients = np.zeros((model.x_degree + 1, model.y_degree + 1))
for i in range(model.x_degree + 1):
for j in range(model.y_degree + 1):
name = f'c{i}_{j}'
coefficients[i, j] = getattr(model, name).value
node = {'polynomial_type': poly_type, 'coefficients': coefficients}
if ndim == 1:
if model.domain is not None:
node['domain'] = model.domain
if model.window is not None:
node['window'] = model.window
else:
            if model.x_domain is not None or model.y_domain is not None:
                node['domain'] = (model.x_domain, model.y_domain)
            if model.x_window is not None or model.y_window is not None:
                node['window'] = (model.x_window, model.y_window)
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
# There should be a more elegant way of doing this
TransformType.assert_equal(a, b)
assert ((isinstance(a, (modeling.models.Legendre1D, modeling.models.Legendre2D)) and
isinstance(b, (modeling.models.Legendre1D, modeling.models.Legendre2D))) or
(isinstance(a, (modeling.models.Chebyshev1D, modeling.models.Chebyshev2D)) and
isinstance(b, (modeling.models.Chebyshev1D, modeling.models.Chebyshev2D))) or
(isinstance(a, (modeling.models.Hermite1D, modeling.models.Hermite2D)) and
isinstance(b, (modeling.models.Hermite1D, modeling.models.Hermite2D))))
assert_array_equal(a.parameters, b.parameters)
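# Illustrative note (assumption, not part of the original file): `types`
# interleaves the 1D and 2D classes, so for a model at index k,
# (k // 2) * 2 recovers the family's typemap slot and k % 2 + 1 its
# dimensionality -- e.g. Chebyshev2D sits at index 3, which maps to
# 'chebyshev' (slot 2) with ndim == 2.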
class Linear1DType(TransformType):
name = "transform/linear1d"
version = '1.0.0'
types = ['astropy.modeling.models.Linear1D']
@classmethod
def from_tree_transform(cls, node, ctx):
slope = node.get('slope', None)
intercept = node.get('intercept', None)
return modeling.models.Linear1D(slope=slope, intercept=intercept)
@classmethod
def to_tree_transform(cls, model, ctx):
return {
'slope': _parameter_to_value(model.slope),
'intercept': _parameter_to_value(model.intercept),
}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Linear1D) and
isinstance(b, modeling.models.Linear1D))
assert_array_equal(a.slope, b.slope)
assert_array_equal(a.intercept, b.intercept)
|
2057e193917b4f4a97930285478ec3ddee440aa5633b02a35a8cffc4128da812 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_array_equal
from astropy import modeling
from astropy import units as u
from astropy.modeling.bounding_box import ModelBoundingBox
from astropy.io.misc.asdf.tags.transform.basic import TransformType
__all__ = ['TabularType']
class TabularType(TransformType):
name = "transform/tabular"
version = '1.2.0'
types = [
modeling.models.Tabular2D, modeling.models.Tabular1D
]
@classmethod
def from_tree_transform(cls, node, ctx):
lookup_table = node.pop("lookup_table")
dim = lookup_table.ndim
fill_value = node.pop("fill_value", None)
if dim == 1:
# The copy is necessary because the array is memory mapped.
points = (node['points'][0][:],)
model = modeling.models.Tabular1D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
elif dim == 2:
points = tuple(p[:] for p in node['points'])
model = modeling.models.Tabular2D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
        else:
            # Fall back to a dynamically generated Tabular model for dim > 2.
            # (The original line passed an undefined `name`, raising
            # NameError.)
            tabular_class = modeling.models.tabular_model(dim)
points = tuple(p[:] for p in node['points'])
model = tabular_class(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
return model
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
if model.fill_value is not None:
node["fill_value"] = model.fill_value
node["lookup_table"] = model.lookup_table
node["points"] = [p for p in model.points]
node["method"] = str(model.method)
node["bounds_error"] = model.bounds_error
return node
@classmethod
def assert_equal(cls, a, b):
if isinstance(a.lookup_table, u.Quantity):
assert u.allclose(a.lookup_table, b.lookup_table)
assert u.allclose(a.points, b.points)
a_box = a.bounding_box
if isinstance(a_box, ModelBoundingBox):
a_box = a_box.bounding_box()
b_box = b.bounding_box
if isinstance(b_box, ModelBoundingBox):
b_box = b_box.bounding_box()
for i in range(len(a_box)):
assert u.allclose(a_box[i], b_box[i])
else:
assert_array_equal(a.lookup_table, b.lookup_table)
assert_array_equal(a.points, b.points)
a_box = a.bounding_box
if isinstance(a_box, ModelBoundingBox):
a_box = a_box.bounding_box()
b_box = b.bounding_box
if isinstance(b_box, ModelBoundingBox):
b_box = b_box.bounding_box()
assert_array_equal(a_box, b_box)
assert (a.method == b.method)
if a.fill_value is None:
assert b.fill_value is None
elif np.isnan(a.fill_value):
assert np.isnan(b.fill_value)
else:
            assert a.fill_value == b.fill_value
        assert a.bounds_error == b.bounds_error
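# Illustrative sketch (assumption, not part of the original file): a
# Tabular1D node stores the interpolation inputs verbatim.
#
#     m = modeling.models.Tabular1D(points=np.arange(5),
#                                   lookup_table=np.arange(5) * 2.)
#     node = TabularType.to_tree_transform(m, ctx=None)
#     # node keys: 'fill_value' (NaN by default), 'lookup_table', 'points',
#     # 'method', 'bounds_error'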
|
f0a377abdd0b81d00129dee4b4afdb781dcd74c77f0154127ed495f15bd5ab8a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_array_equal
from astropy.modeling import physical_models
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['BlackBody', 'Drude1DType', 'Plummer1DType']
class BlackBody(TransformType):
name = 'transform/blackbody'
version = '1.0.0'
types = ['astropy.modeling.physical_models.BlackBody']
@classmethod
def from_tree_transform(cls, node, ctx):
return physical_models.BlackBody(scale=node['scale'],
temperature=node['temperature'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'scale': _parameter_to_value(model.scale),
'temperature': _parameter_to_value(model.temperature)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, physical_models.BlackBody) and
isinstance(b, physical_models.BlackBody))
assert_array_equal(a.scale, b.scale)
assert_array_equal(a.temperature, b.temperature)
class Drude1DType(TransformType):
name = 'transform/drude1d'
version = '1.0.0'
types = ['astropy.modeling.physical_models.Drude1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return physical_models.Drude1D(amplitude=node['amplitude'],
x_0=node['x_0'],
fwhm=node['fwhm'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'fwhm': _parameter_to_value(model.fwhm)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, physical_models.Drude1D) and
isinstance(b, physical_models.Drude1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.fwhm, b.fwhm)
class Plummer1DType(TransformType):
name = 'transform/plummer1d'
version = '1.0.0'
types = ['astropy.modeling.physical_models.Plummer1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return physical_models.Plummer1D(mass=node['mass'],
r_plum=node['r_plum'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'mass': _parameter_to_value(model.mass),
'r_plum': _parameter_to_value(model.r_plum)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, physical_models.Plummer1D) and
isinstance(b, physical_models.Plummer1D))
assert_array_equal(a.mass, b.mass)
assert_array_equal(a.r_plum, b.r_plum)
|
9de26ec9fade65b8fab95555e1468f6b2968c54dc2eb3a1b8cdf9e8f7b60cbcd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
from asdf import tagged
from asdf.tests.helpers import assert_tree_match
from astropy.modeling.core import Model, CompoundModel
from astropy.modeling.models import Identity, Mapping, Const1D
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
from astropy.io.misc.asdf.tags.transform.basic import TransformType
__all__ = ['CompoundType', 'RemapAxesType']
_operator_to_tag_mapping = {
'+': 'add',
'-': 'subtract',
'*': 'multiply',
'/': 'divide',
'**': 'power',
'|': 'compose',
'&': 'concatenate',
'fix_inputs': 'fix_inputs'
}
_tag_to_method_mapping = {
'add': '__add__',
'subtract': '__sub__',
'multiply': '__mul__',
'divide': '__truediv__',
'power': '__pow__',
'compose': '__or__',
'concatenate': '__and__',
'fix_inputs': 'fix_inputs'
}
class CompoundType(TransformType):
name = ['transform/' + x for x in _tag_to_method_mapping.keys()]
types = [CompoundModel]
version = '1.2.0'
handle_dynamic_subclasses = True
@classmethod
def from_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
tag = node._tag[node._tag.rfind('/')+1:]
tag = tag[:tag.rfind('-')]
oper = _tag_to_method_mapping[tag]
left = node['forward'][0]
if not isinstance(left, Model):
raise TypeError(f"Unknown model type '{node['forward'][0]._tag}'")
right = node['forward'][1]
if (not isinstance(right, Model) and
not (oper == 'fix_inputs' and isinstance(right, dict))):
raise TypeError(f"Unknown model type '{node['forward'][1]._tag}'")
if oper == 'fix_inputs':
right = dict(zip(right['keys'], right['values']))
model = CompoundModel('fix_inputs', left, right)
else:
model = getattr(left, oper)(right)
return cls._from_tree_base_transform_members(model, node, ctx)
@classmethod
def to_tree_tagged(cls, model, ctx):
warnings.warn(create_asdf_deprecation_warning())
left = model.left
if isinstance(model.right, dict):
right = {
'keys': list(model.right.keys()),
'values': list(model.right.values())
}
else:
right = model.right
node = {
'forward': [left, right]
}
try:
tag_name = 'transform/' + _operator_to_tag_mapping[model.op]
except KeyError:
raise ValueError(f"Unknown operator '{model.op}'")
node = tagged.tag_object(cls.make_yaml_tag(tag_name), node, ctx=ctx)
return cls._to_tree_base_transform_members(model, node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert_tree_match(a.left, b.left)
assert_tree_match(a.right, b.right)
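# Illustrative sketch (assumption, not part of the original file): the
# operator of a CompoundModel selects the YAML tag on write, and the tag
# selects the dunder method on read.
#
#     m = Const1D(1) + Const1D(2)
#     m.op                            # -> '+'
#     _operator_to_tag_mapping[m.op]  # -> 'add'
#     _tag_to_method_mapping['add']   # -> '__add__'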
class RemapAxesType(TransformType):
name = 'transform/remap_axes'
types = [Mapping]
version = '1.3.0'
@classmethod
def from_tree_transform(cls, node, ctx):
mapping = node['mapping']
n_inputs = node.get('n_inputs')
if all([isinstance(x, int) for x in mapping]):
return Mapping(tuple(mapping), n_inputs)
if n_inputs is None:
n_inputs = max(x for x in mapping if isinstance(x, int)) + 1
transform = Identity(n_inputs)
new_mapping = []
i = n_inputs
for entry in mapping:
if isinstance(entry, int):
new_mapping.append(entry)
else:
new_mapping.append(i)
transform = transform & Const1D(entry.value)
i += 1
return transform | Mapping(new_mapping)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'mapping': list(model.mapping)}
if model.n_inputs > max(model.mapping) + 1:
node['n_inputs'] = model.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
TransformType.assert_equal(a, b)
assert a.mapping == b.mapping
        assert a.n_inputs == b.n_inputs
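# Illustrative sketch (assumption, not part of the original file):
# non-integer entries in 'mapping' are read back as constants -- each one is
# appended as a Const1D fed alongside Identity, so a mapping like
# [0, 1, <constant c>] with n_inputs=2 deserializes to
# (Identity(2) & Const1D(c.value)) | Mapping((0, 1, 2)).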
|
b3dfcb92a23d3ca6e6b105e85176372e0ace3d6d20e0c74032ad23627578d2d1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from numpy.testing import assert_array_equal
from astropy.modeling import powerlaws
from astropy.io.misc.asdf.tags.transform.basic import TransformType
from . import _parameter_to_value
__all__ = ['PowerLaw1DType', 'BrokenPowerLaw1DType',
'SmoothlyBrokenPowerLaw1DType', 'ExponentialCutoffPowerLaw1DType',
'LogParabola1DType']
class PowerLaw1DType(TransformType):
name = 'transform/power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.PowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.PowerLaw1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.PowerLaw1D) and
isinstance(b, powerlaws.PowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
class BrokenPowerLaw1DType(TransformType):
name = 'transform/broken_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.BrokenPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.BrokenPowerLaw1D(amplitude=node['amplitude'],
x_break=node['x_break'],
alpha_1=node['alpha_1'],
alpha_2=node['alpha_2'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_break': _parameter_to_value(model.x_break),
'alpha_1': _parameter_to_value(model.alpha_1),
'alpha_2': _parameter_to_value(model.alpha_2)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.BrokenPowerLaw1D) and
isinstance(b, powerlaws.BrokenPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_break, b.x_break)
assert_array_equal(a.alpha_1, b.alpha_1)
assert_array_equal(a.alpha_2, b.alpha_2)
class SmoothlyBrokenPowerLaw1DType(TransformType):
name = 'transform/smoothly_broken_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.SmoothlyBrokenPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.SmoothlyBrokenPowerLaw1D(amplitude=node['amplitude'],
x_break=node['x_break'],
alpha_1=node['alpha_1'],
alpha_2=node['alpha_2'],
delta=node['delta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_break': _parameter_to_value(model.x_break),
'alpha_1': _parameter_to_value(model.alpha_1),
'alpha_2': _parameter_to_value(model.alpha_2),
'delta': _parameter_to_value(model.delta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.SmoothlyBrokenPowerLaw1D) and
isinstance(b, powerlaws.SmoothlyBrokenPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_break, b.x_break)
assert_array_equal(a.alpha_1, b.alpha_1)
assert_array_equal(a.alpha_2, b.alpha_2)
assert_array_equal(a.delta, b.delta)
class ExponentialCutoffPowerLaw1DType(TransformType):
name = 'transform/exponential_cutoff_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.ExponentialCutoffPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.ExponentialCutoffPowerLaw1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'],
x_cutoff=node['x_cutoff'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha),
'x_cutoff': _parameter_to_value(model.x_cutoff)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.ExponentialCutoffPowerLaw1D) and
isinstance(b, powerlaws.ExponentialCutoffPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
assert_array_equal(a.x_cutoff, b.x_cutoff)
class LogParabola1DType(TransformType):
name = 'transform/log_parabola1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.LogParabola1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.LogParabola1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'],
beta=node['beta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha),
'beta': _parameter_to_value(model.beta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.LogParabola1D) and
isinstance(b, powerlaws.LogParabola1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
assert_array_equal(a.beta, b.beta)
|
c76cc2ee1ac07360cf1756fe90fb91731569c3915d34e28f53e5e7bdef39ec74 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import numpy as np
from astropy.time import TimeDelta
from astropy.io.misc.asdf.types import AstropyType
__all__ = ['TimeDeltaType']
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
class TimeDeltaType(AstropyType):
name = 'time/timedelta'
types = [TimeDelta]
version = '1.0.0'
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, node, ctx):
return TimeDelta.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
assert allclose_jd(old.jd, new.jd)
assert allclose_jd2(old.jd2, new.jd2)
assert allclose_sec(old.sec, new.sec)
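# Illustrative usage sketch (not part of the original module): round-trip a
# TimeDelta through the tag's tree form; the ``ctx`` argument is unused by
# these classmethods, so ``None`` suffices for a demonstration.
if __name__ == '__main__':
    _td = TimeDelta(1.5, format='jd')
    _node = TimeDeltaType.to_tree(_td, ctx=None)
    _back = TimeDeltaType.from_tree(_node, ctx=None)
    TimeDeltaType.assert_equal(_td, _back)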
|
60226f69f7a01488e07b5eda27e76e0ce4636d972e9a2e53a560f66943a4e084 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy.testing import assert_array_equal
from asdf.versioning import AsdfSpec
from astropy import time
from astropy import units as u
from astropy.units import Quantity
from astropy.coordinates import EarthLocation
from astropy.io.misc.asdf.types import AstropyAsdfType
__all__ = ['TimeType']
_guessable_formats = {'iso', 'byear', 'jyear', 'yday'}
_astropy_format_to_asdf_format = {
'isot': 'iso',
'byear_str': 'byear',
'jyear_str': 'jyear'
}
def _assert_earthlocation_equal(a, b):
assert_array_equal(a.x, b.x)
assert_array_equal(a.y, b.y)
assert_array_equal(a.z, b.z)
assert_array_equal(a.lat, b.lat)
assert_array_equal(a.lon, b.lon)
class TimeType(AstropyAsdfType):
name = 'time/time'
version = '1.1.0'
supported_versions = ['1.0.0', AsdfSpec('>=1.1.0')]
types = ['astropy.time.core.Time']
requires = ['astropy']
@classmethod
def to_tree(cls, node, ctx):
fmt = node.format
if fmt == 'byear':
node = time.Time(node, format='byear_str')
elif fmt == 'jyear':
node = time.Time(node, format='jyear_str')
elif fmt in ('fits', 'datetime', 'plot_date'):
node = time.Time(node, format='isot')
fmt = node.format
fmt = _astropy_format_to_asdf_format.get(fmt, fmt)
guessable_format = fmt in _guessable_formats
if node.scale == 'utc' and guessable_format and node.isscalar:
return node.value
d = {'value': node.value}
if not guessable_format:
d['format'] = fmt
if node.scale != 'utc':
d['scale'] = node.scale
if node.location is not None:
x, y, z = node.location.x, node.location.y, node.location.z
# Preserve backwards compatibility for writing the old schema
# This allows WCS to test backwards compatibility with old frames
# This code does get tested in CI, but we don't run a coverage test
if cls.version == '1.0.0': # pragma: no cover
unit = node.location.unit
d['location'] = {
'x': x.value,
'y': y.value,
'z': z.value,
'unit': unit
}
else:
d['location'] = {
# It seems like EarthLocations can be represented either in
# terms of Cartesian coordinates or latitude and longitude, so
# we rather arbitrarily choose the former for our representation
'x': x,
'y': y,
'z': z
}
return d
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, (str, list, np.ndarray)):
t = time.Time(node)
fmt = _astropy_format_to_asdf_format.get(t.format, t.format)
if fmt not in _guessable_formats:
raise ValueError(f"Invalid time '{node}'")
return t
value = node['value']
fmt = node.get('format')
scale = node.get('scale')
location = node.get('location')
if location is not None:
unit = location.get('unit', u.m)
# This ensures that we can read the v.1.0.0 schema and convert it
# to the new EarthLocation object, which expects Quantity components
for comp in ['x', 'y', 'z']:
if not isinstance(location[comp], Quantity):
location[comp] = Quantity(location[comp], unit=unit)
location = EarthLocation.from_geocentric(
location['x'], location['y'], location['z'])
return time.Time(value, format=fmt, scale=scale, location=location)
@classmethod
def assert_equal(cls, old, new):
assert old.format == new.format
assert old.scale == new.scale
if isinstance(old.location, EarthLocation):
assert isinstance(new.location, EarthLocation)
_assert_earthlocation_equal(old.location, new.location)
else:
assert old.location == new.location
assert_array_equal(old, new)
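# Illustrative usage sketch (not part of the original module): a scalar UTC
# time in a guessable format serializes to a bare string, while any other
# scale (or a non-guessable format) yields a dict with explicit keys.
if __name__ == '__main__':
    _t_utc = time.Time('2000-01-01T00:00:00', scale='utc')
    assert isinstance(TimeType.to_tree(_t_utc, ctx=None), str)
    _t_tai = time.Time('2000-01-01T00:00:00', scale='tai')
    assert TimeType.to_tree(_t_tai, ctx=None)['scale'] == 'tai'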
|
cb2e69ea73ab7109a83322afe3d232e53701244d391c1d9da58b0835032898cb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from asdf.tags.core import NDArrayType
from astropy.coordinates.spectral_coordinate import SpectralCoord
from astropy.io.misc.asdf.types import AstropyType
from astropy.io.misc.asdf.tags.unit.unit import UnitType
__all__ = ['SpectralCoordType']
class SpectralCoordType(AstropyType):
"""
    ASDF tag implementation used to serialize/deserialize SpectralCoord objects
"""
name = 'coordinates/spectralcoord'
types = [SpectralCoord]
version = '1.0.0'
@classmethod
def to_tree(cls, spec_coord, ctx):
node = {}
if isinstance(spec_coord, SpectralCoord):
node['value'] = spec_coord.value
node['unit'] = spec_coord.unit
if spec_coord.observer is not None:
node['observer'] = spec_coord.observer
if spec_coord.target is not None:
node['target'] = spec_coord.target
return node
raise TypeError(f"'{spec_coord}' is not a valid SpectralCoord")
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, SpectralCoord):
return node
unit = UnitType.from_tree(node['unit'], ctx)
value = node['value']
observer = node['observer'] if 'observer' in node else None
        target = node['target'] if 'target' in node else None
if isinstance(value, NDArrayType):
value = value._make_array()
return SpectralCoord(value, unit=unit, observer=observer, target=target)
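# Illustrative usage sketch (not part of the original module): to_tree emits
# a plain mapping of value/unit plus the optional observer/target entries.
if __name__ == '__main__':
    import astropy.units as u
    _node = SpectralCoordType.to_tree(SpectralCoord(500 * u.nm), ctx=None)
    assert _node['value'] == 500 and str(_node['unit']) == 'nm'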
|
fa067dd31b23d43beef2f4b835970657c69c08ab321d0998d6703d79959595d5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import glob
import warnings
from asdf import tagged
import astropy.units as u
import astropy.coordinates
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.units import Quantity
from astropy.coordinates import ICRS, Longitude, Latitude, Angle
from astropy.io.misc.asdf.types import AstropyType
from astropy.io.misc.asdf.deprecation import create_asdf_deprecation_warning
__all__ = ['CoordType']
SCHEMA_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'schemas', 'astropy.org', 'astropy'))
def _get_frames():
"""
By reading the schema files, get the list of all the frames we can
save/load.
"""
search = os.path.join(SCHEMA_PATH, 'coordinates', 'frames', '*.yaml')
files = glob.glob(search)
names = []
for fpath in files:
path, fname = os.path.split(fpath)
frame, _ = fname.split('-')
# Skip baseframe because we cannot directly save / load it.
# Skip icrs because we have an explicit tag for it because there are
# two versions.
if frame not in ['baseframe', 'icrs']:
names.append(frame)
return names
class BaseCoordType:
"""
This defines the base methods for coordinates, without defining anything
related to asdf types. This allows subclasses with different types and
schemas to use this without confusing the metaclass machinery.
"""
@staticmethod
def _tag_to_frame(tag):
"""
Extract the frame name from the tag.
"""
tag = tag[tag.rfind('/')+1:]
tag = tag[:tag.rfind('-')]
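        # e.g. (illustrative): for the tag
        # 'tag:astropy.org:astropy/coordinates/frames/fk4-1.0.0' the two
        # slices above leave 'fk4', which lookup_name resolves to the FK4
        # frame class registered on the transform graph.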
return frame_transform_graph.lookup_name(tag)
@classmethod
def _frame_name_to_tag(cls, frame_name):
return cls.make_yaml_tag(cls._tag_prefix + frame_name)
@classmethod
def from_tree_tagged(cls, node, ctx):
warnings.warn(create_asdf_deprecation_warning())
frame = cls._tag_to_frame(node._tag)
data = node.get('data', None)
if data is not None:
return frame(node['data'], **node['frame_attributes'])
return frame(**node['frame_attributes'])
@classmethod
def to_tree_tagged(cls, frame, ctx):
warnings.warn(create_asdf_deprecation_warning())
if type(frame) not in frame_transform_graph.frame_set:
raise ValueError("Can only save frames that are registered with the "
"transformation graph.")
node = {}
if frame.has_data:
node['data'] = frame.data
frame_attributes = {}
for attr in frame.frame_attributes.keys():
value = getattr(frame, attr, None)
if value is not None:
frame_attributes[attr] = value
node['frame_attributes'] = frame_attributes
return tagged.tag_object(cls._frame_name_to_tag(frame.name), node, ctx=ctx)
@classmethod
def assert_equal(cls, old, new):
assert isinstance(new, type(old))
if new.has_data:
assert u.allclose(new.data.lon, old.data.lon)
assert u.allclose(new.data.lat, old.data.lat)
class CoordType(BaseCoordType, AstropyType):
_tag_prefix = "coordinates/frames/"
name = ["coordinates/frames/" + f for f in _get_frames()]
types = [astropy.coordinates.BaseCoordinateFrame]
handle_dynamic_subclasses = True
requires = ['astropy']
version = "1.0.0"
class ICRSType(CoordType):
"""
Define a special tag for ICRS so we can make it version 1.1.0.
"""
name = "coordinates/frames/icrs"
types = ['astropy.coordinates.ICRS']
version = "1.1.0"
class ICRSType10(AstropyType):
name = "coordinates/frames/icrs"
types = [astropy.coordinates.ICRS]
requires = ['astropy']
version = "1.0.0"
@classmethod
def from_tree(cls, node, ctx):
wrap_angle = Angle(node['ra']['wrap_angle'])
ra = Longitude(
node['ra']['value'],
unit=node['ra']['unit'],
wrap_angle=wrap_angle)
dec = Latitude(node['dec']['value'], unit=node['dec']['unit'])
return ICRS(ra=ra, dec=dec)
@classmethod
def to_tree(cls, frame, ctx):
node = {}
wrap_angle = Quantity(frame.ra.wrap_angle)
node['ra'] = {
'value': frame.ra.value,
'unit': frame.ra.unit.to_string(),
'wrap_angle': wrap_angle
}
node['dec'] = {
'value': frame.dec.value,
'unit': frame.dec.unit.to_string()
}
return node
@classmethod
def assert_equal(cls, old, new):
assert isinstance(old, ICRS)
assert isinstance(new, ICRS)
assert u.allclose(new.ra, old.ra)
assert u.allclose(new.dec, old.dec)
|
12e4b0cda3778eb026462d1b075eff6fc2c96159e6ecf1e5ad3691a2005aa8b2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates import SkyCoord
from astropy.coordinates.tests.helper import skycoord_equal
from astropy.io.misc.asdf.types import AstropyType
class SkyCoordType(AstropyType):
name = 'coordinates/skycoord'
types = [SkyCoord]
version = "1.0.0"
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, tree, ctx):
return SkyCoord.info._construct_from_dict(tree)
@classmethod
def assert_equal(cls, old, new):
assert skycoord_equal(old, new)
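# Illustrative usage sketch (not part of the original module): SkyCoord
# delegates (de)serialization to its ``info`` mixin, so the tree is exactly
# what ``_represent_as_dict`` produces.
if __name__ == '__main__':
    import astropy.units as u
    _c = SkyCoord(10 * u.deg, 20 * u.deg)
    _tree = SkyCoordType.to_tree(_c, ctx=None)
    assert skycoord_equal(SkyCoordType.from_tree(_tree, ctx=None), _c)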
|
46da0e832a0d3f6bb0febde7af52df0c6f147c521e3aa1dca28e11896a936777 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates import Angle, Latitude, Longitude
from astropy.io.misc.asdf.tags.unit.quantity import QuantityType
__all__ = ['AngleType', 'LatitudeType', 'LongitudeType']
class AngleType(QuantityType):
name = "coordinates/angle"
types = [Angle]
requires = ['astropy']
version = "1.0.0"
organization = 'astropy.org'
standard = 'astropy'
@classmethod
def from_tree(cls, node, ctx):
return Angle(super().from_tree(node, ctx))
class LatitudeType(AngleType):
name = "coordinates/latitude"
types = [Latitude]
@classmethod
def from_tree(cls, node, ctx):
return Latitude(super().from_tree(node, ctx))
class LongitudeType(AngleType):
name = "coordinates/longitude"
types = [Longitude]
@classmethod
def from_tree(cls, node, ctx):
wrap_angle = node['wrap_angle']
return Longitude(super().from_tree(node, ctx), wrap_angle=wrap_angle)
@classmethod
def to_tree(cls, longitude, ctx):
tree = super().to_tree(longitude, ctx)
tree['wrap_angle'] = longitude.wrap_angle
return tree
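# Illustrative usage sketch (not part of the original module): a hand-built
# tree node for a Longitude carries 'wrap_angle' alongside the quantity keys.
if __name__ == '__main__':
    import astropy.units as u
    _node = {'value': 350.0, 'unit': 'deg', 'wrap_angle': Angle(180, u.deg)}
    _lon = LongitudeType.from_tree(_node, ctx=None)
    assert _lon.wrap_angle == Angle(180, u.deg)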
|
7322fb61300b5316f94b8f15fdfd7a2bdbe4243e846314b14b4cee7a3a466938 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.coordinates import EarthLocation
from astropy.io.misc.asdf.types import AstropyType
class EarthLocationType(AstropyType):
name = 'coordinates/earthlocation'
types = [EarthLocation]
version = '1.0.0'
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, node, ctx):
return EarthLocation.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
        assert (old == new).all()
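# Illustrative usage sketch (not part of the original module): EarthLocation
# round-trips through its ``info`` mixin just like SkyCoord.
if __name__ == '__main__':
    import astropy.units as u
    _loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
    _node = EarthLocationType.to_tree(_loc, ctx=None)
    assert (EarthLocationType.from_tree(_node, ctx=None) == _loc).all()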
|
7597eaf597b5134df065f880ceb2598b9d0d1ade1b9e1ad5f828aa9df38c9d12 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
def run_schema_example_test(organization, standard, name, version, check_func=None):
import asdf
from asdf.tests import helpers
from asdf.types import format_tag
from asdf.schema import load_schema
tag = format_tag(organization, standard, version, name)
uri = asdf.extension.default_extensions.extension_list.tag_mapping(tag)
r = asdf.extension.get_default_resolver()
examples = []
schema = load_schema(uri, resolver=r)
for node in asdf.treeutil.iter_tree(schema):
if (isinstance(node, dict) and
'examples' in node and
isinstance(node['examples'], list)):
for desc, example in node['examples']:
examples.append(example)
for example in examples:
buff = helpers.yaml_to_asdf('example: ' + example.strip())
ff = asdf.AsdfFile(uri=uri)
# Add some dummy blocks so that the ndarray examples work
for i in range(3):
b = asdf.block.Block(np.zeros((1024*1024*8), dtype=np.uint8))
b._used = True
ff.blocks.add(b)
ff._open_impl(ff, buff, mode='r')
if check_func:
check_func(ff)
|
f930af65a584f75e2ba4e35c573d4a64e267bff37cb47bf1aa42d5a7938e6c60 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.units.equivalencies import Equivalency
from astropy.units import equivalencies
from astropy.io.misc.asdf.types import AstropyType
class EquivalencyType(AstropyType):
name = "units/equivalency"
types = [Equivalency]
version = '1.0.0'
@classmethod
def to_tree(cls, equiv, ctx):
if not isinstance(equiv, Equivalency):
raise TypeError(f"'{equiv}' is not a valid Equivalency")
eqs = []
for e, kwargs in zip(equiv.name, equiv.kwargs):
kwarg_names = list(kwargs.keys())
kwarg_values = list(kwargs.values())
eq = {'name': e, 'kwargs_names': kwarg_names, 'kwargs_values': kwarg_values}
eqs.append(eq)
return eqs
@classmethod
def from_tree(cls, node, ctx):
eqs = []
for eq in node:
equiv = getattr(equivalencies, eq['name'])
kwargs = dict(zip(eq['kwargs_names'], eq['kwargs_values']))
eqs.append(equiv(**kwargs))
return sum(eqs[1:], eqs[0])
@classmethod
def assert_equal(cls, a, b):
assert a == b
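# Illustrative usage sketch (not part of the original module): a composite
# equivalency serializes to a list with one entry per component.
if __name__ == '__main__':
    import astropy.units as u
    _eq = u.spectral() + u.parallax()
    _tree = EquivalencyType.to_tree(_eq, ctx=None)
    assert [e['name'] for e in _tree] == ['spectral', 'parallax']
    assert EquivalencyType.from_tree(_tree, ctx=None) == _eq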
|
12ee8cefaac6b279a4d01c1a23fd67be323a173c0d6da2e8f85bbacfecca92d7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.units import Unit, UnitBase
from astropy.io.misc.asdf.types import AstropyAsdfType
class UnitType(AstropyAsdfType):
name = 'unit/unit'
types = ['astropy.units.UnitBase']
requires = ['astropy']
@classmethod
def to_tree(cls, node, ctx):
if isinstance(node, str):
node = Unit(node, format='vounit', parse_strict='warn')
if isinstance(node, UnitBase):
return node.to_string(format='vounit')
raise TypeError(f"'{node}' is not a valid unit")
@classmethod
def from_tree(cls, node, ctx):
return Unit(node, format='vounit', parse_strict='silent')
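# Illustrative usage sketch (not part of the original module): units
# round-trip through their VOUnit string form.
if __name__ == '__main__':
    assert UnitType.to_tree('km', ctx=None) == 'km'
    assert UnitType.from_tree('km', ctx=None) == Unit('km')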
|
c1efcdbddc7b38b22e259e5ad563d0063c0aff2c7c681f64fc352cfc2e1d51cc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from asdf.tags.core import NDArrayType
from astropy.units import Quantity
from astropy.io.misc.asdf.types import AstropyAsdfType
class QuantityType(AstropyAsdfType):
name = 'unit/quantity'
types = ['astropy.units.Quantity']
requires = ['astropy']
version = '1.1.0'
@classmethod
def to_tree(cls, quantity, ctx):
node = {}
if isinstance(quantity, Quantity):
node['value'] = quantity.value
node['unit'] = quantity.unit
return node
raise TypeError(f"'{quantity}' is not a valid Quantity")
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, Quantity):
return node
unit = node['unit']
value = node['value']
if isinstance(value, NDArrayType):
value = value._make_array()
return Quantity(value, unit=unit)
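# Illustrative usage sketch (not part of the original module): the tree node
# is a plain mapping of 'value' and 'unit'.
if __name__ == '__main__':
    _node = QuantityType.to_tree(Quantity(3.0, unit='m'), ctx=None)
    _q = QuantityType.from_tree(_node, ctx=None)
    assert _q.value == 3.0 and str(_q.unit) == 'm'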
|
11451e058de7073973e2ffd16006353ebc6845f07d781a499d60ff234bbee9e2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
import numpy as np
from packaging.version import Version
import astropy.units as u
from astropy import table
from astropy.time import Time, TimeDelta
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.coordinates.tests.helper import skycoord_equal
from asdf.tests import helpers
from asdf.tags.core.ndarray import NDArrayType
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
def test_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 3
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_array_columns(tmpdir):
a = np.array([([[1, 2], [3, 4]], 2.0, 'x'),
([[5, 6], [7, 8]], 5.0, 'y'),
([[9, 10], [11, 12]], 8.2, 'z')],
dtype=[('a', '<i4', (2, 2)),
('b', '<f8'),
('c', '|S1')])
t = table.Table(a, copy=False)
assert t.columns['a'].shape == (3, 2, 2)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_structured_array_columns(tmpdir):
a = np.array([((1, 'a'), 2.0, 'x'),
((4, 'b'), 5.0, 'y'),
((5, 'c'), 8.2, 'z')],
dtype=[('a', [('a0', '<i4'), ('a1', '|S1')]),
('b', '<f8'), ('c', '|S1')])
t = table.Table(a, copy=False)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_row_order(tmpdir):
a = np.array([(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')],
dtype=[('a', '<i4'), ('b', '<f8'), ('c', '|S1')])
t = table.Table(a, copy=False)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_inline(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(list(ff.blocks.internal_blocks)) == 0
if Version(asdf.__version__) >= Version('2.8.0'):
# The auto_inline argument is deprecated as of asdf 2.8.0.
with asdf.config_context() as config:
config.array_inline_threshold = 64
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
else:
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check,
write_options={'auto_inline': 64})
def test_mismatched_columns():
yaml = """
table: !<tag:astropy.org:astropy/table/table-1.0.0>
columns:
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2]
name: a
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2, 3]
name: b
colnames: [a, b]
"""
buff = helpers.yaml_to_asdf(yaml)
with pytest.raises(ValueError) as err:
with asdf.open(buff) as ff:
pass
assert 'Inconsistent data column lengths' in str(err.value)
def test_masked_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'), masked=True)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['a'].mask = [True, False, True]
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 4
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_quantity_mixin(tmpdir):
t = table.QTable()
t['a'] = [1, 2, 3]
t['b'] = ['x', 'y', 'z']
t['c'] = [2.0, 5.0, 8.2] * u.m
def check(ff):
assert isinstance(ff['table']['c'], u.Quantity)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_time_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
def check(ff):
assert isinstance(ff['table']['c'], Time)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_timedelta_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = TimeDelta([1, 2] * u.day)
def check(ff):
assert isinstance(ff['table']['c'], TimeDelta)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_skycoord_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4', obstime='J1990.5')
def check(ff):
assert isinstance(ff['table']['c'], SkyCoord)
def tree_match(old, new):
NDArrayType.assert_equal(new['a'], old['a'])
NDArrayType.assert_equal(new['b'], old['b'])
assert skycoord_equal(new['c'], old['c'])
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check,
tree_match_func=tree_match)
def test_earthlocation_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
def check(ff):
assert isinstance(ff['table']['c'], EarthLocation)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_ndarray_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = table.NdarrayMixin([5, 6])
helpers.assert_roundtrip_tree({'table': t}, tmpdir)
def test_backwards_compat():
"""
Make sure that we can continue to read tables that use the schema from
the ASDF Standard.
This test uses the examples in the table schema from the ASDF Standard,
since these make no reference to Astropy's own table definition.
"""
def check(asdffile):
assert isinstance(asdffile['example'], table.Table)
run_schema_example_test('stsci.edu', 'asdf', 'core/table', '1.0.0', check)
|
02ae6f7db85b68e4710d11a9262f497a82a664c883f3114b076007ea2ee872af | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
import os
import numpy as np
from astropy.io import fits
from asdf.tests import helpers
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
def test_complex_structure(tmpdir):
with fits.open(os.path.join(
os.path.dirname(__file__), 'data', 'complex.fits'), memmap=False) as hdulist:
tree = {
'fits': hdulist
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fits_table(tmpdir):
a = np.array(
[(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
h = fits.HDUList()
h.append(fits.BinTableHDU.from_columns(a))
tree = {'fits': h}
def check_yaml(content):
assert b'!<tag:astropy.org:astropy/table/table-1.0.0>' in content
helpers.assert_roundtrip_tree(tree, tmpdir, raw_yaml_check_func=check_yaml)
def test_backwards_compat():
"""
Make sure that we can continue to read FITS HDUs that use the schema from
the ASDF Standard.
This test uses the examples in the fits schema from the ASDF Standard,
since these make no reference to Astropy's own fits definition.
"""
def check(asdffile):
assert isinstance(asdffile['example'], fits.HDUList)
run_schema_example_test('stsci.edu', 'asdf', 'fits/fits', '1.0.0', check)
|
c58dae81cda38da14675470931f75b012197d7c547457145913ee6277c693d63 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
import warnings
from packaging.version import Version
import numpy as np
from asdf import util
from asdf.tests import helpers
from asdf import AsdfFile
import asdf
import astropy.units as u
from astropy.modeling.core import fix_inputs
from astropy.modeling import models as astmodels
from astropy.utils.compat.optional_deps import HAS_SCIPY
def custom_and_analytical_inverse():
p1 = astmodels.Polynomial1D(1)
p2 = astmodels.Polynomial1D(1)
p3 = astmodels.Polynomial1D(1)
p4 = astmodels.Polynomial1D(1)
m1 = p1 & p2
m2 = p3 & p4
m1.inverse = m2
return m1
def custom_inputs_outputs():
m = astmodels.Gaussian2D()
m.inputs = ('a', 'b')
m.outputs = ('c',)
return m
test_models = [
astmodels.Identity(2), astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Shift(2.),
astmodels.Hermite1D(2, c0=2, c1=3, c2=0.5),
astmodels.Legendre1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Legendre2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Hermite2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Scale(3.4), astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.Multiply(3), astmodels.Multiply(10*u.m),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order='xzx'),
astmodels.Mapping((0, 1), n_inputs=3),
astmodels.Shift(2.*u.deg),
astmodels.Scale(3.4*u.deg),
astmodels.RotateNative2Celestial(5.63*u.deg, -72.5*u.deg, 180*u.deg),
astmodels.RotateCelestial2Native(5.63*u.deg, -72.5*u.deg, 180*u.deg),
astmodels.RotationSequence3D([1.2, 2.3, 3.4, .3], 'xyzx'),
astmodels.SphericalRotationSequence([1.2, 2.3, 3.4, .3], 'xyzy'),
astmodels.AiryDisk2D(amplitude=10., x_0=0.5, y_0=1.5),
astmodels.Box1D(amplitude=10., x_0=0.5, width=5.),
astmodels.Box2D(amplitude=10., x_0=0.5, x_width=5., y_0=1.5, y_width=7.),
astmodels.Const1D(amplitude=5.),
astmodels.Const2D(amplitude=5.),
astmodels.Disk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5.),
astmodels.Ellipse2D(amplitude=10., x_0=0.5, y_0=1.5, a=2., b=4., theta=0.1),
astmodels.Exponential1D(amplitude=10., tau=3.5),
astmodels.Gaussian1D(amplitude=10., mean=5., stddev=3.),
astmodels.Gaussian2D(amplitude=10., x_mean=5., y_mean=5., x_stddev=3., y_stddev=3.),
astmodels.KingProjectedAnalytic1D(amplitude=10., r_core=5., r_tide=2.),
astmodels.Logarithmic1D(amplitude=10., tau=3.5),
astmodels.Lorentz1D(amplitude=10., x_0=0.5, fwhm=2.5),
astmodels.Moffat1D(amplitude=10., x_0=0.5, gamma=1.2, alpha=2.5),
astmodels.Moffat2D(amplitude=10., x_0=0.5, y_0=1.5, gamma=1.2, alpha=2.5),
astmodels.Planar2D(slope_x=0.5, slope_y=1.2, intercept=2.5),
astmodels.RedshiftScaleFactor(z=2.5),
astmodels.RickerWavelet1D(amplitude=10., x_0=0.5, sigma=1.2),
astmodels.RickerWavelet2D(amplitude=10., x_0=0.5, y_0=1.5, sigma=1.2),
astmodels.Ring2D(amplitude=10., x_0=0.5, y_0=1.5, r_in=5., width=10.),
astmodels.Sersic1D(amplitude=10., r_eff=1., n=4.),
astmodels.Sersic2D(amplitude=10., r_eff=1., n=4., x_0=0.5, y_0=1.5, ellip=0.0, theta=0.0),
astmodels.Sine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.Cosine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.Tangent1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.ArcSine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.ArcCosine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.ArcTangent1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.Trapezoid1D(amplitude=10., x_0=0.5, width=5., slope=1.),
astmodels.TrapezoidDisk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5., slope=1.),
astmodels.Voigt1D(x_0=0.55, amplitude_L=10., fwhm_L=0.5, fwhm_G=0.9),
astmodels.BlackBody(scale=10.0, temperature=6000.*u.K),
astmodels.Drude1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
astmodels.Plummer1D(mass=10.0, r_plum=5.0),
astmodels.BrokenPowerLaw1D(amplitude=10, x_break=0.5, alpha_1=2.0, alpha_2=3.5),
astmodels.ExponentialCutoffPowerLaw1D(10, 0.5, 2.0, 7.),
astmodels.LogParabola1D(amplitude=10, x_0=0.5, alpha=2., beta=3.,),
astmodels.PowerLaw1D(amplitude=10., x_0=0.5, alpha=2.0),
astmodels.SmoothlyBrokenPowerLaw1D(amplitude=10., x_break=5.0, alpha_1=2.0, alpha_2=3.0, delta=0.5),
custom_and_analytical_inverse(),
custom_inputs_outputs(),
]
if HAS_SCIPY:
test_models.append(astmodels.Spline1D(np.array([-3., -3., -3., -3., -1., 0., 1., 3., 3., 3., 3.]),
np.array([0.10412331, 0.07013616, -0.18799552, 1.35953147, -0.15282581, 0.03923, -0.04297299, 0., 0., 0., 0.]),
3))
math_models = []
for kl in astmodels.math.__all__:
klass = getattr(astmodels.math, kl)
math_models.append(klass())
test_models.extend(math_models)
test_models_with_constraints = [astmodels.Legendre2D(x_degree=1, y_degree=1,
c0_0=1, c0_1=2, c1_0=3,
fixed={'c1_0': True, 'c0_1': True},
bounds={'c0_0': (-10, 10)})]
test_models.extend(test_models_with_constraints)
def test_transforms_compound(tmpdir):
tree = {
'compound':
astmodels.Shift(1) & astmodels.Shift(2) |
astmodels.Sky2Pix_TAN() |
astmodels.Rotation2D() |
astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32]) +
astmodels.Rotation2D(32)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_inverse_transforms(tmpdir):
rotation = astmodels.Rotation2D(32)
rotation.inverse = astmodels.Rotation2D(45)
real_rotation = astmodels.Rotation2D(32)
tree = {
'rotation': rotation,
'real_rotation': real_rotation
}
def check(ff):
assert ff.tree['rotation'].inverse.angle == 45
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
@pytest.mark.parametrize(('model'), test_models)
def test_single_model(tmpdir, model):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.6.0 which causes warnings
if Version(asdf.__version__) <= Version('2.6.0'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
tree = {'single_model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
def check(ff):
assert ff.tree['rot'].name == 'foo'
tree = {'rot': astmodels.Rotation2D(23, name='foo')}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_zenithal_with_arguments(tmpdir):
tree = {
'azp': astmodels.Sky2Pix_AZP(0.5, 0.3)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_naming_of_compound_model(tmpdir):
"""Issue #87"""
def asdf_check(ff):
assert ff.tree['model'].name == 'compound_model'
offx = astmodels.Shift(1)
scl = astmodels.Scale(2)
model = (offx | scl).rename('compound_model')
tree = {
'model': model
}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=asdf_check)
@pytest.mark.slow
def test_generic_projections(tmpdir):
from astropy.io.misc.asdf.tags.transform import projections
for tag_name, (name, params, version) in projections._generic_projections.items():
tree = {
'forward': util.resolve_name(
f'astropy.modeling.projections.Sky2Pix_{name}')(),
'backward': util.resolve_name(
f'astropy.modeling.projections.Pix2Sky_{name}')()
}
with warnings.catch_warnings():
            # Some schema files are missing from asdf<=2.5.1 which causes warnings
if Version(asdf.__version__) <= Version('2.5.1'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3., 0., 0.],
[0., 2., 0.],
[0., 0., 0.]])
points = ([1, 2, 3], [1, 2, 3])
model2 = astmodels.Tabular2D(points, lookup_table=table, bounds_error=False,
fill_value=None, method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_bounding_box(tmpdir):
model = astmodels.Shift(1) & astmodels.Shift(2)
model.bounding_box = ((1, 3), (2, 4))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
def test_const1d(tmpdir, standard_version):
helpers.assert_roundtrip_tree(
{"model": astmodels.Const1D(amplitude=5.)},
tmpdir,
init_options={"version": standard_version}
)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
@pytest.mark.parametrize("model", [
astmodels.Polynomial1D(1, c0=5, c1=17),
astmodels.Polynomial1D(1, c0=5, c1=17, domain=[-5, 4], window=[-2, 3]),
astmodels.Polynomial2D(2, c0_0=3, c1_0=5, c0_1=7),
astmodels.Polynomial2D(
2, c0_0=3, c1_0=5, c0_1=7, x_domain=[-2, 2], y_domain=[-4, 4],
x_window=[-6, 6], y_window=[-8, 8]
),
])
def test_polynomial(tmpdir, standard_version, model):
helpers.assert_roundtrip_tree({"model": model}, tmpdir, init_options={"version": standard_version})
def test_domain_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5, domain=[-2, 2])
model2d = astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3,
x_domain=[-2, 2], y_domain=[-2, 2])
fa = AsdfFile()
fa.tree['model1d'] = model1d
fa.tree['model2d'] = model2d
file_path = str(tmpdir.join('orthopoly_domain.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model1d'](1.8) == model1d(1.8)
assert f.tree['model2d'](1.8, -1.5) == model2d(1.8, -1.5)
def test_window_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5,
domain=[-2, 2], window=[-0.5, 0.5])
model2d = astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3,
x_domain=[-2, 2], y_domain=[-2, 2],
x_window=[-0.5, 0.5], y_window=[-0.1, 0.5])
fa = AsdfFile()
fa.tree['model1d'] = model1d
fa.tree['model2d'] = model2d
file_path = str(tmpdir.join('orthopoly_window.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model1d'](1.8) == model1d(1.8)
assert f.tree['model2d'](1.8, -1.5) == model2d(1.8, -1.5)
def test_linear1d(tmpdir):
model = astmodels.Linear1D()
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_linear1d_quantity(tmpdir):
model = astmodels.Linear1D(1*u.nm, 1*(u.nm/u.pixel))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model_units(tmpdir):
points = np.arange(0, 5) * u.pix
values = [1., 10, 2, 45, -3] * u.nm
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3., 0., 0.],
[0., 2., 0.],
[0., 0., 0.]]) * u.nm
points = ([1, 2, 3], [1, 2, 3]) * u.pix
model2 = astmodels.Tabular2D(points, lookup_table=table,
bounds_error=False, fill_value=None,
method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs(tmpdir):
with warnings.catch_warnings():
        # Some schema files are missing from asdf<=2.5.1 which causes warnings
if Version(asdf.__version__) <= Version('2.5.1'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
model0 = astmodels.Pix2Sky_TAN()
model0.input_units_equivalencies = {'x': u.dimensionless_angles(),
'y': u.dimensionless_angles()}
model1 = astmodels.Rotation2D()
model = model0 | model1
tree = {
'compound': fix_inputs(model, {'x': 45}),
'compound1': fix_inputs(model, {0: 45})
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs_type(tmpdir):
with pytest.raises(TypeError):
tree = {
'compound': fix_inputs(3, {'x': 45})
}
helpers.assert_roundtrip_tree(tree, tmpdir)
with pytest.raises(AttributeError):
tree = {
'compound': astmodels.Pix2Sky_TAN() & {'x': 45}
}
helpers.assert_roundtrip_tree(tree, tmpdir)
comp_model = custom_and_analytical_inverse()
@pytest.mark.parametrize(('model'), [astmodels.Shift(1) & astmodels.Shift(2) | comp_model,
comp_model | astmodels.Shift(1) & astmodels.Shift(2),
astmodels.Shift(1) & comp_model,
comp_model & astmodels.Shift(1)
])
def test_custom_and_analytical(model, tmpdir):
fa = AsdfFile()
fa.tree['model'] = model
file_path = str(tmpdir.join('custom_and_analytical_inverse.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model'].inverse is not None
def test_deserialize_compound_user_inverse(tmpdir):
"""
Confirm that we are able to correctly reconstruct a
compound model with a user inverse set on one of its
component models.
Due to code in TransformType that facilitates circular
inverses, the user inverse of the component model is
not available at the time that the CompoundModel is
constructed.
"""
yaml = """
model: !transform/concatenate-1.2.0
forward:
- !transform/shift-1.2.0
inverse: !transform/shift-1.2.0 {offset: 5.0}
offset: -10.0
- !transform/shift-1.2.0 {offset: -20.0}
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as af:
model = af["model"]
assert model.has_inverse()
assert model.inverse(-5, -20) == (0, 0)
# test some models and compound models with some input unit equivalencies
def models_with_input_eq():
# 1D model
m1 = astmodels.Shift(1*u.kg)
m1.input_units_equivalencies = {'x': u.mass_energy()}
# 2D model
m2 = astmodels.Const2D(10*u.Hz)
m2.input_units_equivalencies = {'x': u.dimensionless_angles(),
'y': u.dimensionless_angles()}
    # 2D model with an input equivalency on only one input
    m3 = astmodels.Const2D(10*u.Hz)
    m3.input_units_equivalencies = {'x': u.dimensionless_angles()}
    # model using an equivalency whose arguments carry units
    m4 = astmodels.PowerLaw1D(amplitude=1*u.m, x_0=10*u.pix, alpha=7)
    m4.input_units_equivalencies = {'x': u.equivalencies.pixel_scale(0.5*u.arcsec/u.pix)}
    return [m1, m2, m3, m4]
def compound_models_with_input_eq():
m1 = astmodels.Gaussian1D(10*u.K, 11*u.arcsec, 12*u.arcsec)
m1.input_units_equivalencies = {'x': u.parallax()}
m2 = astmodels.Gaussian1D(5*u.s, 2*u.K, 3*u.K)
m2.input_units_equivalencies = {'x': u.temperature()}
return [m1|m2, m1&m2, m1+m2]
test_models.extend(models_with_input_eq())
test_models.extend(compound_models_with_input_eq())
|
cb62d43ff524c73e9af973b8c9949f0f918a204a683d011bdd7f339c956d4119 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.time import Time, TimeDelta
@pytest.mark.parametrize('fmt', TimeDelta.FORMATS.keys())
def test_timedelta(fmt, tmpdir):
t1 = Time(Time.now())
t2 = Time(Time.now())
td = TimeDelta(t2 - t1, format=fmt)
tree = dict(timedelta=td)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize('scale', list(TimeDelta.SCALES) + [None])
def test_timedelta_scales(scale, tmpdir):
tree = dict(timedelta=TimeDelta(0.125, scale=scale, format="jd"))
assert_roundtrip_tree(tree, tmpdir)
def test_timedelta_vector(tmpdir):
tree = dict(timedelta=TimeDelta([1, 2] * u.day))
assert_roundtrip_tree(tree, tmpdir)
|
924509b787abbb176d3dfd7708e2b5bbaed9d20b9e21ce216cab281043c26348 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
import datetime
import numpy as np
from astropy import time
from asdf import AsdfFile, yamlutil, tagged
from asdf.tests import helpers
import asdf.schema as asdf_schema
def _flatten_combiners(schema):
newschema = dict()
def add_entry(path, schema, combiner):
# TODO: Simplify?
cursor = newschema
for i in range(len(path)):
part = path[i]
if isinstance(part, int):
cursor = cursor.setdefault('items', [])
while len(cursor) <= part:
cursor.append({})
cursor = cursor[part]
elif part == 'items':
cursor = cursor.setdefault('items', dict())
else:
cursor = cursor.setdefault('properties', dict())
if i < len(path) - 1 and isinstance(path[i+1], int):
cursor = cursor.setdefault(part, [])
else:
cursor = cursor.setdefault(part, dict())
        cursor.update(schema)
    # Minimal flattening pass (a best-effort sketch, not necessarily the
    # upstream implementation): merge top-level 'allOf' subschemas into
    # ``newschema`` via ``add_entry`` and copy the remaining keys verbatim,
    # so the function returns a usable schema instead of ``None``.
    for subschema in schema.get('allOf', []):
        add_entry([], subschema, 'allOf')
    for key, value in schema.items():
        if key != 'allOf':
            newschema[key] = value
    return newschema
def test_time(tmpdir):
time_array = time.Time(
np.arange(100), format="unix")
tree = {
'large_time_array': time_array
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location(tmpdir):
# See https://github.com/spacetelescope/asdf/issues/341
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=[1,2]*u.m, y=[3,4]*u.m, z=[5,6]*u.m)
t = time.Time([1,2], location=location, format='cxcsec')
tree = {'time': t}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location_1_0_0(tmpdir):
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=6378100*u.m, y=0*u.m, z=0*u.m)
t = time.Time('J2000.000', location=location, format='jyear_str')
tree = {'time': t}
# The version refers to ASDF Standard 1.0.0, which includes time-1.0.0
helpers.assert_roundtrip_tree(tree, tmpdir, init_options={"version": "1.0.0"})
def test_isot(tmpdir):
isot = time.Time('2000-01-01T00:00:00.000')
tree = {
'time': isot
}
helpers.assert_roundtrip_tree(tree, tmpdir)
ff = asdf.AsdfFile(tree)
tree = yamlutil.custom_tree_to_tagged_tree(ff.tree, ff)
if isinstance(tree['time'], str):
assert str(tree['time']) == isot.value
elif isinstance(tree['time'], dict):
assert str(tree['time']['value']) == isot.value
assert str(tree['time']['base_format']) == "isot"
else:
assert False
def test_isot_array(tmpdir):
tree = {
'time': time.Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_tag():
schema = asdf_schema.load_schema(
'http://stsci.edu/schemas/asdf/time/time-1.1.0',
resolve_references=True)
schema = _flatten_combiners(schema)
date = time.Time(datetime.datetime.now())
tree = {'date': date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], asdf)
asdf_schema.validate(instance, schema=schema)
tag = 'tag:stsci.edu:asdf/time/time-1.1.0'
date = tagged.tag_object(tag, date)
tree = {'date': date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], asdf)
asdf_schema.validate(instance, schema=schema)
|
34019a42407affffadbfad6804d11a3bf73d25d22eaf0ea2fd59887a9e4a88c7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units
from astropy.coordinates import ICRS, FK5, Longitude, Latitude, Angle
from astropy.io.misc.asdf.extension import AstropyExtension
def test_hcrs_basic(tmpdir):
ra = Longitude(25, unit=units.deg)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_basic(tmpdir):
wrap_angle = Angle(1.5, unit=units.rad)
ra = Longitude(25, unit=units.deg, wrap_angle=wrap_angle)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_nodata(tmpdir):
tree = {'coord': ICRS()}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_compound(tmpdir):
icrs = ICRS(ra=[0, 1, 2]*units.deg, dec=[3, 4, 5]*units.deg)
tree = {'coord': icrs}
assert_roundtrip_tree(tree, tmpdir)
def test_fk5_time(tmpdir):
tree = {'coord': FK5(equinox="2011-01-01T00:00:00")}
assert_roundtrip_tree(tree, tmpdir)
|
1731cdb0d08f80e951e1ff7f3f33adfe079a6faebd56d03a420e07afd6e86e59 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.coordinates.angles import Longitude, Latitude
from astropy.coordinates.earth import EarthLocation, ELLIPSOIDS
@pytest.fixture
def position():
lon = Longitude([0., 45., 90., 135., 180., -180, -90, -45], u.deg,
wrap_angle=180*u.deg)
lat = Latitude([+0., 30., 60., +90., -90., -60., -30., 0.], u.deg)
h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11., -.1], u.m)
return lon, lat, h
def test_earthlocation_quantity(tmpdir):
location = EarthLocation(lat=34.4900*u.deg, lon=-104.221800*u.deg,
height=40*u.km)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation(position, tmpdir):
x, y, z = EarthLocation.from_geodetic(*position).to_geocentric()
geocentric = EarthLocation(x, y, z)
tree = dict(location=geocentric)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize('ellipsoid', ELLIPSOIDS)
def test_earthlocation_geodetic(position, ellipsoid, tmpdir):
location = EarthLocation.from_geodetic(*position, ellipsoid=ellipsoid)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation_site(tmpdir):
orig_sites = getattr(EarthLocation, '_site_registry', None)
try:
EarthLocation._get_site_registry(force_builtin=True)
rog = EarthLocation.of_site('greenwich')
tree = dict(location=rog)
assert_roundtrip_tree(tree, tmpdir)
finally:
EarthLocation._site_registry = orig_sites
|
b8c801bd16b8942a3d1b45d361fd39407de3032a7304dde4033cf6694fa8490b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord, ICRS, Galactic, FK4, FK5, Longitude
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
# These tests are cribbed directly from the Examples section of
# https://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html
def test_scalar_skycoord(tmpdir):
c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_vector_skycoord(tmpdir):
c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_fk4(tmpdir):
coords = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
c = SkyCoord(coords, frame=FK4, unit=(u.deg, u.hourangle), obstime="J1992.21")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize('coord', [
SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic), # Units from string
SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
])
def test_skycoord_galactic(coord, tmpdir):
tree = dict(coord=coord)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_ra_dec(tmpdir):
ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
c = SkyCoord(ra, dec, frame='icrs')
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_override_defaults(tmpdir):
c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_cartesian(tmpdir):
c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
representation_type='cartesian')
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_vector_frames(tmpdir):
c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason='Velocities are not properly serialized yet')
def test_skycoord_radial_velocity(tmpdir):
c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason='Velocities are not properly serialized yet')
def test_skycoord_proper_motion(tmpdir):
c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr,
pm_dec=1*u.mas/u.yr)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.skip(reason='Apparent loss of precision during serialization')
def test_skycoord_extra_attribute(tmpdir):
sc = SkyCoord(10*u.deg, 20*u.deg, equinox="2011-01-01T00:00", frame="fk4")
tree = dict(coord=sc.transform_to("icrs"))
def check_asdf(asdffile):
assert hasattr(asdffile['coord'], 'equinox')
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check_asdf)
def test_skycoord_2d_obstime(tmpdir):
    sc = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
                  obstime=['J1990.5', 'J1991.5'])
tree = dict(coord=sc)
assert_roundtrip_tree(tree, tmpdir)
|
5d8094ee32d50c366a902f321856c0a085986be42aa72b39e59bcdcb76867b8a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
import astropy.units as u
from asdf.tests.helpers import assert_roundtrip_tree
from astropy.coordinates import Longitude, Latitude, Angle
from astropy.io.misc.asdf.extension import AstropyExtension
def test_angle(tmpdir):
tree = {'angle': Angle(100, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_latitude(tmpdir):
tree = {'angle': Latitude(10, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_longitude(tmpdir):
tree = {'angle': Longitude(-100, u.deg, wrap_angle=180*u.deg)}
assert_roundtrip_tree(tree, tmpdir)
|
0afa77475af09680bc10ca78c99988fdc45ccbd76e1dea79686e8d415ad68248 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
from numpy.random import random, randint
import astropy.units as u
from astropy.coordinates import Angle
import astropy.coordinates.representation as r
from asdf.tests.helpers import assert_roundtrip_tree
@pytest.fixture(params=filter(lambda x: "Base" not in x, r.__all__))
def representation(request):
rep = getattr(r, request.param)
angle_unit = u.deg
other_unit = u.km
kwargs = {}
arr_len = randint(1, 100)
for aname, atype in rep.attr_classes.items():
if issubclass(atype, Angle):
value = ([random()] * arr_len) * angle_unit
else:
value = ([random()] * arr_len) * other_unit
kwargs[aname] = value
return rep(**kwargs)
def test_representations(tmpdir, representation):
tree = {'representation': representation}
assert_roundtrip_tree(tree, tmpdir)
|
6d6c87fe2ba40592460837f8cd1ebbd93918e82344a9f8c7720ac35a90aed9fc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import units as u
from astropy.coordinates import SpectralCoord, ICRS, Galactic
from astropy.tests.helper import assert_quantity_allclose
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree # noqa
def test_scalar_spectralcoord(tmpdir):
sc = SpectralCoord(565 * u.nm)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile['spectralcoord'], SpectralCoord)
assert_quantity_allclose(asdffile['spectralcoord'].quantity, 565 * u.nm)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_vector_spectralcoord(tmpdir):
sc = SpectralCoord([100, 200, 300] * u.GHz)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile['spectralcoord'], SpectralCoord)
assert_quantity_allclose(asdffile['spectralcoord'].quantity, [100, 200, 300] * u.GHz)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check, tree_match_func=assert_quantity_allclose)
@pytest.mark.filterwarnings("ignore:No velocity")
def test_spectralcoord_with_obstarget(tmpdir):
sc = SpectralCoord(10 * u.GHz,
observer=ICRS(1 * u.km, 2 * u.km, 3 * u.km, representation_type='cartesian'),
target=Galactic(10 * u.deg, 20 * u.deg, distance=30 * u.pc))
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile['spectralcoord'], SpectralCoord)
assert_quantity_allclose(asdffile['spectralcoord'].quantity, 10 * u.GHz)
assert isinstance(asdffile['spectralcoord'].observer, ICRS)
assert isinstance(asdffile['spectralcoord'].target, Galactic)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
|
5a4658130fa9f7e483439b257eae715f6294051fbf311c8363be89800d614590 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
import io
from astropy import units as u
from asdf.tests import helpers
# TODO: Implement defunit
def test_unit():
yaml = """
unit: !unit/unit-1.0.0 "2.1798721 10-18kg m2 s-2"
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
|
41fa8f95905a33593f151763343107c2d1a0d9b536268bd92a99445b03af9183 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip('asdf')
import io
from astropy import units
from asdf.tests import helpers
def roundtrip_quantity(yaml, quantity):
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert (ff.tree['quantity'] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert (ff.tree['quantity'] == quantity).all()
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = f"""
quantity: !unit/quantity-1.1.0
value: {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x*2.3081 for x in range(10)]
testunit = units.ampere
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
testval = [[1,2,3],[4,5,6]]
testunit = units.km
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{testval}
unit: {testunit}
"""
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
|
d0e3705fc939a90e355fffe8bc92c9c20d127e332b5260a2ef4983df04ba8ddb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import units as u
from astropy.units import equivalencies as eq
from astropy.cosmology import Planck15
asdf = pytest.importorskip('asdf', minversion='2.3.0.dev0')
from asdf.tests import helpers
def get_equivalencies():
"""
Return a list of example equivalencies for testing serialization.
"""
return [eq.plate_scale(.3 * u.deg/u.mm), eq.pixel_scale(.5 * u.deg/u.pix),
eq.pixel_scale(100. * u.pix/u.cm),
eq.spectral_density(350 * u.nm, factor=2),
eq.spectral_density(350 * u.nm), eq.spectral(),
eq.brightness_temperature(500 * u.GHz),
eq.brightness_temperature(500 * u.GHz, beam_area=23 * u.sr),
eq.temperature_energy(), eq.temperature(),
eq.thermodynamic_temperature(300 * u.Hz),
eq.thermodynamic_temperature(140 * u.GHz, Planck15.Tcmb0),
eq.beam_angular_area(3 * u.sr), eq.mass_energy(),
eq.molar_mass_amu(), eq.doppler_relativistic(2 * u.m),
eq.doppler_optical(2 * u.nm), eq.doppler_radio(2 * u.Hz),
eq.parallax(), eq.logarithmic(), eq.dimensionless_angles(),
eq.spectral() + eq.temperature(),
(eq.spectral_density(35 * u.nm) +
eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr)),
(eq.spectral() + eq.spectral_density(35 * u.nm) +
eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr))
]
@pytest.mark.parametrize('equiv', get_equivalencies())
def test_equivalencies(tmpdir, equiv):
tree = {'equiv': equiv}
helpers.assert_roundtrip_tree(tree, tmpdir)
|
ef79ba61ba1d3a949b2df96521c294109c158d4acf316f6072232547788909ef | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test :mod:`astropy.io.registry`.
.. todo::
Don't rely on Table for tests
"""
import contextlib
import os
from collections import Counter
from copy import copy, deepcopy
from io import StringIO
import pytest
import numpy as np
import astropy.units as u
from astropy.io import registry as io_registry
from astropy.io.registry import (IORegistryError, UnifiedInputRegistry,
UnifiedIORegistry, UnifiedOutputRegistry, compat)
from astropy.io.registry.base import _UnifiedIORegistryBase
from astropy.io.registry.compat import default_registry
from astropy.table import Table
###############################################################################
# pytest setup and fixtures
class UnifiedIORegistryBaseSubClass(_UnifiedIORegistryBase):
"""Non-abstract subclass of UnifiedIORegistryBase for testing."""
def get_formats(self, data_class=None):
return None
class EmptyData:
"""
Thing that can read and write.
Note that the read/write are the compatibility methods, which allow for the
kwarg ``registry``. This allows us to not subclass ``EmptyData`` for each
of the types of registry (read-only, ...) and use this class everywhere.
"""
read = classmethod(io_registry.read)
write = io_registry.write
class OtherEmptyData:
"""A different class with different I/O"""
read = classmethod(io_registry.read)
write = io_registry.write
def empty_reader(*args, **kwargs):
return EmptyData()
def empty_writer(table, *args, **kwargs):
return "status: success"
def empty_identifier(*args, **kwargs):
return True
@pytest.fixture
def fmtcls1():
return ("test1", EmptyData)
@pytest.fixture
def fmtcls2():
return ("test2", EmptyData)
@pytest.fixture(params=["test1", "test2"])
def fmtcls(request):
yield (request.param, EmptyData)
@pytest.fixture
def original():
ORIGINAL = {}
ORIGINAL["readers"] = deepcopy(default_registry._readers)
ORIGINAL["writers"] = deepcopy(default_registry._writers)
ORIGINAL["identifiers"] = deepcopy(default_registry._identifiers)
return ORIGINAL
###############################################################################
def test_fmtcls1_fmtcls2(fmtcls1, fmtcls2):
"""Just check a fact that we rely on in other tests."""
assert fmtcls1[1] is fmtcls2[1]
def test_IORegistryError():
with pytest.raises(IORegistryError, match="just checking"):
raise IORegistryError("just checking")
class TestUnifiedIORegistryBase:
"""Test :class:`astropy.io.registry.UnifiedIORegistryBase`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedIORegistryBaseSubClass
@pytest.fixture
def registry(self):
"""I/O registry. Cleaned before and after each function."""
registry = self._cls()
HAS_READERS = hasattr(registry, "_readers")
HAS_WRITERS = hasattr(registry, "_writers")
# copy and clear original registry
ORIGINAL = {}
ORIGINAL["identifiers"] = deepcopy(registry._identifiers)
registry._identifiers.clear()
if HAS_READERS:
ORIGINAL["readers"] = deepcopy(registry._readers)
registry._readers.clear()
if HAS_WRITERS:
ORIGINAL["writers"] = deepcopy(registry._writers)
registry._writers.clear()
yield registry
registry._identifiers.clear()
registry._identifiers.update(ORIGINAL["identifiers"])
if HAS_READERS:
registry._readers.clear()
registry._readers.update(ORIGINAL["readers"])
if HAS_WRITERS:
registry._writers.clear()
registry._writers.update(ORIGINAL["writers"])
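    # The fixture above snapshots the registry's internal dicts so that each
    # test runs against an empty registry and global state is restored after.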
# ===========================================
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
# defaults
assert registry.get_formats() is None
# (kw)args don't matter
assert registry.get_formats(data_class=24) is None
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
# TODO! figure out what can be tested
with registry.delay_doc_updates(EmptyData):
registry.register_identifier(*fmtcls1, empty_identifier)
def test_register_identifier(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_identifier()``."""
# initial check it's not registered
assert fmtcls1 not in registry._identifiers
assert fmtcls2 not in registry._identifiers
# register
registry.register_identifier(*fmtcls1, empty_identifier)
registry.register_identifier(*fmtcls2, empty_identifier)
assert fmtcls1 in registry._identifiers
assert fmtcls2 in registry._identifiers
def test_register_identifier_invalid(self, registry, fmtcls):
"""Test calling ``registry.register_identifier()`` twice."""
fmt, cls = fmtcls
registry.register_identifier(fmt, cls, empty_identifier)
with pytest.raises(IORegistryError) as exc:
registry.register_identifier(fmt, cls, empty_identifier)
assert (
str(exc.value) == f"Identifier for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_identifier_force(self, registry, fmtcls1):
registry.register_identifier(*fmtcls1, empty_identifier)
registry.register_identifier(*fmtcls1, empty_identifier, force=True)
assert fmtcls1 in registry._identifiers
# -----------------------
def test_unregister_identifier(self, registry, fmtcls1):
"""Test ``registry.unregister_identifier()``."""
registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in registry._identifiers
registry.unregister_identifier(*fmtcls1)
assert fmtcls1 not in registry._identifiers
def test_unregister_identifier_invalid(self, registry, fmtcls):
"""Test ``registry.unregister_identifier()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.unregister_identifier(fmt, cls)
assert (
str(exc.value) == f"No identifier defined for format '{fmt}' "
f"and class '{cls.__name__}'"
)
def test_identify_format(self, registry, fmtcls1):
"""Test ``registry.identify_format()``."""
fmt, cls = fmtcls1
args = (None, cls, None, None, (None,), {})
# test no formats to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
registry.register_identifier(fmt, cls, empty_identifier)
formats = registry.identify_format(*args)
assert fmt in formats
# ===========================================
# Compat tests
def test_compat_register_identifier(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._identifiers
compat.register_identifier(*fmtcls1, empty_identifier, registry=registry)
assert fmtcls1 in registry._identifiers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._identifiers
try:
compat.register_identifier(*fmtcls1, empty_identifier)
except Exception:
pass
else:
assert fmtcls1 in default_registry._identifiers
finally:
default_registry._identifiers.pop(fmtcls1)
def test_compat_unregister_identifier(self, registry, fmtcls1):
# with registry specified
registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in registry._identifiers
compat.unregister_identifier(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._identifiers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._identifiers
default_registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in default_registry._identifiers
compat.unregister_identifier(*fmtcls1)
assert fmtcls1 not in registry._identifiers
def test_compat_identify_format(self, registry, fmtcls1):
fmt, cls = fmtcls1
args = (None, cls, None, None, (None,), dict())
# with registry specified
registry.register_identifier(*fmtcls1, empty_identifier)
formats = compat.identify_format(*args, registry=registry)
assert fmt in formats
# without registry specified it becomes default_registry
if registry is not default_registry:
try:
default_registry.register_identifier(*fmtcls1, empty_identifier)
except Exception:
pass
else:
formats = compat.identify_format(*args)
assert fmt in formats
finally:
default_registry.unregister_identifier(*fmtcls1)
@pytest.mark.skip("TODO!")
def test_compat_get_formats(self, registry, fmtcls1):
assert False
@pytest.mark.skip("TODO!")
def test_compat_delay_doc_updates(self, registry, fmtcls1):
assert False
class TestUnifiedInputRegistry(TestUnifiedIORegistryBase):
"""Test :class:`astropy.io.registry.UnifiedInputRegistry`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedInputRegistry
# ===========================================
def test_inherited_read_registration(self, registry):
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(EmptyData):
pass
class Child2(Child1):
pass
def _read():
return EmptyData()
def _read1():
return Child1()
# check that reader gets inherited
registry.register_reader("test", EmptyData, _read)
assert registry.get_reader("test", Child2) is _read
# check that nearest ancestor is identified
# (i.e. that the reader for Child2 is the registered method
# for Child1, and not Table)
registry.register_reader("test", Child1, _read1)
assert registry.get_reader("test", Child2) is _read1
# ===========================================
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
with registry.delay_doc_updates(EmptyData):
registry.register_reader("test", EmptyData, empty_reader)
# test that the doc has not yet been updated.
            # if the format was registered in a different way, then
# test that this method is not present.
if "Format" in EmptyData.read.__doc__:
docs = EmptyData.read.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs)
if ("Format" in s)][0]
ifmt = docs[ihd].index("Format") + 1
iread = docs[ihd].index("Read") + 1
# there might not actually be anything here, which is also good
if docs[-2] != docs[-1]:
assert docs[-1][ifmt : ifmt + 5] == "test"
assert docs[-1][iread : iread + 3] != "Yes"
# now test it's updated
docs = EmptyData.read.__doc__.split("\n")
ifmt = docs[ihd].index("Format") + 2
iread = docs[ihd].index("Read") + 1
assert docs[-2][ifmt : ifmt + 4] == "test"
assert docs[-2][iread : iread + 3] == "Yes"
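    # For reference, the slicing above assumes the registered formats are
    # rendered into the docstring as a table roughly like (exact column
    # positions may vary):
    #
    #     ========== ====
    #     Format     Read
    #     ========== ====
    #     test       Yes
    #     ========== ====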
def test_identify_read_format(self, registry):
"""Test ``registry.identify_format()``."""
args = ("read", EmptyData, None, None, (None,), dict())
# test there is no format to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
        # it doesn't matter whether a reader is registered; the identifier returns True for everything
registry.register_identifier("test", EmptyData, empty_identifier)
formats = registry.identify_format(*args)
assert "test" in formats
# -----------------------
def test_register_reader(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_reader()``."""
# initial check it's not registered
assert fmtcls1 not in registry._readers
assert fmtcls2 not in registry._readers
# register
registry.register_reader(*fmtcls1, empty_reader)
registry.register_reader(*fmtcls2, empty_reader)
assert fmtcls1 in registry._readers
assert fmtcls2 in registry._readers
assert registry._readers[fmtcls1] == (empty_reader, 0) # (f, priority)
assert registry._readers[fmtcls2] == (empty_reader, 0) # (f, priority)
def test_register_reader_invalid(self, registry, fmtcls1):
fmt, cls = fmtcls1
registry.register_reader(fmt, cls, empty_reader)
with pytest.raises(IORegistryError) as exc:
registry.register_reader(fmt, cls, empty_reader)
assert (
str(exc.value) == f"Reader for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_reader_force(self, registry, fmtcls1):
registry.register_reader(*fmtcls1, empty_reader)
registry.register_reader(*fmtcls1, empty_reader, force=True)
assert fmtcls1 in registry._readers
def test_register_readers_with_same_name_on_different_classes(self, registry):
# No errors should be generated if the same name is registered for
# different objects...but this failed under python3
registry.register_reader("test", EmptyData, lambda: EmptyData())
registry.register_reader("test", OtherEmptyData, lambda: OtherEmptyData())
t = EmptyData.read(format="test", registry=registry)
assert isinstance(t, EmptyData)
tbl = OtherEmptyData.read(format="test", registry=registry)
assert isinstance(tbl, OtherEmptyData)
# -----------------------
def test_unregister_reader(self, registry, fmtcls1):
"""Test ``registry.unregister_reader()``."""
registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in registry._readers
registry.unregister_reader(*fmtcls1)
assert fmtcls1 not in registry._readers
def test_unregister_reader_invalid(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
registry.unregister_reader(*fmtcls1)
assert (
str(exc.value) == f"No reader defined for format '{fmt}' and "
f"class '{cls.__name__}'"
)
# -----------------------
def test_get_reader(self, registry, fmtcls):
"""Test ``registry.get_reader()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError):
registry.get_reader(fmt, cls)
registry.register_reader(fmt, cls, empty_reader)
reader = registry.get_reader(fmt, cls)
assert reader is empty_reader
def test_get_reader_invalid(self, registry, fmtcls):
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.get_reader(fmt, cls)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_read_noformat(self, registry, fmtcls1):
"""Test ``registry.read()`` when there isn't a reader."""
with pytest.raises(IORegistryError) as exc:
fmtcls1[1].read(registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_noformat_arbitrary(self, registry, original, fmtcls1):
"""Test that all identifier functions can accept arbitrary input"""
registry._identifiers.update(original["identifiers"])
with pytest.raises(IORegistryError) as exc:
fmtcls1[1].read(object(), registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_noformat_arbitrary_file(self, tmpdir, registry, original):
"""Tests that all identifier functions can accept arbitrary files"""
registry._readers.update(original["readers"])
testfile = str(tmpdir.join("foo.example"))
with open(testfile, "w") as f:
f.write("Hello world")
with pytest.raises(IORegistryError) as exc:
Table.read(testfile)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_toomanyformats(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls, lambda o, *x, **y: True)
with pytest.raises(IORegistryError) as exc:
cls.read(registry=registry)
assert str(exc.value) == (f"Format is ambiguous - options are: {fmt1}, {fmt2}")
def test_read_uses_priority(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
counter = Counter()
def counting_reader1(*args, **kwargs):
counter[fmt1] += 1
return cls()
def counting_reader2(*args, **kwargs):
counter[fmt2] += 1
return cls()
registry.register_reader(fmt1, cls, counting_reader1, priority=1)
registry.register_reader(fmt2, cls, counting_reader2, priority=2)
registry.register_identifier(fmt1, cls, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls, lambda o, *x, **y: True)
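        # Both identifiers match, so the reader registered with the higher
        # priority value (fmt2, priority=2) should be the one that runs.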
cls.read(registry=registry)
assert counter[fmt2] == 1
assert counter[fmt1] == 0
def test_read_format_noreader(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
cls.read(format=fmt, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_read_identifier(self, tmpdir, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(
fmt1, cls, lambda o, path, fileobj, *x, **y: path.endswith("a")
)
registry.register_identifier(
fmt2, cls, lambda o, path, fileobj, *x, **y: path.endswith("b")
)
# Now check that we got past the identifier and are trying to get
# the reader. The registry.get_reader will fail but the error message
# will tell us if the identifier worked.
filename = tmpdir.join("testfile.a").strpath
open(filename, "w").close()
with pytest.raises(IORegistryError) as exc:
cls.read(filename, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt1}' and class '{cls.__name__}'"
)
filename = tmpdir.join("testfile.b").strpath
open(filename, "w").close()
with pytest.raises(IORegistryError) as exc:
cls.read(filename, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt2}' and class '{cls.__name__}'"
)
def test_read_valid_return(self, registry, fmtcls):
fmt, cls = fmtcls
registry.register_reader(fmt, cls, empty_reader)
t = cls.read(format=fmt, registry=registry)
assert isinstance(t, cls)
def test_read_non_existing_unknown_ext(self, fmtcls1):
"""Raise the correct error when attempting to read a non-existing
file with an unknown extension."""
with pytest.raises(OSError):
            fmtcls1[1].read("non-existing-file-with-unknown.ext")
def test_read_directory(self, tmpdir, registry, fmtcls1):
"""
Regression test for a bug that caused the I/O registry infrastructure to
not work correctly for datasets that are represented by folders as
opposed to files, when using the descriptors to add read/write methods.
"""
_, cls = fmtcls1
registry.register_identifier(
"test_folder_format", cls, lambda o, *x, **y: o == "read"
)
registry.register_reader("test_folder_format", cls, empty_reader)
filename = tmpdir.mkdir("folder_dataset").strpath
# With the format explicitly specified
dataset = cls.read(filename, format="test_folder_format", registry=registry)
assert isinstance(dataset, cls)
# With the auto-format identification
dataset = cls.read(filename, registry=registry)
assert isinstance(dataset, cls)
# ===========================================
# Compat tests
def test_compat_register_reader(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._readers
compat.register_reader(*fmtcls1, empty_reader, registry=registry)
assert fmtcls1 in registry._readers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._readers
try:
compat.register_reader(*fmtcls1, empty_identifier)
except Exception:
pass
else:
assert fmtcls1 in default_registry._readers
finally:
default_registry._readers.pop(fmtcls1)
def test_compat_unregister_reader(self, registry, fmtcls1):
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in registry._readers
compat.unregister_reader(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._readers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._readers
default_registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in default_registry._readers
compat.unregister_reader(*fmtcls1)
assert fmtcls1 not in registry._readers
def test_compat_get_reader(self, registry, fmtcls1):
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
reader = compat.get_reader(*fmtcls1, registry=registry)
assert reader is empty_reader
registry.unregister_reader(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
default_registry.register_reader(*fmtcls1, empty_reader)
reader = compat.get_reader(*fmtcls1)
assert reader is empty_reader
default_registry.unregister_reader(*fmtcls1)
def test_compat_read(self, registry, fmtcls1):
fmt, cls = fmtcls1
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
t = compat.read(cls, format=fmt, registry=registry)
assert isinstance(t, cls)
registry.unregister_reader(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
default_registry.register_reader(*fmtcls1, empty_reader)
t = compat.read(cls, format=fmt)
assert isinstance(t, cls)
default_registry.unregister_reader(*fmtcls1)
class TestUnifiedOutputRegistry(TestUnifiedIORegistryBase):
"""Test :class:`astropy.io.registry.UnifiedOutputRegistry`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedOutputRegistry
# ===========================================
def test_inherited_write_registration(self, registry):
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(EmptyData):
pass
class Child2(Child1):
pass
def _write():
return EmptyData()
def _write1():
return Child1()
# check that writer gets inherited
registry.register_writer("test", EmptyData, _write)
assert registry.get_writer("test", Child2) is _write
# check that nearest ancestor is identified
# (i.e. that the writer for Child2 is the registered method
# for Child1, and not Table)
registry.register_writer("test", Child1, _write1)
assert registry.get_writer("test", Child2) is _write1
# ===========================================
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
fmt, cls = fmtcls1
with registry.delay_doc_updates(EmptyData):
registry.register_writer(*fmtcls1, empty_writer)
# test that the doc has not yet been updated.
            # if the format was registered in a different way, then
# test that this method is not present.
if "Format" in EmptyData.read.__doc__:
docs = EmptyData.write.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs)
if ("Format" in s)][0]
ifmt = docs[ihd].index("Format")
iwrite = docs[ihd].index("Write") + 1
# there might not actually be anything here, which is also good
if docs[-2] != docs[-1]:
assert fmt in docs[-1][ifmt : ifmt + len(fmt) + 1]
assert docs[-1][iwrite : iwrite + 3] != "Yes"
# now test it's updated
docs = EmptyData.write.__doc__.split("\n")
ifmt = docs[ihd].index("Format") + 1
iwrite = docs[ihd].index("Write") + 2
assert fmt in docs[-2][ifmt : ifmt + len(fmt) + 1]
assert docs[-2][iwrite : iwrite + 3] == "Yes"
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_identify_write_format(self, registry, fmtcls1):
"""Test ``registry.identify_format()``."""
fmt, cls = fmtcls1
args = ("write", cls, None, None, (None,), {})
# test there is no format to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
        # it doesn't matter whether a writer is registered; the identifier returns True for everything
registry.register_identifier(fmt, cls, empty_identifier)
formats = registry.identify_format(*args)
assert fmt in formats
# -----------------------
def test_register_writer(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_writer()``."""
# initial check it's not registered
assert fmtcls1 not in registry._writers
assert fmtcls2 not in registry._writers
# register
registry.register_writer(*fmtcls1, empty_writer)
registry.register_writer(*fmtcls2, empty_writer)
assert fmtcls1 in registry._writers
assert fmtcls2 in registry._writers
def test_register_writer_invalid(self, registry, fmtcls):
"""Test calling ``registry.register_writer()`` twice."""
fmt, cls = fmtcls
registry.register_writer(fmt, cls, empty_writer)
with pytest.raises(IORegistryError) as exc:
registry.register_writer(fmt, cls, empty_writer)
assert (
str(exc.value) == f"Writer for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_writer_force(self, registry, fmtcls1):
registry.register_writer(*fmtcls1, empty_writer)
registry.register_writer(*fmtcls1, empty_writer, force=True)
assert fmtcls1 in registry._writers
# -----------------------
def test_unregister_writer(self, registry, fmtcls1):
"""Test ``registry.unregister_writer()``."""
registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in registry._writers
registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in registry._writers
def test_unregister_writer_invalid(self, registry, fmtcls):
"""Test ``registry.unregister_writer()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.unregister_writer(fmt, cls)
assert (
str(exc.value) == f"No writer defined for format '{fmt}' "
f"and class '{cls.__name__}'"
)
# -----------------------
def test_get_writer(self, registry, fmtcls1):
"""Test ``registry.get_writer()``."""
with pytest.raises(IORegistryError):
registry.get_writer(*fmtcls1)
registry.register_writer(*fmtcls1, empty_writer)
writer = registry.get_writer(*fmtcls1)
assert writer is empty_writer
def test_get_writer_invalid(self, registry, fmtcls1):
"""Test invalid ``registry.get_writer()``."""
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
registry.get_writer(fmt, cls)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_write_noformat(self, registry, fmtcls1):
"""Test ``registry.write()`` when there isn't a writer."""
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_noformat_arbitrary(self, registry, original, fmtcls1):
"""Test that all identifier functions can accept arbitrary input"""
registry._identifiers.update(original["identifiers"])
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(object(), registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_noformat_arbitrary_file(self, tmpdir, registry, original):
"""Tests that all identifier functions can accept arbitrary files"""
registry._writers.update(original["writers"])
testfile = str(tmpdir.join("foo.example"))
with pytest.raises(IORegistryError) as exc:
Table().write(testfile, registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_toomanyformats(self, registry, fmtcls1, fmtcls2):
registry.register_identifier(*fmtcls1, lambda o, *x, **y: True)
registry.register_identifier(*fmtcls2, lambda o, *x, **y: True)
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(registry=registry)
assert str(exc.value) == (
f"Format is ambiguous - options are: {fmtcls1[0]}, {fmtcls2[0]}"
)
def test_write_uses_priority(self, registry, fmtcls1, fmtcls2):
fmt1, cls1 = fmtcls1
fmt2, cls2 = fmtcls2
counter = Counter()
def counting_writer1(*args, **kwargs):
counter[fmt1] += 1
def counting_writer2(*args, **kwargs):
counter[fmt2] += 1
registry.register_writer(fmt1, cls1, counting_writer1, priority=1)
registry.register_writer(fmt2, cls2, counting_writer2, priority=2)
registry.register_identifier(fmt1, cls1, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls2, lambda o, *x, **y: True)
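        # Both identifiers match; the writer with the higher priority value
        # (fmt2, priority=2) should be the one that runs.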
cls1().write(registry=registry)
assert counter[fmt2] == 1
assert counter[fmt1] == 0
def test_write_format_nowriter(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
cls().write(format=fmt, registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_write_identifier(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: x[0].startswith("a"))
registry.register_identifier(fmt2, cls, lambda o, *x, **y: x[0].startswith("b"))
# Now check that we got past the identifier and are trying to get
        # the writer. The registry.get_writer will fail but the error message
# will tell us if the identifier worked.
with pytest.raises(IORegistryError) as exc:
cls().write("abc", registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt1}' and class '{cls.__name__}'"
)
with pytest.raises(IORegistryError) as exc:
cls().write("bac", registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt2}' and class '{cls.__name__}'"
)
def test_write_return(self, registry, fmtcls1):
"""Most writers will return None, but other values are not forbidden."""
fmt, cls = fmtcls1
registry.register_writer(fmt, cls, empty_writer)
res = cls.write(cls(), format=fmt, registry=registry)
assert res == "status: success"
# ===========================================
# Compat tests
def test_compat_register_writer(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._writers
compat.register_writer(*fmtcls1, empty_writer, registry=registry)
assert fmtcls1 in registry._writers
registry.unregister_writer(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
try:
compat.register_writer(*fmtcls1, empty_writer)
except Exception:
pass
else:
assert fmtcls1 in default_registry._writers
finally:
default_registry._writers.pop(fmtcls1)
def test_compat_unregister_writer(self, registry, fmtcls1):
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in registry._writers
compat.unregister_writer(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._writers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
compat.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
def test_compat_get_writer(self, registry, fmtcls1):
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
writer = compat.get_writer(*fmtcls1, registry=registry)
assert writer is empty_writer
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
writer = compat.get_writer(*fmtcls1)
assert writer is empty_writer
default_registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
def test_compat_write(self, registry, fmtcls1):
fmt, cls = fmtcls1
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
res = compat.write(cls(), format=fmt, registry=registry)
assert res == "status: success"
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
res = compat.write(cls(), format=fmt)
assert res == "status: success"
default_registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
class TestUnifiedIORegistry(TestUnifiedInputRegistry, TestUnifiedOutputRegistry):
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedIORegistry
# ===========================================
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
# -----------------------
def test_identifier_origin(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: o == "read")
registry.register_identifier(fmt2, cls, lambda o, *x, **y: o == "write")
registry.register_reader(fmt1, cls, empty_reader)
registry.register_writer(fmt2, cls, empty_writer)
        # Each identifier matches only its own mode, so neither call below
        # should raise "Format is ambiguous"
cls.read(registry=registry)
cls().write(registry=registry)
with pytest.raises(IORegistryError) as exc:
cls.read(format=fmt2, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt2}' and class '{cls.__name__}'"
)
with pytest.raises(IORegistryError) as exc:
cls().write(format=fmt1, registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt1}' and class '{cls.__name__}'"
)
class TestDefaultRegistry(TestUnifiedIORegistry):
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = lambda *args: default_registry
# =============================================================================
# Test compat
# much of this is already tested above since EmptyData uses io_registry.X(),
# which are the compat methods.
def test_dir():
"""Test all the compat methods are in the directory"""
dc = dir(compat)
for n in compat.__all__:
assert n in dc
def test_getattr():
for n in compat.__all__:
assert hasattr(compat, n)
with pytest.raises(AttributeError, match="module 'astropy.io.registry.compat'"):
compat.this_is_definitely_not_in_this_module
# =============================================================================
# Table tests
def test_read_basic_table():
registry = Table.read._registry
data = np.array(
list(zip([1, 2, 3], ["a", "b", "c"])), dtype=[("A", int), ("B", "|U1")]
)
try:
registry.register_reader("test", Table, lambda x: Table(x))
except Exception:
pass
else:
t = Table.read(data, format="test")
assert t.keys() == ["A", "B"]
for i in range(3):
assert t["A"][i] == data["A"][i]
assert t["B"][i] == data["B"][i]
finally:
        registry._readers.pop(("test", Table), None)  # key is a (format, class) tuple
class TestSubclass:
"""
Test using registry with a Table sub-class
"""
@pytest.fixture(autouse=True)
def registry(self):
"""I/O registry. Not cleaned."""
yield
def test_read_table_subclass(self):
class MyTable(Table):
pass
data = ["a b", "1 2"]
mt = MyTable.read(data, format="ascii")
t = Table.read(data, format="ascii")
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(mt) is MyTable
def test_write_table_subclass(self):
buffer = StringIO()
class MyTable(Table):
pass
mt = MyTable([[1], [2]], names=["a", "b"])
mt.write(buffer, format="ascii")
assert buffer.getvalue() == os.linesep.join(["a b", "1 2", ""])
def test_read_table_subclass_with_columns_attributes(self, tmpdir):
"""Regression test for https://github.com/astropy/astropy/issues/7181"""
class MTable(Table):
pass
mt = MTable([[1, 2.5]], names=["a"])
mt["a"].unit = u.m
mt["a"].format = ".4f"
mt["a"].description = "hello"
testfile = str(tmpdir.join("junk.fits"))
mt.write(testfile, overwrite=True)
t = MTable.read(testfile)
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(t) is MTable
assert t["a"].unit == u.m
assert t["a"].format == "{:13.4f}"
assert t["a"].description == "hello"
|
2ad2713d225c25c77c8f13b0da0540b170f75d1e36734aafa51484bf556fe830 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of regression tests for vo.
"""
# STDLIB
import difflib
import io
import pathlib
import sys
import gzip
from unittest import mock
# THIRD-PARTY
import pytest
import numpy as np
from numpy.testing import assert_array_equal
# LOCAL
from astropy.io.votable.table import parse, parse_single_table, validate
from astropy.io.votable import tree
from astropy.io.votable.exceptions import VOTableSpecError, VOWarning, W39
from astropy.io.votable.xmlutil import validate_schema
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_filenames
# Determine the kind of float formatting in this build of Python
if hasattr(sys, 'float_repr_style'):
legacy_float_repr = (sys.float_repr_style == 'legacy')
else:
legacy_float_repr = sys.platform.startswith('win')
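# (Under legacy formatting, repr(1.1) is '1.1000000000000001' rather than
# '1.1', so float output differs from the truth files; the regression tests
# below are therefore xfailed on such builds.)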
def assert_validate_schema(filename, version):
if sys.platform.startswith('win'):
return
try:
rc, stdout, stderr = validate_schema(filename, version)
except OSError:
# If xmllint is not installed, we want the test to pass anyway
return
assert rc == 0, 'File did not validate against VOTable schema'
def test_parse_single_table():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
table = parse_single_table(get_pkg_data_filename('data/regression.xml'))
assert isinstance(table, tree.Table)
assert len(table.array) == 5
def test_parse_single_table2():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
table2 = parse_single_table(get_pkg_data_filename('data/regression.xml'),
table_number=1)
assert isinstance(table2, tree.Table)
assert len(table2.array) == 1
assert len(table2.array.dtype.names) == 28
def test_parse_single_table3():
with pytest.raises(IndexError):
parse_single_table(get_pkg_data_filename('data/regression.xml'),
table_number=3)
def _test_regression(tmpdir, _python_based=False, binary_mode=1):
# Read the VOTABLE
votable = parse(get_pkg_data_filename('data/regression.xml'),
_debug_python_based_parser=_python_based)
table = votable.get_first_table()
dtypes = [
(('string test', 'string_test'), '|O8'),
(('fixed string test', 'string_test_2'), '<U10'),
('unicode_test', '|O8'),
(('unicode test', 'fixed_unicode_test'), '<U10'),
(('string array test', 'string_array_test'), '<U4'),
('unsignedByte', '|u1'),
('short', '<i2'),
('int', '<i4'),
('long', '<i8'),
('double', '<f8'),
('float', '<f4'),
('array', '|O8'),
('bit', '|b1'),
('bitarray', '|b1', (3, 2)),
('bitvararray', '|O8'),
('bitvararray2', '|O8'),
('floatComplex', '<c8'),
('doubleComplex', '<c16'),
('doubleComplexArray', '|O8'),
('doubleComplexArrayFixed', '<c16', (2,)),
('boolean', '|b1'),
('booleanArray', '|b1', (4,)),
('nulls', '<i4'),
('nulls_array', '<i4', (2, 2)),
('precision1', '<f8'),
('precision2', '<f8'),
('doublearray', '|O8'),
('bitarray2', '|b1', (16,))
]
if sys.byteorder == 'big':
new_dtypes = []
for dtype in dtypes:
dtype = list(dtype)
dtype[1] = dtype[1].replace('<', '>')
new_dtypes.append(tuple(dtype))
dtypes = new_dtypes
assert table.array.dtype == dtypes
votable.to_xml(str(tmpdir.join("regression.tabledata.xml")),
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.tabledata.xml")),
votable.version)
if binary_mode == 1:
votable.get_first_table().format = 'binary'
votable.version = '1.1'
elif binary_mode == 2:
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
votable.version = '1.3'
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "wb") as fd:
votable.to_xml(fd, _debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.binary.xml")),
votable.version)
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "rb") as fd:
votable2 = parse(fd, _debug_python_based_parser=_python_based)
votable2.get_first_table().format = 'tabledata'
votable2.to_xml(str(tmpdir.join("regression.bin.tabledata.xml")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.bin.tabledata.xml")),
votable.version)
with open(
get_pkg_data_filename(
f'data/regression.bin.tabledata.truth.{votable.version}.xml'), encoding='utf-8') as fd:
truth = fd.readlines()
with open(str(tmpdir.join("regression.bin.tabledata.xml")), encoding='utf-8') as fd:
output = fd.readlines()
# If the lines happen to be different, print a diff
# This is convenient for debugging
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
# Test implicit gzip saving
votable2.to_xml(
str(tmpdir.join("regression.bin.tabledata.xml.gz")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
with gzip.GzipFile(
str(tmpdir.join("regression.bin.tabledata.xml.gz")), 'rb') as gzfd:
output = gzfd.readlines()
output = [x.decode('utf-8').rstrip() for x in output]
truth = [x.rstrip() for x in truth]
assert truth == output
@pytest.mark.xfail('legacy_float_repr')
def test_regression(tmpdir):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmpdir, False)
@pytest.mark.xfail('legacy_float_repr')
def test_regression_python_based_parser(tmpdir):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmpdir, True)
@pytest.mark.xfail('legacy_float_repr')
def test_regression_binary2(tmpdir):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmpdir, False, 2)
class TestFixups:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.table = parse(
get_pkg_data_filename('data/regression.xml')).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_implicit_id(self):
assert_array_equal(self.array['string_test_2'],
self.array['fixed string test'])
class TestReferences:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename('data/regression.xml'))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == 'boolean'
assert fieldref.get_ref().datatype == 'boolean'
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == 'INPUT'
assert paramref.get_ref().datatype == 'float'
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
def test_select_columns_by_index():
columns = [0, 5, 13]
table = parse(
get_pkg_data_filename('data/regression.xml'), columns=columns).get_first_table() # noqa
array = table.array
mask = table.array.mask
assert array['string_test'][0] == "String & test"
columns = ['string_test', 'unsignedByte', 'bitarray']
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
def test_select_columns_by_name():
columns = ['string_test', 'unsignedByte', 'bitarray']
table = parse(
get_pkg_data_filename('data/regression.xml'), columns=columns).get_first_table() # noqa
array = table.array
mask = table.array.mask
assert array['string_test'][0] == "String & test"
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
class TestParse:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename('data/regression.xml'))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array['string_test'].dtype.type,
np.object_)
assert_array_equal(
self.array['string_test'],
['String & test', 'String & test', 'XXXX', '', ''])
def test_fixed_string_test(self):
assert issubclass(self.array['string_test_2'].dtype.type,
np.unicode_)
assert_array_equal(
self.array['string_test_2'],
['Fixed stri', '0123456789', 'XXXX', '', ''])
def test_unicode_test(self):
assert issubclass(self.array['unicode_test'].dtype.type,
np.object_)
assert_array_equal(self.array['unicode_test'],
["Ceçi n'est pas un pipe",
'வணக்கம்',
'XXXX', '', ''])
def test_fixed_unicode_test(self):
assert issubclass(self.array['fixed_unicode_test'].dtype.type,
np.unicode_)
assert_array_equal(self.array['fixed_unicode_test'],
["Ceçi n'est",
'வணக்கம்',
'0123456789', '', ''])
def test_unsignedByte(self):
assert issubclass(self.array['unsignedByte'].dtype.type,
np.uint8)
assert_array_equal(self.array['unsignedByte'],
[128, 255, 0, 255, 255])
assert not np.any(self.mask['unsignedByte'])
def test_short(self):
assert issubclass(self.array['short'].dtype.type,
np.int16)
assert_array_equal(self.array['short'],
[4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask['short'])
def test_int(self):
assert issubclass(self.array['int'].dtype.type,
np.int32)
assert_array_equal(
self.array['int'],
[268435456, 2147483647, -268435456, 268435455, 123456789])
assert_array_equal(self.mask['int'],
[False, False, False, False, True])
def test_long(self):
assert issubclass(self.array['long'].dtype.type,
np.int64)
assert_array_equal(
self.array['long'],
[922337203685477, 123456789, -1152921504606846976,
1152921504606846975, 123456789])
assert_array_equal(self.mask['long'],
[False, True, False, False, True])
def test_double(self):
assert issubclass(self.array['double'].dtype.type,
np.float64)
assert_array_equal(self.array['double'],
[8.9990234375, 0.0, np.inf, np.nan, -np.inf])
assert_array_equal(self.mask['double'],
[False, False, False, True, False])
def test_float(self):
assert issubclass(self.array['float'].dtype.type,
np.float32)
assert_array_equal(self.array['float'],
[1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask['float'],
[False, False, False, False, True])
def test_array(self):
assert issubclass(self.array['array'].dtype.type,
np.object_)
match = [[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]]]
for a, b in zip(self.array['array'], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
assert self.array.data['array'][3].mask[0][0]
assert self.array.data['array'][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array['bit'].dtype.type,
np.bool_)
assert_array_equal(self.array['bit'],
[True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array['bitarray'].dtype.type,
np.bool_)
assert self.array['bitarray'].shape == (5, 3, 2)
assert_array_equal(self.array['bitarray'],
[[[True, False],
[True, True],
[False, True]],
[[False, True],
[False, False],
[True, True]],
[[True, True],
[True, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]]])
def test_bitarray_mask(self):
assert_array_equal(self.mask['bitarray'],
[[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[True, True],
[True, True],
[True, True]],
[[True, True],
[True, True],
[True, True]]])
def test_bitvararray(self):
assert issubclass(self.array['bitvararray'].dtype.type,
np.object_)
match = [[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[], []]
for a, b in zip(self.array['bitvararray'], match):
assert_array_equal(a, b)
match_mask = [[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False, False]
for a, b in zip(self.array['bitvararray'], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array['bitvararray2'].dtype.type,
np.object_)
match = [[],
[[[False, True],
[False, False],
[True, False]],
[[True, False],
[True, False],
[True, False]]],
[[[True, True],
[True, True],
[True, True]]],
[],
[]]
for a, b in zip(self.array['bitvararray2'], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array['floatComplex'].dtype.type,
np.complex64)
assert_array_equal(self.array['floatComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+0j, np.nan+0j])
assert_array_equal(self.mask['floatComplex'],
[True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array['doubleComplex'].dtype.type,
np.complex128)
assert_array_equal(
self.array['doubleComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+(np.inf*1j), np.nan+0j])
assert_array_equal(self.mask['doubleComplex'],
[True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array['doubleComplexArray'].dtype.type,
np.object_)
assert ([len(x) for x in self.array['doubleComplexArray']] ==
[0, 2, 2, 0, 0])
def test_boolean(self):
assert issubclass(self.array['boolean'].dtype.type,
np.bool_)
assert_array_equal(self.array['boolean'],
[True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask['boolean'],
[False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array['booleanArray'].dtype.type,
np.bool_)
assert_array_equal(self.array['booleanArray'],
[[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False]])
def test_boolean_array_mask(self):
assert_array_equal(self.mask['booleanArray'],
[[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True]])
def test_nulls(self):
assert_array_equal(self.array['nulls'],
[0, -9, 2, -9, -9])
assert_array_equal(self.mask['nulls'],
[False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(self.array['nulls_array'],
[[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]]])
assert_array_equal(self.mask['nulls_array'],
[[[True, True],
[True, True]],
[[False, False],
[False, False]],
[[True, False],
[True, False]],
[[False, True],
[False, True]],
[[True, True],
[True, True]]])
def test_double_array(self):
assert issubclass(self.array['doublearray'].dtype.type,
np.object_)
assert len(self.array['doublearray'][0]) == 0
assert_array_equal(self.array['doublearray'][1],
[0, 1, np.inf, -np.inf, np.nan, 0, -1])
assert_array_equal(self.array.data['doublearray'][1].mask,
[False, False, False, False, False, False, True])
def test_bit_array2(self):
assert_array_equal(self.array['bitarray2'][0],
[True, True, True, True,
False, False, False, False,
True, True, True, True,
False, False, False, False])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'][0])
assert np.all(self.mask['bitarray2'][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id('J2000')
assert coosys.system == 'eq_FK5'
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id('QUERY_STATUS')
assert info.value == 'OK'
if self.votable.version != '1.1':
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..." # noqa
def test_repr(self):
assert '3 tables' in repr(self.votable)
assert repr(list(self.votable.iter_fields_and_params())[0]) == \
'<PARAM ID="awesome" arraysize="*" datatype="float" name="INPUT" unit="deg" value="[0.0 0.0]"/>' # noqa
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == '[</>]'
class TestThroughTableData(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename('data/regression.xml'))
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
def test_schema(self, tmpdir):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = str(tmpdir.join("test_through_tabledata.xml"))
with open(fn, 'wb') as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, '1.1')
class TestThroughBinary(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename('data/regression.xml'))
votable.get_first_table().format = 'binary'
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
# Masked values in bit fields don't roundtrip through the binary
# representation -- that's not a bug, just a limitation, so
# override the mask array checks here.
def test_bit_mask(self):
assert not np.any(self.mask['bit'])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
class TestThroughBinary2(TestParse):
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename('data/regression.xml'))
votable.version = '1.3'
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_get_coosys_by_id(self):
# No COOSYS in VOTable 1.2 or later
pass
def table_from_scratch():
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, ID="filename", datatype="char"),
Field(votable, ID="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
out = io.StringIO()
votable.to_xml(out)
# https://github.com/astropy/astropy/issues/13341
@np.errstate(over="ignore")
def test_open_files():
for filename in get_pkg_data_filenames('data', pattern='*.xml'):
if (filename.endswith('custom_datatype.xml') or
filename.endswith('timesys_errors.xml')):
continue
parse(filename)
def test_too_many_columns():
with pytest.raises(VOTableSpecError):
parse(get_pkg_data_filename('data/too_many_columns.xml.gz'))
def test_build_from_scratch(tmpdir):
# Create a new VOTable file...
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
tree.Field(votable, ID="filename", name='filename', datatype="char",
arraysize='1'),
tree.Field(votable, ID="matrix", name='matrix', datatype="double",
arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
votable.to_xml(str(tmpdir.join("new_votable.xml")))
votable = parse(str(tmpdir.join("new_votable.xml")))
table = votable.get_first_table()
assert_array_equal(
table.array.mask, np.array([(False, [[False, False], [False, False]]),
(False, [[False, False], [False, False]])],
dtype=[('filename', '?'),
('matrix', '?', (2, 2))]))
def test_validate(test_path_object=False):
"""
test_path_object is needed for test below ``test_validate_path_object``
so that file could be passed as pathlib.Path object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename('data/regression.xml')
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(fpath, output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/validation.txt'), encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
@mock.patch('subprocess.Popen')
def test_validate_xmllint_true(mock_subproc_popen):
process_mock = mock.Mock()
attrs = {'communicate.return_value': ('ok', 'ko'),
'returncode': 0}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
assert validate(get_pkg_data_filename('data/empty_table.xml'),
xmllint=True)
def test_validate_path_object():
"""
Validating when source is passed as path object. (#4412)
"""
test_validate(test_path_object=True)
def test_gzip_filehandles(tmpdir):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename('data/regression.xml'))
# W39: Bit values can not be masked
with pytest.warns(W39):
with open(str(tmpdir.join("regression.compressed.xml")), 'wb') as fd:
votable.to_xml(fd, compressed=True, _astropy_version="testing")
with open(str(tmpdir.join("regression.compressed.xml")), 'rb') as fd:
votable = parse(fd)
def test_from_scratch_example():
_run_test_from_scratch_example()
def _run_test_from_scratch_example():
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, name="filename", datatype="char", arraysize="*"),
Field(votable, name="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
assert table.array[0][0] == 'test1.xml'
def test_fileobj():
# Assert that what we get back is a raw C file pointer
# so it will be super fast in the C extension.
from astropy.utils.xml import iterparser
filename = get_pkg_data_filename('data/regression.xml')
with iterparser._convert_to_fd_or_read_function(filename) as fd:
if sys.platform == 'win32':
fd()
else:
assert isinstance(fd, io.FileIO)
def test_nonstandard_units():
from astropy import units as u
votable = parse(get_pkg_data_filename('data/nonstandard_units.xml'))
assert isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
votable = parse(get_pkg_data_filename('data/nonstandard_units.xml'),
unit_format='generic')
assert not isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
def test_resource_structure():
# Based on issue #1223, as reported by @astro-friedel and @RayPlante
from astropy.io.votable import tree as vot
vtf = vot.VOTableFile()
r1 = vot.Resource()
vtf.resources.append(r1)
t1 = vot.Table(vtf)
t1.name = "t1"
t2 = vot.Table(vtf)
t2.name = 't2'
r1.tables.append(t1)
r1.tables.append(t2)
r2 = vot.Resource()
vtf.resources.append(r2)
t3 = vot.Table(vtf)
t3.name = "t3"
t4 = vot.Table(vtf)
t4.name = "t4"
r2.tables.append(t3)
r2.tables.append(t4)
r3 = vot.Resource()
vtf.resources.append(r3)
t5 = vot.Table(vtf)
t5.name = "t5"
t6 = vot.Table(vtf)
t6.name = "t6"
r3.tables.append(t5)
r3.tables.append(t6)
buff = io.BytesIO()
vtf.to_xml(buff)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 3
for r in range(len(vtf2.resources)):
res = vtf2.resources[r]
assert len(res.tables) == 2
assert len(res.resources) == 0
def test_no_resource_check():
output = io.StringIO()
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(get_pkg_data_filename('data/no_resource.xml'),
output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('no_resource.txt', 'wt', encoding='utf-8') as fd:
    #     fd.write(''.join(output))
with open(
get_pkg_data_filename('data/no_resource.txt'), encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
def test_instantiate_vowarning():
# This used to raise a deprecation exception.
# See https://github.com/astropy/astroquery/pull/276
VOWarning(())
def test_custom_datatype():
votable = parse(get_pkg_data_filename('data/custom_datatype.xml'),
datatype_mapping={'bar': 'int'})
table = votable.get_first_table()
assert table.array.dtype['foo'] == np.int32
def _timesys_tests(votable):
assert len(list(votable.iter_timesys())) == 4
timesys = votable.get_timesys_by_id('time_frame')
assert timesys.timeorigin == 2455197.5
assert timesys.timescale == 'TCB'
assert timesys.refposition == 'BARYCENTER'
timesys = votable.get_timesys_by_id('mjd_origin')
assert timesys.timeorigin == 'MJD-origin'
assert timesys.timescale == 'TDB'
assert timesys.refposition == 'EMBARYCENTER'
timesys = votable.get_timesys_by_id('jd_origin')
assert timesys.timeorigin == 'JD-origin'
assert timesys.timescale == 'TT'
assert timesys.refposition == 'HELIOCENTER'
timesys = votable.get_timesys_by_id('no_origin')
assert timesys.timeorigin is None
assert timesys.timescale == 'UTC'
assert timesys.refposition == 'TOPOCENTER'
def test_timesys():
votable = parse(get_pkg_data_filename('data/timesys.xml'))
_timesys_tests(votable)
def test_timesys_roundtrip():
orig_votable = parse(get_pkg_data_filename('data/timesys.xml'))
bio = io.BytesIO()
orig_votable.to_xml(bio)
bio.seek(0)
votable = parse(bio)
_timesys_tests(votable)
def test_timesys_errors():
output = io.StringIO()
validate(get_pkg_data_filename('data/timesys_errors.xml'), output,
xmllint=False)
outstr = output.getvalue()
assert("E23: Invalid timeorigin attribute 'bad-origin'" in outstr)
assert("E22: ID attribute is required for all TIMESYS elements" in outstr)
assert("W48: Unknown attribute 'refposition_mispelled' on TIMESYS"
in outstr)
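# For reference, a TIMESYS element as exercised above looks like this sketch
# (attribute values taken from the assertions in ``_timesys_tests``):
#   <TIMESYS ID="time_frame" refposition="BARYCENTER" timeorigin="2455197.5"
#            timescale="TCB"/>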
|
87a8ec84bd3e5dbc5db1ae78dd3c57687b938484353e17ca43beb56f43aeea91 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the conversion to/from astropy.table
"""
import io
import os
import pathlib
import pytest
import numpy as np
from astropy.config import set_temp_config, reload_config
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_fileobj
from astropy.io.votable.table import parse, writeto
from astropy.io.votable import tree, conf, validate
from astropy.io.votable.exceptions import VOWarning, W39, E25
from astropy.table import Column, Table
from astropy.table.table_helpers import simple_table
from astropy.units import Unit
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
def test_table(tmpdir):
# Read the VOTABLE
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
votable = parse(get_pkg_data_filename('data/regression.xml'))
table = votable.get_first_table()
astropy_table = table.to_table()
for name in table.array.dtype.names:
assert np.all(astropy_table.mask[name] == table.array.mask[name])
votable2 = tree.VOTableFile.from_table(astropy_table)
t = votable2.get_first_table()
field_types = [
('string_test', {'datatype': 'char', 'arraysize': '*'}),
('string_test_2', {'datatype': 'char', 'arraysize': '10'}),
('unicode_test', {'datatype': 'unicodeChar', 'arraysize': '*'}),
('fixed_unicode_test', {'datatype': 'unicodeChar', 'arraysize': '10'}),
('string_array_test', {'datatype': 'char', 'arraysize': '4'}),
('unsignedByte', {'datatype': 'unsignedByte'}),
('short', {'datatype': 'short'}),
('int', {'datatype': 'int'}),
('long', {'datatype': 'long'}),
('double', {'datatype': 'double'}),
('float', {'datatype': 'float'}),
('array', {'datatype': 'long', 'arraysize': '2*'}),
('bit', {'datatype': 'bit'}),
('bitarray', {'datatype': 'bit', 'arraysize': '3x2'}),
('bitvararray', {'datatype': 'bit', 'arraysize': '*'}),
('bitvararray2', {'datatype': 'bit', 'arraysize': '3x2*'}),
('floatComplex', {'datatype': 'floatComplex'}),
('doubleComplex', {'datatype': 'doubleComplex'}),
('doubleComplexArray', {'datatype': 'doubleComplex', 'arraysize': '*'}),
('doubleComplexArrayFixed', {'datatype': 'doubleComplex', 'arraysize': '2'}),
('boolean', {'datatype': 'bit'}),
('booleanArray', {'datatype': 'bit', 'arraysize': '4'}),
('nulls', {'datatype': 'int'}),
('nulls_array', {'datatype': 'int', 'arraysize': '2x2'}),
('precision1', {'datatype': 'double'}),
('precision2', {'datatype': 'double'}),
('doublearray', {'datatype': 'double', 'arraysize': '*'}),
('bitarray2', {'datatype': 'bit', 'arraysize': '16'})]
    for field, expected in zip(t.fields, field_types):
        name, d = expected
assert field.ID == name
        assert field.datatype == d['datatype'], f'{name} expected {d["datatype"]} but got {field.datatype}'  # noqa
if 'arraysize' in d:
assert field.arraysize == d['arraysize']
# W39: Bit values can not be masked
with pytest.warns(W39):
writeto(votable2, os.path.join(str(tmpdir), "through_table.xml"))
def test_read_through_table_interface(tmpdir):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='main_table')
assert len(t) == 5
# Issue 8354
assert t['float'].format is None
fn = os.path.join(str(tmpdir), "table_interface.xml")
# W39: Bit values can not be masked
with pytest.warns(W39):
t.write(fn, table_id='FOO', format='votable')
with open(fn, 'rb') as fd:
t2 = Table.read(fd, format='votable', table_id='FOO')
assert len(t2) == 5
def test_read_through_table_interface2():
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='last_table')
assert len(t) == 0
def test_pass_kwargs_through_table_interface():
# Table.read() should pass on keyword arguments meant for parse()
filename = get_pkg_data_filename('data/nonstandard_units.xml')
t = Table.read(filename, format='votable', unit_format='generic')
assert t['Flux1'].unit == Unit("erg / (Angstrom cm2 s)")
def test_names_over_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=True)
assert table.colnames == [
'Name', 'GLON', 'GLAT', 'RAdeg', 'DEdeg', 'Jmag', 'Hmag', 'Kmag',
'G3.6mag', 'G4.5mag', 'G5.8mag', 'G8.0mag', '4.5mag', '8.0mag',
'Emag', '24mag', 'f_Name']
def test_explicit_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=False)
assert table.colnames == [
'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9',
'col10', 'col11', 'col12', 'col13', 'col14', 'col15', 'col16', 'col17']
def test_table_read_with_unnamed_tables():
"""
Issue #927
"""
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable')
assert len(t) == 1
def test_votable_path_object():
"""
Testing when votable is passed as pathlib.Path object #4412.
"""
fpath = pathlib.Path(get_pkg_data_filename('data/names.xml'))
table = parse(fpath).get_first_table().to_table()
assert len(table) == 1
assert int(table[0][3]) == 266
def test_from_table_without_mask():
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable')
def test_write_with_format():
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary")
obuff = output.getvalue()
assert b'VOTABLE version="1.4"' in obuff
assert b'BINARY' in obuff
assert b'TABLEDATA' not in obuff
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary2")
obuff = output.getvalue()
assert b'VOTABLE version="1.4"' in obuff
assert b'BINARY2' in obuff
assert b'TABLEDATA' not in obuff
def test_write_overwrite(tmpdir):
t = simple_table(3, 3)
filename = os.path.join(tmpdir, 'overwrite_test.vot')
t.write(filename, format='votable')
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format='votable')
t.write(filename, format='votable', overwrite=True)
def test_empty_table():
votable = parse(get_pkg_data_filename('data/empty_table.xml'))
table = votable.get_first_table()
astropy_table = table.to_table() # noqa
def test_no_field_not_empty_table():
votable = parse(get_pkg_data_filename('data/no_field_not_empty_table.xml'))
table = votable.get_first_table()
assert len(table.fields) == 0
assert len(table.infos) == 1
def test_no_field_not_empty_table_exception():
with pytest.raises(E25):
parse(get_pkg_data_filename('data/no_field_not_empty_table.xml'), verify='exception')
def test_binary2_masked_strings():
"""
Issue #8995
"""
# Read a VOTable which sets the null mask bit for each empty string value.
votable = parse(get_pkg_data_filename('data/binary2_masked_strings.xml'))
table = votable.get_first_table()
astropy_table = table.to_table()
# Ensure string columns have no masked values and can be written out
assert not np.any(table.array.mask['epoch_photometry_url'])
output = io.BytesIO()
astropy_table.write(output, format='votable')
def test_validate_output_invalid():
"""
Issue #12603. Test that we get the correct output from votable.validate with an invalid
votable.
"""
# A votable with errors
invalid_votable_filepath = get_pkg_data_filename('data/regression.xml')
# When output is None, check that validate returns validation output as a string
validate_out = validate(invalid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known error string
assert "E02: Incorrect number of elements in array." in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(invalid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is not valid)
assert validate_out is False
def test_validate_output_valid():
"""
Issue #12603. Test that we get the correct output from votable.validate with a valid
votable
"""
# A valid votable. (Example from the votable standard:
# https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html )
valid_votable_filepath = get_pkg_data_filename('data/valid_votable.xml')
# When output is None, check that validate returns validation output as a string
validate_out = validate(valid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known good output string
assert "astropy.io.votable found no violations" in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(valid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is valid)
assert validate_out is True
class TestVerifyOptions:
# Start off by checking the default (ignore)
def test_default(self):
parse(get_pkg_data_filename('data/gemini.xml'))
# Then try the various explicit options
def test_verify_ignore(self):
parse(get_pkg_data_filename('data/gemini.xml'), verify='ignore')
def test_verify_warn(self):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'), verify='warn')
assert len(w) == 24
def test_verify_exception(self):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'), verify='exception')
# Make sure the deprecated pedantic option still works for now
def test_pedantic_false(self):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'), pedantic=False)
assert len(w) == 25
def test_pedantic_true(self):
with pytest.warns(AstropyDeprecationWarning):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'), pedantic=True)
# Make sure that the default behavior can be set via configuration items
def test_conf_verify_ignore(self):
with conf.set_temp('verify', 'ignore'):
parse(get_pkg_data_filename('data/gemini.xml'))
def test_conf_verify_warn(self):
with conf.set_temp('verify', 'warn'):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'))
assert len(w) == 24
def test_conf_verify_exception(self):
with conf.set_temp('verify', 'exception'):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'))
# And make sure the old configuration item will keep working
def test_conf_pedantic_false(self, tmpdir):
with set_temp_config(tmpdir.strpath):
with open(tmpdir.join('astropy').join('astropy.cfg').strpath, 'w') as f:
f.write('[io.votable]\npedantic = False')
reload_config('astropy.io.votable')
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'))
assert len(w) == 25
def test_conf_pedantic_true(self, tmpdir):
with set_temp_config(tmpdir.strpath):
with open(tmpdir.join('astropy').join('astropy.cfg').strpath, 'w') as f:
f.write('[io.votable]\npedantic = True')
reload_config('astropy.io.votable')
with pytest.warns(AstropyDeprecationWarning):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'))
|
7db14155486c626e0ba7da6602689a0783cf053a9321265ebf2f88082564b576 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class to handle a validation result for a single VOTable
file.
"""
# STDLIB
from xml.parsers.expat import ExpatError
import hashlib
import os
import shutil
import socket
import subprocess
import warnings
import pickle
import urllib.request
import urllib.error
import http.client
# VO
from astropy.io.votable import table
from astropy.io.votable import exceptions
from astropy.io.votable import xmlutil
class Result:
def __init__(self, url, root='results', timeout=10):
self.url = url
m = hashlib.md5()
m.update(url)
self._hash = m.hexdigest()
self._root = root
self._path = os.path.join(
self._hash[0:2], self._hash[2:4], self._hash[4:])
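        # e.g. the MD5 hex digest 'd41d8cd98f00b204e9800998ecf8427e' yields
        # the cache path 'd4/1d/8cd98f00b204e9800998ecf8427e' under ``root``.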
if not os.path.exists(self.get_dirpath()):
os.makedirs(self.get_dirpath())
self.timeout = timeout
self.load_attributes()
def __enter__(self):
return self
def __exit__(self, *args):
self.save_attributes()
def get_dirpath(self):
return os.path.join(self._root, self._path)
def get_htmlpath(self):
return self._path
def get_attribute_path(self):
return os.path.join(self.get_dirpath(), "values.dat")
def get_vo_xml_path(self):
return os.path.join(self.get_dirpath(), "vo.xml")
# ATTRIBUTES
def load_attributes(self):
path = self.get_attribute_path()
if os.path.exists(path):
try:
with open(path, 'rb') as fd:
self._attributes = pickle.load(fd)
except Exception:
shutil.rmtree(self.get_dirpath())
os.makedirs(self.get_dirpath())
self._attributes = {}
else:
self._attributes = {}
def save_attributes(self):
path = self.get_attribute_path()
with open(path, 'wb') as fd:
pickle.dump(self._attributes, fd)
def __getitem__(self, key):
return self._attributes[key]
def __setitem__(self, key, val):
self._attributes[key] = val
def __contains__(self, key):
return key in self._attributes
# VO XML
def download_xml_content(self):
path = self.get_vo_xml_path()
if 'network_error' not in self._attributes:
self['network_error'] = None
if os.path.exists(path):
return
def fail(reason):
reason = str(reason)
with open(path, 'wb') as fd:
fd.write(f'FAILED: {reason}\n'.encode())
self['network_error'] = reason
r = None
try:
r = urllib.request.urlopen(
self.url.decode('ascii'), timeout=self.timeout)
except urllib.error.URLError as e:
if hasattr(e, 'reason'):
reason = e.reason
else:
reason = e.code
fail(reason)
return
except http.client.HTTPException as e:
fail(f"HTTPException: {str(e)}")
return
except (socket.timeout, OSError) as e:
fail("Timeout")
return
if r is None:
fail("Invalid URL")
return
try:
content = r.read()
except socket.timeout as e:
fail("Timeout")
return
else:
r.close()
with open(path, 'wb') as fd:
fd.write(content)
def get_xml_content(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
with open(path, 'rb') as fd:
content = fd.read()
return content
def validate_vo(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
self['version'] = ''
if 'network_error' in self and self['network_error'] is not None:
self['nwarnings'] = 0
self['nexceptions'] = 0
self['warnings'] = []
self['xmllint'] = None
self['warning_types'] = set()
return
nexceptions = 0
nwarnings = 0
t = None
lines = []
        with open(path, 'rb') as fd:
with warnings.catch_warnings(record=True) as warning_lines:
try:
                    t = table.parse(fd, verify='warn', filename=path)
except (ValueError, TypeError, ExpatError) as e:
lines.append(str(e))
nexceptions += 1
lines = [str(x.message) for x in warning_lines] + lines
if t is not None:
self['version'] = version = t.version
else:
self['version'] = version = "1.0"
if 'xmllint' not in self:
# Now check the VO schema based on the version in
# the file.
try:
success, stdout, stderr = xmlutil.validate_schema(path, version)
                # OSError is raised when the XML file exhausts memory and
                # the system sends a kill signal.
except OSError as e:
self['xmllint'] = None
self['xmllint_content'] = str(e)
else:
self['xmllint'] = (success == 0)
self['xmllint_content'] = stderr
warning_types = set()
for line in lines:
w = exceptions.parse_vowarning(line)
if w['is_warning']:
nwarnings += 1
if w['is_exception']:
nexceptions += 1
warning_types.add(w['warning'])
self['nwarnings'] = nwarnings
self['nexceptions'] = nexceptions
self['warnings'] = lines
self['warning_types'] = warning_types
def has_warning(self, warning_code):
return warning_code in self['warning_types']
def match_expectations(self):
if 'network_error' not in self:
self['network_error'] = None
if self['expected'] == 'good':
return (not self['network_error'] and
self['nwarnings'] == 0 and
self['nexceptions'] == 0)
elif self['expected'] == 'incorrect':
return (not self['network_error'] and
(self['nwarnings'] > 0 or
self['nexceptions'] > 0))
elif self['expected'] == 'broken':
return self['network_error'] is not None
def validate_with_votlint(self, path_to_stilts_jar):
filename = self.get_vo_xml_path()
p = subprocess.Popen(
["java", "-jar", path_to_stilts_jar, "votlint", "validate=false", filename],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if len(stdout) or p.returncode:
self['votlint'] = False
else:
self['votlint'] = True
self['votlint_content'] = stdout
def get_result_subsets(results, root, s=None):
all_results = []
correct = []
not_expected = []
fail_schema = []
schema_mismatch = []
fail_votlint = []
votlint_mismatch = []
network_failures = []
version_10 = []
version_11 = []
version_12 = []
version_unknown = []
has_warnings = []
warning_set = {}
has_exceptions = []
exception_set = {}
for url in results:
if s:
next(s)
if isinstance(url, Result):
x = url
else:
x = Result(url, root=root)
all_results.append(x)
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['xmllint'] is True):
correct.append(x)
if not x.match_expectations():
not_expected.append(x)
if x['xmllint'] is False:
fail_schema.append(x)
if (x['xmllint'] is False and
x['nwarnings'] == 0 and
x['nexceptions'] == 0):
schema_mismatch.append(x)
if 'votlint' in x and x['votlint'] is False:
fail_votlint.append(x)
if 'network_error' not in x:
x['network_error'] = None
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['network_error'] is None):
votlint_mismatch.append(x)
if 'network_error' in x and x['network_error'] is not None:
network_failures.append(x)
version = x['version']
if version == '1.0':
version_10.append(x)
elif version == '1.1':
version_11.append(x)
elif version == '1.2':
version_12.append(x)
else:
version_unknown.append(x)
if x['nwarnings'] > 0:
has_warnings.append(x)
for warning in x['warning_types']:
if (warning is not None and
len(warning) == 3 and
warning.startswith('W')):
warning_set.setdefault(warning, [])
warning_set[warning].append(x)
if x['nexceptions'] > 0:
has_exceptions.append(x)
for exc in x['warning_types']:
if exc is not None and len(exc) == 3 and exc.startswith('E'):
exception_set.setdefault(exc, [])
exception_set[exc].append(x)
warning_set = list(warning_set.items())
warning_set.sort()
exception_set = list(exception_set.items())
exception_set.sort()
tables = [
('all', 'All tests', all_results),
('correct', 'Correct', correct),
('unexpected', 'Unexpected', not_expected),
('schema', 'Invalid against schema', fail_schema),
('schema_mismatch', 'Invalid against schema/Passed vo.table',
schema_mismatch, ['ul']),
('fail_votlint', 'Failed votlint', fail_votlint),
('votlint_mismatch', 'Failed votlint/Passed vo.table',
votlint_mismatch, ['ul']),
('network_failures', 'Network failures', network_failures),
('version1.0', 'Version 1.0', version_10),
('version1.1', 'Version 1.1', version_11),
('version1.2', 'Version 1.2', version_12),
('version_unknown', 'Version unknown', version_unknown),
('warnings', 'Warnings', has_warnings)]
for warning_code, warning in warning_set:
if s:
next(s)
warning_class = getattr(exceptions, warning_code, None)
if warning_class:
warning_descr = warning_class.get_short_name()
tables.append(
(warning_code,
f'{warning_code}: {warning_descr}',
warning, ['ul', 'li']))
tables.append(
('exceptions', 'Exceptions', has_exceptions))
for exception_code, exc in exception_set:
if s:
next(s)
exception_class = getattr(exceptions, exception_code, None)
if exception_class:
exception_descr = exception_class.get_short_name()
tables.append(
(exception_code,
f'{exception_code}: {exception_descr}',
exc, ['ul', 'li']))
return tables
|
01531132e7ce6d6744bc260b9356f72f2490583432a7f6c69ca088af471c911f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
# test helper.run_tests function
from astropy import test as run_tests
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with pytest.raises(ValueError):
run_tests(package='fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with pytest.raises(ValueError):
run_tests(pastebin='not_an_option')
def test_unicode_literal_conversion():
assert isinstance('ångström', str)
|
812294f75396e8cb4e789b4f0207b102756c26549f791857db7b94c208a09d51 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import datetime
import pytest
from numpy.testing import assert_equal, assert_allclose
from astropy.table import Table, Column
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.units import Quantity
from astropy.utils.data import get_pkg_data_filename
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.timeseries.sampled import TimeSeries
INPUT_TIME = Time(['2016-03-22T12:30:31',
'2015-01-21T12:30:32',
'2016-03-22T12:30:40'])
PLAIN_TABLE = Table([[1, 2, 11], [3, 4, 1], [1, 1, 1]], names=['a', 'b', 'c'])
CSV_FILE = get_pkg_data_filename('data/sampled.csv')
def test_empty_initialization():
ts = TimeSeries()
ts['time'] = Time([50001, 50002, 50003], format='mjd')
def test_empty_initialization_invalid():
# Make sure things crash when the first column added is not a time column
ts = TimeSeries()
with pytest.raises(ValueError) as exc:
ts['flux'] = [1, 2, 3]
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'flux'")
def test_initialize_only_time():
ts = TimeSeries(time=INPUT_TIME)
assert ts['time'] is ts.time
# NOTE: the object in the table is a copy
assert_equal(ts.time.isot, INPUT_TIME.isot)
def test_initialization_with_data():
ts = TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert_equal(ts.time.isot, INPUT_TIME.isot)
assert_equal(ts['a'], [10, 2, 3])
assert_equal(ts['b'], [4, 5, 6])
def test_initialize_only_data():
with pytest.raises(TypeError) as exc:
TimeSeries(data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "Either 'time' or 'time_start' should be specified"
def test_initialization_with_table():
ts = TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE)
assert ts.colnames == ['time', 'a', 'b', 'c']
def test_initialization_with_time_delta():
ts = TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
time_delta=TimeDelta(3, format='sec'),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert_equal(ts.time.isot, ['2018-07-01T10:10:10.000',
'2018-07-01T10:10:13.000',
'2018-07-01T10:10:16.000'])
def test_initialization_missing_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "'time' is scalar, so 'time_delta' is required"
def test_initialization_invalid_time_and_time_start():
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, time_start=datetime(2018, 7, 1, 10, 10, 10),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "Cannot specify both 'time' and 'time_start'"
def test_initialization_invalid_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
time_delta=[1, 4, 3],
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "'time_delta' should be a Quantity or a TimeDelta"
def test_initialization_with_time_in_data():
data = PLAIN_TABLE.copy()
data['time'] = INPUT_TIME
ts1 = TimeSeries(data=data)
assert set(ts1.colnames) == {'time', 'a', 'b', 'c'}
assert all(ts1.time == INPUT_TIME)
ts2 = TimeSeries(data=[[10, 2, 3], INPUT_TIME], names=['a', 'time'])
assert set(ts2.colnames) == {'time', 'a'}
assert all(ts2.time == INPUT_TIME)
with pytest.raises(TypeError) as exc:
# Don't allow ambiguous cases of passing multiple 'time' columns
TimeSeries(data=data, time=INPUT_TIME)
assert exc.value.args[0] == "'time' has been given both in the table and as a keyword argument"
with pytest.raises(TypeError) as exc:
# 'time' is a protected name, don't allow ambiguous cases
TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], INPUT_TIME], names=['a', 'time'])
assert exc.value.args[0] == "'time' has been given both in the table and as a keyword argument"
def test_initialization_n_samples():
# Make sure things crash with incorrect n_samples
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE, n_samples=1000)
assert exc.value.args[0] == ("'n_samples' has been given both and it is not the "
"same length as the input data.")
def test_initialization_length_mismatch():
with pytest.raises(ValueError) as exc:
TimeSeries(time=INPUT_TIME, data=[[10, 2], [4, 5]], names=['a', 'b'])
assert exc.value.args[0] == "Length of 'time' (3) should match data length (2)"
def test_initialization_invalid_both_time_and_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, time_delta=TimeDelta(3, format='sec'))
assert exc.value.args[0] == ("'time_delta' should not be specified since "
"'time' is an array")
def test_fold():
times = Time([1, 2, 3, 8, 9, 12], format='unix')
ts = TimeSeries(time=times)
ts['flux'] = [1, 4, 4, 3, 2, 3]
# Try without epoch time, as it should default to the first time and
# wrapping at half the period.
tsf = ts.fold(period=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0, 1, -1.2, 0.6, -1.6, 1.4], rtol=1e-6)
# Try with epoch time
tsf = ts.fold(period=3.2 * u.s, epoch_time=Time(1.6, format='unix'))
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [-0.6, 0.4, 1.4, 0.0, 1.0, 0.8], rtol=1e-6, atol=1e-6)
# Now with wrap_phase set to the full period
tsf = ts.fold(period=3.2 * u.s, wrap_phase=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0, 1, 2, 0.6, 1.6, 1.4], rtol=1e-6)
# Now set epoch_phase to be 1/4 of the way through the phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.8 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0.8, -1.4, -0.4, 1.4, -0.8, -1.0], rtol=1e-6)
# And combining epoch_phase and wrap_phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.8 * u.s, wrap_phase=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0.8, 1.8, 2.8, 1.4, 2.4, 2.2], rtol=1e-6)
# Now repeat the above tests but with normalization applied
# Try without epoch time, as it should default to the first time and
# wrapping at half the period.
tsf = ts.fold(period=3.2 * u.s, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0, 1/3.2, -1.2/3.2, 0.6/3.2, -1.6/3.2, 1.4/3.2],
rtol=1e-6)
# Try with epoch time
tsf = ts.fold(period=3.2 * u.s, epoch_time=Time(1.6, format='unix'),
normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[-0.6/3.2, 0.4/3.2, 1.4/3.2, 0.0/3.2, 1.0/3.2, 0.8/3.2],
rtol=1e-6, atol=1e-6)
# Now with wrap_phase set to the full period
tsf = ts.fold(period=3.2 * u.s, wrap_phase=1, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0, 1/3.2, 2/3.2, 0.6/3.2, 1.6/3.2, 1.4/3.2],
rtol=1e-6)
# Now set epoch_phase to be 1/4 of the way through the phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.25, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0.8/3.2, -1.4/3.2, -0.4/3.2, 1.4/3.2, -0.8/3.2, -1.0/3.2],
rtol=1e-6)
# And combining epoch_phase and wrap_phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.25, wrap_phase=1,
normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0.8/3.2, 1.8/3.2, 2.8/3.2, 1.4/3.2, 2.4/3.2, 2.2/3.2],
rtol=1e-6)
def test_fold_invalid_options():
times = Time([1, 2, 3, 8, 9, 12], format='unix')
ts = TimeSeries(time=times)
ts['flux'] = [1, 4, 4, 3, 2, 3]
with pytest.raises(u.UnitsError,
match='period should be a Quantity in units of time'):
ts.fold(period=3.2)
with pytest.raises(u.UnitsError,
match='period should be a Quantity in units of time'):
ts.fold(period=3.2 * u.m)
with pytest.raises(u.UnitsError,
match='epoch_phase should be a Quantity in units of '
'time when normalize_phase=False'):
ts.fold(period=3.2 * u.s, epoch_phase=0.2)
with pytest.raises(u.UnitsError,
match='epoch_phase should be a dimensionless Quantity '
'or a float when normalize_phase=True'):
ts.fold(period=3.2 * u.s, epoch_phase=0.2 * u.s, normalize_phase=True)
with pytest.raises(u.UnitsError,
match='wrap_phase should be a Quantity in units of '
'time when normalize_phase=False'):
ts.fold(period=3.2 * u.s, wrap_phase=0.2)
with pytest.raises(u.UnitsError,
match='wrap_phase should be dimensionless when '
'normalize_phase=True'):
ts.fold(period=3.2 * u.s, wrap_phase=0.2 * u.s, normalize_phase=True)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and the period'):
ts.fold(period=3.2 * u.s, wrap_phase=-0.1 * u.s)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and the period'):
ts.fold(period=3.2 * u.s, wrap_phase=-4.2 * u.s)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and 1'):
ts.fold(period=3.2 * u.s, wrap_phase=-0.1, normalize_phase=True)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and 1'):
ts.fold(period=3.2 * u.s, wrap_phase=2.2, normalize_phase=True)
def test_pandas():
pandas = pytest.importorskip("pandas")
df1 = pandas.DataFrame()
df1['a'] = [1, 2, 3]
df1.set_index(pandas.DatetimeIndex(INPUT_TIME.datetime64), inplace=True)
ts = TimeSeries.from_pandas(df1)
assert_equal(ts.time.isot, INPUT_TIME.isot)
assert ts.colnames == ['time', 'a']
assert len(ts.indices) == 1
assert (ts.indices['time'].columns[0] == INPUT_TIME).all()
ts_tcb = TimeSeries.from_pandas(df1, time_scale='tcb')
assert ts_tcb.time.scale == 'tcb'
df2 = ts.to_pandas()
assert (df2.index.values == pandas.Index(INPUT_TIME.datetime64).values).all()
assert df2.columns == pandas.Index(['a'])
assert (df1['a'] == df2['a']).all()
with pytest.raises(TypeError) as exc:
TimeSeries.from_pandas(None)
assert exc.value.args[0] == 'Input should be a pandas DataFrame'
df4 = pandas.DataFrame()
df4['a'] = [1, 2, 3]
with pytest.raises(TypeError) as exc:
TimeSeries.from_pandas(df4)
assert exc.value.args[0] == 'DataFrame does not have a DatetimeIndex'
def test_read_time_missing():
with pytest.raises(ValueError) as exc:
TimeSeries.read(CSV_FILE, format='csv')
assert exc.value.args[0] == '``time_column`` should be provided since the default Table readers are being used.'
def test_read_time_wrong():
with pytest.raises(ValueError) as exc:
TimeSeries.read(CSV_FILE, time_column='abc', format='csv')
assert exc.value.args[0] == "Time column 'abc' not found in the input data."
def test_read():
timeseries = TimeSeries.read(CSV_FILE, time_column='Date', format='csv')
assert timeseries.colnames == ['time', 'A', 'B', 'C', 'D', 'E', 'F', 'G']
assert len(timeseries) == 11
assert timeseries['time'].format == 'iso'
assert timeseries['A'].sum() == 266.5
@pytest.mark.remote_data(source='astropy')
def test_kepler_astropy():
from astropy.units import UnitsWarning
filename = get_pkg_data_filename('timeseries/kplr010666592-2009131110544_slc.fits')
with pytest.warns(UnitsWarning):
timeseries = TimeSeries.read(filename, format='kepler.fits')
assert timeseries["time"].format == 'isot'
assert timeseries["time"].scale == 'tdb'
assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
assert len(timeseries) == 14280
assert len(timeseries.columns) == 20
@pytest.mark.remote_data(source='astropy')
def test_tess_astropy():
filename = get_pkg_data_filename('timeseries/hlsp_tess-data-alerts_tess_phot_00025155310-s01_tess_v1_lc.fits')
with pytest.warns(UserWarning, match='Ignoring 815 rows with NaN times'):
timeseries = TimeSeries.read(filename, format='tess.fits')
assert timeseries["time"].format == 'isot'
assert timeseries["time"].scale == 'tdb'
assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
assert len(timeseries) == 19261
assert len(timeseries.columns) == 20
def test_required_columns():
# Test the machinery that makes sure that the required columns are present
ts = TimeSeries(time=INPUT_TIME,
data=[[10, 2, 3], [4, 5, 6]],
names=['a', 'b'])
# In the examples below, the operation (e.g. remove_column) is actually
# carried out before the checks are made, so we need to use copy() so that
# we don't change the main version of the time series.
# Make sure copy works fine
ts.copy()
with pytest.raises(ValueError) as exc:
ts.copy().add_column(Column([3, 4, 5], name='c'), index=0)
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'c'")
with pytest.raises(ValueError) as exc:
ts.copy().add_columns([Column([3, 4, 5], name='d'),
Column([3, 4, 5], name='e')], indexes=[0, 1])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'd'")
with pytest.raises(ValueError) as exc:
ts.copy().keep_columns(['a', 'b'])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'a'")
with pytest.raises(ValueError) as exc:
ts.copy().remove_column('time')
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'a'")
with pytest.raises(ValueError) as exc:
ts.copy().remove_columns(['time', 'a'])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'b'")
with pytest.raises(ValueError) as exc:
ts.copy().rename_column('time', 'banana')
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'banana'")
# https://github.com/astropy/astropy/issues/13009
ts_2cols_required = ts.copy()
ts_2cols_required._required_columns = ['time', 'a']
with pytest.raises(ValueError) as exc:
ts_2cols_required.remove_column('a')
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"['time', 'a'] as the first columns but found ['time', 'b']")
@pytest.mark.parametrize('cls', [BoxLeastSquares, LombScargle])
def test_periodogram(cls):
# Note that we don't need to check the actual results from the periodogram
# classes here since these are tested extensively in
# astropy.timeseries.periodograms.
ts = TimeSeries(time=INPUT_TIME,
data=[[10, 2, 3], [4, 5, 6]],
names=['a', 'b'])
p1 = cls.from_timeseries(ts, 'a')
assert isinstance(p1, cls)
assert_allclose(p1.t.jd, ts.time.jd)
assert_equal(p1.y, ts['a'])
assert p1.dy is None
p2 = cls.from_timeseries(ts, 'a', uncertainty='b')
assert_quantity_allclose(p2.dy, ts['b'])
p3 = cls.from_timeseries(ts, 'a', uncertainty=0.1)
assert_allclose(p3.dy, 0.1)
|
f345aa9863d2bdfdc87b5482fc6332988bd0be42e1c25160e0f72186eb5a49b3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["bls_fast", "bls_slow"]
import numpy as np
from functools import partial
from ._impl import bls_impl
def bls_slow(t, y, ivar, period, duration, oversample, use_likelihood):
"""Compute the periodogram using a brute force reference method
t : array-like
Sequence of observation times.
y : array-like
Sequence of observations associated with times t.
ivar : array-like
The inverse variance of ``y``.
period : array-like
The trial periods where the periodogram should be computed.
duration : array-like
The durations that should be tested.
    oversample : int
The resolution of the phase grid in units of durations.
    use_likelihood : bool
If true, maximize the log likelihood over phase, duration, and depth.
Returns
-------
power : array-like
The periodogram evaluated at the periods in ``period``.
depth : array-like
The estimated depth of the maximum power model at each period.
depth_err : array-like
The 1-sigma uncertainty on ``depth``.
duration : array-like
The maximum power duration at each period.
transit_time : array-like
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
The log likelihood of the maximum power model.
"""
f = partial(_bls_slow_one, t, y, ivar, duration,
oversample, use_likelihood)
return _apply(f, period)
def bls_fast(t, y, ivar, period, duration, oversample, use_likelihood):
"""Compute the periodogram using an optimized Cython implementation
t : array-like
Sequence of observation times.
y : array-like
Sequence of observations associated with times t.
ivar : array-like
The inverse variance of ``y``.
period : array-like
The trial periods where the periodogram should be computed.
duration : array-like
The durations that should be tested.
    oversample : int
The resolution of the phase grid in units of durations.
    use_likelihood : bool
If true, maximize the log likelihood over phase, duration, and depth.
Returns
-------
power : array-like
The periodogram evaluated at the periods in ``period``.
depth : array-like
The estimated depth of the maximum power model at each period.
depth_err : array-like
The 1-sigma uncertainty on ``depth``.
duration : array-like
The maximum power duration at each period.
transit_time : array-like
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
The log likelihood of the maximum power model.
"""
return bls_impl(
t, y, ivar, period, duration, oversample, use_likelihood
)
def _bls_slow_one(t, y, ivar, duration, oversample, use_likelihood, period):
"""A private function to compute the brute force periodogram result"""
best = (-np.inf, None)
hp = 0.5*period
min_t = np.min(t)
for dur in duration:
# Compute the phase grid (this is set by the duration and oversample).
d_phase = dur / oversample
phase = np.arange(0, period+d_phase, d_phase)
for t0 in phase:
# Figure out which data points are in and out of transit.
m_in = np.abs((t-min_t-t0+hp) % period - hp) < 0.5*dur
m_out = ~m_in
# Compute the estimates of the in and out-of-transit flux.
ivar_in = np.sum(ivar[m_in])
ivar_out = np.sum(ivar[m_out])
y_in = np.sum(y[m_in] * ivar[m_in]) / ivar_in
y_out = np.sum(y[m_out] * ivar[m_out]) / ivar_out
# Use this to compute the best fit depth and uncertainty.
depth = y_out - y_in
depth_err = np.sqrt(1.0 / ivar_in + 1.0 / ivar_out)
snr = depth / depth_err
# Compute the log likelihood of this model.
loglike = -0.5*np.sum((y_in - y[m_in])**2 * ivar[m_in])
loglike += 0.5*np.sum((y_out - y[m_in])**2 * ivar[m_in])
# Choose which objective should be used for the optimization.
if use_likelihood:
objective = loglike
else:
objective = snr
# If this model is better than any before, keep it.
if depth > 0 and objective > best[0]:
best = (
objective,
(objective, depth, depth_err, dur, (t0+min_t) % period,
snr, loglike)
)
return best[1]
def _apply(f, period):
return tuple(map(np.array, zip(*map(f, period))))
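# A minimal usage sketch (illustrative values; both implementations share the
# same signature and return the tuple documented above):
#
#     rng = np.random.default_rng(0)
#     t = np.sort(rng.uniform(0, 10, 200))
#     y = np.ones_like(t)
#     y[np.abs((t + 1.0) % 2.0 - 1.0) < 0.1] -= 0.05   # inject a box transit
#     ivar = np.full_like(y, 1.0e4)                    # inverse variance
#     power, depth, depth_err, dur, t0, snr, loglike = bls_slow(
#         t, y, ivar, np.linspace(1.5, 2.5, 50), np.array([0.2]), 10, False)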
|
d56ee42e607d87d79c4ba63660f75afec3355835827a0a0f62760ea842598ac5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"]
import numpy as np
from astropy import units
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms.lombscargle.core import has_units, strip_units
from astropy import units as u
from . import methods
from astropy.timeseries.periodograms.base import BasePeriodogram
def validate_unit_consistency(reference_object, input_object):
if has_units(reference_object):
input_object = units.Quantity(input_object, unit=reference_object.unit)
else:
if has_units(input_object):
input_object = units.Quantity(input_object, unit=units.one)
input_object = input_object.value
return input_object
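# Illustrative behaviour of the helper above (a sketch, not exhaustive): when
# the reference carries units the input is coerced to them; otherwise any
# units on the input are stripped, e.g.
#     validate_unit_consistency(1.0 * u.day, 2.0)         -> <Quantity 2. d>
#     validate_unit_consistency(np.array([1.0]), 2.0 * u.one) -> 2.0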
class BoxLeastSquares(BasePeriodogram):
"""Compute the box least squares periodogram
This method is a commonly used tool for discovering transiting exoplanets
or eclipsing binaries in photometric time series datasets. This
implementation is based on the "box least squares (BLS)" method described
in [1]_ and [2]_.
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times ``t``.
dy : float, array-like, or `~astropy.units.Quantity`, optional
Error or sequence of observational errors associated with times ``t``.
Examples
--------
Generate noisy data with a transit:
>>> rand = np.random.default_rng(42)
>>> t = rand.uniform(0, 10, 500)
>>> y = np.ones_like(t)
    >>> y[np.abs((t + 1.0) % 2.0 - 1) < 0.08] = 1.0 - 0.1
>>> y += 0.01 * rand.standard_normal(len(t))
Compute the transit periodogram on a heuristically determined period grid
and find the period with maximum power:
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16)
>>> results.period[np.argmax(results.power)] # doctest: +FLOAT_CMP
2.000412388152837
Compute the periodogram on a user-specified period grid:
>>> periods = np.linspace(1.9, 2.1, 5)
>>> results = model.power(periods, 0.16)
>>> results.power # doctest: +FLOAT_CMP
array([0.01723948, 0.0643028 , 0.1338783 , 0.09428816, 0.03577543])
    If the inputs are Astropy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.day
>>> y = y * u.dimensionless_unscaled
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16 * u.day)
>>> results.period.unit
Unit("d")
>>> results.power.unit
Unit(dimensionless)
References
----------
.. [1] Kovacs, Zucker, & Mazeh (2002), A&A, 391, 369
(arXiv:astro-ph/0206099)
.. [2] Hartman & Bakos (2016), Astronomy & Computing, 17, 1
(arXiv:1605.06811)
"""
def __init__(self, t, y, dy=None):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
# so can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to('day')
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, (Time, TimeDelta)):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy)
def autoperiod(self, duration,
minimum_period=None, maximum_period=None,
minimum_n_transit=3, frequency_factor=1.0):
"""Determine a suitable grid of periods
This method uses a set of heuristics to select a conservative period
grid that is uniform in frequency. This grid might be too fine for
some user's needs depending on the precision requirements or the
sampling of the data. The grid can be made coarser by increasing
``frequency_factor``.
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
minimum_period, maximum_period : float or `~astropy.units.Quantity` ['time'], optional
The minimum/maximum periods to search. If not provided, these will
be computed as described in the notes below.
minimum_n_transit : int, optional
If ``maximum_period`` is not provided, this is used to compute the
maximum period to search by asserting that any systems with at
            least ``minimum_n_transit`` transits will be within the range of
            searched periods. Note that this is not the same as requiring that
            ``minimum_n_transit`` transits be detected. The default
value is ``3``.
frequency_factor : float, optional
A factor to control the frequency spacing as described in the
notes below. The default value is ``1.0``.
Returns
-------
period : array-like or `~astropy.units.Quantity` ['time']
The set of periods computed using these heuristics with the same
units as ``t``.
Notes
-----
The default minimum period is chosen to be twice the maximum duration
because there won't be much sensitivity to periods shorter than that.
The default maximum period is computed as
.. code-block:: python
            maximum_period = (max(t) - min(t)) / (minimum_n_transit - 1)
        ensuring that any systems producing at least ``minimum_n_transit``
        transits over the baseline are within the range of searched periods.
The frequency spacing is given by
.. code-block:: python
df = frequency_factor * min(duration) / (max(t) - min(t))**2
so the grid can be made finer by decreasing ``frequency_factor`` or
coarser by increasing ``frequency_factor``.
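        Examples
        --------
        A minimal sketch, assuming the default ``minimum_n_transit=3`` and
        ``frequency_factor=1.0`` (values are illustrative):
        >>> from astropy import units as u
        >>> model = BoxLeastSquares(np.linspace(0, 20, 100) * u.day,
        ...                         np.ones(100) * u.dimensionless_unscaled)
        >>> periods = model.autoperiod(0.2 * u.day)
        >>> len(periods)
        4801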
"""
duration = self._validate_duration(duration)
baseline = strip_units(self._trel.max() - self._trel.min())
min_duration = strip_units(np.min(duration))
# Estimate the required frequency spacing
# Because of the sparsity of a transit, this must be much finer than
# the frequency resolution for a sinusoidal fit. For a sinusoidal fit,
# df would be 1/baseline (see LombScargle), but here this should be
# scaled proportionally to the duration in units of baseline.
df = frequency_factor * min_duration / baseline**2
# If a minimum period is not provided, choose one that is twice the
# maximum duration because we won't be sensitive to any periods
# shorter than that.
if minimum_period is None:
minimum_period = 2.0 * strip_units(np.max(duration))
else:
minimum_period = validate_unit_consistency(self._trel, minimum_period)
minimum_period = strip_units(minimum_period)
# If no maximum period is provided, choose one by requiring that
# all signals with at least minimum_n_transit should be detectable.
if maximum_period is None:
if minimum_n_transit <= 1:
raise ValueError("minimum_n_transit must be greater than 1")
maximum_period = baseline / (minimum_n_transit-1)
else:
maximum_period = validate_unit_consistency(self._trel, maximum_period)
maximum_period = strip_units(maximum_period)
if maximum_period < minimum_period:
minimum_period, maximum_period = maximum_period, minimum_period
if minimum_period <= 0.0:
raise ValueError("minimum_period must be positive")
# Convert bounds to frequency
minimum_frequency = 1.0/strip_units(maximum_period)
maximum_frequency = 1.0/strip_units(minimum_period)
# Compute the number of frequencies and the frequency grid
nf = 1 + int(np.round((maximum_frequency - minimum_frequency)/df))
return 1.0/(maximum_frequency-df*np.arange(nf)) * self._t_unit()
def autopower(self, duration, objective=None, method=None, oversample=10,
minimum_n_transit=3, minimum_period=None,
maximum_period=None, frequency_factor=1.0):
"""Compute the periodogram at set of heuristically determined periods
This method calls :func:`BoxLeastSquares.autoperiod` to determine
the period grid and then :func:`BoxLeastSquares.power` to compute
the periodogram. See those methods for documentation of the arguments.
"""
period = self.autoperiod(duration,
minimum_n_transit=minimum_n_transit,
minimum_period=minimum_period,
maximum_period=maximum_period,
frequency_factor=frequency_factor)
return self.power(period, duration, objective=objective, method=method,
oversample=oversample)
def power(self, period, duration, objective=None, method=None,
oversample=10):
"""Compute the periodogram for a set of periods
Parameters
----------
period : array-like or `~astropy.units.Quantity` ['time']
The periods where the power should be computed
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations to test
objective : {'likelihood', 'snr'}, optional
The scalar that should be optimized to find the best fit phase,
duration, and depth. This can be either ``'likelihood'`` (default)
to optimize the log-likelihood of the model, or ``'snr'`` to
optimize the signal-to-noise with which the transit depth is
measured.
method : {'fast', 'slow'}, optional
The computational method used to compute the periodogram. This is
mainly included for the purposes of testing and most users will
want to use the optimized ``'fast'`` method (default) that is
implemented in Cython. ``'slow'`` is a brute-force method that is
used to test the results of the ``'fast'`` method.
oversample : int, optional
The number of bins per duration that should be used. This sets the
time resolution of the phase fit with larger values of
``oversample`` yielding a finer grid and higher computational cost.
Returns
-------
results : BoxLeastSquaresResults
The periodogram results as a :class:`BoxLeastSquaresResults`
object.
Raises
------
ValueError
If ``oversample`` is not an integer greater than 0 or if
``objective`` or ``method`` are not valid.
"""
period, duration = self._validate_period_and_duration(period, duration)
# Check for absurdities in the ``oversample`` choice
try:
oversample = int(oversample)
except TypeError:
raise ValueError(f"oversample must be an int, got {oversample}")
if oversample < 1:
raise ValueError("oversample must be greater than or equal to 1")
# Select the periodogram objective
if objective is None:
objective = "likelihood"
allowed_objectives = ["snr", "likelihood"]
if objective not in allowed_objectives:
raise ValueError(f"Unrecognized method '{objective}'\n"
f"allowed methods are: {allowed_objectives}")
use_likelihood = (objective == "likelihood")
# Select the computational method
if method is None:
method = "fast"
allowed_methods = ["fast", "slow"]
if method not in allowed_methods:
raise ValueError(f"Unrecognized method '{method}'\n"
f"allowed methods are: {allowed_methods}")
# Format and check the input arrays
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
t_ref = np.min(t)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# Make sure that the period and duration arrays are C-order
period_fmt = np.ascontiguousarray(strip_units(period),
dtype=np.float64)
duration = np.ascontiguousarray(strip_units(duration),
dtype=np.float64)
# Select the correct implementation for the chosen method
if method == "fast":
bls = methods.bls_fast
else:
bls = methods.bls_slow
# Run the implementation
results = bls(
t - t_ref, y - np.median(y), ivar, period_fmt, duration,
oversample, use_likelihood)
return self._format_results(t_ref, objective, period, results)
def _as_relative_time(self, name, times):
"""
Convert the provided times (if absolute) to relative times using the
current _tstart value. If the times provided are relative, they are
returned without conversion (though we still do some checks).
"""
if isinstance(times, TimeDelta):
times = times.to('day')
if self._tstart is None:
if isinstance(times, Time):
raise TypeError('{} was provided as an absolute time but '
'the BoxLeastSquares class was initialized '
'with relative times.'.format(name))
else:
if isinstance(times, Time):
times = (times - self._tstart).to(u.day)
else:
raise TypeError('{} was provided as a relative time but '
'the BoxLeastSquares class was initialized '
'with absolute times.'.format(name))
times = validate_unit_consistency(self._trel, times)
return times
def _as_absolute_time_if_needed(self, name, times):
"""
Convert the provided times to absolute times using the current _tstart
value, if needed.
"""
if self._tstart is not None:
# Some time formats/scales can't represent dates/times too far
# off from the present, so we need to mask values offset by
# more than 100,000 yr (the periodogram algorithm can return
            # transit times of e.g. 1e300 for some periods).
reset = np.abs(times.to_value(u.year)) > 100000
times[reset] = 0
times = self._tstart + times
times[reset] = np.nan
return times
def model(self, t_model, period, duration, transit_time):
"""Compute the transit model at the given period, duration, and phase
Parameters
----------
t_model : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
Times at which to compute the model.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
y_model : array-like or `~astropy.units.Quantity`
The model evaluated at the times ``t_model`` with units of ``y``.
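        Examples
        --------
        A minimal sketch with unitless, illustrative inputs:
        >>> t = np.linspace(0, 10, 500)
        >>> bls = BoxLeastSquares(t, np.ones_like(t))
        >>> y_model = bls.model(t, 2.0, 0.2, 0.5)
        >>> y_model.shape
        (500,)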
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
t_model = strip_units(self._as_relative_time('t_model', t_model))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# Compute the depth
hp = 0.5*period
m_in = np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
m_out = ~m_in
y_in = np.sum(y[m_in] * ivar[m_in]) / np.sum(ivar[m_in])
y_out = np.sum(y[m_out] * ivar[m_out]) / np.sum(ivar[m_out])
# Evaluate the model
y_model = y_out + np.zeros_like(t_model)
m_model = np.abs((t_model-transit_time+hp) % period-hp) < 0.5*duration
y_model[m_model] = y_in
return y_model * self._y_unit()
def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# This a helper function that will compute the depth for several
# different hypothesized transit models with different parameters
def _compute_depth(m, y_out=None, var_out=None):
if np.any(m) and (var_out is None or np.isfinite(var_out)):
var_m = 1.0 / np.sum(ivar[m])
y_m = np.sum(y[m] * ivar[m]) * var_m
if y_out is None:
return y_m, var_m
return y_out - y_m, np.sqrt(var_m + var_out)
return 0.0, np.inf
# Compute the depth of the fiducial model and the two models at twice
# the period
hp = 0.5*period
m_in = np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
m_out = ~m_in
m_odd = np.abs((t-transit_time) % (2*period) - period) \
< 0.5*duration
m_even = np.abs((t-transit_time+period) % (2*period) - period) \
< 0.5*duration
y_out, var_out = _compute_depth(m_out)
depth = _compute_depth(m_in, y_out, var_out)
depth_odd = _compute_depth(m_odd, y_out, var_out)
depth_even = _compute_depth(m_even, y_out, var_out)
y_in = y_out - depth[0]
# Compute the depth of the model at a phase of 0.5*period
m_phase = np.abs((t-transit_time) % period - hp) < 0.5*duration
depth_phase = _compute_depth(m_phase,
*_compute_depth((~m_phase) & m_out))
# Compute the depth of a model with a period of 0.5*period
m_half = np.abs((t-transit_time+0.25*period) % (0.5*period)
- 0.25*period) < 0.5*duration
depth_half = _compute_depth(m_half, *_compute_depth(~m_half))
# Compute the number of points in each transit
transit_id = np.round((t[m_in]-transit_time) / period).astype(int)
transit_times = period * np.arange(transit_id.min(),
transit_id.max()+1) + transit_time
unique_ids, unique_counts = np.unique(transit_id,
return_counts=True)
unique_ids -= np.min(transit_id)
transit_id -= np.min(transit_id)
counts = np.zeros(np.max(transit_id) + 1, dtype=int)
counts[unique_ids] = unique_counts
# Compute the per-transit log likelihood
ll = -0.5 * ivar[m_in] * ((y[m_in] - y_in)**2 - (y[m_in] - y_out)**2)
lls = np.zeros(len(counts))
for i in unique_ids:
lls[i] = np.sum(ll[transit_id == i])
full_ll = -0.5*np.sum(ivar[m_in] * (y[m_in] - y_in)**2)
full_ll -= 0.5*np.sum(ivar[m_out] * (y[m_out] - y_out)**2)
# Compute the log likelihood of a sine model
A = np.vstack((
np.sin(2*np.pi*t/period), np.cos(2*np.pi*t/period),
np.ones_like(t)
)).T
w = np.linalg.solve(np.dot(A.T, A * ivar[:, None]),
np.dot(A.T, y * ivar))
mod = np.dot(A, w)
sin_ll = -0.5*np.sum((y-mod)**2*ivar)
# Format the results
y_unit = self._y_unit()
ll_unit = 1
if self.dy is None:
ll_unit = y_unit * y_unit
return dict(
transit_times=self._as_absolute_time_if_needed('transit_times', transit_times * self._t_unit()),
per_transit_count=counts,
per_transit_log_likelihood=lls * ll_unit,
depth=(depth[0] * y_unit, depth[1] * y_unit),
depth_phased=(depth_phase[0] * y_unit, depth_phase[1] * y_unit),
depth_half=(depth_half[0] * y_unit, depth_half[1] * y_unit),
depth_odd=(depth_odd[0] * y_unit, depth_odd[1] * y_unit),
depth_even=(depth_even[0] * y_unit, depth_even[1] * y_unit),
harmonic_amplitude=np.sqrt(np.sum(w[:2]**2)) * y_unit,
harmonic_delta_log_likelihood=(sin_ll - full_ll) * ll_unit,
)
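    # Example of consuming the statistics (sketch; values hypothetical):
    #
    #     stats = model.compute_stats(2.0 * u.day, 0.16 * u.day,
    #                                 0.5 * u.day)
    #     depth, depth_err = stats['depth']
    #     odd_even_diff = abs(stats['depth_odd'][0] - stats['depth_even'][0])
    #
    # A significant odd/even depth difference, or a positive
    # harmonic_delta_log_likelihood, is a common red flag when vetting.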
def transit_mask(self, t, period, duration, transit_time):
"""Compute which data points are in transit for a given parameter set
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times where the mask should be evaluated.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
transit_mask : array-like
            A boolean array where ``True`` indicates an in-transit point and
            ``False`` indicates an out-of-transit point.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
t = strip_units(self._as_relative_time('t', t))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
hp = 0.5*period
return np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
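    # Example: remove in-transit points before searching for a second
    # signal (sketch; names carried over from the examples above):
    #
    #     in_transit = model.transit_mask(t, best_period, duration, t0)
    #     model2 = BoxLeastSquares(t[~in_transit], y[~in_transit])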
def _validate_inputs(self, t, y, dy):
"""Private method used to check the consistency of the inputs
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times t.
dy : float, array-like, or `~astropy.units.Quantity`
Error or sequence of observational errors associated with times t.
Returns
-------
t, y, dy : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The inputs with consistent shapes and units.
Raises
------
ValueError
If the dimensions are incompatible or if the units of dy cannot be
converted to the units of y.
"""
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if dy is not None:
dy = validate_unit_consistency(y, dy)
return t, y, dy
def _validate_duration(self, duration):
"""Private method used to check a set of test durations
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity`
The set of durations that will be considered.
Returns
-------
duration : array-like or `~astropy.units.Quantity`
The input reformatted with the correct shape and units.
Raises
------
ValueError
If the units of duration cannot be converted to the units of t.
"""
duration = np.atleast_1d(np.abs(duration))
if duration.ndim != 1 or duration.size == 0:
raise ValueError("duration must be 1-dimensional")
return validate_unit_consistency(self._trel, duration)
def _validate_period_and_duration(self, period, duration):
"""Private method used to check a set of periods and durations
Parameters
----------
period : float, array-like, or `~astropy.units.Quantity` ['time']
The set of test periods.
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
Returns
-------
period, duration : array-like or `~astropy.units.Quantity` ['time']
The inputs reformatted with the correct shapes and units.
Raises
------
ValueError
If the units of period or duration cannot be converted to the
units of t.
"""
duration = self._validate_duration(duration)
period = np.atleast_1d(np.abs(period))
if period.ndim != 1 or period.size == 0:
raise ValueError("period must be 1-dimensional")
period = validate_unit_consistency(self._trel, period)
if not np.min(period) > np.max(duration):
raise ValueError("The maximum transit duration must be shorter "
"than the minimum period")
return period, duration
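    # For example, trial periods of [2.0, 3.0] days with durations up to
    # 0.2 days pass this check, while any trial period shorter than the
    # longest duration raises the ValueError above.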
def _format_results(self, t_ref, objective, period, results):
"""A private method used to wrap and add units to the periodogram
Parameters
----------
t_ref : float
The minimum time in the time series (a reference time).
objective : str
The name of the objective used in the optimization.
period : array-like or `~astropy.units.Quantity` ['time']
The set of trial periods.
results : tuple
The output of one of the periodogram implementations.
"""
(power, depth, depth_err, duration, transit_time, depth_snr,
log_likelihood) = results
transit_time += t_ref
if has_units(self._trel):
transit_time = units.Quantity(transit_time, unit=self._trel.unit)
transit_time = self._as_absolute_time_if_needed('transit_time', transit_time)
duration = units.Quantity(duration, unit=self._trel.unit)
if has_units(self.y):
depth = units.Quantity(depth, unit=self.y.unit)
depth_err = units.Quantity(depth_err, unit=self.y.unit)
depth_snr = units.Quantity(depth_snr, unit=units.one)
if self.dy is None:
if objective == "likelihood":
power = units.Quantity(power, unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood,
unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood, unit=units.one)
return BoxLeastSquaresResults(
objective, period, power, depth, depth_err, duration, transit_time,
depth_snr, log_likelihood)
def _t_unit(self):
if has_units(self._trel):
return self._trel.unit
else:
return 1
def _y_unit(self):
if has_units(self.y):
return self.y.unit
else:
return 1
class BoxLeastSquaresResults(dict):
"""The results of a BoxLeastSquares search
Attributes
----------
objective : str
The scalar used to optimize to find the best fit phase, duration, and
depth. See :func:`BoxLeastSquares.power` for more information.
period : array-like or `~astropy.units.Quantity` ['time']
The set of test periods.
power : array-like or `~astropy.units.Quantity`
The periodogram evaluated at the periods in ``period``. If
``objective`` is:
* ``'likelihood'``: the values of ``power`` are the
log likelihood maximized over phase, depth, and duration, or
* ``'snr'``: the values of ``power`` are the signal-to-noise with
which the depth is measured maximized over phase, depth, and
duration.
depth : array-like or `~astropy.units.Quantity`
The estimated depth of the maximum power model at each period.
depth_err : array-like or `~astropy.units.Quantity`
The 1-sigma uncertainty on ``depth``.
duration : array-like or `~astropy.units.Quantity` ['time']
The maximum power duration at each period.
transit_time : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like or `~astropy.units.Quantity`
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like or `~astropy.units.Quantity`
The log likelihood of the maximum power model.
"""
def __init__(self, *args):
super().__init__(zip(
("objective", "period", "power", "depth", "depth_err",
"duration", "transit_time", "depth_snr", "log_likelihood"),
args
))
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
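# A BoxLeastSquaresResults instance is usable both as a dict and as a
# namespace, so (sketch) ``results['power'] is results.power`` holds for
# any results returned by BoxLeastSquares.power or autopower.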
|
eed96ab9e721e276da439ce2023495c30e23248a8c55649fd51405c0b55dfe9e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms.bls import BoxLeastSquares
from astropy.timeseries.periodograms.lombscargle.core import has_units
def assert_allclose_blsresults(blsresult, other, **kwargs):
"""Assert that another BoxLeastSquaresResults object is consistent
    This function loops over all attributes and compares the values using
:func:`~astropy.tests.helper.assert_quantity_allclose` function.
Parameters
----------
other : BoxLeastSquaresResults
The other results object to compare.
"""
for k, v in blsresult.items():
if k not in other:
raise AssertionError(f"missing key '{k}'")
if k == "objective":
assert v == other[k], (
f"Mismatched objectives. Expected '{v}', got '{other[k]}'"
)
continue
assert_quantity_allclose(v, other[k], **kwargs)
# NOTE: PR 10644 replaced the deprecated use of RandomState, but no new seed
# could be found that avoided test failures, so the values below are
# hardcoded.
@pytest.fixture
def data():
t = np.array([
6.96469186, 2.86139335, 2.26851454, 5.51314769, 7.1946897,
4.2310646, 9.80764198, 6.84829739, 4.80931901, 3.92117518,
3.43178016, 7.29049707, 4.38572245, 0.59677897, 3.98044255,
7.37995406, 1.8249173, 1.75451756, 5.31551374, 5.31827587,
6.34400959, 8.49431794, 7.24455325, 6.11023511, 7.22443383,
3.22958914, 3.61788656, 2.28263231, 2.93714046, 6.30976124,
0.9210494, 4.33701173, 4.30862763, 4.93685098, 4.2583029,
3.12261223, 4.26351307, 8.93389163, 9.44160018, 5.01836676,
6.23952952, 1.15618395, 3.17285482, 4.14826212, 8.66309158,
2.50455365, 4.83034264, 9.85559786, 5.19485119, 6.12894526,
1.20628666, 8.26340801, 6.03060128, 5.45068006, 3.42763834,
3.04120789, 4.17022211, 6.81300766, 8.75456842, 5.10422337,
6.69313783, 5.85936553, 6.24903502, 6.74689051, 8.42342438,
0.83194988, 7.63682841, 2.43666375, 1.94222961, 5.72456957,
0.95712517, 8.85326826, 6.27248972, 7.23416358, 0.16129207,
5.94431879, 5.56785192, 1.58959644, 1.53070515, 6.95529529,
3.18766426, 6.91970296, 5.5438325, 3.88950574, 9.2513249,
8.41669997, 3.57397567, 0.43591464, 3.04768073, 3.98185682,
7.0495883, 9.95358482, 3.55914866, 7.62547814, 5.93176917,
6.91701799, 1.51127452, 3.98876293, 2.40855898, 3.43456014,
5.13128154, 6.6662455, 1.05908485, 1.30894951, 3.21980606,
6.61564337, 8.46506225, 5.53257345, 8.54452488, 3.84837811,
3.16787897, 3.54264676, 1.71081829, 8.29112635, 3.38670846,
5.52370075, 5.78551468, 5.21533059, 0.02688065, 9.88345419,
9.05341576, 2.07635861, 2.92489413, 5.20010153, 9.01911373,
9.83630885, 2.57542064, 5.64359043, 8.06968684, 3.94370054,
7.31073036, 1.61069014, 6.00698568, 8.65864458, 9.83521609,
0.7936579, 4.28347275, 2.0454286, 4.50636491, 5.47763573,
0.9332671, 2.96860775, 9.2758424, 5.69003731, 4.57411998,
7.53525991, 7.41862152, 0.48579033, 7.08697395, 8.39243348,
1.65937884, 7.80997938, 2.86536617, 3.06469753, 6.65261465,
1.11392172, 6.64872449, 8.87856793, 6.96311268, 4.40327877,
4.38214384, 7.65096095, 5.65642001, 0.84904163, 5.82671088,
8.14843703, 3.37066383, 9.2757658, 7.50717, 5.74063825,
7.51643989, 0.79148961, 8.59389076, 8.21504113, 9.0987166,
1.28631198, 0.81780087, 1.38415573, 3.9937871, 4.24306861,
5.62218379, 1.2224355, 2.01399501, 8.11644348, 4.67987574,
8.07938209, 0.07426379, 5.51592726, 9.31932148, 5.82175459,
2.06095727, 7.17757562, 3.7898585, 6.68383947, 0.29319723,
6.35900359, 0.32197935, 7.44780655, 4.72913002, 1.21754355,
5.42635926, 0.66774443, 6.53364871, 9.96086327, 7.69397337,
5.73774114, 1.02635259, 6.99834075, 6.61167867, 0.49097131,
7.92299302, 5.18716591, 4.25867694, 7.88187174, 4.11569223,
4.81026276, 1.81628843, 3.213189, 8.45532997, 1.86903749,
4.17291061, 9.89034507, 2.36599812, 9.16832333, 9.18397468,
0.91296342, 4.63652725, 5.02216335, 3.1366895, 0.47339537,
2.41685637, 0.95529642, 2.38249906, 8.07791086, 8.94978288,
0.43222892, 3.01946836, 9.80582199, 5.39504823, 6.26309362,
0.05545408, 4.84909443, 9.88328535, 3.75185527, 0.97038159,
4.61908762, 9.63004466, 3.41830614, 7.98922733, 7.98846331,
2.08248297, 4.43367702, 7.15601275, 4.10519785, 1.91006955,
9.67494307, 6.50750366, 8.65459852, 2.52423578e-01, 2.66905815,
5.02071100, 6.74486351e-01, 9.93033261, 2.36462396, 3.74292182,
2.14011915, 1.05445866, 2.32479786, 3.00610136, 6.34442268,
2.81234781, 3.62276761, 5.94284372e-02, 3.65719126, 5.33885982,
1.62015837, 5.97433108, 2.93152469, 6.32050495, 2.61966053e-01,
8.87593460, 1.61186304e-01, 1.26958031, 7.77162462, 4.58952322e-01,
7.10998694, 9.71046141, 8.71682933, 7.10161651, 9.58509743,
4.29813338, 8.72878914, 3.55957668, 9.29763653, 1.48777656,
9.40029015, 8.32716197, 8.46054838, 1.23923010, 5.96486898,
1.63924809e-01, 7.21184366, 7.73751413e-02, 8.48222774e-01, 2.25498410,
8.75124534, 3.63576318, 5.39959935, 5.68103214, 2.25463360,
5.72146768, 6.60951795, 2.98245393, 4.18626859, 4.53088925,
9.32350662, 5.87493747, 9.48252372, 5.56034754, 5.00561421,
3.53221097e-02, 4.80889044, 9.27454999, 1.98365689, 5.20911344e-01,
4.06778893, 3.72396481, 8.57153058, 2.66111156e-01, 9.20149230,
6.80902999, 9.04225994, 6.07529071, 8.11953312, 3.35543874,
3.49566228, 3.89874230, 7.54797082, 3.69291174, 2.42219806,
9.37668357, 9.08011084, 3.48797316, 6.34638070, 2.73842212,
2.06115129, 3.36339529, 3.27099893, 8.82276101, 8.22303815,
7.09623229, 9.59345225, 4.22543353, 2.45033039, 1.17398437,
3.01053358, 1.45263734, 9.21860974e-01, 6.02932197, 3.64187450,
5.64570343, 1.91335721, 6.76905860, 2.15505447, 2.78023594,
7.41760422, 5.59737896, 3.34836413, 5.42988783, 6.93984703,
9.12132121, 5.80713213, 2.32686379, 7.46697631, 7.77769018,
2.00401315, 8.20574220, 4.64934855, 7.79766662, 2.37478220,
3.32580270, 9.53697119, 6.57815073, 7.72877831, 6.88374343,
2.04304118, 4.70688748, 8.08963873, 6.75035127, 6.02788565e-02,
8.74077427e-01, 3.46794720, 9.44365540, 4.91190481, 2.70176267,
3.60423719, 2.10652628, 4.21200057, 2.18035440, 8.45752507,
4.56270599, 2.79802018, 9.32891648, 3.14351354, 9.09714662,
4.34180910e-01, 7.07115060, 4.83889039, 4.44221061, 3.63233444e-01,
4.06831905e-01, 3.32753617, 9.47119540, 6.17659977, 3.68874842,
6.11977039, 2.06131536, 1.65066443, 3.61817266, 8.63353352,
5.09401727, 2.96901516, 9.50251625, 8.15966090, 3.22973943,
9.72098245, 9.87351098, 4.08660134, 6.55923103, 4.05653198,
2.57348106, 8.26526760e-01, 2.63610346, 2.71479854, 3.98639080,
1.84886031, 9.53818403, 1.02879885, 6.25208533, 4.41697388,
4.23518049, 3.71991783, 8.68314710, 2.80476981, 2.05761574e-01,
9.18097016, 8.64480278, 2.76901790, 5.23487548, 1.09088197,
9.34270688e-01, 8.37466108, 4.10265718, 6.61716540, 9.43200558,
2.45130592, 1.31598313e-01, 2.41484058e-01, 7.09385692, 9.24551885,
4.67330273, 3.75109148, 5.42860425, 8.58916838, 6.52153874,
2.32979897, 7.74580205, 1.34613497, 1.65559971, 6.12682283,
2.38783406, 7.04778548, 3.49518527, 2.77423960, 9.98918406,
4.06161246e-01, 6.45822522, 3.86995850e-01, 7.60210258, 2.30089957,
8.98318671e-01, 6.48449712, 7.32601217, 6.78095315, 5.19009471e-01,
2.94306946, 4.51088346, 2.87103290, 8.10513456, 1.31115105,
6.12179362, 9.88214944, 9.02556539, 2.22157062, 8.18876137e-04,
9.80597342, 8.82712985, 9.19472466, 4.15503551, 7.44615462])
y = np.ones_like(t)
dy = np.array([
0.00606416, 0.00696152, 0.00925774, 0.00563806, 0.00946933,
0.00748254, 0.00713048, 0.00652823, 0.00958424, 0.00758812,
0.00902013, 0.00928826, 0.00961191, 0.0065169, 0.00669905,
0.00797537, 0.00720662, 0.00966421, 0.00698782, 0.00738889,
0.00808593, 0.0070237, 0.00996239, 0.00549426, 0.00610302,
0.00661328, 0.00573861, 0.0064211, 0.00889623, 0.00761446,
0.00516977, 0.00991311, 0.00808003, 0.0052947, 0.00830584,
0.00689185, 0.00567837, 0.00781832, 0.0086354, 0.00835563,
0.00623757, 0.00762433, 0.00768832, 0.00858402, 0.00679934,
0.00898866, 0.00813961, 0.00519166, 0.0077324, 0.00930956,
0.00783787, 0.00587914, 0.00755188, 0.00878473, 0.00555053,
0.0090855, 0.00583741, 0.00767038, 0.00692872, 0.00624312,
0.00823716, 0.00518696, 0.00880023, 0.0076347, 0.00937886,
0.00760359, 0.00517517, 0.005718, 0.00897802, 0.00745988,
0.0072094, 0.00659217, 0.00642275, 0.00982943, 0.00716485,
0.00942002, 0.00824082, 0.00929214, 0.00926225, 0.00978156,
0.00848971, 0.00902698, 0.00866564, 0.00802613, 0.00858677,
0.00857875, 0.00520454, 0.00758055, 0.00896326, 0.00621481,
0.00732574, 0.00717493, 0.00701394, 0.0056092, 0.00762856,
0.00723124, 0.00831696, 0.00774707, 0.00513771, 0.00515959,
0.0085068, 0.00853791, 0.0097997, 0.00938352, 0.0073403,
0.00812953, 0.00728591, 0.00611473, 0.00688338, 0.00551942,
0.00833264, 0.00596015, 0.00737734, 0.00983718, 0.00515834,
0.00575865, 0.0064929, 0.00970903, 0.00954421, 0.00581,
0.00990559, 0.00875374, 0.00769989, 0.00965851, 0.00940304,
0.00695658, 0.00828172, 0.00823693, 0.00663484, 0.00589695,
0.00733405, 0.00631641, 0.00677533, 0.00977072, 0.00730569,
0.00842446, 0.00668115, 0.00997931, 0.00829384, 0.00598005,
0.00549092, 0.0097159, 0.00972389, 0.00810664, 0.00508496,
0.00612767, 0.00900638, 0.0093773, 0.00726995, 0.0068276,
0.00637113, 0.00558485, 0.00557872, 0.00976301, 0.00904313,
0.0058239, 0.00603525, 0.00827776, 0.00882332, 0.00905157,
0.00581669, 0.00992064, 0.00613901, 0.00794708, 0.00793808,
0.00983681, 0.00828834, 0.00792452, 0.00759386, 0.00882329,
0.00553028, 0.00501046, 0.00976244, 0.00749329, 0.00664168,
0.00684027, 0.00901922, 0.00691185, 0.00885085, 0.00720231,
0.00922039, 0.00538102, 0.00740564, 0.00733425, 0.00632164,
0.00971807, 0.00952514, 0.00721798, 0.0054858, 0.00603392,
0.00635746, 0.0074211, 0.00669189, 0.00887068, 0.00738013,
0.00935185, 0.00997891, 0.00609918, 0.00805836, 0.00923751,
0.00972618, 0.00645043, 0.00863521, 0.00507508, 0.00939571,
0.00531969, 0.00866698, 0.00997305, 0.00750595, 0.00604667,
0.00797322, 0.00812075, 0.00834036, 0.00586306, 0.00949356,
0.00810496, 0.00521784, 0.00842021, 0.00598042, 0.0051367,
0.00775477, 0.00906657, 0.00929971, 0.0055176, 0.00831521,
0.00855038, 0.00647258, 0.00985682, 0.00639344, 0.00534991,
0.0075964, 0.00847157, 0.0062233, 0.00669291, 0.00781814,
0.00943339, 0.00873663, 0.00604796, 0.00625889, 0.0076194,
0.00884479, 0.00809381, 0.00750662, 0.00798563, 0.0087803,
0.0076854, 0.00948876, 0.00973534, 0.00957677, 0.00877259,
0.00623161, 0.00692636, 0.0064, 0.0082883, 0.00662111,
0.00877196, 0.00556755, 0.00887682, 0.00792951, 0.00917694,
0.00715438, 0.00812482, 0.00777206, 0.00987836, 0.00877737,
0.00772407, 0.00587016, 0.00952057, 0.00602919, 0.00825022,
0.00968236, 0.0061179, 0.00612962, 0.00925909, 0.00913828,
0.00675852, 0.00632548, 0.00563694, 0.00993968, 0.00917672,
0.00949696, 0.0075684, 0.00557192, 0.0052629, 0.00665291,
0.00960165, 0.00973791, 0.00920582, 0.0057934, 0.00709962,
0.00623121, 0.00602675, 0.00842413, 0.00743056, 0.00662455,
0.00550107, 0.00772382, 0.00673513, 0.00695548, 0.00655254,
0.00693598, 0.0077793, 0.00507072, 0.00923823, 0.0096096,
0.00775265, 0.00634011, 0.0099512, 0.00691597, 0.00846828,
0.00844976, 0.00717155, 0.00599579, 0.0098329, 0.00531845,
0.00742575, 0.00610365, 0.00646987, 0.00914264, 0.00683633,
0.00541674, 0.00598155, 0.00930187, 0.00988514, 0.00633991,
0.00837704, 0.00540599, 0.00861733, 0.00708218, 0.0095908,
0.00655768, 0.00970733, 0.00751624, 0.00674446, 0.0082351,
0.00624873, 0.00614882, 0.00598173, 0.0097995, 0.00746457,
0.00875807, 0.00736996, 0.0079377, 0.00792069, 0.00989943,
0.00834217, 0.00619885, 0.00507599, 0.00609341, 0.0072776,
0.0069671, 0.00906163, 0.00892778, 0.00544548, 0.00976005,
0.00763728, 0.00798202, 0.00702528, 0.0082475, 0.00935663,
0.00836968, 0.00985049, 0.00850561, 0.0091086, 0.0052252,
0.00836349, 0.00827376, 0.00550873, 0.00921194, 0.00807086,
0.00549164, 0.00797234, 0.00739208, 0.00616647, 0.00509878,
0.00682784, 0.00809926, 0.0066464, 0.00653627, 0.00875561,
0.00879312, 0.00859383, 0.00550591, 0.00758083, 0.00778899,
0.00872402, 0.00951589, 0.00684519, 0.00714332, 0.00866384,
0.00831318, 0.00778935, 0.0067507, 0.00597676, 0.00591904,
0.00540792, 0.005406, 0.00922899, 0.00691836, 0.0053037,
0.00948213, 0.00611635, 0.00634062, 0.00597249, 0.00983751,
0.0055627, 0.00861082, 0.00966044, 0.00834001, 0.00929363,
0.00621224, 0.00836964, 0.00850436, 0.00729166, 0.00935273,
0.00847193, 0.00947439, 0.00876602, 0.00760145, 0.00749344,
0.00726864, 0.00510823, 0.00767571, 0.00711487, 0.00578767,
0.00559535, 0.00724676, 0.00519957, 0.0099329, 0.0068906,
0.00691055, 0.00525563, 0.00713336, 0.00507873, 0.00515047,
0.0066955, 0.00910484, 0.00729411, 0.0050742, 0.0058161,
0.00869961, 0.00869147, 0.00877261, 0.00675835, 0.00676138,
0.00901038, 0.00699069, 0.00863596, 0.00790562, 0.00682171,
0.00540003, 0.00558063, 0.00944779, 0.0072617, 0.00997002,
0.00681948, 0.00624977, 0.0067527, 0.00671543, 0.00818678,
0.00506369, 0.00881634, 0.00708207, 0.0071612, 0.00740558,
0.00724606, 0.00748735, 0.00672952, 0.00726673, 0.00702326,
0.00759121, 0.00811635, 0.0062052, 0.00754219, 0.00797311,
0.00508474, 0.00760247, 0.00619647, 0.00702269, 0.00913265,
0.00663118, 0.00741608, 0.00512371, 0.00654375, 0.00819861,
0.00657581, 0.00602899, 0.00645328, 0.00977189, 0.00543401,
0.00731679, 0.00529193, 0.00769329, 0.00573018, 0.00817042,
0.00632199, 0.00845458, 0.00673573, 0.00502084, 0.00647447])
period = 2.0
transit_time = 0.5
duration = 0.16
depth = 0.2
m = np.abs((t-transit_time+0.5*period) % period-0.5*period) < 0.5*duration
y[m] = 1.0 - depth
randn_arr = np.array([
-1.00326528e-02, -8.45644428e-01, 9.11460610e-01, -1.37449688e+00,
-5.47065645e-01, -7.55266106e-05, -1.21166803e-01, -2.00858547e+00,
-9.20646543e-01, 1.68234342e-01, -1.31989156e+00, 1.26642930e+00,
4.95180889e-01, -5.14240391e-01, -2.20292465e-01, 1.86156412e+00,
9.35988451e-01, 3.80219145e-01, -1.41551877e+00, 1.62961132e+00,
1.05240107e+00, -1.48405388e-01, -5.49698069e-01, -1.87903939e-01,
-1.20193668e+00, -4.70785558e-01, 7.63160514e-01, -1.80762128e+00,
-3.14074374e-01, 1.13755973e-01, 1.03568037e-01, -1.17893695e+00,
-1.18215289e+00, 1.08916538e+00, -1.22452909e+00, 1.00865096e+00,
-4.82365315e-01, 1.07979635e+00, -4.21078505e-01, -1.16647132e+00,
8.56554856e-01, -1.73912222e-02, 1.44857659e+00, 8.92200085e-01,
-2.29426629e-01, -4.49667602e-01, 2.33723433e-02, 1.90210018e-01,
-8.81748527e-01, 8.41939573e-01, -3.97363492e-01, -4.23027745e-01,
-5.40688337e-01, 2.31017267e-01, -6.92052602e-01, 1.34970110e-01,
2.76660307e+00, -5.36094601e-02, -4.34004738e-01, -1.66768923e+00,
5.02219248e-02, -1.10923094e+00, -3.75558119e-01, 1.51607594e-01,
-1.73098945e+00, 1.57462752e-01, 3.04515175e-01, -1.29710002e+00,
-3.92309192e-01, -1.83066636e+00, 1.57550094e+00, 3.30563277e-01,
-1.79588501e-01, -1.63435831e-01, 1.13144361e+00, -9.41655519e-02,
3.30816771e-01, 1.51862956e+00, -3.46167148e-01, -1.09263532e+00,
-8.24500575e-01, 1.42866383e+00, 9.14283085e-02, -5.02331288e-01,
9.73644380e-01, 9.97957386e-01, -4.75647768e-01, -9.71936837e-01,
-1.57052860e+00, -1.79388892e+00, -2.64986452e-01, -8.93195947e-01,
1.85847441e+00, 5.85377547e-02, -1.94214954e+00, 1.41872928e+00,
1.61710309e-01, 7.04979480e-01, 6.82034777e-01, 2.96556567e-01,
5.23342630e-01, 2.38760672e-01, -1.10638591e+00, 3.66732198e-01,
1.02390550e+00, -2.10056413e-01, 5.51302218e-01, 4.19589145e-01,
1.81565206e+00, -2.52750301e-01, -2.92004163e-01, -1.16931740e-01,
-1.02391075e-01, -2.27261771e+00, -6.42609841e-01, 2.99885067e-01,
-8.25651467e-03, -7.99339154e-01, -6.64779252e-01, -3.55613128e-01,
-8.01571781e-01, -5.13050610e-01, -5.39390119e-01, 8.95370847e-01,
1.01639127e+00, 9.33585094e-01, 4.26701799e-01, -7.08322484e-01,
9.59830450e-01, -3.14250587e-01, 2.30522083e-02, 1.33822053e+00,
8.39928561e-02, 2.47284030e-01, -1.41277949e+00, 4.87009294e-01,
-9.80006647e-01, 1.01193966e+00, -1.84599177e-01, -2.23616884e+00,
-3.58020103e-01, -2.28034538e-01, 4.85475226e-01, 6.70512391e-01,
-3.27764245e-01, 1.01286819e+00, -3.16705533e+00, -7.13988998e-01,
-1.11236427e+00, -1.25418351e+00, 9.59706371e-01, 8.29170399e-01,
-7.75770020e-01, 1.17805700e+00, 1.01466892e-01, -4.21684101e-01,
-6.92922796e-01, -7.78271726e-01, 4.72774857e-01, 6.50154901e-01,
2.38501212e-01, -2.05021768e+00, 2.96358656e-01, 5.65396564e-01,
-6.69205605e-01, 4.32505429e-02, -1.86388430e+00, -1.22996906e+00,
-3.24235348e-01, -3.09751144e-01, 3.51679372e-01, -1.18692539e+00,
-3.41206065e-01, -4.89779780e-01, 5.28010474e-01, 1.42104277e+00,
1.72092032e+00, -1.56844005e+00, -4.80141918e-02, -1.11252931e+00,
-6.47449515e-02, 4.22919280e-01, 8.14908987e-02, -4.90116988e-02,
1.48303917e+00, 7.20989392e-01, -2.72654462e-01, 2.42113609e-02,
8.70897807e-01, 6.09790506e-01, -4.25076104e-01, -1.77524284e+00,
-1.18465749e+00, 1.45979225e-01, -1.78652685e+00, -1.52394498e-01,
-4.53569176e-01, 9.99252803e-01, -1.31804382e+00, -1.93176898e+00,
-4.19640742e-01, 6.34763132e-01, 1.06991860e+00, -9.09327017e-01,
4.70263748e-01, -1.11143045e+00, -7.48827466e-01, 5.67594726e-01,
7.18150543e-01, -9.99380749e-01, 4.74898323e-01, -1.86849981e+00,
-2.02658907e-01, -1.13424803e+00, -8.07699340e-01, -1.27607735e+00,
5.53626395e-01, 5.53874470e-01, -6.91200445e-01, 3.75582306e-01,
2.61272553e-01, -1.28451754e-01, 2.15817020e+00, -8.40878617e-01,
1.43050907e-02, -3.82387029e-01, -3.71780015e-01, 1.59412004e-01,
-2.94395700e-01, -8.60426760e-01, 1.24227498e-01, 1.18233165e+00,
9.42766380e-01, 2.03044488e-01, -7.35396814e-01, 1.86429600e-01,
1.08464302e+00, 1.19118926e+00, 3.59687060e-01, -3.64357200e-01,
-2.02752749e-01, 7.72045927e-01, 6.86346215e-01, -1.75769961e+00,
6.58617565e-01, 7.11288340e-01, -8.87191425e-01, -7.64981116e-01,
-7.57164098e-01, -6.80262803e-01, -1.41674959e+00, 3.13091930e-01,
-7.85719399e-01, -7.03838361e-02, -4.97568783e-01, 2.55177521e-01,
-1.01061704e+00, 2.45265375e-01, 3.89781016e-01, 8.27594585e-01,
1.96776909e+00, -2.09210177e+00, 3.20314334e-01, -7.09162842e-01,
-1.92505867e+00, 8.41630623e-01, 1.33219988e+00, -3.91627710e-01,
2.10916296e-01, -6.40767402e-02, 4.34197668e-01, 8.80535749e-01,
3.44937336e-01, 3.45769929e-01, 1.25973654e+00, -1.64662222e-01,
9.23064571e-01, -8.22000422e-01, 1.60708495e+00, 7.37825392e-01,
-4.03759534e-01, -2.11454815e+00, -3.10717131e-04, -1.18180941e+00,
2.99634603e-01, 1.45116882e+00, 1.60059793e-01, -1.78012614e-01,
3.42205404e-01, 2.85650196e-01, -2.36286411e+00, 2.40936864e-01,
6.20277356e-01, -2.59341634e-01, 9.78559078e-01, -1.27674575e-01,
7.66998762e-01, 2.27310511e+00, -9.63911290e-02, -1.94213217e+00,
-3.36591724e-01, -1.72589000e+00, 6.11237826e-01, 1.30935097e+00,
6.95879662e-01, 3.20308213e-01, -6.44925458e-01, 1.57564975e+00,
7.53276212e-01, 2.84469557e-01, 2.04860319e-01, 1.11627359e-01,
4.52216424e-01, -6.13327179e-01, 1.52524993e+00, 1.52339753e-01,
6.00054450e-01, -4.33567278e-01, 3.74918534e-01, -2.28175243e+00,
-1.11829888e+00, -3.14131532e-02, -1.32247311e+00, 2.43941406e+00,
-1.66808131e+00, 3.45900749e-01, 1.65577315e+00, 4.81287059e-01,
-3.10227553e-01, -5.52144084e-01, 6.73255489e-01, -8.00270681e-01,
-1.19486110e-01, 6.91198606e-01, -3.07879027e-01, 8.75100102e-02,
-3.04086293e-01, -9.69797604e-01, 1.18915048e+00, 1.39306624e+00,
-3.16699954e-01, -2.65576159e-01, -1.77899339e-01, 5.38803274e-01,
-9.05300265e-01, -8.85253056e-02, 2.62959055e-01, 6.42042149e-01,
-2.78083727e+00, 4.03403210e-01, 3.45846762e-01, 1.00772824e+00,
-5.26264015e-01, -5.18353205e-01, 1.20251659e+00, -1.56315671e+00,
1.62909029e+00, 2.55589446e+00, 4.77451685e-01, 8.14098474e-01,
-1.48958171e+00, -6.94559787e-01, 1.05786255e+00, 3.61815347e-01,
-1.81427463e-01, 2.32869132e-01, 5.06976484e-01, -2.93095701e-01,
-2.89459450e-02, -3.63073748e-02, -1.05227898e+00, 3.23594628e-01,
1.80358591e+00, 1.73196213e+00, -1.47639930e+00, 5.70631220e-01,
6.75503781e-01, -4.10510463e-01, -9.64200035e-01, -1.32081431e+00,
-4.44703779e-01, 3.50009137e-01, -1.58058176e-01, -6.10933088e-01,
-1.24915663e+00, 3.50716258e-01, 1.06654245e+00, -9.26921972e-01,
4.48428964e-01, -1.87947524e+00, -6.57466109e-01, 7.29604120e-01,
-1.11776721e+00, -6.04436725e-01, 1.41796683e+00, -7.32843980e-01,
-8.53944819e-01, 5.75848362e-01, 1.95473356e+00, -2.39669947e-01,
7.68735860e-01, 1.34576918e+00, 3.25552163e-01, -2.69917901e-01,
-8.76326739e-01, -1.42521096e+00, 1.11170175e+00, 1.80957146e-01,
1.33280094e+00, 9.88925316e-01, -6.16970520e-01, -1.18688670e+00,
4.12669583e-01, -6.32506884e-01, 3.76689141e-01, -7.31151938e-01,
-8.61225253e-01, -1.40990810e-01, 9.34100620e-01, 3.06539895e-01,
1.17837515e+00, -1.23356170e+00, -1.05707714e+00, -8.91636992e-02,
2.16570138e+00, 6.74286114e-01, -1.06661274e+00, -7.61404530e-02,
2.20714791e-01, -5.68685746e-01, 6.13274991e-01, -1.56446138e-01,
-2.99330718e-01, 1.26025679e+00, -1.70966090e+00, -9.61805342e-01,
-8.17308981e-01, -8.47681070e-01, -7.28753045e-01, 4.88475958e-01,
1.09653283e+00, 9.16041261e-01, -1.01956213e+00, -1.07417899e-01,
4.52265213e-01, 2.40002952e-01, 1.30574740e+00, -6.75334236e-01,
1.56319421e-01, -3.93230715e-01, 2.51075019e-01, -1.07889691e+00,
-9.28937721e-01, -7.30110860e-01, -5.63669311e-01, 1.54792327e+00,
1.17540191e+00, -2.12649671e-01, 1.72933294e-01, -1.59443602e+00,
-1.79292347e-01, 1.59614713e-01, 1.14568421e+00, 3.26804720e-01,
4.32890059e-01, 2.97762890e-01, 2.69001190e-01, -1.39675918e+00,
-4.16757668e-01, 1.43488680e+00, 8.23896443e-01, 4.94234499e-01,
6.67153092e-02, 6.59441396e-01, -9.44889409e-01, -1.58005956e+00,
-3.82086552e-01, 5.37923058e-01, 1.07829882e-01, 1.01395868e+00,
3.51450517e-01, 4.48421962e-02, 1.32748495e+00, 1.13237578e+00,
-9.80913012e-02, -1.10304986e+00, -9.07361492e-01, -1.61451138e-01,
-3.66811384e-01, 1.65776233e+00, -1.68013415e+00, -6.42577869e-02,
-1.06622649e+00, 1.16801869e-01, 3.82264833e-01, -4.04896974e-01,
5.30481414e-01, -1.98626941e-01, -1.79395613e-01, -4.17888725e-01])
y += dy * randn_arr
return t, y, dy, dict(period=period, transit_time=transit_time,
duration=duration, depth=depth)
def test_32bit_bug():
rand = np.random.default_rng(42)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
    y[np.abs((t + 1.0) % 2.0 - 1.0) < 0.08] = 1.0 - 0.1
y += 0.01 * rand.standard_normal(len(t))
model = BoxLeastSquares(t, y)
results = model.autopower(0.16)
assert_allclose(results.period[np.argmax(results.power)],
2.000412388152837)
periods = np.linspace(1.9, 2.1, 5)
results = model.power(periods, 0.16)
assert_allclose(
results.power,
[0.01723948, 0.0643028, 0.1338783, 0.09428816, 0.03577543], rtol=1.1e-7)
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_correct_model(data, objective):
t, y, dy, params = data
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 0.1,
np.log(params["period"]) + 0.1, 1000))
results = model.power(periods, params["duration"], objective=objective)
ind = np.argmax(results.power)
for k, v in params.items():
assert_allclose(results[k][ind], v, atol=0.01)
chi = (results.depth[ind]-params["depth"]) / results.depth_err[ind]
assert np.abs(chi) < 1
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
@pytest.mark.parametrize("offset", [False, True])
def test_fast_method(data, objective, offset):
t, y, dy, params = data
if offset:
t = t - params["transit_time"] + params["period"]
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 1,
np.log(params["period"]) + 1, 10))
durations = params["duration"]
results = model.power(periods, durations, objective=objective)
assert_allclose_blsresults(results, model.power(periods, durations,
method="slow",
objective=objective))
def test_input_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * y_unit, dy * u.one)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * u.one, dy * y_unit)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y, dy * y_unit)
model = BoxLeastSquares(t*t_unit, y * u.one, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y * y_unit, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y*y_unit)
assert model.dy is None
def test_period_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
model = BoxLeastSquares(t * t_unit, y * y_unit, dy)
p = model.autoperiod(params["duration"])
assert p.unit == t_unit
p = model.autoperiod(params["duration"] * 24 * u.hour)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
model.autoperiod(params["duration"] * u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], minimum_period=0.5*u.mag)
p = model.autoperiod(params["duration"], maximum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], maximum_period=0.5*u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5,
maximum_period=1.5)
p2 = model.autoperiod(params["duration"], maximum_period=0.5,
minimum_period=1.5)
assert_quantity_allclose(p, p2)
@pytest.mark.parametrize("method", ["fast", "slow"])
@pytest.mark.parametrize("with_err", [True, False])
@pytest.mark.parametrize("t_unit", [None, u.day])
@pytest.mark.parametrize("y_unit", [None, u.mag])
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_results_units(data, method, with_err, t_unit, y_unit, objective):
t, y, dy, params = data
periods = np.linspace(params["period"]-1.0, params["period"]+1.0, 3)
if t_unit is not None:
t = t * t_unit
if y_unit is not None:
y = y * y_unit
dy = dy * y_unit
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(periods, params["duration"], method=method,
objective=objective)
if t_unit is None:
assert not has_units(results.period)
assert not has_units(results.duration)
assert not has_units(results.transit_time)
else:
assert results.period.unit == t_unit
assert results.duration.unit == t_unit
assert results.transit_time.unit == t_unit
if y_unit is None:
assert not has_units(results.power)
assert not has_units(results.depth)
assert not has_units(results.depth_err)
assert not has_units(results.depth_snr)
assert not has_units(results.log_likelihood)
else:
assert results.depth.unit == y_unit
assert results.depth_err.unit == y_unit
assert results.depth_snr.unit == u.one
if dy is None:
assert results.log_likelihood.unit == y_unit * y_unit
if objective == "snr":
assert results.power.unit == u.one
else:
assert results.power.unit == y_unit * y_unit
else:
assert results.log_likelihood.unit == u.one
assert results.power.unit == u.one
def test_autopower(data):
t, y, dy, params = data
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model = BoxLeastSquares(t, y, dy)
period = model.autoperiod(duration)
results1 = model.power(period, duration)
results2 = model.autopower(duration)
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize("with_units", [True, False])
def test_model(data, with_units):
t, y, dy, params = data
# Compute the model using linear regression
A = np.zeros((len(t), 2))
p = params["period"]
dt = np.abs((t-params["transit_time"]+0.5*p) % p-0.5*p)
m_in = dt < 0.5*params["duration"]
A[~m_in, 0] = 1.0
A[m_in, 1] = 1.0
w = np.linalg.solve(np.dot(A.T, A / dy[:, None]**2),
np.dot(A.T, y / dy**2))
model_true = np.dot(A, w)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
model_true = model_true * u.mag
# Compute the model using the periodogram
pgram = BoxLeastSquares(t, y, dy)
model = pgram.model(t, p, params["duration"], params["transit_time"])
# Make sure that the transit mask is consistent with the model
transit_mask = pgram.transit_mask(t, p, params["duration"],
params["transit_time"])
transit_mask0 = (model - model.max()) < 0.0
assert_allclose(transit_mask, transit_mask0)
assert_quantity_allclose(model, model_true)
@pytest.mark.parametrize("shape", [(1,), (2,), (3,), (2, 3)])
def test_shapes(data, shape):
t, y, dy, params = data
duration = params["duration"]
model = BoxLeastSquares(t, y, dy)
period = np.empty(shape)
period.flat = np.linspace(params["period"]-1, params["period"]+1,
period.size)
if len(period.shape) > 1:
with pytest.raises(ValueError):
results = model.power(period, duration)
else:
results = model.power(period, duration)
for k, v in results.items():
if k == "objective":
continue
assert v.shape == shape
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("with_err", [True, False])
def test_compute_stats(data, with_units, with_err):
t, y, dy, params = data
y_unit = 1
if with_units:
y_unit = u.mag
t = t * u.day
y = y * u.mag
dy = dy * u.mag
params["period"] = params["period"] * u.day
params["duration"] = params["duration"] * u.day
params["transit_time"] = params["transit_time"] * u.day
params["depth"] = params["depth"] * u.mag
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(params["period"], params["duration"],
oversample=1000)
stats = model.compute_stats(params["period"], params["duration"],
params["transit_time"])
# Test the calculated transit times
tt = params["period"] * np.arange(int(t.max() / params["period"]) + 1)
tt += params["transit_time"]
assert_quantity_allclose(tt, stats["transit_times"])
# Test that the other parameters are consistent with the periodogram
assert_allclose(stats["per_transit_count"], [9, 7, 7, 7, 8])
assert_quantity_allclose(np.sum(stats["per_transit_log_likelihood"]),
results["log_likelihood"])
assert_quantity_allclose(stats["depth"][0], results["depth"])
# Check the half period result
results_half = model.power(0.5*params["period"], params["duration"],
oversample=1000)
assert_quantity_allclose(stats["depth_half"][0], results_half["depth"])
# Skip the uncertainty tests when the input errors are None
if not with_err:
assert_quantity_allclose(stats["harmonic_amplitude"],
0.029945029964964204 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-0.5875918155223113 * y_unit * y_unit)
return
assert_quantity_allclose(stats["harmonic_amplitude"],
0.033027988742275853 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-12407.505922833765)
assert_quantity_allclose(stats["depth"][1], results["depth_err"])
assert_quantity_allclose(stats["depth_half"][1], results_half["depth_err"])
for f, k in zip((1.0, 1.0, 1.0, 0.0),
("depth", "depth_even", "depth_odd", "depth_phased")):
res = np.abs((stats[k][0]-f*params["depth"]) / stats[k][1])
assert res < 1, f'f={f}, k={k}, res={res}'
def test_negative_times(data):
t, y, dy, params = data
mu = np.mean(t)
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model1 = BoxLeastSquares(t, y, dy)
results1 = model1.autopower(duration)
# Compute the periodogram with offset (negative) times
model2 = BoxLeastSquares(t - mu, y, dy)
results2 = model2.autopower(duration)
# Shift the transit times back into the unshifted coordinates
results2.transit_time = (results2.transit_time + mu) % results2.period
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize('timedelta', [False, True])
def test_absolute_times(data, timedelta):
# Make sure that we handle absolute times correctly. We also check that
# TimeDelta works properly when timedelta is True.
# The example data uses relative times
t, y, dy, params = data
# Add units
t = t * u.day
y = y * u.mag
dy = dy * u.mag
# We now construct a set of absolute times but keeping the rest the same.
start = Time('2019-05-04T12:34:56')
trel = TimeDelta(t) if timedelta else t
t = trel + start
# and we set up two instances of BoxLeastSquares, one with absolute and one
# with relative times.
bls1 = BoxLeastSquares(t, y, dy)
bls2 = BoxLeastSquares(trel, y, dy)
results1 = bls1.autopower(0.16 * u.day)
results2 = bls2.autopower(0.16 * u.day)
# All the results should match except transit time which should be
# absolute instead of relative in the first case.
for key in results1:
if key == 'transit_time':
assert_quantity_allclose((results1[key] - start).to(u.day), results2[key])
elif key == 'objective':
assert results1[key] == results2[key]
else:
assert_allclose(results1[key], results2[key])
# Check that model evaluation works fine
model1 = bls1.model(t, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
model2 = bls2.model(trel, 0.2 * u.day, 0.05 * u.day, TimeDelta(1 * u.day))
assert_quantity_allclose(model1, model2)
# Check model validation
with pytest.raises(TypeError) as exc:
bls1.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls1.model(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('t_model was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.model(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
with pytest.raises(TypeError) as exc:
bls2.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('t_model was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
# Check compute_stats
stats1 = bls1.compute_stats(0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
stats2 = bls2.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
for key in stats1:
if key == 'transit_times':
assert_quantity_allclose((stats1[key] - start).to(u.day), stats2[key], atol=1e-10 * u.day) # noqa: E501
elif key.startswith('depth'):
for value1, value2 in zip(stats1[key], stats2[key]):
assert_quantity_allclose(value1, value2)
else:
assert_allclose(stats1[key], stats2[key])
# Check compute_stats validation
with pytest.raises(TypeError) as exc:
bls1.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.compute_stats(0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
# Check transit_mask
mask1 = bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
mask2 = bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert_equal(mask1, mask2)
# Check transit_mask validation
with pytest.raises(TypeError) as exc:
bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls1.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('t was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
with pytest.raises(TypeError) as exc:
bls2.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('t was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
def test_transit_time_in_range(data):
t, y, dy, params = data
t_ref = 10230.0
t2 = t + t_ref
bls1 = BoxLeastSquares(t, y, dy)
bls2 = BoxLeastSquares(t2, y, dy)
results1 = bls1.autopower(0.16)
results2 = bls2.autopower(0.16)
assert np.allclose(results1.transit_time, results2.transit_time - t_ref)
assert np.all(results1.transit_time >= t.min())
assert np.all(results1.transit_time <= t.max())
assert np.all(results2.transit_time >= t2.min())
assert np.all(results2.transit_time <= t2.max())
|
32f9f9154089b15fba2df19ffc28b401f5974b38c2aa417bd5335c9d20e57945 | import numpy as np
from .mle import design_matrix
def lombscargle_chi2(t, y, dy, frequency, normalization='standard',
fit_mean=True, center_data=True, nterms=1):
"""Lomb-Scargle Periodogram
This implements a chi-squared-based periodogram, which is relatively slow
but useful for validating the faster algorithms in the package.
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
        broadcastable to the same shape. None of these should be
        `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
nterms : int, optional
Number of Fourier terms in the fit
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. 1982, ApJ 263:835-853
"""
if dy is None:
dy = 1
t, y, dy = np.broadcast_arrays(t, y, dy)
frequency = np.asarray(frequency)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
w = dy ** -2.0
w /= w.sum()
# if fit_mean is true, centering the data now simplifies the math below.
if center_data or fit_mean:
yw = (y - np.dot(w, y)) / dy
else:
yw = y / dy
chi2_ref = np.dot(yw, yw)
# compute the unnormalized model chi2 at each frequency
def compute_power(f):
X = design_matrix(t, f, dy=dy, bias=fit_mean, nterms=nterms)
XTX = np.dot(X.T, X)
XTy = np.dot(X.T, yw)
return np.dot(XTy.T, np.linalg.solve(XTX, XTy))
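    # For weighted least squares with design matrix X (already scaled by
    # 1/dy), XTy^T (X^T X)^{-1} XTy is the part of chi2_ref explained by
    # the best-fit linear model, i.e. chi2_ref - chi2_model.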
p = np.array([compute_power(f) for f in frequency])
if normalization == 'psd':
p *= 0.5
elif normalization == 'model':
p /= (chi2_ref - p)
elif normalization == 'log':
p = -np.log(1 - p / chi2_ref)
elif normalization == 'standard':
p /= chi2_ref
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
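# Minimal usage sketch (t, y, dy are 1-d arrays; frequencies in 1/[t]):
#
#     freq = np.linspace(0.1, 10.0, 1000)
#     power = lombscargle_chi2(t, y, dy, freq, nterms=2)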
|
b271b6da3bebaace3aa75c05ec3129e218071611c871ff6bdf60efcdd93869f8 | import numpy as np
from .utils import trig_sum
def lombscargle_fast(t, y, dy, f0, df, Nf,
center_data=True, fit_mean=True,
normalization='standard',
use_fft=True, trig_sum_kwds=None):
"""Fast Lomb-Scargle Periodogram
This implements the Press & Rybicki method [1]_ for fast O[N log(N)]
Lomb-Scargle periodograms.
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
        broadcastable to the same shape. None of these should be
        `~astropy.units.Quantity`.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
center_data : bool (default=True)
Specify whether to subtract the mean of the data before the fit
fit_mean : bool (default=True)
If True, then compute the floating-mean periodogram; i.e. let the mean
vary with the fit.
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
use_fft : bool (default=True)
If True, then use the Press & Rybicki O[NlogN] algorithm to compute
the result. Otherwise, use a slower O[N^2] algorithm
trig_sum_kwds : dict or None, optional
extra keyword arguments to pass to the ``trig_sum`` utility.
Options are ``oversampling`` and ``Mfft``. See documentation
of ``trig_sum`` for details.
Returns
-------
power : ndarray
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
Notes
-----
Note that the ``use_fft=True`` algorithm is an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may not be useful.
References
----------
    .. [1] Press W.H. and Rybicki, G.B., "Fast algorithm for spectral analysis
        of unevenly sampled data". ApJ 338:277, 1989
.. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [3] W. Press et al, Numerical Recipes in C (2002)
"""
if dy is None:
dy = 1
# Validate and setup input data
t, y, dy = np.broadcast_arrays(t, y, dy)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
# Validate and setup frequency grid
if f0 < 0:
raise ValueError("Frequencies must be positive")
if df <= 0:
raise ValueError("Frequency steps must be positive")
if Nf <= 0:
raise ValueError("Number of frequencies must be positive")
w = dy ** -2.0
w /= w.sum()
# Center the data. Even if we're fitting the offset,
# this step makes the expressions below more succinct
if center_data or fit_mean:
y = y - np.dot(w, y)
# set up arguments to trig_sum
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
# ----------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Sh, Ch = trig_sum(t, w * y, **kwargs)
S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)
if fit_mean:
S, C = trig_sum(t, w, **kwargs)
tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
else:
tan_2omega_tau = S2 / C2
# This is what we're computing below; the straightforward way is slower
# and less stable, so we use trig identities instead
#
# omega_tau = 0.5 * np.arctan(tan_2omega_tau)
# S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
# Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)
S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
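    # Half-angle identities justify the two lines above: omega*tau =
    # 0.5*arctan(tan_2omega_tau) lies in (-pi/4, pi/4), so cos(omega*tau)
    # is always positive and sin(omega*tau) shares the sign of
    # sin(2*omega*tau).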
# ----------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
YY = np.dot(w, y ** 2)
YC = Ch * Cw + Sh * Sw
YS = Sh * Cw - Ch * Sw
CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
if fit_mean:
CC -= (C * Cw + S * Sw) ** 2
SS -= (S * Cw - C * Sw) ** 2
power = (YC * YC / CC + YS * YS / SS)
if normalization == 'standard':
power /= YY
elif normalization == 'model':
power /= YY - power
elif normalization == 'log':
power = -np.log(1 - power / YY)
elif normalization == 'psd':
power *= 0.5 * (dy ** -2.0).sum()
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return power
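# Usage sketch: the frequency grid is implicit, f = f0 + df * np.arange(Nf),
# e.g.
#
#     power = lombscargle_fast(t, y, dy, f0=0.1, df=0.01, Nf=1000)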
|
1908c0c11903c366c8c110f51f857d83c6766cff5d2671318ed389d152381db3 | import numpy as np
def lombscargle_scipy(t, y, frequency, normalization='standard',
center_data=True):
"""Lomb-Scargle Periodogram
This is a wrapper of ``scipy.signal.lombscargle`` for computation of the
Lomb-Scargle periodogram. This is a relatively fast version of the naive
O[N^2] algorithm, but cannot handle heteroskedastic errors.
Parameters
----------
t, y : array-like
        times and values of the data points. These should be
        broadcastable to the same shape. None of these should be
        `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data.
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. 1982, ApJ 263:835-853
"""
try:
from scipy import signal
except ImportError:
raise ImportError("scipy must be installed to use lombscargle_scipy")
t, y = np.broadcast_arrays(t, y)
# Scipy requires floating-point input
t = np.asarray(t, dtype=float)
y = np.asarray(y, dtype=float)
frequency = np.asarray(frequency, dtype=float)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
if center_data:
y = y - y.mean()
# Note: scipy input accepts angular frequencies
p = signal.lombscargle(t, y, 2 * np.pi * frequency)
if normalization == 'psd':
pass
elif normalization == 'standard':
p *= 2 / (t.size * np.mean(y ** 2))
elif normalization == 'log':
p = -np.log(1 - 2 * p / (t.size * np.mean(y ** 2)))
elif normalization == 'model':
p /= 0.5 * t.size * np.mean(y ** 2) - p
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
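# Normalization cross-check sketch (illustrative, not part of the original
# module): with centering disabled, the 'standard' output is exactly the
# rescaled 'psd' output, matching the scaling applied above. Assumes ``t``,
# ``y`` and a frequency grid ``freq`` are defined:
#
#     p_psd = lombscargle_scipy(t, y, freq, normalization='psd',
#                               center_data=False)
#     p_std = lombscargle_scipy(t, y, freq, normalization='standard',
#                               center_data=False)
#     assert np.allclose(p_std, 2 * p_psd / (t.size * np.mean(y ** 2)))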
|
809c2809386143c3abcbf69b5e936243741694c85044e41d1b682f0e95b9f926 | import numpy as np
def lombscargle_slow(t, y, dy, frequency, normalization='standard',
fit_mean=True, center_data=True):
"""Lomb-Scargle Periodogram
This is a pure-python implementation of the original Lomb-Scargle formalism
(e.g. [1]_, [2]_), with the addition of the floating mean (e.g. [3]_)
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
        broadcastable to the same shape. None of these should be `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] W. Press et al, Numerical Recipes in C (2002)
.. [2] Scargle, J.D. 1982, ApJ 263:835-853
.. [3] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
"""
if dy is None:
dy = 1
t, y, dy = np.broadcast_arrays(t, y, dy)
frequency = np.asarray(frequency)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
w = dy ** -2.0
w /= w.sum()
# if fit_mean is true, centering the data now simplifies the math below.
if fit_mean or center_data:
y = y - np.dot(w, y)
omega = 2 * np.pi * frequency
omega = omega.ravel()[np.newaxis, :]
# make following arrays into column vectors
t, y, dy, w = map(lambda x: x[:, np.newaxis], (t, y, dy, w))
sin_omega_t = np.sin(omega * t)
cos_omega_t = np.cos(omega * t)
# compute time-shift tau
    # S2 = np.dot(w.T, np.sin(2 * omega * t))
    S2 = 2 * np.dot(w.T, sin_omega_t * cos_omega_t)
    # C2 = np.dot(w.T, np.cos(2 * omega * t))
    C2 = 2 * np.dot(w.T, 0.5 - sin_omega_t ** 2)
if fit_mean:
S = np.dot(w.T, sin_omega_t)
C = np.dot(w.T, cos_omega_t)
S2 -= (2 * S * C)
C2 -= (C * C - S * S)
# compute components needed for the fit
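    # tau is defined through tan(2*omega*tau) = S2 / C2 (Zechmeister &
    # Kurster 2009); arctan2 resolves the quadrant from the signs of S2, C2.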
omega_t_tau = omega * t - 0.5 * np.arctan2(S2, C2)
sin_omega_t_tau = np.sin(omega_t_tau)
cos_omega_t_tau = np.cos(omega_t_tau)
Y = np.dot(w.T, y)
wy = w * y
YCtau = np.dot(wy.T, cos_omega_t_tau)
YStau = np.dot(wy.T, sin_omega_t_tau)
CCtau = np.dot(w.T, cos_omega_t_tau * cos_omega_t_tau)
SStau = np.dot(w.T, sin_omega_t_tau * sin_omega_t_tau)
if fit_mean:
Ctau = np.dot(w.T, cos_omega_t_tau)
Stau = np.dot(w.T, sin_omega_t_tau)
YCtau -= Y * Ctau
YStau -= Y * Stau
CCtau -= Ctau * Ctau
SStau -= Stau * Stau
p = (YCtau * YCtau / CCtau + YStau * YStau / SStau)
YY = np.dot(w.T, y * y)
if normalization == 'standard':
p /= YY
elif normalization == 'model':
p /= YY - p
elif normalization == 'log':
p = -np.log(1 - p / YY)
elif normalization == 'psd':
p *= 0.5 * (dy ** -2.0).sum()
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p.ravel()
|
3004860c342c73c3cca2eaa36e757f66f6b2cc3248cfb867b02726b194936ada | import numpy as np
from .utils import trig_sum
def lombscargle_fastchi2(t, y, dy, f0, df, Nf, normalization='standard',
fit_mean=True, center_data=True, nterms=1,
use_fft=True, trig_sum_kwds=None):
"""Lomb-Scargle Periodogram
This implements a fast chi-squared periodogram using the algorithm
outlined in [4]_. The result is identical to the standard Lomb-Scargle
periodogram. The advantage of this algorithm is the
ability to compute multiterm periodograms relatively quickly.
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
        broadcastable to the same shape. None of these should be `~astropy.units.Quantity`.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
nterms : int, optional
Number of Fourier terms in the fit
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. ApJ 263:835-853 (1982)
.. [4] Palmer, J. ApJ 695:496-502 (2009)
"""
if nterms == 0 and not fit_mean:
raise ValueError("Cannot have nterms = 0 without fitting bias")
if dy is None:
dy = 1
# Validate and setup input data
t, y, dy = np.broadcast_arrays(t, y, dy)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
# Validate and setup frequency grid
if f0 < 0:
raise ValueError("Frequencies must be positive")
if df <= 0:
raise ValueError("Frequency steps must be positive")
if Nf <= 0:
raise ValueError("Number of frequencies must be positive")
w = dy ** -2.0
ws = np.sum(w)
# if fit_mean is true, centering the data now simplifies the math below.
if center_data or fit_mean:
y = y - np.dot(w, y) / ws
yw = y / dy
chi2_ref = np.dot(yw, yw)
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
# Here we build-up the matrices XTX and XTy using pre-computed
# sums. The relevant identities are
# 2 sin(mx) sin(nx) = cos(m-n)x - cos(m+n)x
# 2 cos(mx) cos(nx) = cos(m-n)x + cos(m+n)x
# 2 sin(mx) cos(nx) = sin(m-n)x + sin(m+n)x
yws = np.sum(y * w)
SCw = [(np.zeros(Nf), ws * np.ones(Nf))]
SCw.extend([trig_sum(t, w, freq_factor=i, **kwargs)
for i in range(1, 2 * nterms + 1)])
Sw, Cw = zip(*SCw)
SCyw = [(np.zeros(Nf), yws * np.ones(Nf))]
SCyw.extend([trig_sum(t, w * y, freq_factor=i, **kwargs)
for i in range(1, nterms + 1)])
Syw, Cyw = zip(*SCyw)
# Now create an indexing scheme so we can quickly
# build-up matrices at each frequency
order = [('C', 0)] if fit_mean else []
order.extend(sum(([('S', i), ('C', i)]
for i in range(1, nterms + 1)), []))
funcs = dict(S=lambda m, i: Syw[m][i],
C=lambda m, i: Cyw[m][i],
SS=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] - Cw[m + n][i]),
CC=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] + Cw[m + n][i]),
SC=lambda m, n, i: 0.5 * (np.sign(m - n) * Sw[abs(m - n)][i]
+ Sw[m + n][i]),
CS=lambda m, n, i: 0.5 * (np.sign(n - m) * Sw[abs(n - m)][i]
+ Sw[n + m][i]))
def compute_power(i):
XTX = np.array([[funcs[A[0] + B[0]](A[1], B[1], i)
for A in order]
for B in order])
XTy = np.array([funcs[A[0]](A[1], i) for A in order])
return np.dot(XTy.T, np.linalg.solve(XTX, XTy))
p = np.array([compute_power(i) for i in range(Nf)])
if normalization == 'psd':
p *= 0.5
elif normalization == 'standard':
p /= chi2_ref
elif normalization == 'log':
p = -np.log(1 - p / chi2_ref)
elif normalization == 'model':
p /= chi2_ref - p
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
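# Illustrative sketch (not part of the original module): the multiterm case is
# where this algorithm pays off relative to the single-term fast method.
# Assumes ``t``, ``y``, ``dy`` are defined; grid convention
# f = f0 + df * arange(Nf):
#
#     power = lombscargle_fastchi2(t, y, dy, f0=0.01, df=0.005, Nf=200,
#                                  nterms=3)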
|
15a4162c218338b6082cf1b4a6d28fa2616846822a4463e3a5233f1612a0853e | import numpy as np
def design_matrix(t, frequency, dy=None, bias=True, nterms=1):
"""Compute the Lomb-Scargle design matrix at the given frequency
This is the matrix X such that the periodic model at the given frequency
can be expressed :math:`\\hat{y} = X \\theta`.
Parameters
----------
t : array-like, shape=(n_times,)
times at which to compute the design matrix
frequency : float
frequency for the design matrix
dy : float or array-like, optional
data uncertainties: should be broadcastable with `t`
bias : bool (default=True)
If true, include a bias column in the matrix
nterms : int (default=1)
Number of Fourier terms to include in the model
Returns
-------
X : ndarray, shape=(n_times, n_parameters)
The design matrix, where n_parameters = bool(bias) + 2 * nterms
"""
t = np.asarray(t)
frequency = np.asarray(frequency)
if t.ndim != 1:
raise ValueError("t should be one dimensional")
if frequency.ndim != 0:
raise ValueError("frequency must be a scalar")
if nterms == 0 and not bias:
raise ValueError("cannot have nterms=0 and no bias")
if bias:
cols = [np.ones_like(t)]
else:
cols = []
for i in range(1, nterms + 1):
cols.append(np.sin(2 * np.pi * i * frequency * t))
cols.append(np.cos(2 * np.pi * i * frequency * t))
XT = np.vstack(cols)
if dy is not None:
XT /= dy
return np.transpose(XT)
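# Shape sketch (illustrative, not part of the original module): with a bias
# column and nterms=2, the matrix has bool(bias) + 2 * nterms = 5 columns.
#
#     X = design_matrix(np.linspace(0, 1, 10), 1.0, bias=True, nterms=2)
#     X.shape    # -> (10, 5)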
def periodic_fit(t, y, dy, frequency, t_fit,
center_data=True, fit_mean=True, nterms=1):
"""Compute the Lomb-Scargle model fit at a given frequency
Parameters
----------
t, y, dy : float or array-like
The times, observations, and uncertainties to fit
frequency : float
The frequency at which to compute the model
t_fit : float or array-like
The times at which the fit should be computed
center_data : bool (default=True)
If True, center the input data before applying the fit
fit_mean : bool (default=True)
If True, include the bias as part of the model
nterms : int (default=1)
The number of Fourier terms to include in the fit
Returns
-------
y_fit : ndarray
The model fit evaluated at each value of t_fit
"""
t, y, frequency = map(np.asarray, (t, y, frequency))
if dy is None:
dy = np.ones_like(y)
else:
dy = np.asarray(dy)
t_fit = np.asarray(t_fit)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if t_fit.ndim != 1:
raise ValueError("t_fit should be one dimensional")
if frequency.ndim != 0:
raise ValueError("frequency should be a scalar")
if center_data:
w = dy ** -2.0
y_mean = np.dot(y, w) / w.sum()
y = (y - y_mean)
else:
y_mean = 0
X = design_matrix(t, frequency, dy=dy, bias=fit_mean, nterms=nterms)
theta_MLE = np.linalg.solve(np.dot(X.T, X),
np.dot(X.T, y / dy))
X_fit = design_matrix(t_fit, frequency, bias=fit_mean, nterms=nterms)
return y_mean + np.dot(X_fit, theta_MLE)
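# Usage sketch (illustrative, not part of the original module): evaluate the
# model at a previously located peak frequency ``f_best`` on a fine time grid,
# assuming ``t``, ``y``, ``dy`` are defined:
#
#     t_fit = np.linspace(t.min(), t.max(), 500)
#     y_fit = periodic_fit(t, y, dy, frequency=f_best, t_fit=t_fit, nterms=1)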
|
e1ffb979d82e2574dc004fe57772639c588ea7d753828bc10cb6ecdd1307aa0b | import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy.timeseries.periodograms.lombscargle.implementations.utils import extirpolate, bitceil, trig_sum
@pytest.mark.parametrize('N', 2 ** np.arange(1, 12))
@pytest.mark.parametrize('offset', [-1, 0, 1])
def test_bitceil(N, offset):
assert_equal(bitceil(N + offset),
int(2 ** np.ceil(np.log2(N + offset))))
@pytest.fixture
def extirpolate_data():
rng = np.random.default_rng(0)
x = 100 * rng.random(50)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate(N, M, extirpolate_data):
x, y, f = extirpolate_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat), rtol=1.5e-5)
@pytest.fixture
def extirpolate_int_data():
rng = np.random.default_rng(0)
x = 100 * rng.random(50)
x[:25] = x[:25].astype(int)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate_with_integers(N, M, extirpolate_int_data):
x, y, f = extirpolate_int_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat), rtol=1.7e-5)
@pytest.fixture
def trig_sum_data():
rng = np.random.default_rng(0)
t = 10 * rng.random(50)
h = np.sin(t)
return t, h
@pytest.mark.parametrize('f0', [0, 1])
@pytest.mark.parametrize('adjust_t', [True, False])
@pytest.mark.parametrize('freq_factor', [1, 2])
@pytest.mark.parametrize('df', [0.1])
def test_trig_sum(f0, adjust_t, freq_factor, df, trig_sum_data):
t, h = trig_sum_data
tfit = t - t.min() if adjust_t else t
S1, C1 = trig_sum(tfit, h, df, N=1000, use_fft=True,
f0=f0, freq_factor=freq_factor, oversampling=10)
S2, C2 = trig_sum(tfit, h, df, N=1000, use_fft=False,
f0=f0, freq_factor=freq_factor, oversampling=10)
assert_allclose(S1, S2, atol=1E-2)
assert_allclose(C1, C2, atol=1E-2)
|
87ddd57f582333e396136b1b6ccb0db8cd86a6f141993f7f4eac0dd1bf1ba6e2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Comparison functions for `astropy.cosmology.Cosmology`.
This module is **NOT** public API. To use these functions, import them from
the top-level namespace -- :mod:`astropy.cosmology`.
This module will be moved.
"""
from __future__ import annotations
import functools
import inspect
from typing import Any, Callable, Tuple, Union
import numpy as np
from numpy import False_, True_, ndarray
from astropy import table
from astropy.cosmology.core import Cosmology
__all__ = [] # Nothing is scoped here
##############################################################################
# PARAMETERS
_FormatType = Union[bool, None, str]
_FormatsT = Union[_FormatType, Tuple[_FormatType, ...]]
_CompFnT = Callable[[Any, _FormatType], Cosmology]
_COSMO_AOK: set[Any] = {None, True_, False_, "astropy.cosmology"}
# The numpy bool also catches real bool for ops "==" and "in"
##############################################################################
# UTILITIES
class _CosmologyWrapper:
"""
A private wrapper class to hide things from :mod:`numpy`.
This should never be exposed to the user.
"""
__slots__ = ("wrapped", )
    # Use less memory and speed up initialization.
_cantbroadcast: tuple[type, ...] = (table.Row, table.Table)
"""
Have to deal with things that do not broadcast well. e.g.
`~astropy.table.Row` cannot be used in an array, even if ``dtype=object``
and will raise a segfault when used in a `numpy.ufunc`.
"""
wrapped: Any
def __init__(self, wrapped: Any) -> None:
self.wrapped = wrapped
# TODO! when py3.9+ use @functools.partial(np.frompyfunc, nin=2, nout=1)
# TODO! https://github.com/numpy/numpy/issues/9477 segfaults on astropy.row
# and np.vectorize can't coerce table to dtypes
def _wrap_to_ufunc(nin: int, nout: int) -> Callable[[_CompFnT], np.ufunc]:
def wrapper(pyfunc: _CompFnT) -> np.ufunc:
        ufunc = np.frompyfunc(pyfunc, nin, nout)
return ufunc
return wrapper
@_wrap_to_ufunc(2, 1)
def _parse_format(cosmo: Any, format: _FormatType, /,) -> Cosmology:
"""Parse Cosmology-like input into Cosmologies, given a format hint.
Parameters
----------
cosmo : |Cosmology|-like, positional-only
|Cosmology| to parse.
format : bool or None or str, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting.
Returns
-------
|Cosmology| or generator thereof
Raises
------
TypeError
If ``cosmo`` is not a |Cosmology| and ``format`` equals `False`.
TypeError
If ``cosmo`` is a |Cosmology| and ``format`` is not `None` or equal to
`True`.
"""
# Deal with private wrapper
if isinstance(cosmo, _CosmologyWrapper):
cosmo = cosmo.wrapped
# Shortcut if already a cosmology
if isinstance(cosmo, Cosmology):
if format not in _COSMO_AOK:
allowed = '/'.join(map(str, _COSMO_AOK))
raise ValueError(f"for parsing a Cosmology, 'format' must be {allowed}, not {format}")
return cosmo
# Convert, if allowed.
elif format == False_: # catches False and False_
raise TypeError(f"if 'format' is False, arguments must be a Cosmology, not {cosmo}")
else:
format = None if format == True_ else format # str->str, None/True/True_->None
out = Cosmology.from_format(cosmo, format=format) # this can error!
return out
def _parse_formats(*cosmos: object, format: _FormatsT) -> ndarray:
"""Parse Cosmology-like to |Cosmology|, using provided formats.
``format`` is broadcast to match the shape of the cosmology arguments. Note
that the cosmology arguments are not broadcast against ``format``, so it
cannot determine the output shape.
Parameters
----------
*cosmos : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by the corresponding ``format``.
format : bool or None or str or array-like thereof, positional-only
Whether to allow, before equivalence is checked, the object to be
converted to a |Cosmology|. This allows, e.g. a |Table| to be equivalent
to a |Cosmology|. `False` (default) will not allow conversion. `True` or
`None` will, and will use the auto-identification to try to infer the
correct format. A `str` is assumed to be the correct format to use when
converting. Note ``format`` is broadcast as an object array to match the
shape of ``cosmos`` so ``format`` cannot determine the output shape.
Raises
------
TypeError
If any in ``cosmos`` is not a |Cosmology| and the corresponding
``format`` equals `False`.
"""
formats = np.broadcast_to(np.array(format, dtype=object), len(cosmos))
# parse each cosmo & format
# Have to deal with things that do not broadcast well.
# astropy.row cannot be used in an array, even if dtype=object
# and will raise a segfault when used in a ufunc.
towrap = (isinstance(cosmo, _CosmologyWrapper._cantbroadcast) for cosmo in cosmos)
wcosmos = [c if not wrap else _CosmologyWrapper(c) for c, wrap in zip(cosmos, towrap)]
return _parse_format(wcosmos, formats)
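# Illustrative sketch (not part of this module's API): parsing a mixed pair
# where the first argument is already a |Cosmology| and the second is a table
# that must be converted (``Planck18`` assumed imported from
# `astropy.cosmology`):
#
#     _parse_formats(Planck18, Planck18.to_format("astropy.table"),
#                    format=[False, True])
#     # -> object ndarray of two Cosmology instances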
def _comparison_decorator(pyfunc: Callable[..., Any]) -> Callable[..., Any]:
"""Decorator to make wrapper function that parses |Cosmology|-like inputs.
Parameters
----------
pyfunc : Python function object
An arbitrary Python function.
Returns
-------
callable[..., Any]
Wrapped `pyfunc`, as described above.
Notes
-----
All decorated functions should add the following to 'Parameters'.
format : bool or None or str or array-like thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
"""
sig = inspect.signature(pyfunc)
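    # Count positional-only parameters; ``p.kind == 0`` works because
    # inspect._ParameterKind.POSITIONAL_ONLY is the IntEnum value 0.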
nin = sum(p.kind == 0 for p in sig.parameters.values())
# Make wrapper function that parses cosmology-like inputs
@functools.wraps(pyfunc)
def wrapper(*cosmos: Any, format: _FormatsT = False, **kwargs: Any) -> bool:
if len(cosmos) > nin:
raise TypeError(f"{wrapper.__wrapped__.__name__} takes {nin} positional"
f" arguments but {len(cosmos)} were given")
# Parse cosmologies to format. Only do specified number.
cosmos = _parse_formats(*cosmos, format=format)
# Evaluate pyfunc, erroring if didn't match specified number.
result = wrapper.__wrapped__(*cosmos, **kwargs)
        # Return, casting to the correct type where casting is possible.
return result
return wrapper
##############################################################################
# COMPARISON FUNCTIONS
@_comparison_decorator
def cosmology_equal(cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool=False) -> bool:
r"""Return element-wise equality check on the cosmologies.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead of a |Cosmology|. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. Note ``format``
is broadcast as an object array to match the shape of ``cosmos`` so
``format`` cannot determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
Examples
--------
Assuming the following imports
>>> import astropy.units as u
>>> from astropy.cosmology import FlatLambdaCDM
Two identical cosmologies are equal.
>>> cosmo1 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmo2 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3)
>>> cosmology_equal(cosmo1, cosmo2)
True
And cosmologies with different parameters are not.
>>> cosmo3 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.4)
>>> cosmology_equal(cosmo1, cosmo3)
False
Two cosmologies may be equivalent even if not of the same class. In these
examples the |LambdaCDM| has :attr:`~astropy.cosmology.LambdaCDM.Ode0` set
to the same value calculated in |FlatLambdaCDM|.
>>> from astropy.cosmology import LambdaCDM
>>> cosmo3 = LambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, 0.7)
>>> cosmology_equal(cosmo1, cosmo3)
False
>>> cosmology_equal(cosmo1, cosmo3, allow_equivalent=True)
True
While in this example, the cosmologies are not equivalent.
>>> cosmo4 = FlatLambdaCDM(70 * (u.km/u.s/u.Mpc), 0.3, Tcmb0=3 * u.K)
>>> cosmology_equal(cosmo3, cosmo4, allow_equivalent=True)
False
Also, using the keyword argument, the notion of equality is extended to any
Python object that can be converted to a |Cosmology|.
>>> mapping = cosmo2.to_format("mapping")
>>> cosmology_equal(cosmo1, mapping, format=True)
True
Either (or both) arguments can be |Cosmology|-like.
>>> cosmology_equal(mapping, cosmo2, format=True)
True
The list of valid formats, e.g. the |Table| in this example, may be checked
with ``Cosmology.from_format.list_formats()``.
As can be seen in the list of formats, not all formats can be
auto-identified by ``Cosmology.from_format.registry``. Objects of these
kinds can still be checked for equality, but the correct format string must
be used.
>>> yml = cosmo2.to_format("yaml")
>>> cosmology_equal(cosmo1, yml, format=(None, "yaml"))
True
This also works with an array of ``format`` matching the number of
cosmologies.
>>> cosmology_equal(mapping, yml, format=[True, "yaml"])
True
"""
# Check parameter equality
if not allow_equivalent:
eq = (cosmo1 == cosmo2)
else:
# Check parameter equivalence
# The options are: 1) same class & parameters; 2) same class, different
# parameters; 3) different classes, equivalent parameters; 4) different
# classes, different parameters. (1) & (3) => True, (2) & (4) => False.
eq = cosmo1.__equiv__(cosmo2)
if eq is NotImplemented:
eq = cosmo2.__equiv__(cosmo1) # that failed, try from 'other'
eq = False if eq is NotImplemented else eq
# TODO! include equality check of metadata
return eq
@_comparison_decorator
def _cosmology_not_equal(cosmo1: Any, cosmo2: Any, /, *, allow_equivalent: bool=False) -> bool:
r"""Return element-wise cosmology non-equality check.
.. note::
Cosmologies are currently scalar in their parameters.
Parameters
----------
cosmo1, cosmo2 : |Cosmology|-like
The objects to compare. Must be convertible to |Cosmology|, as specified
by ``format``.
format : bool or None or str or tuple thereof, optional keyword-only
Whether to allow the arguments to be converted to a |Cosmology|. This
        allows, e.g. a |Table| to be given instead of a Cosmology. `False`
(default) will not allow conversion. `True` or `None` will, and will use
the auto-identification to try to infer the correct format. A `str` is
assumed to be the correct format to use when converting. ``format`` is
broadcast to match the shape of the cosmology arguments. Note that the
cosmology arguments are not broadcast against ``format``, so it cannot
determine the output shape.
allow_equivalent : bool, optional keyword-only
Whether to allow cosmologies to be equal even if not of the same class.
For example, an instance of |LambdaCDM| might have :math:`\Omega_0=1`
and :math:`\Omega_k=0` and therefore be flat, like |FlatLambdaCDM|.
See Also
--------
astropy.cosmology.cosmology_equal
Element-wise equality check, with argument conversion to Cosmology.
"""
neq = not cosmology_equal(cosmo1, cosmo2, allow_equivalent=allow_equivalent)
# TODO! it might eventually be worth the speed boost to implement some of
# the internals of cosmology_equal here, but for now it's a hassle.
return neq
|
a2888b13f333ed5044381cd7b61cb6f913ef433a040d5b8a2fd22fba84e3a69d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions for `astropy.cosmology`."""
from .comparison import cosmology_equal
# _z_at_scalar_value is imported for backwards compat
from .optimize import _z_at_scalar_value, z_at_value # F401, F403
__all__ = ["z_at_value", "cosmology_equal"]
|
1ec9a8baf80ea6d672b064d0f7d695c01aea4f960b6ffbb5a392661ca42cc735 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Convenience functions for `astropy.cosmology`.
"""
import warnings
import numpy as np
from astropy.cosmology import units as cu
from astropy.cosmology.core import CosmologyError
from astropy.units import Quantity
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['z_at_value']
__doctest_requires__ = {'*': ['scipy']}
def _z_at_scalar_value(func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500,
method='Brent', bracket=None, verbose=False):
"""
Find the redshift ``z`` at which ``func(z) = fval``.
See :func:`astropy.cosmology.funcs.z_at_value`.
"""
from scipy.optimize import minimize_scalar
opt = {'maxiter': maxfun}
# Assume custom methods support the same options as default; otherwise user
# will see warnings.
if str(method).lower() == 'bounded':
opt['xatol'] = ztol
if bracket is not None:
warnings.warn(f"Option 'bracket' is ignored by method {method}.")
bracket = None
else:
opt['xtol'] = ztol
# fval falling inside the interval of bracketing function values does not
# guarantee it has a unique solution, but for Standard Cosmological
# quantities normally should (being monotonic or having a single extremum).
# In these cases keep solver from returning solutions outside of bracket.
fval_zmin, fval_zmax = func(zmin), func(zmax)
nobracket = False
if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval):
if bracket is None:
nobracket = True
else:
fval_brac = func(np.asanyarray(bracket))
if np.sign(fval - fval_brac[0]) != np.sign(fval_brac[-1] - fval):
nobracket = True
else:
zmin, zmax = bracket[0], bracket[-1]
fval_zmin, fval_zmax = fval_brac[[0, -1]]
if nobracket:
warnings.warn(f"fval is not bracketed by func(zmin)={fval_zmin} and "
f"func(zmax)={fval_zmax}. This means either there is no "
"solution, or that there is more than one solution "
"between zmin and zmax satisfying fval = func(z).",
AstropyUserWarning)
if isinstance(fval_zmin, Quantity):
val = fval.to_value(fval_zmin.unit)
else:
val = fval
# 'Brent' and 'Golden' ignore `bounds`, force solution inside zlim
def f(z):
if z > zmax:
return 1.e300 * (1.0 + z - zmax)
elif z < zmin:
return 1.e300 * (1.0 + zmin - z)
elif isinstance(fval_zmin, Quantity):
return abs(func(z).value - val)
else:
return abs(func(z) - val)
res = minimize_scalar(f, method=method, bounds=(zmin, zmax),
bracket=bracket, options=opt)
# Scipy docs state that `OptimizeResult` always has 'status' and 'message'
# attributes, but only `_minimize_scalar_bounded()` seems to have really
# implemented them.
if not res.success:
warnings.warn(f"Solver returned {res.get('status')}: {res.get('message', 'Unsuccessful')}\n"
f"Precision {res.fun} reached after {res.nfev} function calls.",
AstropyUserWarning)
if verbose:
print(res)
if np.allclose(res.x, zmax):
raise CosmologyError(
f"Best guess z={res.x} is very close to the upper z limit {zmax}."
"\nTry re-running with a different zmax.")
elif np.allclose(res.x, zmin):
raise CosmologyError(
f"Best guess z={res.x} is very close to the lower z limit {zmin}."
"\nTry re-running with a different zmin.")
return res.x
def z_at_value(func, fval, zmin=1e-8, zmax=1000, ztol=1e-8, maxfun=500,
method='Brent', bracket=None, verbose=False):
"""Find the redshift ``z`` at which ``func(z) = fval``.
This finds the redshift at which one of the cosmology functions or
methods (for example Planck13.distmod) is equal to a known value.
.. warning::
Make sure you understand the behavior of the function that you are
trying to invert! Depending on the cosmology, there may not be a
unique solution. For example, in the standard Lambda CDM cosmology,
there are two redshifts which give an angular diameter distance of
1500 Mpc, z ~ 0.7 and z ~ 3.8. To force ``z_at_value`` to find the
solution you are interested in, use the ``zmin`` and ``zmax`` keywords
to limit the search range (see the example below).
Parameters
----------
func : function or method
A function that takes a redshift as input.
fval : `~astropy.units.Quantity`
The (scalar or array) value of ``func(z)`` to recover.
zmin : float or array-like['dimensionless'] or quantity-like, optional
The lower search limit for ``z``. Beware of divergences
in some cosmological functions, such as distance moduli,
at z=0 (default 1e-8).
zmax : float or array-like['dimensionless'] or quantity-like, optional
The upper search limit for ``z`` (default 1000).
ztol : float or array-like['dimensionless'], optional
The relative error in ``z`` acceptable for convergence.
maxfun : int or array-like, optional
The maximum number of function evaluations allowed in the
optimization routine (default 500).
method : str or callable, optional
Type of solver to pass to the minimizer. The built-in options provided
by :func:`~scipy.optimize.minimize_scalar` are 'Brent' (default),
'Golden' and 'Bounded' with names case insensitive - see documentation
there for details. It also accepts a custom solver by passing any
user-provided callable object that meets the requirements listed
therein under the Notes on "Custom minimizers" - or in more detail in
:doc:`scipy:tutorial/optimize` - although their use is currently
untested.
.. versionadded:: 4.3
bracket : sequence or object array[sequence], optional
For methods 'Brent' and 'Golden', ``bracket`` defines the bracketing
interval and can either have three items (z1, z2, z3) so that
        z1 < z2 < z3 and ``func(z2) < func(z1), func(z3)`` or two items z1
and z3 which are assumed to be a starting interval for a downhill
bracket search. For non-monotonic functions such as angular diameter
distance this may be used to start the search on the desired side of
the maximum, but see Examples below for usage notes.
.. versionadded:: 4.3
verbose : bool, optional
Print diagnostic output from solver (default `False`).
.. versionadded:: 4.3
Returns
-------
z : `~astropy.units.Quantity` ['redshift']
The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) =
fval`` within ``ztol``. Has units of cosmological redshift.
Warns
-----
:class:`~astropy.utils.exceptions.AstropyUserWarning`
        If ``fval`` is not bracketed by ``func(zmin)`` and ``func(zmax)``.
If the solver was not successful.
Raises
------
:class:`astropy.cosmology.CosmologyError`
If the result is very close to either ``zmin`` or ``zmax``.
ValueError
If ``bracket`` is not an array nor a 2 (or 3) element sequence.
TypeError
If ``bracket`` is not an object array. 2 (or 3) element sequences will
be turned into object arrays, so this error should only occur if a
non-object array is used for ``bracket``.
Notes
-----
This works for any arbitrary input cosmology, but is inefficient if you
want to invert a large number of values for the same cosmology. In this
case, it is faster to instead generate an array of values at many
closely-spaced redshifts that cover the relevant redshift range, and then
use interpolation to find the redshift at each value you are interested
in. For example, to efficiently find the redshifts corresponding to 10^6
values of the distance modulus in a Planck13 cosmology, you could do the
following:
    >>> import numpy as np
    >>> import astropy.units as u
>>> from astropy.cosmology import Planck13, z_at_value
Generate 10^6 distance moduli between 24 and 44 for which we
want to find the corresponding redshifts:
>>> Dvals = (24 + np.random.rand(1000000) * 20) * u.mag
Make a grid of distance moduli covering the redshift range we
need using 50 equally log-spaced values between zmin and
zmax. We use log spacing to adequately sample the steep part of
the curve at low distance moduli:
>>> zmin = z_at_value(Planck13.distmod, Dvals.min())
>>> zmax = z_at_value(Planck13.distmod, Dvals.max())
>>> zgrid = np.geomspace(zmin, zmax, 50)
>>> Dgrid = Planck13.distmod(zgrid)
Finally interpolate to find the redshift at each distance modulus:
>>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid)
Examples
--------
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, Planck18, z_at_value
The age and lookback time are monotonic with redshift, and so a
unique solution can be found:
>>> z_at_value(Planck13.age, 2 * u.Gyr) # doctest: +FLOAT_CMP
<Quantity 3.19812268 redshift>
The angular diameter is not monotonic however, and there are two
redshifts that give a value of 1500 Mpc. You can use the zmin and
zmax keywords to find the one you are interested in:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmax=1.5) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmin=2.5) # doctest: +FLOAT_CMP
<Quantity 3.7823268 redshift>
Alternatively the ``bracket`` option may be used to initialize the
function solver on a desired region, but one should be aware that this
does not guarantee it will remain close to this starting bracket.
For the example of angular diameter distance, which has a maximum near
a redshift of 1.6 in this cosmology, defining a bracket on either side
of this maximum will often return a solution on the same side:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(1.0, 1.2)) # doctest: +FLOAT_CMP +IGNORE_WARNINGS
<Quantity 0.68044452 redshift>
    But this is not guaranteed, especially if the bracket is chosen too wide
and/or too close to the turning point:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(0.1, 1.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
    Likewise, even for the same minimizer and the same starting conditions,
    different results can be found depending on architecture or library versions:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 0.68044452 redshift> # doctest: +SKIP
It is therefore generally safer to use the 3-parameter variant to ensure
the solution stays within the bracketing limits:
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... bracket=(0.1, 1.0, 1.5)) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
Also note that the luminosity distance and distance modulus (two
other commonly inverted quantities) are monotonic in flat and open
universes, but not in closed universes.
All the arguments except ``func``, ``method`` and ``verbose`` accept array
inputs. This does NOT use interpolation tables or any method to speed up
evaluations, rather providing a convenient means to broadcast arguments
over an element-wise scalar evaluation.
The most common use case for non-scalar input is to evaluate 'func' for an
array of ``fval``:
>>> z_at_value(Planck13.age, [2, 7] * u.Gyr) # doctest: +FLOAT_CMP
<Quantity [3.19812061, 0.75620443] redshift>
``fval`` can be any shape:
>>> z_at_value(Planck13.age, [[2, 7], [1, 3]]*u.Gyr) # doctest: +FLOAT_CMP
<Quantity [[3.19812061, 0.75620443],
[5.67661227, 2.19131955]] redshift>
    Other arguments can be arrays. For non-monotonic functions -- for example,
the angular diameter distance -- this can be useful to find all solutions.
>>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc,
... zmin=[0, 2.5], zmax=[2, 4]) # doctest: +FLOAT_CMP
<Quantity [0.68127747, 3.79149062] redshift>
    The ``bracket`` argument can likewise be an array. However, since
bracket must already be a sequence (or None), it MUST be given as an
object `numpy.ndarray`. Importantly, the depth of the array must be such
that each bracket subsequence is an object. Errors or unexpected results
will happen otherwise. A convenient means to ensure the right depth is by
including a length-0 tuple as a bracket and then truncating the object
array to remove the placeholder. This can be seen in the following
example:
>>> bracket=np.array([(1.0, 1.2),(2.0, 2.5), ()], dtype=object)[:-1]
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... bracket=bracket) # doctest: +SKIP
<Quantity [0.68044452, 3.7823268] redshift>
"""
# `fval` can be a Quantity, which isn't (yet) compatible w/ `numpy.nditer`
# so we strip it of units for broadcasting and restore the units when
# passing the elements to `_z_at_scalar_value`.
fval = np.asanyarray(fval)
unit = getattr(fval, 'unit', 1) # can be unitless
zmin = Quantity(zmin, cu.redshift).value # must be unitless
zmax = Quantity(zmax, cu.redshift).value
# bracket must be an object array (assumed to be correct) or a 'scalar'
# bracket: 2 or 3 elt sequence
if not isinstance(bracket, np.ndarray): # 'scalar' bracket
if bracket is not None and len(bracket) not in (2, 3):
raise ValueError("`bracket` is not an array "
"nor a 2 (or 3) element sequence.")
else: # munge bracket into a 1-elt object array
bracket = np.array([bracket, ()], dtype=object)[:1].squeeze()
if bracket.dtype != np.object_:
raise TypeError(f"`bracket` has dtype {bracket.dtype}, not 'O'")
# make multi-dimensional iterator for all but `method`, `verbose`
with np.nditer(
[fval, zmin, zmax, ztol, maxfun, bracket, None],
flags=['refs_ok'],
op_flags=[*[['readonly']] * 6, # ← inputs output ↓
['writeonly', 'allocate', 'no_subtype']],
op_dtypes=(*(None,)*6, fval.dtype),
casting="no",
) as it:
for fv, zmn, zmx, zt, mfe, bkt, zs in it: # ← eltwise unpack & eval ↓
zs[...] = _z_at_scalar_value(func, fv * unit, zmin=zmn, zmax=zmx,
ztol=zt, maxfun=mfe, bracket=bkt.item(),
# not broadcasted
method=method, verbose=verbose)
# since bracket is an object array, the output will be too, so it is
# cast to the same type as the function value.
result = it.operands[-1] # zs
return result << cu.redshift
|
66a43380f014143d4c61cf15e5b09850e3adcf281318dbf6bd379f9d2bb5e3b4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Read/Write/Interchange methods for `astropy.cosmology`. **NOT public API**.
"""
# Import to register with the I/O machinery
from . import cosmology, ecsv, mapping, model, row, table, yaml # noqa: F401, F403
|
1aa927e45493f98ffd2e8f5b943a963253b4e30119b01b17e7c26cbe8d779178 | """Testing :mod:`astropy.cosmology.units`."""
##############################################################################
# IMPORTS
import pytest
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Planck13, default_cosmology
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_ASDF, HAS_SCIPY
from astropy.utils.exceptions import AstropyDeprecationWarning
##############################################################################
# TESTS
##############################################################################
def test_has_expected_units():
"""
Test that this module has the expected set of units. Some of the units are
imported from :mod:`astropy.units`, or vice versa. Here we test presence,
not usage. Units from :mod:`astropy.units` are tested in that module. Units
defined in :mod:`astropy.cosmology` will be tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`littleh`"):
assert u.astrophys.littleh is cu.littleh
def test_has_expected_equivalencies():
"""
Test that this module has the expected set of equivalencies. Many of the
equivalencies are imported from :mod:`astropy.units`, so here we test
presence, not usage. Equivalencies from :mod:`astropy.units` are tested in
that module. Equivalencies defined in :mod:`astropy.cosmology` will be
tested subsequently.
"""
with pytest.warns(AstropyDeprecationWarning, match="`with_H0`"):
assert u.equivalencies.with_H0 is cu.with_H0
def test_littleh():
"""Test :func:`astropy.cosmology.units.with_H0`."""
H0_70 = 70 * u.km / u.s / u.Mpc
h70dist = 70 * u.Mpc / cu.littleh
assert_quantity_allclose(h70dist.to(u.Mpc, cu.with_H0(H0_70)), 100 * u.Mpc)
# make sure using the default cosmology works
cosmodist = default_cosmology.get().H0.value * u.Mpc / cu.littleh
assert_quantity_allclose(cosmodist.to(u.Mpc, cu.with_H0()), 100 * u.Mpc)
# Now try a luminosity scaling
h1lum = 0.49 * u.Lsun * cu.littleh ** -2
assert_quantity_allclose(h1lum.to(u.Lsun, cu.with_H0(H0_70)), 1 * u.Lsun)
# And the trickiest one: magnitudes. Using H0=10 here for the round numbers
H0_10 = 10 * u.km / u.s / u.Mpc
# assume the "true" magnitude M = 12.
# Then M - 5*log_10(h) = M + 5 = 17
withlittlehmag = 17 * (u.mag - u.MagUnit(cu.littleh ** 2))
assert_quantity_allclose(withlittlehmag.to(u.mag, cu.with_H0(H0_10)), 12 * u.mag)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_dimensionless_redshift():
"""Test :func:`astropy.cosmology.units.dimensionless_redshift`."""
z = 3 * cu.redshift
val = 3 * u.one
# show units not equal
assert z.unit == cu.redshift
assert z.unit != u.one
# test equivalency enabled by default
assert z == val
# also test that it works for powers
assert (3 * cu.redshift ** 3) == val
# and in composite units
assert (3 * u.km / cu.redshift ** 3) == 3 * u.km
# test it also works as an equivalency
with u.set_enabled_equivalencies([]): # turn off default equivalencies
assert z.to(u.one, equivalencies=cu.dimensionless_redshift()) == val
with pytest.raises(ValueError):
z.to(u.one)
# if this fails, something is really wrong
with u.add_enabled_equivalencies(cu.dimensionless_redshift()):
assert z == val
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_temperature():
"""Test :func:`astropy.cosmology.units.redshift_temperature`."""
cosmo = Planck13.clone(Tcmb0=3 * u.K)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_temperature()
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.redshift_temperature(cosmo)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_temperature(cosmo, ztol=1e-10)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
def test_redshift_hubble():
"""Test :func:`astropy.cosmology.units.redshift_hubble`."""
unit = u.km / u.s / u.Mpc
cosmo = Planck13.clone(H0=100 * unit)
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
H = cosmo.H(z)
h = H.to_value(u.km/u.s/u.Mpc) / 100 * cu.littleh
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.redshift_hubble()
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.redshift_hubble()
assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
assert default_cosmo.H(z) != H
# 2) Specifying the cosmology
equivalency = cu.redshift_hubble(cosmo)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# Test `atzkw`
equivalency = cu.redshift_hubble(cosmo, ztol=1e-10)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H
assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # little-h
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
@pytest.mark.parametrize(
"kind",
[cu.redshift_distance.__defaults__[-1], "comoving", "lookback", "luminosity"]
)
def test_redshift_distance(kind):
"""Test :func:`astropy.cosmology.units.redshift_distance`."""
z = 15 * cu.redshift
d = getattr(Planck13, kind + "_distance")(z)
equivalency = cu.redshift_distance(cosmology=Planck13, kind=kind)
# properties of Equivalency
assert equivalency.name[0] == "redshift_distance"
assert equivalency.kwargs[0]["cosmology"] == Planck13
assert equivalency.kwargs[0]["distance"] == kind
# roundtrip
assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
assert_quantity_allclose(d.to(cu.redshift, equivalency), z)
def test_redshift_distance_wrong_kind():
"""Test :func:`astropy.cosmology.units.redshift_distance` wrong kind."""
with pytest.raises(ValueError, match="`kind`"):
cu.redshift_distance(kind=None)
@pytest.mark.skipif(not HAS_SCIPY, reason="Cosmology needs scipy")
class Test_with_redshift:
"""Test `astropy.cosmology.units.with_redshift`."""
@pytest.fixture(scope="class")
def cosmo(self):
"""Test cosmology."""
return Planck13.clone(Tcmb0=3 * u.K)
# ===========================================
def test_cosmo_different(self, cosmo):
"""The default is different than the test cosmology."""
default_cosmo = default_cosmology.get()
assert default_cosmo != cosmo # shows changing default
def test_no_equivalency(self, cosmo):
"""Test the equivalency ``with_redshift`` without any enabled."""
equivalency = cu.with_redshift(distance=None, hubble=False, Tcmb=False)
assert len(equivalency) == 0
# -------------------------------------------
def test_temperature_off(self, cosmo):
"""Test ``with_redshift`` with the temperature off."""
z = 15 * cu.redshift
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(Tcmb=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'K'"):
z.to(u.K, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, Tcmb=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'K'"):
z.to(u.K, equivalency)
def test_temperature(self, cosmo):
"""Test temperature equivalency component."""
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
Tcmb = cosmo.Tcmb(z)
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), default_cosmo.Tcmb(z))
assert default_cosmo.Tcmb(z) != Tcmb
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, Tcmb=True)
assert_quantity_allclose(z.to(u.K, equivalency), Tcmb)
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# Test `atzkw`
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, Tcmb=True, atzkw={"ztol": 1e-10})
assert_quantity_allclose(Tcmb.to(cu.redshift, equivalency), z)
# -------------------------------------------
def test_hubble_off(self, cosmo):
"""Test ``with_redshift`` with Hubble off."""
unit = u.km / u.s / u.Mpc
z = 15 * cu.redshift
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(hubble=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'km / "):
z.to(unit, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, hubble=False)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'km / "):
z.to(unit, equivalency)
def test_hubble(self, cosmo):
"""Test Hubble equivalency component."""
unit = u.km/u.s/u.Mpc
default_cosmo = default_cosmology.get()
z = 15 * cu.redshift
H = cosmo.H(z)
h = H.to_value(u.km / u.s / u.Mpc) / 100 * cu.littleh
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(hubble=True)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(hubble=True)
assert_quantity_allclose(z.to(unit, equivalency), default_cosmo.H(z))
assert default_cosmo.H(z) != H
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, hubble=True)
# H
assert_quantity_allclose(z.to(unit, equivalency), H)
assert_quantity_allclose(H.to(cu.redshift, equivalency), z)
# little-h
assert_quantity_allclose(z.to(cu.littleh, equivalency), h)
assert_quantity_allclose(h.to(cu.redshift, equivalency), z)
# Test `atzkw`
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, hubble=True, atzkw={"ztol": 1e-10})
assert_quantity_allclose(H.to(cu.redshift, equivalency), z) # H
assert_quantity_allclose(h.to(cu.redshift, equivalency), z) # h
# -------------------------------------------
def test_distance_off(self, cosmo):
"""Test ``with_redshift`` with the distance off."""
z = 15 * cu.redshift
# 1) Default (without specifying the cosmology)
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(distance=None)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'Mpc'"):
z.to(u.Mpc, equivalency)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, distance=None)
with pytest.raises(u.UnitConversionError, match="'redshift' and 'Mpc'"):
z.to(u.Mpc, equivalency)
def test_distance_default(self):
"""Test distance equivalency default."""
z = 15 * cu.redshift
d = default_cosmology.get().comoving_distance(z)
equivalency = cu.with_redshift()
assert_quantity_allclose(z.to(u.Mpc, equivalency), d)
assert_quantity_allclose(d.to(cu.redshift, equivalency), z)
def test_distance_wrong_kind(self):
"""Test distance equivalency, but the wrong kind."""
with pytest.raises(ValueError, match="`kind`"):
cu.with_redshift(distance=ValueError)
@pytest.mark.parametrize("kind", ["comoving", "lookback", "luminosity"])
def test_distance(self, kind):
"""Test distance equivalency."""
cosmo = Planck13
z = 15 * cu.redshift
dist = getattr(cosmo, kind + "_distance")(z)
default_cosmo = default_cosmology.get()
assert default_cosmo != cosmo # shows changing default
# 1) without specifying the cosmology
with default_cosmology.set(cosmo):
equivalency = cu.with_redshift(distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency), dist)
# showing the answer changes if the cosmology changes
# this test uses the default cosmology
equivalency = cu.with_redshift(distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency),
getattr(default_cosmo, kind + "_distance")(z))
assert not u.allclose(getattr(default_cosmo, kind + "_distance")(z), dist)
# 2) Specifying the cosmology
equivalency = cu.with_redshift(cosmo, distance=kind)
assert_quantity_allclose(z.to(u.Mpc, equivalency), dist)
assert_quantity_allclose(dist.to(cu.redshift, equivalency), z)
# Test atzkw
# this is really just a test that 'atzkw' doesn't fail
equivalency = cu.with_redshift(cosmo, distance=kind, atzkw={"ztol": 1e-10})
assert_quantity_allclose(dist.to(cu.redshift, equivalency), z)
# FIXME! get "dimensionless_redshift", "with_redshift" to work in this
# they are not in ``astropy.units.equivalencies``, so the following fails
@pytest.mark.skipif(not HAS_ASDF, reason="requires ASDF")
@pytest.mark.parametrize("equiv", [cu.with_H0])
def test_equivalencies_asdf(tmpdir, equiv, recwarn):
from asdf.tests import helpers
tree = {"equiv": equiv()}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_equivalency_context_manager():
base_registry = u.get_current_unit_registry()
# check starting with only the dimensionless_redshift equivalency.
assert len(base_registry.equivalencies) == 1
assert str(base_registry.equivalencies[0][0]) == "redshift"
|
ff345764bedc36769f530f1108d9c5088a04b46d2c4fe0965ea9e7576ecb1036 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.core`."""
##############################################################################
# IMPORTS
# STDLIB
import abc
import inspect
import pickle
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy.cosmology import Cosmology, CosmologyError, FlatCosmologyMixin
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.parameter import Parameter
from astropy.table import Column, QTable, Table
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.metadata import MetaData
from .test_connect import ReadWriteTestMixin, ToFromFormatTestMixin
from .test_parameter import ParameterTestMixin
##############################################################################
# SETUP / TEARDOWN
scalar_zs = [
0, 1, 1100, # interesting times
# FIXME! np.inf breaks some funcs. 0 * inf is an error
np.float64(3300), # different type
2 * cu.redshift, 3 * u.one # compatible units
]
_zarr = np.linspace(0, 1e5, num=20)
array_zs = [
_zarr, # numpy
_zarr.tolist(), # pure python
Column(_zarr), # table-like
_zarr * cu.redshift # Quantity
]
valid_zs = scalar_zs + array_zs
invalid_zs = [
(None, TypeError), # wrong type
    # Wrong units (the TypeError comes from the Cython backend, which can differ)
(4 * u.MeV, (u.UnitConversionError, TypeError)), # scalar
([0, 1] * u.m, (u.UnitConversionError, TypeError)), # array
]
class SubCosmology(Cosmology):
"""Defined here to be serializable."""
H0 = Parameter(unit="km/(s Mpc)")
Tcmb0 = Parameter(unit=u.K)
m_nu = Parameter(unit=u.eV)
def __init__(self, H0, Tcmb0=0*u.K, m_nu=0*u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
self.H0 = H0
self.Tcmb0 = Tcmb0
self.m_nu = m_nu
@property
def is_flat(self):
return super().is_flat()
##############################################################################
# TESTS
##############################################################################
class MetaTestMixin:
"""Tests for a :class:`astropy.utils.metadata.MetaData` on a Cosmology."""
def test_meta_on_class(self, cosmo_cls):
assert isinstance(cosmo_cls.meta, MetaData)
def test_meta_on_instance(self, cosmo):
assert isinstance(cosmo.meta, dict) # test type
# value set at initialization
assert cosmo.meta == self.cls_kwargs.get("meta", {})
def test_meta_mutable(self, cosmo):
"""The metadata is NOT immutable on a cosmology"""
key = tuple(cosmo.meta.keys())[0] # select some key
cosmo.meta[key] = cosmo.meta.pop(key) # will error if immutable
class TestCosmology(ParameterTestMixin, MetaTestMixin,
ReadWriteTestMixin, ToFromFormatTestMixin,
metaclass=abc.ABCMeta):
"""Test :class:`astropy.cosmology.Cosmology`.
Subclasses should define tests for:
- ``test_clone_change_param()``
- ``test_repr()``
"""
def setup_class(self):
"""
Setup for testing.
Cosmology should not be instantiated, so tests are done on a subclass.
"""
# make sure SubCosmology is known
_COSMOLOGY_CLASSES["SubCosmology"] = SubCosmology
self.cls = SubCosmology
self._cls_args = dict(H0=70 * (u.km / u.s / u.Mpc), Tcmb0=2.7 * u.K, m_nu=0.6 * u.eV)
self.cls_kwargs = dict(name=self.__class__.__name__, meta={"a": "b"})
def teardown_class(self):
_COSMOLOGY_CLASSES.pop("SubCosmology", None)
@property
def cls_args(self):
return tuple(self._cls_args.values())
@pytest.fixture(scope="class")
def cosmo_cls(self):
"""The Cosmology class as a :func:`pytest.fixture`."""
return self.cls
@pytest.fixture(scope="function") # ensure not cached.
def ba(self):
"""Return filled `inspect.BoundArguments` for cosmology."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return ba
@pytest.fixture(scope="class")
def cosmo(self, cosmo_cls):
"""The cosmology instance with which to test."""
ba = self.cls._init_signature.bind(*self.cls_args, **self.cls_kwargs)
ba.apply_defaults()
return cosmo_cls(*ba.args, **ba.kwargs)
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test creating subclasses registers classes and manages Parameters."""
class InitSubclassTest(cosmo_cls):
pass
# test parameters
assert InitSubclassTest.__parameters__ == cosmo_cls.__parameters__
# test and cleanup registry
registrant = _COSMOLOGY_CLASSES.pop(InitSubclassTest.__qualname__)
assert registrant is InitSubclassTest
def test_init_signature(self, cosmo_cls, cosmo):
"""Test class-property ``_init_signature``."""
# test presence
assert hasattr(cosmo_cls, "_init_signature")
assert hasattr(cosmo, "_init_signature")
# test internal consistency, so following tests can use either cls or instance.
assert cosmo_cls._init_signature == cosmo._init_signature
# test matches __init__, but without 'self'
sig = inspect.signature(cosmo.__init__) # (instances don't have self)
assert set(sig.parameters.keys()) == set(cosmo._init_signature.parameters.keys())
assert all(np.all(sig.parameters[k].default == p.default) for k, p in
cosmo._init_signature.parameters.items())
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
"""Test initialization."""
        # Cosmology only does name and meta, but this subclass adds H0, Tcmb0 & m_nu.
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta={"m": 1})
assert cosmo.name == "test_init"
assert cosmo.meta["m"] == 1
# if meta is None, it is changed to a dict
cosmo = cosmo_cls(*self.cls_args, name="test_init", meta=None)
assert cosmo.meta == {}
def test_name(self, cosmo):
"""Test property ``name``."""
assert cosmo.name is cosmo._name # accesses private attribute
assert cosmo.name is None or isinstance(cosmo.name, str) # type
assert cosmo.name == self.cls_kwargs["name"] # test has expected value
# immutable
with pytest.raises(AttributeError, match="can't set"):
cosmo.name = None
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``. It's an ABC."""
with pytest.raises(NotImplementedError, match="is_flat is not implemented"):
cosmo.is_flat
# ------------------------------------------------
# clone
def test_clone_identical(self, cosmo):
"""Test method ``.clone()`` if no (kw)args."""
assert cosmo.clone() is cosmo
def test_clone_name(self, cosmo):
"""Test method ``.clone()`` name argument."""
# test changing name. clone treats 'name' differently (see next test)
c = cosmo.clone(name="cloned cosmo")
assert c.name == "cloned cosmo" # changed
# show name is the only thing changed
c._name = cosmo.name # first change name back
assert c == cosmo
assert c.meta == cosmo.meta
# now change a different parameter and see how 'name' changes
c = cosmo.clone(meta={"test_clone_name": True})
assert c.name == cosmo.name + " (modified)"
def test_clone_meta(self, cosmo):
"""Test method ``.clone()`` meta argument: updates meta, doesn't clear."""
# start with no change
c = cosmo.clone(meta=None)
assert c.meta == cosmo.meta
# add something
c = cosmo.clone(meta=dict(test_clone_meta=True))
assert c.meta["test_clone_meta"] is True
c.meta.pop("test_clone_meta") # remove from meta
assert c.meta == cosmo.meta # now they match
def test_clone_change_param(self, cosmo):
"""
Test method ``.clone()`` changing a(many) Parameter(s).
Nothing here b/c no Parameters.
"""
def test_clone_fail_unexpected_arg(self, cosmo):
"""Test when ``.clone()`` gets an unexpected argument."""
with pytest.raises(TypeError, match="unexpected keyword argument"):
cosmo.clone(not_an_arg=4)
def test_clone_fail_positional_arg(self, cosmo):
with pytest.raises(TypeError, match="1 positional argument"):
cosmo.clone(None)
# ---------------------------------------------------------------
# comparison methods
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`."""
# to self
assert cosmo.is_equivalent(cosmo)
# same class, different instance
newclone = cosmo.clone(name="test_is_equivalent")
assert cosmo.is_equivalent(newclone)
assert newclone.is_equivalent(cosmo)
# different class and not convertible to Cosmology.
assert not cosmo.is_equivalent(2)
def test_equality(self, cosmo):
"""Test method ``.__eq__()."""
# wrong class
assert (cosmo != 2) and (2 != cosmo)
# correct
assert cosmo == cosmo
        # different name => not equal, but still equivalent
newcosmo = cosmo.clone(name="test_equality")
assert (cosmo != newcosmo) and (newcosmo != cosmo)
assert cosmo.__equiv__(newcosmo) and newcosmo.__equiv__(cosmo)
# ---------------------------------------------------------------
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``.
This is a very general test and it is probably good to have a
hard-coded comparison.
"""
r = repr(cosmo)
# class in string rep
assert cosmo_cls.__qualname__ in r
assert r.index(cosmo_cls.__qualname__) == 0 # it's the first thing
r = r[len(cosmo_cls.__qualname__) + 1:] # remove
# name in string rep
if cosmo.name is not None:
assert f"name=\"{cosmo.name}\"" in r
assert r.index("name=") == 0
r = r[6 + len(cosmo.name) + 3:] # remove
# parameters in string rep
ps = {k: getattr(cosmo, k) for k in cosmo.__parameters__}
cps = {k: getattr(cosmo_cls, k) for k in cosmo.__parameters__}
for k, v in ps.items():
sv = f"{k}={v}"
assert sv in r
assert r.index(k) == 0
r = r[len(sv) + 2:] # remove
# ------------------------------------------------
@pytest.mark.parametrize("in_meta", [True, False])
@pytest.mark.parametrize("table_cls", [Table, QTable])
def test_astropy_table(self, cosmo, table_cls, in_meta):
"""Test ``astropy.table.Table(cosmology)``."""
tbl = table_cls(cosmo, cosmology_in_meta=in_meta)
assert isinstance(tbl, table_cls)
# the name & all parameters are columns
for n in ("name", *cosmo.__parameters__):
assert n in tbl.colnames
assert np.all(tbl[n] == getattr(cosmo, n))
# check if Cosmology is in metadata or a column
if in_meta:
assert tbl.meta["cosmology"] == cosmo.__class__.__qualname__
assert "cosmology" not in tbl.colnames
else:
assert "cosmology" not in tbl.meta
assert tbl["cosmology"][0] == cosmo.__class__.__qualname__
# the metadata is transferred
for k, v in cosmo.meta.items():
assert np.all(tbl.meta[k] == v)
# ===============================================================
# Usage Tests
def test_immutability(self, cosmo):
"""
Test immutability of cosmologies.
The metadata is mutable: see ``test_meta_mutable``.
"""
for n in cosmo.__all_parameters__:
with pytest.raises(AttributeError):
setattr(cosmo, n, getattr(cosmo, n))
def test_pickle_class(self, cosmo_cls, pickle_protocol):
"""Test classes can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo_cls, protocol=pickle_protocol)
unpickled = pickle.loads(f)
# test equality
assert unpickled == cosmo_cls
def test_pickle_instance(self, cosmo, pickle_protocol):
"""Test instances can pickle and unpickle."""
# pickle and unpickle
f = pickle.dumps(cosmo, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == cosmo
assert unpickled.meta == cosmo.meta
class CosmologySubclassTest(TestCosmology):
"""
Test subclasses of :class:`astropy.cosmology.Cosmology`.
This is broken away from ``TestCosmology``, because |Cosmology| is/will be
an ABC and subclasses must override some methods.
"""
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# instance-level
@abc.abstractmethod
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# -----------------------------------------------------------------------------
class FlatCosmologyMixinTest:
"""Tests for :class:`astropy.cosmology.core.FlatCosmologyMixin` subclasses.
The test suite structure mirrors the implementation of the tested code.
Just like :class:`astropy.cosmology.FlatCosmologyMixin` is an abstract
base class (ABC) that cannot be used by itself, so too is this corresponding
test class an ABC mixin.
    E.g. to use this class::
class TestFlatSomeCosmology(FlatCosmologyMixinTest, TestSomeCosmology):
...
"""
def test_nonflat_class_(self, cosmo_cls, cosmo):
"""Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat_cls`.
"""
# Test it's a method on the class
assert issubclass(cosmo_cls, cosmo_cls.__nonflatclass__)
# It also works from the instance. # TODO! as a "metaclassmethod"
assert issubclass(cosmo_cls, cosmo.__nonflatclass__)
# Maybe not the most robust test, but so far all Flat classes have the
# name of their parent class.
assert cosmo.__nonflatclass__.__name__ in cosmo_cls.__name__
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
super().test_is_flat(cosmo_cls, cosmo)
# it's always True
assert cosmo.is_flat is True
def test_nonflat(self, cosmo):
"""Test :attr:`astropy.cosmology.core.FlatCosmologyMixin.nonflat`.
"""
assert cosmo.nonflat.is_equivalent(cosmo)
assert cosmo.is_equivalent(cosmo.nonflat)
# ------------------------------------------------
# clone
def test_clone_to_nonflat_equivalent(self, cosmo):
"""Test method ``.clone()``to_nonflat argument."""
# just converting the class
nc = cosmo.clone(to_nonflat=True)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
@abc.abstractmethod
def test_clone_to_nonflat_change_param(self, cosmo):
"""
Test method ``.clone()`` changing a(many) Parameter(s). No parameters
are changed here because FlatCosmologyMixin has no Parameters.
See class docstring for why this test method exists.
"""
# send to non-flat
nc = cosmo.clone(to_nonflat=True)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
# ------------------------------------------------
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.is_equivalent`.
Normally this would pass up via super(), but ``__equiv__`` is meant
to be overridden, so we skip super().
e.g. FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestCosmology
vs FlatFLRWMixinTest -> FlatCosmologyMixinTest -> TestFLRW -> TestCosmology
"""
CosmologySubclassTest.test_is_equivalent(self, cosmo)
# See FlatFLRWMixinTest for tests. It's a bit hard here since this class
# is for an ABC.
# ===============================================================
# Usage Tests
def test_subclassing(self, cosmo_cls):
"""Test when subclassing a flat cosmology."""
class SubClass1(cosmo_cls):
pass
# The classes have the same non-flat parent class
assert SubClass1.__nonflatclass__ is cosmo_cls.__nonflatclass__
# A more complex example is when Mixin classes are used.
class Mixin:
pass
class SubClass2(Mixin, cosmo_cls):
pass
# The classes have the same non-flat parent class
assert SubClass2.__nonflatclass__ is cosmo_cls.__nonflatclass__
# The order of the Mixin should not matter
class SubClass3(cosmo_cls, Mixin):
pass
# The classes have the same non-flat parent class
assert SubClass3.__nonflatclass__ is cosmo_cls.__nonflatclass__
def test__nonflatclass__multiple_nonflat_inheritance():
"""
Test :meth:`astropy.cosmology.core.FlatCosmologyMixin.__nonflatclass__`
when there's more than one non-flat class in the inheritance.
"""
# Define a non-operable minimal subclass of Cosmology.
class SubCosmology2(Cosmology):
def __init__(self, H0, Tcmb0=0*u.K, m_nu=0*u.eV, name=None, meta=None):
super().__init__(name=name, meta=meta)
@property
def is_flat(self):
return False
# Now make an ambiguous flat cosmology from the two SubCosmologies
with pytest.raises(TypeError, match="cannot create a consistent non-flat class"):
class FlatSubCosmology(FlatCosmologyMixin, SubCosmology, SubCosmology2):
@property
def nonflat(self):
pass
# -----------------------------------------------------------------------------
def test_flrw_moved_deprecation():
"""Test the deprecation warning about the move of FLRW classes."""
from astropy.cosmology import flrw
# it's deprecated to import `flrw/*` from `core.py`
with pytest.warns(AstropyDeprecationWarning):
from astropy.cosmology.core import FLRW
# but they are the same object
assert FLRW is flrw.FLRW
|
24dd945ab80d8a31c813f4233265ad474afd49526f0b535448cb0952a4235e9f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import pickle
# THIRD PARTY
import pytest
# LOCAL
import astropy.cosmology.units as cu
import astropy.units as u
from astropy import cosmology
from astropy.cosmology import parameters, realizations
from astropy.cosmology.realizations import Planck13, default_cosmology
def test_realizations_in_toplevel_dir():
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology`."""
d = dir(cosmology)
assert set(d) == set(cosmology.__all__)
for n in parameters.available:
assert n in d
def test_realizations_in_realizations_dir():
"""Test the realizations are in ``dir`` of :mod:`astropy.cosmology.realizations`."""
d = dir(realizations)
assert set(d) == set(realizations.__all__)
for n in parameters.available:
assert n in d
class Test_default_cosmology:
"""Tests for :class:`~astropy.cosmology.realizations.default_cosmology`."""
# -----------------------------------------------------
# Get
def test_get_current(self):
"""Test :meth:`astropy.cosmology.default_cosmology.get` current value."""
cosmo = default_cosmology.get()
assert cosmo is default_cosmology.validate(default_cosmology._value)
# -----------------------------------------------------
# get_cosmology_from_string (deprecated)
def test_get_cosmology_from_string(self, recwarn):
"""Test method ``get_cosmology_from_string``."""
cosmo = default_cosmology.get_cosmology_from_string("no_default")
assert cosmo is None
cosmo = default_cosmology.get_cosmology_from_string("Planck13")
assert cosmo is Planck13
with pytest.raises(ValueError):
cosmo = default_cosmology.get_cosmology_from_string("fail!")
# -----------------------------------------------------
# Validate
def test_validate_fail(self):
"""Test :meth:`astropy.cosmology.default_cosmology.validate`."""
# bad input type
with pytest.raises(TypeError, match="must be a string or Cosmology"):
default_cosmology.validate(TypeError)
# a not-valid option, but still a str
with pytest.raises(ValueError, match="Unknown cosmology"):
default_cosmology.validate("fail!")
# a not-valid type
with pytest.raises(TypeError, match="cannot find a Cosmology"):
default_cosmology.validate("available")
def test_validate_default(self):
"""Test method ``validate`` for specific values."""
value = default_cosmology.validate(None)
assert value is realizations.Planck18
@pytest.mark.parametrize("name", parameters.available)
def test_validate_str(self, name):
"""Test method ``validate`` for string input."""
value = default_cosmology.validate(name)
assert value is getattr(realizations, name)
@pytest.mark.parametrize("name", parameters.available)
def test_validate_cosmo(self, name):
"""Test method ``validate`` for cosmology instance input."""
cosmo = getattr(realizations, name)
value = default_cosmology.validate(cosmo)
assert value is cosmo
def test_validate_no_default(self):
"""Test :meth:`astropy.cosmology.default_cosmology.get` to `None`."""
cosmo = default_cosmology.validate("no_default")
assert cosmo is None
@pytest.mark.parametrize("name", parameters.available)
def test_pickle_builtin_realizations(name, pickle_protocol):
"""
Test in-built realizations can pickle and unpickle.
Also a regression test for #12008.
"""
# get class instance
original = getattr(cosmology, name)
# pickle and unpickle
f = pickle.dumps(original, protocol=pickle_protocol)
with u.add_enabled_units(cu):
unpickled = pickle.loads(f)
assert unpickled == original
assert unpickled.meta == original.meta
    # If the units are not enabled, the metadata is not equal because the
    # redshift units stored in it do not round-trip; the cosmologies
    # themselves still compare equal, since meta is excluded from equality.
    # This is a weird, known issue.
unpickled = pickle.loads(f)
assert unpickled == original
assert unpickled.meta != original.meta
|
018eb4f855cd639bc1888745b53890219c6a40b91e3700db3cf6effccc5c8593 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import annotations
import warnings
from abc import abstractmethod
from math import exp, floor, log, pi, sqrt
from numbers import Number
from typing import Any, Mapping, TypeVar
import numpy as np
from numpy import inf, sin
import astropy.constants as const
import astropy.units as u
from astropy.cosmology.core import Cosmology, FlatCosmologyMixin
from astropy.cosmology.parameter import Parameter, _validate_non_negative, _validate_with_unit
from astropy.cosmology.utils import aszarr, vectorize_redshift_method
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyUserWarning
# isort: split
if HAS_SCIPY:
from scipy.integrate import quad
else:
def quad(*args, **kwargs):
raise ModuleNotFoundError("No module named 'scipy.integrate'")
__all__ = ["FLRW", "FlatFLRWMixin"]
__doctest_requires__ = {'*': ['scipy']}
##############################################################################
# Parameters
# Some conversion constants -- useful to compute them once here and reuse in
# the initialization rather than have every object do them.
_H0units_to_invs = (u.km / (u.s * u.Mpc)).to(1.0 / u.s)
_sec_to_Gyr = u.s.to(u.Gyr)
# const in critical density in cgs units (g cm^-3)
_critdens_const = (3 / (8 * pi * const.G)).cgs.value
# angle conversions
_radian_in_arcsec = (1 * u.rad).to(u.arcsec)
_radian_in_arcmin = (1 * u.rad).to(u.arcmin)
# Radiation parameter over c^2 in cgs (g cm^-3 K^-4)
_a_B_c2 = (4 * const.sigma_sb / const.c ** 3).cgs.value
# Boltzmann constant in eV / K
_kB_evK = const.k_B.to(u.eV / u.K)
# typing
_FLRWT = TypeVar("_FLRWT", bound="FLRW")
_FlatFLRWMixinT = TypeVar("_FlatFLRWMixinT", bound="FlatFLRWMixin")
##############################################################################
class FLRW(Cosmology):
"""
A class describing an isotropic and homogeneous
(Friedmann-Lemaitre-Robertson-Walker) cosmology.
This is an abstract base class -- you cannot instantiate examples of this
class, but must work with one of its subclasses, such as
:class:`~astropy.cosmology.LambdaCDM` or :class:`~astropy.cosmology.wCDM`.
Parameters
----------
H0 : float or scalar quantity-like ['frequency']
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc].
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0. Note that this does not include massive
neutrinos.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
Tcmb0 : float or scalar quantity-like ['temperature'], optional
Temperature of the CMB z=0. If a float, must be in [K]. Default: 0 [K].
Setting this to zero will turn off both photons and neutrinos
(even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : quantity-like ['energy', 'mass'] or array-like, optional
Mass of each neutrino species in [eV] (mass-energy equivalency enabled).
If this is a scalar Quantity, then all neutrino species are assumed to
have that mass. Otherwise, the mass of each species. The actual number
of neutrino species (and hence the number of elements of m_nu if it is
not scalar) must be the floor of Neff. Typically this means you should
provide three neutrino masses unless you are considering something like
a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any computation
that requires its value will raise an exception.
name : str or None (optional, keyword-only)
Name for this cosmological object.
meta : mapping or None (optional, keyword-only)
Metadata for the cosmology, e.g., a reference.
Notes
-----
Class instances are immutable -- you cannot change the parameters' values.
That is, all of the above attributes (except meta) are read only.
For details on how to create performant custom subclasses, see the
documentation on :ref:`astropy-cosmology-fast-integrals`.
"""
H0 = Parameter(doc="Hubble constant as an `~astropy.units.Quantity` at z=0.",
unit="km/(s Mpc)", fvalidate="scalar")
Om0 = Parameter(doc="Omega matter; matter density/critical density at z=0.",
fvalidate="non-negative")
Ode0 = Parameter(doc="Omega dark energy; dark energy density/critical density at z=0.",
fvalidate="float")
Tcmb0 = Parameter(doc="Temperature of the CMB as `~astropy.units.Quantity` at z=0.",
unit="Kelvin", fvalidate="scalar")
Neff = Parameter(doc="Number of effective neutrino species.", fvalidate="non-negative")
m_nu = Parameter(doc="Mass of neutrino species.",
unit="eV", equivalencies=u.mass_energy())
Ob0 = Parameter(doc="Omega baryon; baryonic matter density/critical density at z=0.")
def __init__(self, H0, Om0, Ode0, Tcmb0=0.0*u.K, Neff=3.04, m_nu=0.0*u.eV,
Ob0=None, *, name=None, meta=None):
super().__init__(name=name, meta=meta)
# Assign (and validate) Parameters
self.H0 = H0
self.Om0 = Om0
self.Ode0 = Ode0
self.Tcmb0 = Tcmb0
self.Neff = Neff
self.m_nu = m_nu # (reset later, this is just for unit validation)
self.Ob0 = Ob0 # (must be after Om0)
# Derived quantities:
# Dark matter density; matter - baryons, if latter is not None.
self._Odm0 = None if Ob0 is None else (self._Om0 - self._Ob0)
# 100 km/s/Mpc * h = H0 (so h is dimensionless)
self._h = self._H0.value / 100.0
# Hubble distance
self._hubble_distance = (const.c / self._H0).to(u.Mpc)
# H0 in s^-1
H0_s = self._H0.value * _H0units_to_invs
# Hubble time
self._hubble_time = (_sec_to_Gyr / H0_s) << u.Gyr
# Critical density at z=0 (grams per cubic cm)
cd0value = _critdens_const * H0_s ** 2
self._critical_density0 = cd0value << u.g / u.cm ** 3
# Compute photon density from Tcmb
self._Ogamma0 = _a_B_c2 * self._Tcmb0.value ** 4 / self._critical_density0.value
# Compute Neutrino temperature:
        # The constant in front is (4/11)^(1/3) -- see any cosmology book for an
# explanation -- for example, Weinberg 'Cosmology' p 154 eq (3.1.21).
self._Tnu0 = 0.7137658555036082 * self._Tcmb0
# Compute neutrino parameters:
if self._m_nu is None:
self._nneutrinos = 0
self._neff_per_nu = None
self._massivenu = False
self._massivenu_mass = None
self._nmassivenu = self._nmasslessnu = None
else:
self._nneutrinos = floor(self._Neff)
# We are going to share Neff between the neutrinos equally. In
# detail this is not correct, but it is a standard assumption
# because properly calculating it is a) complicated b) depends on
# the details of the massive neutrinos (e.g., their weak
# interactions, which could be unusual if one is considering
# sterile neutrinos).
self._neff_per_nu = self._Neff / self._nneutrinos
# Now figure out if we have massive neutrinos to deal with, and if
# so, get the right number of masses. It is worth keeping track of
# massless ones separately (since they are easy to deal with, and a
# common use case is to have only one massive neutrino).
massive = np.nonzero(self._m_nu.value > 0)[0]
self._massivenu = massive.size > 0
self._nmassivenu = len(massive)
self._massivenu_mass = self._m_nu[massive].value if self._massivenu else None
self._nmasslessnu = self._nneutrinos - self._nmassivenu
# Compute Neutrino Omega and total relativistic component for massive
# neutrinos. We also store a list version, since that is more efficient
# to do integrals with (perhaps surprisingly! But small python lists
# are more efficient than small NumPy arrays).
if self._massivenu: # (`_massivenu` set in `m_nu`)
nu_y = self._massivenu_mass / (_kB_evK * self._Tnu0)
self._nu_y = nu_y.value
self._nu_y_list = self._nu_y.tolist()
self._Onu0 = self._Ogamma0 * self.nu_relative_density(0)
else:
            # This case is particularly simple, so do it directly. The 0.2271...
# is 7/8 (4/11)^(4/3) -- the temperature bit ^4 (blackbody energy
# density) times 7/8 for FD vs. BE statistics.
self._Onu0 = 0.22710731766 * self._Neff * self._Ogamma0
self._nu_y = self._nu_y_list = None
# Compute curvature density
self._Ok0 = 1.0 - self._Om0 - self._Ode0 - self._Ogamma0 - self._Onu0
# Subclasses should override this reference if they provide
# more efficient scalar versions of inv_efunc.
self._inv_efunc_scalar = self.inv_efunc
self._inv_efunc_scalar_args = ()
# ---------------------------------------------------------------
# Parameter details
@Ob0.validator
def Ob0(self, param, value):
"""Validate baryon density to None or positive float > matter density."""
if value is None:
return value
value = _validate_non_negative(self, param, value)
if value > self.Om0:
raise ValueError("baryonic density can not be larger than total matter density.")
return value
@m_nu.validator
def m_nu(self, param, value):
"""Validate neutrino masses to right value, units, and shape.
There are no neutrinos if floor(Neff) or Tcmb0 are 0.
The number of neutrinos must match floor(Neff).
Neutrino masses cannot be negative.
"""
# Check if there are any neutrinos
if (nneutrinos := floor(self._Neff)) == 0 or self._Tcmb0.value == 0:
return None # None, regardless of input
# Validate / set units
value = _validate_with_unit(self, param, value)
# Check values and data shapes
if value.shape not in ((), (nneutrinos,)):
raise ValueError("unexpected number of neutrino masses — "
f"expected {nneutrinos}, got {len(value)}.")
elif np.any(value.value < 0):
raise ValueError("invalid (negative) neutrino mass encountered.")
# scalar -> array
if value.isscalar:
value = np.full_like(value, value, shape=nneutrinos)
return value
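    # Illustrative note (not from the source; hypothetical values): with the
    # default Neff=3.04 and a nonzero Tcmb0, floor(Neff) == 3 species are
    # assumed, so a scalar input such as ``m_nu=0.06*u.eV`` is broadcast by
    # the validator above to ``[0.06, 0.06, 0.06] eV``, while an array input
    # must already have exactly 3 entries; ``m_nu`` collapses to None whenever
    # floor(Neff) == 0 or Tcmb0 == 0 (no neutrino species at all).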
# ---------------------------------------------------------------
# properties
@property
def is_flat(self):
"""Return bool; `True` if the cosmology is flat."""
return bool((self._Ok0 == 0.0) and (self.Otot0 == 1.0))
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return self._Om0 + self._Ogamma0 + self._Onu0 + self._Ode0 + self._Ok0
@property
def Odm0(self):
"""Omega dark matter; dark matter density/critical density at z=0."""
return self._Odm0
@property
def Ok0(self):
"""Omega curvature; the effective curvature density/critical density at z=0."""
return self._Ok0
@property
def Tnu0(self):
"""Temperature of the neutrino background as `~astropy.units.Quantity` at z=0."""
return self._Tnu0
@property
def has_massive_nu(self):
"""Does this cosmology have at least one massive neutrino species?"""
if self._Tnu0.value == 0:
return False
return self._massivenu
@property
def h(self):
"""Dimensionless Hubble constant: h = H_0 / 100 [km/sec/Mpc]."""
return self._h
@property
def hubble_time(self):
"""Hubble time as `~astropy.units.Quantity`."""
return self._hubble_time
@property
def hubble_distance(self):
"""Hubble distance as `~astropy.units.Quantity`."""
return self._hubble_distance
@property
def critical_density0(self):
"""Critical density as `~astropy.units.Quantity` at z=0."""
return self._critical_density0
@property
def Ogamma0(self):
"""Omega gamma; the density/critical density of photons at z=0."""
return self._Ogamma0
@property
def Onu0(self):
"""Omega nu; the density/critical density of neutrinos at z=0."""
return self._Onu0
# ---------------------------------------------------------------
@abstractmethod
def w(self, z):
r"""The dark energy equation of state.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
w : ndarray or float
The dark energy equation of state.
`float` if scalar input.
Notes
-----
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\rho(z)`, where :math:`P(z)` is the pressure at
redshift z and :math:`\rho(z)` is the density at redshift z, both in
units where c=1.
This must be overridden by subclasses.
"""
raise NotImplementedError("w(z) is not implemented")
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
Otot : ndarray or float
The total density relative to the critical density at each redshift.
Returns float if input scalar.
"""
return self.Om(z) + self.Ogamma(z) + self.Onu(z) + self.Ode(z) + self.Ok(z)
def Om(self, z):
"""
Return the density parameter for non-relativistic matter
at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Om : ndarray or float
The density of non-relativistic matter relative to the critical
density at each redshift.
Returns `float` if the input is scalar.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest; see `Onu`.
"""
z = aszarr(z)
return self._Om0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
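    # Illustrative check on the scaling above: inv_efunc(0) == 1 by
    # construction, so Om(0) == Om0; the same z=0 consistency holds for the
    # companion density parameters defined below.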
def Ob(self, z):
"""Return the density parameter for baryonic matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ob : ndarray or float
The density of baryonic matter relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
"""
if self._Ob0 is None:
raise ValueError("Baryon density not set for this cosmology")
z = aszarr(z)
return self._Ob0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Odm(self, z):
"""Return the density parameter for dark matter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Odm : ndarray or float
The density of non-relativistic dark matter relative to the
critical density at each redshift.
Returns `float` if the input is scalar.
Raises
------
ValueError
If ``Ob0`` is `None`.
Notes
-----
This does not include neutrinos, even if non-relativistic at the
redshift of interest.
"""
if self._Odm0 is None:
raise ValueError("Baryonic density not set for this cosmology, "
"unclear meaning of dark matter density")
z = aszarr(z)
return self._Odm0 * (z + 1.0) ** 3 * self.inv_efunc(z) ** 2
def Ok(self, z):
"""
Return the equivalent density parameter for curvature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ok : ndarray or float
The equivalent density parameter for curvature at each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ok0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ok0 * (z + 1.0) ** 2 * self.inv_efunc(z) ** 2
def Ode(self, z):
"""Return the density parameter for dark energy at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ode : ndarray or float
            The density of dark energy relative to the critical density at
            each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Ode0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self._Ode0 * self.de_density_scale(z) * self.inv_efunc(z) ** 2
def Ogamma(self, z):
"""Return the density parameter for photons at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Ogamma : ndarray or float
The energy density of photons relative to the critical density at
each redshift.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
return self._Ogamma0 * (z + 1.0) ** 4 * self.inv_efunc(z) ** 2
def Onu(self, z):
r"""Return the density parameter for neutrinos at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Onu : ndarray or float
The energy density of neutrinos relative to the critical density at
each redshift. Note that this includes their kinetic energy (if
they have mass), so it is not equal to the commonly used
:math:`\sum \frac{m_{\nu}}{94 eV}`, which does not include
kinetic energy.
Returns `float` if the input is scalar.
"""
z = aszarr(z)
if self._Onu0 == 0: # Common enough to be worth checking explicitly
return np.zeros(z.shape) if hasattr(z, "shape") else 0.0
return self.Ogamma(z) * self.nu_relative_density(z)
def Tcmb(self, z):
"""Return the CMB temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tcmb : `~astropy.units.Quantity` ['temperature']
The temperature of the CMB in K.
"""
return self._Tcmb0 * (aszarr(z) + 1.0)
def Tnu(self, z):
"""Return the neutrino temperature at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Tnu : `~astropy.units.Quantity` ['temperature']
The temperature of the cosmic neutrino background in K.
"""
return self._Tnu0 * (aszarr(z) + 1.0)
def nu_relative_density(self, z):
r"""Neutrino density function relative to the energy density in photons.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
f : ndarray or float
The neutrino density scaling factor relative to the density in
photons at each redshift.
Only returns `float` if z is scalar.
Notes
-----
The density in neutrinos is given by
.. math::
\rho_{\nu} \left(a\right) = 0.2271 \, N_{eff} \,
f\left(m_{\nu} a / T_{\nu 0} \right) \,
\rho_{\gamma} \left( a \right)
where
.. math::
f \left(y\right) = \frac{120}{7 \pi^4}
\int_0^{\infty} \, dx \frac{x^2 \sqrt{x^2 + y^2}}
{e^x + 1}
assuming that all neutrino species have the same mass.
If they have different masses, a similar term is calculated for each
one. Note that ``f`` has the asymptotic behavior :math:`f(0) = 1`. This
method returns :math:`0.2271 f` using an analytical fitting formula
given in Komatsu et al. 2011, ApJS 192, 18.
"""
# Note that there is also a scalar-z-only cython implementation of
# this in scalar_inv_efuncs.pyx, so if you find a problem in this
# you need to update there too.
# See Komatsu et al. 2011, eq 26 and the surrounding discussion
# for an explanation of what we are doing here.
# However, this is modified to handle multiple neutrino masses
# by computing the above for each mass, then summing
        prefac = 0.22710731766  # 7/8 (4/11)^(4/3) -- see any cosmo book
# The massive and massless contribution must be handled separately
# But check for common cases first
z = aszarr(z)
if not self._massivenu:
return prefac * self._Neff * (np.ones(z.shape) if hasattr(z, "shape") else 1.0)
# These are purely fitting constants -- see the Komatsu paper
p = 1.83
invp = 0.54644808743 # 1.0 / p
k = 0.3173
curr_nu_y = self._nu_y / (1. + np.expand_dims(z, axis=-1))
rel_mass_per = (1.0 + (k * curr_nu_y) ** p) ** invp
rel_mass = rel_mass_per.sum(-1) + self._nmasslessnu
return prefac * self._neff_per_nu * rel_mass
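    # Illustrative note on the fit above (not from the source): in the
    # massless branch the method returns 0.22710731766 * Neff, matching the
    # ``_Onu0`` shortcut in ``__init__``; for massive species the fitting
    # formula interpolates between the relativistic limit (each factor -> 1
    # as y -> 0) and the non-relativistic limit (factor ~ k * y, i.e.
    # proportional to the neutrino mass).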
def _w_integrand(self, ln1pz):
"""Internal convenience function for w(z) integral (eq. 5 of [1]_).
Parameters
----------
ln1pz : `~numbers.Number` or scalar ndarray
Assumes scalar input, since this should only be called inside an
integral.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
return 1.0 + self.w(exp(ln1pz) - 1.0)
def de_density_scale(self, z):
r"""Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : ndarray or float
The scaling of the energy density of dark energy with redshift.
Returns `float` if the input is scalar.
Notes
-----
The scaling factor, I, is defined by :math:`\rho(z) = \rho_0 I`,
and is given by
.. math::
I = \exp \left( 3 \int_{a}^1 \frac{ da^{\prime} }{ a^{\prime} }
\left[ 1 + w\left( a^{\prime} \right) \right] \right)
The actual integral used is rewritten from [1]_ to be in terms of z.
        It will generally be helpful for subclasses to overload this method if
the integral can be done analytically for the particular dark
energy equation of state that they implement.
References
----------
.. [1] Linder, E. (2003). Exploring the Expansion History of the
Universe. Phys. Rev. Lett., 90, 091301.
"""
# This allows for an arbitrary w(z) following eq (5) of
        # Linder 2003, PRL 90, 091301. The code here evaluates
# the integral numerically. However, most popular
# forms of w(z) are designed to make this integral analytic,
# so it is probably a good idea for subclasses to overload this
# method if an analytic form is available.
z = aszarr(z)
if not isinstance(z, (Number, np.generic)): # array/Quantity
ival = np.array([quad(self._w_integrand, 0, log(1 + redshift))[0]
for redshift in z])
return np.exp(3 * ival)
else: # scalar
ival = quad(self._w_integrand, 0, log(z + 1.0))[0]
return exp(3 * ival)
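    # Worked example (not from the source): for a constant equation of state
    # w(z) = w the integral above is analytic, giving
    # I = (1 + z)**(3 * (1 + w)); in particular w = -1 (a cosmological
    # constant) yields I = 1, which is why subclasses such as LambdaCDM
    # override this method with a closed form.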
def efunc(self, z):
"""Function used to calculate H(z), the Hubble parameter.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the Hubble constant.
Returns `float` if the input is scalar.
Defined such that :math:`H(z) = H_0 E(z)`.
Notes
-----
It is not necessary to override this method, but if de_density_scale
takes a particularly simple form, it may be advantageous to.
"""
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return np.sqrt(zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
self._Ode0 * self.de_density_scale(z))
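    # Illustrative consistency note: since Ok0 is defined in ``__init__`` as
    # 1 - Om0 - Ode0 - Ogamma0 - Onu0, the terms under the square root sum to
    # exactly 1 at z = 0, so efunc(0) == 1 and H(0) == H0.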
def inv_efunc(self, z):
"""Inverse of ``efunc``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
E : ndarray or float
The redshift scaling of the inverse Hubble constant.
Returns `float` if the input is scalar.
"""
# Avoid the function overhead by repeating code
Or = self._Ogamma0 + (self._Onu0 if not self._massivenu
else self._Ogamma0 * self.nu_relative_density(z))
zp1 = aszarr(z) + 1.0 # (converts z [unit] -> z [dimensionless])
return (zp1 ** 2 * ((Or * zp1 + self._Om0) * zp1 + self._Ok0) +
self._Ode0 * self.de_density_scale(z))**(-0.5)
def _lookback_time_integrand_scalar(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : float
Input redshift.
Returns
-------
I : float
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
return self._inv_efunc_scalar(z, *self._inv_efunc_scalar_args) / (z + 1.0)
def lookback_time_integrand(self, z):
"""Integrand of the lookback time (equation 30 of [1]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
I : float or array
The integrand for the lookback time.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return self.inv_efunc(z) / (z + 1.0)
def _abs_distance_integrand_scalar(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
args = self._inv_efunc_scalar_args
return (z + 1.0) ** 2 * self._inv_efunc_scalar(z, *args)
def abs_distance_integrand(self, z):
"""Integrand of the absorption distance [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
X : float or array
The integrand for the absorption distance.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
"""
z = aszarr(z)
return (z + 1.0) ** 2 * self.inv_efunc(z)
def H(self, z):
"""Hubble parameter (km/s/Mpc) at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
H : `~astropy.units.Quantity` ['frequency']
Hubble parameter at each input redshift.
"""
return self._H0 * self.efunc(z)
def scale_factor(self, z):
"""Scale factor at redshift ``z``.
The scale factor is defined as :math:`a = 1 / (1 + z)`.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
a : ndarray or float
Scale factor at each input redshift.
Returns `float` if the input is scalar.
"""
return 1.0 / (aszarr(z) + 1.0)
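    # Quick illustration: scale_factor(0) == 1.0 by definition and
    # scale_factor(1) == 0.5, i.e. at z = 1 the Universe was half its present
    # linear size.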
def lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a lookback time.
"""
return self._lookback_time(z)
def _lookback_time(self, z):
"""Lookback time in Gyr to redshift ``z``.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
Lookback time in Gyr to each input redshift.
"""
return self._hubble_time * self._integral_lookback_time(z)
@vectorize_redshift_method
def _integral_lookback_time(self, z, /):
"""Lookback time to redshift ``z``. Value in units of Hubble time.
The lookback time is the difference between the age of the Universe now
and the age at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
Lookback time to each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._lookback_time_integrand_scalar, 0, z)[0]
def lookback_distance(self, z):
"""
The lookback distance is the light travel time distance to a given
redshift. It is simply c * lookback_time. It may be used to calculate
the proper distance between two redshifts, e.g. for the mean free path
to ionizing radiation.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Lookback distance in Mpc
"""
return (self.lookback_time(z) * const.c).to(u.Mpc)
def age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return self._age(z)
def _age(self, z):
"""Age of the universe in Gyr at redshift ``z``.
This internal function exists to be re-defined for optimizations.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : `~astropy.units.Quantity` ['time']
The age of the universe in Gyr at each input redshift.
"""
return self._hubble_time * self._integral_age(z)
@vectorize_redshift_method
def _integral_age(self, z, /):
"""Age of the universe at redshift ``z``. Value in units of Hubble time.
Calculated using explicit integration.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
t : float or ndarray
The age of the universe at each input redshift in Hubble time units.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
See Also
--------
z_at_value : Find the redshift corresponding to an age.
"""
return quad(self._lookback_time_integrand_scalar, z, inf)[0]
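    # Illustrative note: age and lookback time share the same integrand, just
    # with different limits -- lookback time integrates from 0 to z and age
    # from z to infinity -- so lookback_time(z) + age(z) == age(0), the
    # present age of the universe.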
def critical_density(self, z):
"""Critical density in grams per cubic cm at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
rho : `~astropy.units.Quantity`
Critical density in g/cm^3 at each input redshift.
"""
return self._critical_density0 * (self.efunc(z)) ** 2
def comoving_distance(self, z):
"""Comoving line-of-sight distance in Mpc at a given redshift.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc to each input redshift.
"""
return self._comoving_distance_z1z2(0, z)
def _comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._integral_comoving_distance_z1z2(z1, z2)
@vectorize_redshift_method(nin=2)
def _integral_comoving_distance_z1z2_scalar(self, z1, z2, /):
"""
Comoving line-of-sight distance between objects at redshifts ``z1`` and
``z2``. Value in Mpc.
The comoving distance along the line-of-sight between two objects
remains constant with time for objects in the Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : float or ndarray
Comoving distance in Mpc between each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
"""
return quad(self._inv_efunc_scalar, z1, z2, args=self._inv_efunc_scalar_args)[0]
def _integral_comoving_distance_z1z2(self, z1, z2):
"""
Comoving line-of-sight distance in Mpc between objects at redshifts
``z1`` and ``z2``. The comoving distance along the line-of-sight
between two objects remains constant with time for objects in the
Hubble flow.
Parameters
----------
z1, z2 : Quantity-like ['redshift'] or array-like
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving distance in Mpc between each input redshift.
"""
return self._hubble_distance * self._integral_comoving_distance_z1z2_scalar(z1, z2)
def comoving_transverse_distance(self, z):
r"""Comoving transverse distance in Mpc at a given redshift.
This value is the transverse comoving distance at redshift ``z``
corresponding to an angular separation of 1 radian. This is the same as
the comoving distance if :math:`\Omega_k` is zero (as in the current
concordance Lambda-CDM model).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc at each input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
return self._comoving_transverse_distance_z1z2(0, z)
def _comoving_transverse_distance_z1z2(self, z1, z2):
r"""Comoving transverse distance in Mpc between two redshifts.
This value is the transverse comoving distance at redshift ``z2`` as
seen from redshift ``z1`` corresponding to an angular separation of
1 radian. This is the same as the comoving distance if :math:`\Omega_k`
is zero (as in the current concordance Lambda-CDM model).
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Comoving transverse distance in Mpc between input redshift.
Notes
-----
This quantity is also called the 'proper motion distance' in some texts.
"""
Ok0 = self._Ok0
dc = self._comoving_distance_z1z2(z1, z2)
if Ok0 == 0:
return dc
sqrtOk0 = sqrt(abs(Ok0))
dh = self._hubble_distance
if Ok0 > 0:
return dh / sqrtOk0 * np.sinh(sqrtOk0 * dc.value / dh.value)
else:
return dh / sqrtOk0 * sin(sqrtOk0 * dc.value / dh.value)
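    # Illustrative note: both branches above reduce to ``dc`` as Ok0 -> 0,
    # since sinh(x) ~ sin(x) ~ x for small x; the explicit Ok0 == 0 shortcut
    # simply avoids evaluating that limit numerically.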
def angular_diameter_distance(self, z):
"""Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object
at redshift ``z`` ([1]_, [2]_, [3]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Angular diameter distance in Mpc at each input redshift.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 421-424.
.. [2] Weedman, D. (1986). Quasar astronomy, pp 65-67.
.. [3] Peebles, P. (1993). Principles of Physical Cosmology, pp 325-327.
"""
z = aszarr(z)
return self.comoving_transverse_distance(z) / (z + 1.0)
def luminosity_distance(self, z):
"""Luminosity distance in Mpc at redshift ``z``.
This is the distance to use when converting between the bolometric flux
from an object at redshift ``z`` and its bolometric luminosity [1]_.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
Luminosity distance in Mpc at each input redshift.
See Also
--------
z_at_value : Find the redshift corresponding to a luminosity distance.
References
----------
.. [1] Weinberg, 1972, pp 420-424; Weedman, 1986, pp 60-62.
"""
z = aszarr(z)
return (z + 1.0) * self.comoving_transverse_distance(z)
def angular_diameter_distance_z1z2(self, z1, z2):
"""Angular diameter distance between objects at 2 redshifts.
Useful for gravitational lensing, for example computing the angular
diameter distance between a lensed galaxy and the foreground lens.
Parameters
----------
z1, z2 : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshifts. For most practical applications such as
gravitational lensing, ``z2`` should be larger than ``z1``. The
method will work for ``z2 < z1``; however, this will return
negative distances.
Returns
-------
d : `~astropy.units.Quantity`
The angular diameter distance between each input redshift pair.
            Returns scalar if input is scalar, array otherwise.
"""
z1, z2 = aszarr(z1), aszarr(z2)
if np.any(z2 < z1):
warnings.warn(f"Second redshift(s) z2 ({z2}) is less than first "
f"redshift(s) z1 ({z1}).", AstropyUserWarning)
return self._comoving_transverse_distance_z1z2(z1, z2) / (z2 + 1.0)
@vectorize_redshift_method
def absorption_distance(self, z, /):
"""Absorption distance at redshift ``z``.
This is used to calculate the number of objects with some cross section
of absorption and number density intersecting a sightline per unit
redshift path ([1]_, [2]_).
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : float or ndarray
Absorption distance (dimensionless) at each input redshift.
Returns `float` if input scalar, `~numpy.ndarray` otherwise.
References
----------
.. [1] Hogg, D. (1999). Distance measures in cosmology, section 11.
arXiv e-prints, astro-ph/9905116.
.. [2] Bahcall, John N. and Peebles, P.J.E. 1969, ApJ, 156L, 7B
"""
return quad(self._abs_distance_integrand_scalar, 0, z)[0]
def distmod(self, z):
"""Distance modulus at redshift ``z``.
The distance modulus is defined as the (apparent magnitude - absolute
magnitude) for an object at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
distmod : `~astropy.units.Quantity` ['length']
Distance modulus at each input redshift, in magnitudes.
See Also
--------
z_at_value : Find the redshift corresponding to a distance modulus.
"""
# Remember that the luminosity distance is in Mpc
# Abs is necessary because in certain obscure closed cosmologies
# the distance modulus can be negative -- which is okay because
# it enters as the square.
val = 5. * np.log10(abs(self.luminosity_distance(z).value)) + 25.0
return u.Quantity(val, u.mag)
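    # Worked example (not from the source): distmod is 5*log10(d_L / 10 pc);
    # with d_L expressed in Mpc this becomes 5*log10(d_L) + 25, because
    # 5*log10(1 Mpc / 10 pc) = 5*log10(1e5) = 25 -- the origin of the
    # constant 25.0 in the line above.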
def comoving_volume(self, z):
r"""Comoving volume in cubic Mpc at redshift ``z``.
This is the volume of the universe encompassed by redshifts less than
``z``. For the case of :math:`\Omega_k = 0` it is a sphere of radius
`comoving_distance` but it is less intuitive if :math:`\Omega_k` is not.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
V : `~astropy.units.Quantity`
Comoving volume in :math:`Mpc^3` at each input redshift.
"""
Ok0 = self._Ok0
if Ok0 == 0:
return 4.0 / 3.0 * pi * self.comoving_distance(z) ** 3
dh = self._hubble_distance.value # .value for speed
dm = self.comoving_transverse_distance(z).value
term1 = 4.0 * pi * dh ** 3 / (2.0 * Ok0) * u.Mpc ** 3
term2 = dm / dh * np.sqrt(1 + Ok0 * (dm / dh) ** 2)
term3 = sqrt(abs(Ok0)) * dm / dh
if Ok0 > 0:
return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsinh(term3))
else:
return term1 * (term2 - 1. / sqrt(abs(Ok0)) * np.arcsin(term3))
def differential_comoving_volume(self, z):
"""Differential comoving volume at redshift z.
Useful for calculating the effective comoving volume.
For example, allows for integration over a comoving volume that has a
sensitivity function that changes with redshift. The total comoving
volume is given by integrating ``differential_comoving_volume`` to
redshift ``z`` and multiplying by a solid angle.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
dV : `~astropy.units.Quantity`
Differential comoving volume per redshift per steradian at each
input redshift.
"""
dm = self.comoving_transverse_distance(z)
return self._hubble_distance * (dm ** 2.0) / (self.efunc(z) << u.steradian)
def kpc_comoving_per_arcmin(self, z):
"""
Separation in transverse comoving kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in comoving kpc corresponding to an arcmin at each
input redshift.
"""
return self.comoving_transverse_distance(z).to(u.kpc) / _radian_in_arcmin
def kpc_proper_per_arcmin(self, z):
"""
Separation in transverse proper kpc corresponding to an arcminute at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
d : `~astropy.units.Quantity` ['length']
The distance in proper kpc corresponding to an arcmin at each input
redshift.
"""
return self.angular_diameter_distance(z).to(u.kpc) / _radian_in_arcmin
def arcsec_per_kpc_comoving(self, z):
"""
Angular separation in arcsec corresponding to a comoving kpc at
redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a comoving kpc at
each input redshift.
"""
return _radian_in_arcsec / self.comoving_transverse_distance(z).to(u.kpc)
def arcsec_per_kpc_proper(self, z):
"""
Angular separation in arcsec corresponding to a proper kpc at redshift
``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
theta : `~astropy.units.Quantity` ['angle']
The angular separation in arcsec corresponding to a proper kpc at
each input redshift.
"""
return _radian_in_arcsec / self.angular_diameter_distance(z).to(u.kpc)
class FlatFLRWMixin(FlatCosmologyMixin):
"""
Mixin class for flat FLRW cosmologies. Do NOT instantiate directly.
Must precede the base class in the multiple-inheritance so that this
    mixin's ``__init__`` runs before the base class's.
Note that all instances of ``FlatFLRWMixin`` are flat, but not all
    flat cosmologies are instances of ``FlatFLRWMixin``. For example,
    ``LambdaCDM`` **may** be flat (for a specific set of parameter values),
but ``FlatLambdaCDM`` **will** be flat.
"""
Ode0 = FLRW.Ode0.clone(derived=True) # same as FLRW, but now a derived param.
def __init_subclass__(cls):
super().__init_subclass__()
if "Ode0" in cls._init_signature.parameters:
raise TypeError("subclasses of `FlatFLRWMixin` cannot have `Ode0` in `__init__`")
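# Hedged illustration of the guard above: a hypothetical subclass that
# accepts ``Ode0`` in ``__init__`` fails at class-creation time.
#
#     >>> class BadFlat(FlatFLRWMixin, FLRW):           # doctest: +SKIP
#     ...     def __init__(self, H0, Om0, Ode0): ...
#     Traceback (most recent call last):
#     TypeError: subclasses of `FlatFLRWMixin` cannot have `Ode0` in `__init__`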
def __init__(self, *args, **kw):
super().__init__(*args, **kw) # guaranteed not to have `Ode0`
# Do some twiddling after the fact to get flatness
self._Ok0 = 0.0
self._Ode0 = 1.0 - (self._Om0 + self._Ogamma0 + self._Onu0 + self._Ok0)
@lazyproperty
def nonflat(self: _FlatFLRWMixinT) -> _FLRWT:
# Create BoundArgument to handle args versus kwargs.
# This also handles all errors from mismatched arguments
ba = self.__nonflatclass__._init_signature.bind_partial(**self._init_arguments,
Ode0=self.Ode0)
# Make new instance, respecting args vs kwargs
inst = self.__nonflatclass__(*ba.args, **ba.kwargs)
# Because of machine precision, make sure parameters exactly match
for n in inst.__all_parameters__ + ("Ok0", ):
setattr(inst, "_" + n, getattr(self, n))
return inst
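# Hedged sketch: the non-flat counterpart produced above carries over every
# parameter value exactly, including the derived ``Ode0`` (assumes the flat
# `Planck18` realization for concreteness).
#
#     >>> from astropy.cosmology import Planck18
#     >>> nf = Planck18.nonflat  # a LambdaCDM instance
#     >>> nf.Ode0 == Planck18.Ode0
#     True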
def clone(self, *, meta: Mapping | None = None, to_nonflat: bool | None = None, **kwargs: Any):
"""Returns a copy of this object with updated parameters, as specified.
This cannot be used to change the type of the cosmology, except for
changing to the non-flat version of this cosmology.
Parameters
----------
meta : mapping or None (optional, keyword-only)
Metadata that will update the current metadata.
to_nonflat : bool or None (optional, keyword-only)
Whether to change to the non-flat version of this cosmology.
**kwargs
Cosmology parameter (and name) modifications. If any parameter is
changed and a new name is not given, the name will be set to "[old
name] (modified)".
Returns
-------
newcosmo : `~astropy.cosmology.Cosmology` subclass instance
A new instance of this class with updated parameters as specified.
If no arguments are given, then a reference to this object is
returned instead of a copy.
Examples
--------
To make a copy of the ``Planck13`` cosmology with a different matter
density (``Om0``), and a new name:
>>> from astropy.cosmology import Planck13
>>> Planck13.clone(name="Modified Planck 2013", Om0=0.35)
FlatLambdaCDM(name="Modified Planck 2013", H0=67.77 km / (Mpc s),
Om0=0.35, ...
If no name is specified, the new name will note the modification.
>>> Planck13.clone(Om0=0.35).name
'Planck13 (modified)'
The keyword 'to_nonflat' can be used to clone to the non-flat equivalent
of this cosmology.
>>> Planck13.clone(to_nonflat=True)
LambdaCDM(name="Planck13", ...
>>> Planck13.clone(H0=70, to_nonflat=True)
LambdaCDM(name="Planck13 (modified)", H0=70.0 km / (Mpc s), ...
With 'to_nonflat' `True`, ``Ode0`` can be modified.
>>> Planck13.clone(to_nonflat=True, Ode0=1)
LambdaCDM(name="Planck13 (modified)", H0=67.77 km / (Mpc s),
Om0=0.30712, Ode0=1.0, ...
"""
return super().clone(meta=meta, to_nonflat=to_nonflat, **kwargs)
@property
def Otot0(self):
"""Omega total; the total density/critical density at z=0."""
return 1.0
def Otot(self, z):
"""The total density parameter at redshift ``z``.
Parameters
----------
z : Quantity-like ['redshift'], array-like, or `~numbers.Number`
Input redshift.
Returns
-------
Otot : ndarray or float
Returns a float if the input is scalar, otherwise an array of ones.
The value is always 1, since the cosmology is flat.
"""
return 1.0 if isinstance(z, (Number, np.generic)) else np.ones_like(z, subok=False)
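# Hedged sketch of ``Otot`` on a flat cosmology: scalar in, scalar out;
# array-like in, array of ones out (assumes `Planck18`, which is flat).
#
#     >>> from astropy.cosmology import Planck18
#     >>> Planck18.Otot(0.5)
#     1.0
#     >>> Planck18.Otot([0.0, 1.0, 2.0])
#     array([1., 1., 1.])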
|
fd343b36e1c751e93ac147e9b461f2ec2b1107d1ca390acaf3a8a13247a5264e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for :mod:`astropy.cosmology.comparison`"""
import re
import numpy as np
import pytest
from astropy.cosmology import Cosmology, FlatCosmologyMixin, Planck18, cosmology_equal
from astropy.cosmology.connect import convert_registry
from astropy.cosmology.funcs.comparison import (
_cosmology_not_equal, _CosmologyWrapper, _parse_format, _parse_formats)
from astropy.cosmology.io.tests.base import ToFromTestMixinBase
class ComparisonFunctionTestBase(ToFromTestMixinBase):
"""Tests for cosmology comparison functions.
This class inherits from
`astropy.cosmology.io.tests.base.ToFromTestMixinBase` because the cosmology
comparison functions all have a kwarg ``format`` that allows the arguments to
be converted to a |Cosmology| using the ``to_format`` architecture.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must be
inherited in a subclass.
"""
@pytest.fixture(scope="class")
def cosmo(self):
return Planck18
@pytest.fixture(scope="class")
def cosmo_eqvxflat(self, cosmo):
if isinstance(cosmo, FlatCosmologyMixin):
return cosmo.nonflat
pytest.skip("cosmology is not flat, "
"so does not have an equivalent non-flat cosmology.")
@pytest.fixture(scope="class",
params={k for k, _ in convert_registry._readers.keys()} - {"astropy.cosmology"})
def format(self, request):
return request.param
@pytest.fixture(scope="class")
def xfail_cant_autoidentify(self, format):
"""`pytest.fixture` form of method ``can_autoidentify`."""
if not self.can_autodentify(format):
pytest.xfail("cannot autoidentify")
@pytest.fixture(scope="class")
def converted(self, to_format, format):
if format == "astropy.model": # special case Model
return to_format(format, method="comoving_distance")
return to_format(format)
@pytest.fixture(scope="class")
def pert_cosmo(self, cosmo):
# change one parameter
p = cosmo.__parameters__[0]
v = getattr(cosmo, p)
cosmo2 = cosmo.clone(**{p: v * 1.0001 if v != 0 else 0.001 * getattr(v, "unit", 1)})
return cosmo2
@pytest.fixture(scope="class")
def pert_cosmo_eqvxflat(self, pert_cosmo):
if isinstance(pert_cosmo, FlatCosmologyMixin):
return pert_cosmo.nonflat
pytest.skip("cosmology is not flat, "
"so does not have an equivalent non-flat cosmology.")
@pytest.fixture(scope="class")
def pert_converted(self, pert_cosmo, format):
if format == "astropy.model": # special case Model
return pert_cosmo.to_format(format, method="comoving_distance")
return pert_cosmo.to_format(format)
class Test_parse_format(ComparisonFunctionTestBase):
"""Test functions ``_parse_format``."""
@pytest.fixture(scope="class")
def converted(self, to_format, format):
if format == "astropy.model": # special case Model
return to_format(format, method="comoving_distance")
converted = to_format(format)
# Some raise a segfault! TODO: figure out why
if isinstance(converted, _CosmologyWrapper._cantbroadcast):
converted = _CosmologyWrapper(converted)
return converted
# ========================================================================
def test_shortcut(self, cosmo):
"""Test the already-a-cosmology shortcut."""
# A Cosmology
for fmt in {None, True, False, "astropy.cosmology"}:
assert _parse_format(cosmo, fmt) is cosmo, f"{fmt} failed"
# A Cosmology, but improperly formatted
# see ``test_parse_format_error_wrong_format``.
def test_convert(self, converted, format, cosmo):
"""Test converting a cosmology-like object"""
out = _parse_format(converted, format)
assert isinstance(out, Cosmology)
assert out == cosmo
def test_parse_format_error_wrong_format(self, cosmo):
"""
Test ``_parse_format`` errors when given a Cosmology object and format
is not compatible.
"""
with pytest.raises(ValueError, match=re.escape("for parsing a Cosmology, 'format'")):
_parse_format(cosmo, "mapping")
def test_parse_format_error_noncosmology_cant_convert(self):
"""
Test ``_parse_format`` errors when given a non-Cosmology object
and format is `False`.
"""
notacosmo = object()
with pytest.raises(TypeError, match=re.escape("if 'format' is False")):
_parse_format(notacosmo, False)
def test_parse_format_vectorized(self, cosmo, format, converted):
# vectorized on cosmos
out = _parse_format([cosmo, cosmo], None)
assert len(out) == 2
assert np.all(out == cosmo)
# vectorized on formats
out = _parse_format(cosmo, [None, None])
assert len(out) == 2
assert np.all(out == cosmo)
# more complex broadcast
out = _parse_format([[cosmo, converted], [converted, cosmo]],
[[None, format], [format, None]])
assert out.shape == (2, 2)
assert np.all(out == cosmo)
def test_parse_formats_vectorized(self, cosmo):
# vectorized on cosmos
out = _parse_formats(cosmo, cosmo, format=None)
assert len(out) == 2
assert np.all(out == cosmo)
# does NOT vectorize on formats
with pytest.raises(ValueError, match="operands could not be broadcast"):
_parse_formats(cosmo, format=[None, None])
class Test_cosmology_equal(ComparisonFunctionTestBase):
"""Test :func:`astropy.cosmology.comparison.cosmology_equal`"""
def test_cosmology_equal_simple(self, cosmo, pert_cosmo):
# equality
assert cosmology_equal(cosmo, cosmo) is True
# not equal to perturbed cosmology
assert cosmology_equal(cosmo, pert_cosmo) is False
def test_cosmology_equal_equivalent(self, cosmo, cosmo_eqvxflat,
pert_cosmo, pert_cosmo_eqvxflat):
# now need to check equivalent, but not equal, cosmologies.
assert cosmology_equal(cosmo, cosmo_eqvxflat, allow_equivalent=True) is True
assert cosmology_equal(cosmo, cosmo_eqvxflat, allow_equivalent=False) is False
assert cosmology_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=True) is True
assert cosmology_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=False) is False
def test_cosmology_equal_too_many_cosmo(self, cosmo):
with pytest.raises(TypeError, match="cosmology_equal takes 2 positional arguments"):
cosmology_equal(cosmo, cosmo, cosmo)
def test_cosmology_equal_format_error(self, cosmo, converted):
# Not converting `converted`
with pytest.raises(TypeError):
cosmology_equal(cosmo, converted)
with pytest.raises(TypeError):
cosmology_equal(cosmo, converted, format=False)
def test_cosmology_equal_format_auto(self, cosmo, converted, xfail_cant_autoidentify):
# These tests only run if the format can autoidentify.
assert cosmology_equal(cosmo, converted, format=None) is True
assert cosmology_equal(cosmo, converted, format=True) is True
def test_cosmology_equal_format_specify(self, cosmo, format, converted, pert_converted):
# equality
assert cosmology_equal(cosmo, converted, format=[None, format]) is True
assert cosmology_equal(converted, cosmo, format=[format, None]) is True
# non-equality
assert cosmology_equal(cosmo, pert_converted, format=[None, format]) is False
def test_cosmology_equal_equivalent_format_specify(self, cosmo, format, converted, cosmo_eqvxflat):
# specifying the format
assert cosmology_equal(cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=True) is True
assert cosmology_equal(converted, cosmo_eqvxflat, format=[format, None], allow_equivalent=True) is True
class Test_cosmology_not_equal(ComparisonFunctionTestBase):
"""Test :func:`astropy.cosmology.comparison._cosmology_not_equal`"""
def test_cosmology_not_equal_simple(self, cosmo, pert_cosmo):
# equality
assert _cosmology_not_equal(cosmo, cosmo) is False
# not equal to perturbed cosmology
assert _cosmology_not_equal(cosmo, pert_cosmo) is True
def test_cosmology_not_equal_too_many_cosmo(self, cosmo):
with pytest.raises(TypeError, match="_cosmology_not_equal takes 2 positional"):
_cosmology_not_equal(cosmo, cosmo, cosmo)
def test_cosmology_not_equal_equivalent(self, cosmo, cosmo_eqvxflat,
pert_cosmo, pert_cosmo_eqvxflat):
# now need to check equivalent, but not equal, cosmologies.
assert _cosmology_not_equal(cosmo, cosmo_eqvxflat, allow_equivalent=False) is True
assert _cosmology_not_equal(cosmo, cosmo_eqvxflat, allow_equivalent=True) is False
assert _cosmology_not_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=False) is True
assert _cosmology_not_equal(pert_cosmo, pert_cosmo_eqvxflat, allow_equivalent=True) is False
def test_cosmology_not_equal_format_error(self, cosmo, converted):
# Not converting `converted`
with pytest.raises(TypeError):
_cosmology_not_equal(cosmo, converted)
with pytest.raises(TypeError):
_cosmology_not_equal(cosmo, converted, format=False)
def test_cosmology_not_equal_format_auto(self, cosmo, pert_converted, xfail_cant_autoidentify):
assert _cosmology_not_equal(cosmo, pert_converted, format=None) is True
assert _cosmology_not_equal(cosmo, pert_converted, format=True) is True
def test_cosmology_not_equal_format_specify(self, cosmo, format, converted, pert_converted):
# specifying the format
assert _cosmology_not_equal(cosmo, pert_converted, format=[None, format]) is True
assert _cosmology_not_equal(pert_converted, cosmo, format=[format, None]) is True
# equality
assert _cosmology_not_equal(cosmo, converted, format=[None, format]) is False
def test_cosmology_not_equal_equivalent_format_specify(self, cosmo, format, converted, cosmo_eqvxflat):
# specifying the format
assert _cosmology_not_equal(cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=False) is True
assert _cosmology_not_equal(cosmo_eqvxflat, converted, format=[None, format], allow_equivalent=True) is False
assert _cosmology_not_equal(converted, cosmo_eqvxflat, format=[format, None], allow_equivalent=True) is False
|
52936d94e2f777d40fee84b3ba2d6cb5582e0261125c0b46f8ae894631df36aa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import inspect
import sys
from io import StringIO
import numpy as np
import pytest
from astropy import units as u
from astropy.cosmology import core, flrw
from astropy.cosmology.funcs import _z_at_scalar_value, z_at_value
from astropy.cosmology.realizations import (
WMAP1, WMAP3, WMAP5, WMAP7, WMAP9, Planck13, Planck15, Planck18)
from astropy.units import allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.utils.exceptions import AstropyUserWarning
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_scalar():
# These are tests of expected values, and hence have less precision
# than the roundtrip tests below (test_z_at_value_roundtrip);
# here we have to worry about the cosmological calculations
# giving slightly different values on different architectures,
# there we are checking internal consistency on the same architecture
# and so can be more demanding
cosmo = Planck13
assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.19812268, rtol=1e-6)
assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr), 0.795198375, rtol=1e-6)
assert allclose(z_at_value(cosmo.distmod, 46 * u.mag), 1.991389168, rtol=1e-6)
assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc), 1.36857907, rtol=1e-6)
assert allclose(z_at_value(cosmo.luminosity_distance, 26.037193804 * u.Gpc, ztol=1e-10),
3, rtol=1e-9)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=2),
0.681277696, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=2.5),
3.7914908, rtol=1e-6)
# test behavior when the solution is outside z limits (should
# raise a CosmologyError)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
class Test_ZatValue:
def setup_class(self):
self.cosmo = Planck13
def test_broadcast_arguments(self):
"""Test broadcast of arguments."""
# broadcasting main argument
assert allclose(
z_at_value(self.cosmo.age, [2, 7] * u.Gyr),
[3.1981206134773115, 0.7562044333305182], rtol=1e-6)
# basic broadcast of secondary arguments
assert allclose(
z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=[0, 2.5], zmax=[2, 4]),
[0.681277696, 3.7914908], rtol=1e-6)
# more interesting broadcast
assert allclose(
z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=[[0, 2.5]], zmax=[2, 4]),
[[0.681277696, 3.7914908]], rtol=1e-6)
def test_broadcast_bracket(self):
"""`bracket` has special requirements."""
# start with an easy one
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=None),
3.1981206134773115, rtol=1e-6)
# now actually have a bracket
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4]),
3.1981206134773115, rtol=1e-6)
# now a bad length
with pytest.raises(ValueError, match="sequence"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=[0, 4, 4, 5])
# now the wrong dtype : an ndarray, but not an object array
with pytest.raises(TypeError, match="dtype"):
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=np.array([0, 4]))
# now an object array of brackets
bracket = np.array([[0, 4], [0, 3, 4]], dtype=object)
assert allclose(
z_at_value(self.cosmo.age, 2 * u.Gyr, bracket=bracket),
[3.1981206134773115, 3.1981206134773115], rtol=1e-6)
def test_bad_broadcast(self):
"""Shapes mismatch as expected"""
with pytest.raises(ValueError, match="broadcast"):
z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=[0, 2.5, 0.1], zmax=[2, 4])
def test_scalar_input_to_output(self):
"""Test scalar input returns a scalar."""
z = z_at_value(self.cosmo.angular_diameter_distance, 1500 * u.Mpc,
zmin=0, zmax=2)
assert isinstance(z, u.Quantity)
assert z.dtype == np.float64
assert z.shape == ()
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_numpyvectorize():
"""Test that numpy vectorize fails on Quantities.
If this test starts failing then numpy vectorize can be used instead of
the home-brewed vectorization. Please submit a PR making the change.
"""
z_at_value = np.vectorize(_z_at_scalar_value,
excluded=["func", "method", "verbose"])
with pytest.raises(u.UnitConversionError, match="dimensionless quantities"):
z_at_value(Planck15.age, 10*u.Gyr)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
def test_z_at_value_verbose(monkeypatch):
cosmo = Planck13
# Test the "verbose" flag. Since this uses "print", need to mod stdout
mock_stdout = StringIO()
monkeypatch.setattr(sys, 'stdout', mock_stdout)
resx = z_at_value(cosmo.age, 2 * u.Gyr, verbose=True)
assert str(resx.value) in mock_stdout.getvalue() # test "verbose" prints res
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize('method', ['Brent', 'Golden', 'Bounded'])
def test_z_at_value_bracketed(method):
"""
Test 2 solutions for angular diameter distance by not constraining zmin, zmax,
but setting `bracket` on the appropriate side of the turning point z.
Setting zmin / zmax should override `bracket`.
"""
cosmo = Planck13
if method == 'Bounded':
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
z = z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method)
if z > 1.6:
z = 3.7914908
bracket = (0.9, 1.5)
else:
z = 0.6812777
bracket = (1.6, 2.0)
with pytest.warns(UserWarning, match=r"Option 'bracket' is ignored"):
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=bracket), z, rtol=1e-6)
else:
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(0.3, 1.0)), 0.6812777, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(2.0, 4.0)), 3.7914908, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(0.1, 1.5)), 0.6812777, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(0.1, 1.0, 2.0)), 0.6812777, rtol=1e-6)
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(0.9, 1.5)), 0.6812777, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(1.6, 2.0)), 3.7914908, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(1.6, 2.0), zmax=1.6), 0.6812777, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(0.9, 1.5), zmin=1.5), 3.7914908, rtol=1e-6)
with pytest.raises(core.CosmologyError):
with pytest.warns(AstropyUserWarning, match=r'fval is not bracketed'):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, method=method,
bracket=(3.9, 5.0), zmin=4.)
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize('method', ['Brent', 'Golden', 'Bounded'])
def test_z_at_value_unconverged(method):
"""
Test warnings on a non-converged solution when ``maxfun`` is set to too
small an iteration number; only 'Bounded' returns a status value with a
specific message.
"""
cosmo = Planck18
ztol = {'Brent': [1e-4, 1e-4], 'Golden': [1e-3, 1e-2], 'Bounded': [1e-3, 1e-1]}
if method == 'Bounded':
ctx = pytest.warns(AstropyUserWarning, match='Solver returned 1: Maximum number of '
'function calls reached')
else:
ctx = pytest.warns(AstropyUserWarning, match='Solver returned None')
with ctx:
z0 = z_at_value(cosmo.angular_diameter_distance, 1*u.Gpc, zmax=2, maxfun=13, method=method)
with ctx:
z1 = z_at_value(cosmo.angular_diameter_distance, 1*u.Gpc, zmin=2, maxfun=13, method=method)
assert allclose(z0, 0.32442, rtol=ztol[method][0])
assert allclose(z1, 8.18551, rtol=ztol[method][1])
@pytest.mark.skipif(not HAS_SCIPY, reason="test requires scipy")
@pytest.mark.parametrize('cosmo', [Planck13, Planck15, Planck18, WMAP1, WMAP3, WMAP5, WMAP7, WMAP9,
flrw.LambdaCDM, flrw.FlatLambdaCDM, flrw.wpwaCDM, flrw.w0wzCDM,
flrw.wCDM, flrw.FlatwCDM, flrw.w0waCDM, flrw.Flatw0waCDM])
def test_z_at_value_roundtrip(cosmo):
"""
Calculate values from a known redshift, and then check that
z_at_value returns the right answer.
"""
z = 0.5
# Skip Ok, w, de_density_scale because in the Planck cosmologies
# they are redshift independent and hence uninvertible,
# *_distance_z1z2 methods take multiple arguments, so require
# special handling
# clone is not a redshift-dependent method
# nu_relative_density is not redshift-dependent in the WMAP cosmologies
skip = ('Ok', 'Otot',
'angular_diameter_distance_z1z2',
'clone', 'is_equivalent',
'de_density_scale', 'w')
if str(cosmo.name).startswith('WMAP'):
skip += ('nu_relative_density', )
methods = inspect.getmembers(cosmo, predicate=inspect.ismethod)
for name, func in methods:
if name.startswith('_') or name in skip:
continue
fval = func(z)
# we need a bracket here to pick the right solution for
# angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
got = z_at_value(func, fval, bracket=[0.3, 1.0], ztol=1e-12)
assert allclose(got, z, rtol=2e-11), f'Round-trip testing {name} failed'
# Test distance functions between two redshifts; only for realizations
if isinstance(cosmo.name, str):
z2 = 2.0
func_z1z2 = [
lambda z1: cosmo._comoving_distance_z1z2(z1, z2),
lambda z1: cosmo._comoving_transverse_distance_z1z2(z1, z2),
lambda z1: cosmo.angular_diameter_distance_z1z2(z1, z2)
]
for func in func_z1z2:
fval = func(z)
assert allclose(z, z_at_value(func, fval, zmax=1.5, ztol=1e-12), rtol=2e-11)
|
5e3e3f1a4269b5b5969103f88a1cd876ab669cd922f080cf6b665519eb8fada7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import inspect
import random
# THIRD PARTY
import numpy as np
import pytest
# LOCAL
from astropy.cosmology.core import Cosmology
from astropy.cosmology.io.model import _CosmologyModel, from_model, to_model
from astropy.cosmology.tests.helper import get_redshift_methods
from astropy.modeling.models import Gaussian1D
from astropy.utils.compat.optional_deps import HAS_SCIPY
from .base import ToFromDirectTestBase, ToFromTestMixinBase
###############################################################################
class ToFromModelTestMixin(ToFromTestMixinBase):
"""Tests for a Cosmology[To/From]Format with ``format="astropy.model"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
"""
@pytest.fixture(scope="class")
def method_name(self, cosmo):
# get methods, ignoring private and dunder
methods = get_redshift_methods(cosmo, include_private=False, include_z2=True)
# dynamically detect ABC and optional dependencies
for n in tuple(methods):
params = inspect.signature(getattr(cosmo, n)).parameters.keys()
ERROR_SIEVE = (NotImplementedError, ValueError)
# ABC can't introspect for good input
if not HAS_SCIPY:
ERROR_SIEVE = ERROR_SIEVE + (ModuleNotFoundError, )
args = np.arange(len(params)) + 1
try:
getattr(cosmo, n)(*args)
except ERROR_SIEVE:
methods.discard(n)
# TODO! pytest doesn't currently allow multiple yields (`cosmo`) so
# testing with 1 random method
# yield from methods
return random.choice(tuple(methods)) if methods else None
# ===============================================================
def test_fromformat_model_wrong_cls(self, from_format):
"""Test when Model is not the correct class."""
model = Gaussian1D(amplitude=10, mean=14)
with pytest.raises(AttributeError):
from_format(model)
def test_toformat_model_not_method(self, to_format):
"""Test when method is not a method."""
with pytest.raises(AttributeError):
to_format("astropy.model", method="this is definitely not a method.")
def test_toformat_model_not_callable(self, to_format):
"""Test when method is actually an attribute."""
with pytest.raises(ValueError):
to_format("astropy.model", method="name")
def test_toformat_model(self, cosmo, to_format, method_name):
"""Test cosmology -> astropy.model."""
if method_name is None: # no test if no method
return
model = to_format("astropy.model", method=method_name)
assert isinstance(model, _CosmologyModel)
# Parameters
expect = tuple(n for n in cosmo.__parameters__ if getattr(cosmo, n) is not None)
assert model.param_names == expect
# scalar result
args = np.arange(model.n_inputs) + 1
got = model.evaluate(*args)
expected = getattr(cosmo, method_name)(*args)
assert np.all(got == expected)
got = model(*args)
expected = getattr(cosmo, method_name)(*args)
assert np.all(got == expected)
# vector result
if "scalar" not in method_name:
args = (np.ones((model.n_inputs, 3)).T + np.arange(model.n_inputs)).T
got = model.evaluate(*args)
expected = getattr(cosmo, method_name)(*args)
assert np.all(got == expected)
got = model(*args)
expected = getattr(cosmo, method_name)(*args)
assert np.all(got == expected)
def test_tofromformat_model_instance(self, cosmo_cls, cosmo, method_name,
to_format, from_format):
"""Test cosmology -> astropy.model -> cosmology."""
if method_name is None: # no test if no method
return
# ------------
# To Model
# this also serves as a test of all added methods / attributes
# in _CosmologyModel.
model = to_format("astropy.model", method=method_name)
assert isinstance(model, _CosmologyModel)
assert model.cosmology_class is cosmo_cls
assert model.cosmology == cosmo
assert model.method_name == method_name
# ------------
# From Model
# it won't error if everything matches up
got = from_format(model, format="astropy.model")
assert got == cosmo
assert set(cosmo.meta.keys()).issubset(got.meta.keys())
# Note: model adds parameter attributes to the metadata
# also it auto-identifies 'format'
got = from_format(model)
assert got == cosmo
assert set(cosmo.meta.keys()).issubset(got.meta.keys())
def test_fromformat_model_subclass_partial_info(self):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
pass # there's no partial information with a Model
@pytest.mark.parametrize("format", [True, False, None, "astropy.model"])
def test_is_equivalent_to_model(self, cosmo, method_name, to_format, format):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a model.
"""
if method_name is None: # no test if no method
return
obj = to_format("astropy.model", method=method_name)
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is (format is not False)
class TestToFromModel(ToFromDirectTestBase, ToFromModelTestMixin):
"""Directly test ``to/from_model``."""
def setup_class(self):
self.functions = {"to": to_model, "from": from_model}
|
e83c27359a9cff5cef57ff45615c5a4d4a0927ef3318cb1494c4477f1512cb1b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import json
import os
# THIRD PARTY
import pytest
# LOCAL
import astropy.units as u
from astropy.cosmology import units as cu
from astropy.cosmology.connect import readwrite_registry
from astropy.cosmology.core import Cosmology
from .base import ReadWriteDirectTestBase, ReadWriteTestMixinBase
###############################################################################
def read_json(filename, **kwargs):
"""Read JSON.
Parameters
----------
filename : str
**kwargs
Keyword arguments into :meth:`~astropy.cosmology.Cosmology.from_format`
Returns
-------
`~astropy.cosmology.Cosmology` instance
"""
# read
if isinstance(filename, (str, bytes, os.PathLike)):
with open(filename) as file:
data = file.read()
else: # file-like : this also handles errors in dumping
data = filename.read()
mapping = json.loads(data) # parse json mappable to dict
# deserialize Quantity
with u.add_enabled_units(cu.redshift):
for k, v in mapping.items():
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping[k] = u.Quantity(v["value"], v["unit"])
for k, v in mapping.get("meta", {}).items(): # also the metadata
if isinstance(v, dict) and "value" in v and "unit" in v:
mapping["meta"][k] = u.Quantity(v["value"], v["unit"])
return Cosmology.from_format(mapping, format="mapping", **kwargs)
def write_json(cosmology, file, *, overwrite=False):
"""Write Cosmology to JSON.
Parameters
----------
cosmology : `astropy.cosmology.Cosmology` subclass instance
file : path-like or file-like
overwrite : bool (optional, keyword-only)
"""
data = cosmology.to_format("mapping") # start by turning into dict
data["cosmology"] = data["cosmology"].__qualname__
# serialize Quantity
for k, v in data.items():
if isinstance(v, u.Quantity):
data[k] = {"value": v.value.tolist(), "unit": str(v.unit)}
for k, v in data.get("meta", {}).items(): # also serialize the metadata
if isinstance(v, u.Quantity):
data["meta"][k] = {"value": v.value.tolist(), "unit": str(v.unit)}
# check that file exists and whether to overwrite.
if os.path.exists(file) and not overwrite:
raise OSError(f"{file} exists. Set 'overwrite' to write over.")
with open(file, "w") as write_file:
json.dump(data, write_file)
def json_identify(origin, filepath, fileobj, *args, **kwargs):
return filepath is not None and filepath.endswith(".json")
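# Hedged usage sketch for the helpers above: a write/read round trip once
# they are registered with ``readwrite_registry`` (see the fixture below).
# Assumes `Planck18`; the file name is illustrative only.
#
#     >>> from astropy.cosmology import Planck18  # doctest: +SKIP
#     >>> write_json(Planck18, "planck18.json", overwrite=True)
#     >>> read_json("planck18.json") == Planck18
#     True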
###############################################################################
class ReadWriteJSONTestMixin(ReadWriteTestMixinBase):
"""
Tests for a Cosmology[Read/Write] with ``format="json"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmology`` for an example.
"""
@pytest.fixture(scope="class", autouse=True)
def register_and_unregister_json(self):
"""Setup & teardown for JSON read/write tests."""
# Register
readwrite_registry.register_reader("json", Cosmology, read_json, force=True)
readwrite_registry.register_writer("json", Cosmology, write_json, force=True)
readwrite_registry.register_identifier("json", Cosmology, json_identify, force=True)
yield # Run all tests in class
# Unregister
readwrite_registry.unregister_reader("json", Cosmology)
readwrite_registry.unregister_writer("json", Cosmology)
readwrite_registry.unregister_identifier("json", Cosmology)
# ========================================================================
def test_readwrite_json_subclass_partial_info(self, cosmo_cls, cosmo, read,
write, tmp_path, add_cu):
"""
Test writing from an instance and reading from that class.
This works with missing information.
"""
fp = tmp_path / "test_readwrite_json_subclass_partial_info.json"
# test write
cosmo.write(fp, format="json")
# partial information
with open(fp) as file:
L = file.readlines()[0]
L = L[: L.index('"cosmology":')] + L[L.index(", ") + 2 :]  # remove cosmology  # noqa: E203
i = L.index('"Tcmb0":')  # delete Tcmb0
L = L[:i] + L[L.index(", ", L.index(", ", i) + 1) + 2 :]  # second occurrence  # noqa: E203
tempfname = tmp_path / f"{cosmo.name}_temp.json"
with open(tempfname, "w") as file:
file.writelines([L])
# read with the same class that wrote fills in the missing info with
# the default value
got = cosmo_cls.read(tempfname, format="json")
got2 = read(tempfname, format="json", cosmology=cosmo_cls)
got3 = read(tempfname, format="json", cosmology=cosmo_cls.__qualname__)
assert (got == got2) and (got2 == got3) # internal consistency
# not equal, because Tcmb0 is changed, which also changes m_nu
assert got != cosmo
assert got.Tcmb0 == cosmo_cls._init_signature.parameters["Tcmb0"].default
assert got.clone(name=cosmo.name, Tcmb0=cosmo.Tcmb0, m_nu=cosmo.m_nu) == cosmo
# but the metadata is the same
assert got.meta == cosmo.meta
class TestReadWriteJSON(ReadWriteDirectTestBase, ReadWriteJSONTestMixin):
"""
Directly test ``read/write_json``.
These are not public API and are discouraged from use, in favor of
``Cosmology.read/write(..., format="json")``, but should be
tested regardless b/c they are used internally.
"""
def setup_class(self):
self.functions = {"read": read_json, "write": write_json}
|
a574b791639ce91442f83c61f1a4a97c42841512f7ed86256c1159080facacc2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Testing :mod:`astropy.cosmology.flrw.base`."""
##############################################################################
# IMPORTS
# STDLIB
import abc
import copy
# THIRD PARTY
import numpy as np
import pytest
import astropy.constants as const
# LOCAL
import astropy.units as u
from astropy.cosmology import FLRW, FlatLambdaCDM, LambdaCDM, Planck18
from astropy.cosmology.core import _COSMOLOGY_CLASSES
from astropy.cosmology.flrw.base import _a_B_c2, _critdens_const, _H0units_to_invs, quad
from astropy.cosmology.parameter import Parameter
from astropy.cosmology.tests.helper import get_redshift_methods
from astropy.cosmology.tests.test_core import CosmologySubclassTest as CosmologyTest
from astropy.cosmology.tests.test_core import (
FlatCosmologyMixinTest, ParameterTestMixin, invalid_zs, valid_zs)
from astropy.utils.compat.optional_deps import HAS_SCIPY
##############################################################################
# SETUP / TEARDOWN
class SubFLRW(FLRW):
def w(self, z):
return super().w(z)
##############################################################################
# TESTS
##############################################################################
@pytest.mark.skipif(HAS_SCIPY, reason="scipy is installed")
def test_optional_deps_functions():
"""Test stand-in functions when optional dependencies not installed."""
with pytest.raises(ModuleNotFoundError, match="No module named 'scipy.integrate'"):
quad()
##############################################################################
class ParameterH0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` H0 on a Cosmology.
H0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_H0(self, cosmo_cls, cosmo):
"""Test Parameter ``H0``."""
unit = u.Unit("km/(s Mpc)")
# on the class
assert isinstance(cosmo_cls.H0, Parameter)
assert "Hubble constant" in cosmo_cls.H0.__doc__
assert cosmo_cls.H0.unit == unit
# validation
assert cosmo_cls.H0.validate(cosmo, 1) == 1 * unit
assert cosmo_cls.H0.validate(cosmo, 10 * unit) == 10 * unit
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls.H0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.H0 is cosmo._H0
assert cosmo.H0 == self._cls_args["H0"]
assert isinstance(cosmo.H0, u.Quantity) and cosmo.H0.unit == unit
def test_init_H0(self, cosmo_cls, ba):
"""Test initialization for values of ``H0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0 == ba.arguments["H0"]
# also without units
ba.arguments["H0"] = ba.arguments["H0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.H0.value == ba.arguments["H0"]
# fails for non-scalar
ba.arguments["H0"] = u.Quantity([70, 100], u.km / u.s / u.Mpc)
with pytest.raises(ValueError, match="H0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterOm0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Om0 on a Cosmology.
Om0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Om0(self, cosmo_cls, cosmo):
"""Test Parameter ``Om0``."""
# on the class
assert isinstance(cosmo_cls.Om0, Parameter)
assert "Omega matter" in cosmo_cls.Om0.__doc__
# validation
assert cosmo_cls.Om0.validate(cosmo, 1) == 1
assert cosmo_cls.Om0.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Om0 cannot be negative"):
cosmo_cls.Om0.validate(cosmo, -1)
# on the instance
assert cosmo.Om0 is cosmo._Om0
assert cosmo.Om0 == self._cls_args["Om0"]
assert isinstance(cosmo.Om0, float)
def test_init_Om0(self, cosmo_cls, ba):
"""Test initialization for values of ``Om0``."""
# test that it works with units
ba.arguments["Om0"] = ba.arguments["Om0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Om0 == ba.arguments["Om0"]
# also without units
ba.arguments["Om0"] = ba.arguments["Om0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Om0 == ba.arguments["Om0"]
# fails for negative numbers
ba.arguments["Om0"] = -0.27
with pytest.raises(ValueError, match="Om0 cannot be negative."):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterOde0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a Cosmology.
Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls):
"""Test Parameter ``Ode0`` on the class."""
assert isinstance(cosmo_cls.Ode0, Parameter)
assert "Omega dark energy" in cosmo_cls.Ode0.__doc__
def test_Parameter_Ode0_validation(self, cosmo_cls, cosmo):
"""Test Parameter ``Ode0`` validation."""
assert cosmo_cls.Ode0.validate(cosmo, 1.1) == 1.1
assert cosmo_cls.Ode0.validate(cosmo, 10 * u.one) == 10.0
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls.Ode0.validate(cosmo, 10 * u.km)
def test_Ode0(self, cosmo):
"""Test Parameter ``Ode0`` validation."""
# if Ode0 is a parameter, test its value
assert cosmo.Ode0 is cosmo._Ode0
assert cosmo.Ode0 == self._cls_args["Ode0"]
assert isinstance(cosmo.Ode0, float)
def test_init_Ode0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ode0``."""
# test that it works with units
ba.arguments["Ode0"] = ba.arguments["Ode0"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# also without units
ba.arguments["Ode0"] = ba.arguments["Ode0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == ba.arguments["Ode0"]
# Setting param to 0 respects that. Note this test uses ``Ode()``.
ba.arguments["Ode0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert u.allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Ode(1), 0)
# Must be dimensionless or have no units. Errors otherwise.
ba.arguments["Ode0"] = 10 * u.km
with pytest.raises(TypeError, match="only dimensionless"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterTcmb0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Tcmb0 on a Cosmology.
Tcmb0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Tcmb0(self, cosmo_cls, cosmo):
"""Test Parameter ``Tcmb0``."""
# on the class
assert isinstance(cosmo_cls.Tcmb0, Parameter)
assert "Temperature of the CMB" in cosmo_cls.Tcmb0.__doc__
assert cosmo_cls.Tcmb0.unit == u.K
# validation
assert cosmo_cls.Tcmb0.validate(cosmo, 1) == 1 * u.K
assert cosmo_cls.Tcmb0.validate(cosmo, 10 * u.K) == 10 * u.K
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls.Tcmb0.validate(cosmo, [1, 2])
# on the instance
assert cosmo.Tcmb0 is cosmo._Tcmb0
assert cosmo.Tcmb0 == self.cls_kwargs["Tcmb0"]
assert isinstance(cosmo.Tcmb0, u.Quantity) and cosmo.Tcmb0.unit == u.K
def test_init_Tcmb0(self, cosmo_cls, ba):
"""Test initialization for values of ``Tcmb0``."""
# test that it works with units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0 == ba.arguments["Tcmb0"]
# also without units
ba.arguments["Tcmb0"] = ba.arguments["Tcmb0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Tcmb0.value == ba.arguments["Tcmb0"]
# must be a scalar
ba.arguments["Tcmb0"] = u.Quantity([0.0, 2], u.K)
with pytest.raises(ValueError, match="Tcmb0 is a non-scalar quantity"):
cosmo_cls(*ba.args, **ba.kwargs)
class ParameterNeffTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Neff on a Cosmology.
Neff is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Neff(self, cosmo_cls, cosmo):
"""Test Parameter ``Neff``."""
# on the class
assert isinstance(cosmo_cls.Neff, Parameter)
assert "Number of effective neutrino species" in cosmo_cls.Neff.__doc__
# validation
assert cosmo_cls.Neff.validate(cosmo, 1) == 1
assert cosmo_cls.Neff.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Neff cannot be negative"):
cosmo_cls.Neff.validate(cosmo, -1)
# on the instance
assert cosmo.Neff is cosmo._Neff
assert cosmo.Neff == self.cls_kwargs.get("Neff", 3.04)
assert isinstance(cosmo.Neff, float)
def test_init_Neff(self, cosmo_cls, ba):
"""Test initialization for values of ``Neff``."""
# test that it works with units
ba.arguments["Neff"] = ba.arguments["Neff"] << u.one # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
# also without units
ba.arguments["Neff"] = ba.arguments["Neff"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
ba.arguments["Neff"] = -1
with pytest.raises(ValueError):
cosmo_cls(*ba.args, **ba.kwargs)
class Parameterm_nuTestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` m_nu on a Cosmology.
m_nu is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_m_nu(self, cosmo_cls, cosmo):
"""Test Parameter ``m_nu``."""
# on the class
assert isinstance(cosmo_cls.m_nu, Parameter)
assert "Mass of neutrino species" in cosmo_cls.m_nu.__doc__
assert cosmo_cls.m_nu.unit == u.eV
assert cosmo_cls.m_nu.equivalencies == u.mass_energy()
# on the instance
# assert cosmo.m_nu is cosmo._m_nu
assert u.allclose(cosmo.m_nu, [0.0, 0.0, 0.0] * u.eV)
# set differently depending on the other inputs
if cosmo.Tnu0.value == 0:
assert cosmo.m_nu is None
elif not cosmo._massivenu: # only massless
assert u.allclose(cosmo.m_nu, 0 * u.eV)
elif self._nmasslessnu == 0: # only massive
assert cosmo.m_nu == cosmo._massivenu_mass
else: # a mix -- the most complicated case
assert u.allclose(cosmo.m_nu[:self._nmasslessnu], 0 * u.eV)
assert u.allclose(cosmo.m_nu[self._nmasslessnu], cosmo._massivenu_mass)
def test_init_m_nu(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu``.
Note this requires the class to have a property ``has_massive_nu``.
"""
# Test that it works when m_nu has units.
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert np.all(cosmo.m_nu == ba.arguments["m_nu"]) # (& checks len, unit)
assert not cosmo.has_massive_nu
assert cosmo.m_nu.unit == u.eV # explicitly check unit once.
# And it works when m_nu doesn't have units.
ba.arguments["m_nu"] = ba.arguments["m_nu"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert np.all(cosmo.m_nu.value == ba.arguments["m_nu"])
assert not cosmo.has_massive_nu
# A negative m_nu raises an exception.
tba = copy.copy(ba)
tba.arguments["m_nu"] = u.Quantity([-0.3, 0.2, 0.1], u.eV)
with pytest.raises(ValueError, match="invalid"):
cosmo_cls(*tba.args, **tba.kwargs)
def test_init_m_nu_and_Neff(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu`` and ``Neff``.
Note this test requires ``Neff`` as constructor input, and a property
``has_massive_nu``.
"""
# Mismatch with Neff = wrong number of neutrinos
tba = copy.copy(ba)
tba.arguments["Neff"] = 4.05
tba.arguments["m_nu"] = u.Quantity([0.15, 0.2, 0.1], u.eV)
with pytest.raises(ValueError, match="unexpected number of neutrino"):
cosmo_cls(*tba.args, **tba.kwargs)
# No massive neutrinos, but nonzero Neff
tba.arguments["m_nu"] = 0
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert not cosmo.has_massive_nu
assert len(cosmo.m_nu) == 4
assert cosmo.m_nu.unit == u.eV
assert u.allclose(cosmo.m_nu, 0 * u.eV)
# TODO! move this test when ``test_nu_relative_density`` is created
assert u.allclose(cosmo.nu_relative_density(1.0), 0.22710731766 * 4.05, rtol=1e-6)
# All massive neutrinos case, len from Neff
tba.arguments["m_nu"] = 0.1 * u.eV
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.has_massive_nu
assert len(cosmo.m_nu) == 4
assert cosmo.m_nu.unit == u.eV
assert u.allclose(cosmo.m_nu, [0.1, 0.1, 0.1, 0.1] * u.eV)
def test_init_m_nu_override_by_Tcmb0(self, cosmo_cls, ba):
"""Test initialization for values of ``m_nu``.
Note this test requires ``Tcmb0`` as constructor input, and a property
``has_massive_nu``.
"""
# If Neff = 0, m_nu is None.
tba = copy.copy(ba)
tba.arguments["Neff"] = 0
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.m_nu is None
assert not cosmo.has_massive_nu
# If Tcmb0 = 0, m_nu is None
tba = copy.copy(ba)
tba.arguments["Tcmb0"] = 0
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
assert cosmo.m_nu is None
assert not cosmo.has_massive_nu
class ParameterOb0TestMixin(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Ob0 on a Cosmology.
Ob0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Ob0(self, cosmo_cls, cosmo):
"""Test Parameter ``Ob0``."""
# on the class
assert isinstance(cosmo_cls.Ob0, Parameter)
assert "Omega baryon;" in cosmo_cls.Ob0.__doc__
# validation
assert cosmo_cls.Ob0.validate(cosmo, None) is None
assert cosmo_cls.Ob0.validate(cosmo, 0.1) == 0.1
assert cosmo_cls.Ob0.validate(cosmo, 0.1 * u.one) == 0.1
with pytest.raises(ValueError, match="Ob0 cannot be negative"):
cosmo_cls.Ob0.validate(cosmo, -1)
with pytest.raises(ValueError, match="baryonic density can not be larger"):
cosmo_cls.Ob0.validate(cosmo, cosmo.Om0 + 1)
# on the instance
assert cosmo.Ob0 is cosmo._Ob0
assert cosmo.Ob0 == 0.03
def test_init_Ob0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ob0``."""
# test that it works with units
assert isinstance(ba.arguments["Ob0"], u.Quantity)
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == ba.arguments["Ob0"]
# also without units
ba.arguments["Ob0"] = ba.arguments["Ob0"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == ba.arguments["Ob0"]
# Setting param to 0 respects that. Note this test uses ``Ob()``.
ba.arguments["Ob0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ob0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ob(1), 0)
assert u.allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# Negative Ob0 errors
tba = copy.copy(ba)
tba.arguments["Ob0"] = -0.04
with pytest.raises(ValueError, match="Ob0 cannot be negative"):
cosmo_cls(*tba.args, **tba.kwargs)
# Ob0 > Om0 errors
tba.arguments["Ob0"] = tba.arguments["Om0"] + 0.1
with pytest.raises(ValueError, match="baryonic density can not be larger"):
cosmo_cls(*tba.args, **tba.kwargs)
# No baryons specified means baryon-specific methods fail.
tba = copy.copy(ba)
tba.arguments.pop("Ob0", None)
cosmo = cosmo_cls(*tba.args, **tba.kwargs)
with pytest.raises(ValueError):
cosmo.Ob(1)
# also means DM fraction is undefined
with pytest.raises(ValueError):
cosmo.Odm(1)
# The default value is None
assert cosmo_cls._init_signature.parameters["Ob0"].default is None
class TestFLRW(CosmologyTest,
ParameterH0TestMixin, ParameterOm0TestMixin, ParameterOde0TestMixin,
ParameterTcmb0TestMixin, ParameterNeffTestMixin, Parameterm_nuTestMixin,
ParameterOb0TestMixin):
"""Test :class:`astropy.cosmology.FLRW`."""
abstract_w = True
def setup_class(self):
"""
Setup for testing.
FLRW is abstract, so tests are done on a subclass.
"""
# make sure SubCosmology is known
_COSMOLOGY_CLASSES["SubFLRW"] = SubFLRW
self.cls = SubFLRW
self._cls_args = dict(H0=70 * u.km / u.s / u.Mpc, Om0=0.27 * u.one, Ode0=0.73 * u.one)
self.cls_kwargs = dict(Tcmb0=3.0 * u.K, Ob0=0.03 * u.one,
name=self.__class__.__name__, meta={"a": "b"})
def teardown_class(self):
super().teardown_class(self)
_COSMOLOGY_CLASSES.pop("SubFLRW", None)
@pytest.fixture(scope="class")
def nonflatcosmo(self):
"""A non-flat cosmology used in equivalence tests."""
return LambdaCDM(70, 0.4, 0.8)
# ===============================================================
# Method & Attribute Tests
def test_init(self, cosmo_cls):
"""Test initialization."""
super().test_init(cosmo_cls)
# TODO! tests for initializing calculated values, e.g. `h`
# TODO! transfer tests for initializing neutrinos
def test_init_Tcmb0_zeroing(self, cosmo_cls, ba):
"""Test if setting Tcmb0 parameter to 0 influences other parameters.
TODO: consider moving this test to ``FLRWSubclassTest``
"""
ba.arguments["Tcmb0"] = 0.0
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ogamma0 == 0.0
assert cosmo.Onu0 == 0.0
if not self.abstract_w:
assert u.allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
assert u.allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
# ---------------------------------------------------------------
# Properties
def test_Odm0(self, cosmo_cls, cosmo):
"""Test property ``Odm0``."""
# on the class
assert isinstance(cosmo_cls.Odm0, property)
assert cosmo_cls.Odm0.fset is None # immutable
# on the instance
assert cosmo.Odm0 is cosmo._Odm0
# Odm0 can be None, if Ob0 is None. Otherwise DM = matter - baryons.
if cosmo.Ob0 is None:
assert cosmo.Odm0 is None
else:
assert np.allclose(cosmo.Odm0, cosmo.Om0 - cosmo.Ob0)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
# on the class
assert isinstance(cosmo_cls.Ok0, property)
assert cosmo_cls.Ok0.fset is None # immutable
# on the instance
assert cosmo.Ok0 is cosmo._Ok0
assert np.allclose(cosmo.Ok0, 1.0 - (cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0))
def test_is_flat(self, cosmo_cls, cosmo):
"""Test property ``is_flat``."""
# on the class
assert isinstance(cosmo_cls.is_flat, property)
assert cosmo_cls.is_flat.fset is None # immutable
# on the instance
assert isinstance(cosmo.is_flat, bool)
assert cosmo.is_flat is bool((cosmo.Ok0 == 0.0) and (cosmo.Otot0 == 1.0))
def test_Tnu0(self, cosmo_cls, cosmo):
"""Test property ``Tnu0``."""
# on the class
assert isinstance(cosmo_cls.Tnu0, property)
assert cosmo_cls.Tnu0.fset is None # immutable
# on the instance
assert cosmo.Tnu0 is cosmo._Tnu0
assert cosmo.Tnu0.unit == u.K
assert u.allclose(cosmo.Tnu0, 0.7137658555036082 * cosmo.Tcmb0, rtol=1e-5)
def test_has_massive_nu(self, cosmo_cls, cosmo):
"""Test property ``has_massive_nu``."""
# on the class
assert isinstance(cosmo_cls.has_massive_nu, property)
assert cosmo_cls.has_massive_nu.fset is None # immutable
# on the instance
if cosmo.Tnu0 == 0:
assert cosmo.has_massive_nu is False
else:
assert cosmo.has_massive_nu is cosmo._massivenu
def test_h(self, cosmo_cls, cosmo):
"""Test property ``h``."""
# on the class
assert isinstance(cosmo_cls.h, property)
assert cosmo_cls.h.fset is None # immutable
# on the instance
assert cosmo.h is cosmo._h
assert np.allclose(cosmo.h, cosmo.H0.value / 100.0)
def test_hubble_time(self, cosmo_cls, cosmo):
"""Test property ``hubble_time``."""
# on the class
assert isinstance(cosmo_cls.hubble_time, property)
assert cosmo_cls.hubble_time.fset is None # immutable
# on the instance
assert cosmo.hubble_time is cosmo._hubble_time
assert u.allclose(cosmo.hubble_time, (1 / cosmo.H0) << u.Gyr)
def test_hubble_distance(self, cosmo_cls, cosmo):
"""Test property ``hubble_distance``."""
# on the class
assert isinstance(cosmo_cls.hubble_distance, property)
assert cosmo_cls.hubble_distance.fset is None # immutable
# on the instance
assert cosmo.hubble_distance is cosmo._hubble_distance
assert cosmo.hubble_distance == (const.c / cosmo._H0).to(u.Mpc)
def test_critical_density0(self, cosmo_cls, cosmo):
"""Test property ``critical_density0``."""
# on the class
assert isinstance(cosmo_cls.critical_density0, property)
assert cosmo_cls.critical_density0.fset is None # immutable
# on the instance
assert cosmo.critical_density0 is cosmo._critical_density0
assert cosmo.critical_density0.unit == u.g / u.cm ** 3
cd0value = _critdens_const * (cosmo.H0.value * _H0units_to_invs) ** 2
assert cosmo.critical_density0.value == cd0value
def test_Ogamma0(self, cosmo_cls, cosmo):
"""Test property ``Ogamma0``."""
# on the class
assert isinstance(cosmo_cls.Ogamma0, property)
assert cosmo_cls.Ogamma0.fset is None # immutable
# on the instance
assert cosmo.Ogamma0 is cosmo._Ogamma0
# Ogamma \propto Tcmb0^4 / critical_density0
expect = _a_B_c2 * cosmo.Tcmb0.value ** 4 / cosmo.critical_density0.value
assert np.allclose(cosmo.Ogamma0, expect)
# check absolute equality to 0 if Tcmb0 is 0
if cosmo.Tcmb0 == 0:
assert cosmo.Ogamma0 == 0
def test_Onu0(self, cosmo_cls, cosmo):
"""Test property ``Onu0``."""
# on the class
assert isinstance(cosmo_cls.Onu0, property)
assert cosmo_cls.Onu0.fset is None # immutable
# on the instance
assert cosmo.Onu0 is cosmo._Onu0
# neutrino temperature <= photon temperature since the neutrinos
# decouple first.
if cosmo.has_massive_nu: # Tcmb0 > 0 & has massive
# check the expected formula
assert cosmo.Onu0 == cosmo.Ogamma0 * cosmo.nu_relative_density(0)
# a sanity check on the ratio of neutrinos to photons;
# technically it could be 1, but not for any of the tested cases.
assert cosmo.nu_relative_density(0) <= 1
elif cosmo.Tcmb0 == 0:
assert cosmo.Onu0 == 0
else:
# check the expected formula
assert cosmo.Onu0 == 0.22710731766 * cosmo._Neff * cosmo.Ogamma0
# and check compatibility with nu_relative_density
assert np.allclose(cosmo.nu_relative_density(0), 0.22710731766 * cosmo._Neff)
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`."""
assert cosmo.Otot0 == cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ode0 + cosmo.Ok0
# ---------------------------------------------------------------
# Methods
def test_w(self, cosmo):
"""Test abstract :meth:`astropy.cosmology.FLRW.w`."""
with pytest.raises(NotImplementedError, match="not implemented"):
cosmo.w(1)
def test_Otot(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
with pytest.raises(exception):
assert cosmo.Otot(1)
def test_efunc_vs_invefunc(self, cosmo):
"""
Test that efunc and inv_efunc give inverse values.
Here they just fail b/c no ``w(z)`` or no scipy.
"""
exception = NotImplementedError if HAS_SCIPY else ModuleNotFoundError
with pytest.raises(exception):
cosmo.efunc(0.5)
with pytest.raises(exception):
cosmo.inv_efunc(0.5)
# ---------------------------------------------------------------
# from Cosmology
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# don't change any values
kwargs = cosmo._init_arguments.copy()
kwargs.pop("name", None) # make sure not setting name
kwargs.pop("meta", None) # make sure not setting name
c = cosmo.clone(**kwargs)
assert c.__class__ == cosmo.__class__
assert c == cosmo
# change ``H0``
# Note that H0 affects Ode0 because it changes Ogamma0
c = cosmo.clone(H0=100)
assert c.__class__ == cosmo.__class__
assert c.name == cosmo.name + " (modified)"
assert c.H0.value == 100
for n in (set(cosmo.__parameters__) - {"H0"}):
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1))
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
# change multiple things
c = cosmo.clone(name="new name", H0=100, Tcmb0=2.8, meta=dict(zz="tops"))
assert c.__class__ == cosmo.__class__
assert c.name == "new name"
assert c.H0.value == 100
assert c.Tcmb0.value == 2.8
assert c.meta == {**cosmo.meta, **dict(zz="tops")}
for n in (set(cosmo.__parameters__) - {"H0", "Tcmb0"}):
v = getattr(c, n)
if v is None:
assert v is getattr(cosmo, n)
else:
assert u.allclose(v, getattr(cosmo, n), atol=1e-4 * getattr(v, "unit", 1))
assert not u.allclose(c.Ogamma0, cosmo.Ogamma0)
assert not u.allclose(c.Onu0, cosmo.Onu0)
assert not u.allclose(c.Tcmb0.value, cosmo.Tcmb0.value)
def test_is_equivalent(self, cosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to CosmologySubclassTest
# test against a FlatFLRWMixin
# case (3) in FLRW.is_equivalent
if isinstance(cosmo, FlatLambdaCDM):
assert cosmo.is_equivalent(Planck18)
assert Planck18.is_equivalent(cosmo)
else:
assert not cosmo.is_equivalent(Planck18)
assert not Planck18.is_equivalent(cosmo)
class FLRWSubclassTest(TestFLRW):
"""
Test subclasses of :class:`astropy.cosmology.FLRW`.
This is broken away from ``TestFLRW``, because ``FLRW`` is an ABC and
subclasses must override some methods.
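    E.g. to use this class::
        class TestSomeFLRW(FLRWSubclassTest):
            ...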
"""
abstract_w = False
@abc.abstractmethod
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
# ===============================================================
# Method & Attribute Tests
_FLRW_redshift_methods = get_redshift_methods(FLRW, include_private=True, include_z2=False)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize('method', _FLRW_redshift_methods)
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
with pytest.raises(exc):
getattr(cosmo, method)(z)
@pytest.mark.parametrize("z", valid_zs)
@abc.abstractmethod
def test_w(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.w`.
Since ``w`` is abstract, each test class needs to define further tests.
"""
# super().test_w(cosmo, z) # NOT b/c abstract `w(z)`
w = cosmo.w(z)
assert np.shape(w) == np.shape(z) # test same shape
assert u.Quantity(w).unit == u.one # test no units or dimensionless
# -------------------------------------------
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`."""
# super().test_Otot(cosmo) # NOT b/c abstract `w(z)`
assert np.allclose(
cosmo.Otot(z),
cosmo.Om(z) + cosmo.Ogamma(z) + cosmo.Onu(z) + cosmo.Ode(z) + cosmo.Ok(z))
# ---------------------------------------------------------------
def test_efunc_vs_invefunc(self, cosmo):
"""Test that ``efunc`` and ``inv_efunc`` give inverse values.
Note that the test doesn't need scipy because it doesn't need to call
``de_density_scale``.
"""
# super().test_efunc_vs_invefunc(cosmo) # NOT b/c abstract `w(z)`
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
assert np.allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert np.allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# -----------------------------------------------------------------------------
class ParameterFlatOde0TestMixin(ParameterOde0TestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a flat Cosmology.
This will augment or override some tests in ``ParameterOde0TestMixin``.
Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls):
"""Test Parameter ``Ode0`` on the class."""
super().test_Parameter_Ode0(cosmo_cls)
assert cosmo_cls.Ode0.derived in (True, np.True_)
def test_Ode0(self, cosmo):
"""Test no-longer-Parameter ``Ode0``."""
assert cosmo.Ode0 is cosmo._Ode0
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0)
def test_init_Ode0(self, cosmo_cls, ba):
"""Test initialization for values of ``Ode0``."""
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ok0)
# Ode0 is not in the signature
with pytest.raises(TypeError, match="Ode0"):
cosmo_cls(*ba.args, **ba.kwargs, Ode0=1)
class FlatFLRWMixinTest(FlatCosmologyMixinTest, ParameterFlatOde0TestMixin):
"""Tests for :class:`astropy.cosmology.FlatFLRWMixin` subclasses.
    E.g. to use this class::
class TestFlatSomeFLRW(FlatFLRWMixinTest, TestSomeFLRW):
...
"""
def setup_class(self):
"""Setup for testing.
        Set up as for the regular FLRW test class, but remove the dark energy
        component, since flat cosmologies forbid ``Ode0`` as an argument;
        see ``test_init_subclass``.
"""
super().setup_class(self)
self._cls_args.pop("Ode0")
# ===============================================================
# Method & Attribute Tests
# ---------------------------------------------------------------
# class-level
def test_init_subclass(self, cosmo_cls):
"""Test initializing subclass, mostly that can't have Ode0 in init."""
super().test_init_subclass(cosmo_cls)
with pytest.raises(TypeError, match="subclasses of"):
class HASOde0SubClass(cosmo_cls):
def __init__(self, Ode0):
pass
_COSMOLOGY_CLASSES.pop(HASOde0SubClass.__qualname__, None)
# ---------------------------------------------------------------
# instance-level
def test_init(self, cosmo_cls):
super().test_init(cosmo_cls)
cosmo = cosmo_cls(*self.cls_args, **self.cls_kwargs)
assert cosmo._Ok0 == 0.0
assert cosmo._Ode0 == 1.0 - (cosmo._Om0 + cosmo._Ogamma0 + cosmo._Onu0 + cosmo._Ok0)
def test_Ok0(self, cosmo_cls, cosmo):
"""Test property ``Ok0``."""
super().test_Ok0(cosmo_cls, cosmo)
# for flat cosmologies, Ok0 is not *close* to 0, it *is* 0
assert cosmo.Ok0 == 0.0
def test_Otot0(self, cosmo):
"""Test :attr:`astropy.cosmology.FLRW.Otot0`. Should always be 1."""
super().test_Otot0(cosmo)
# for flat cosmologies, Otot0 is not *close* to 1, it *is* 1
assert cosmo.Otot0 == 1.0
@pytest.mark.parametrize("z", valid_zs)
def test_Otot(self, cosmo, z):
"""Test :meth:`astropy.cosmology.FLRW.Otot`. Should always be 1."""
super().test_Otot(cosmo, z)
# for flat cosmologies, Otot is 1, within precision.
assert u.allclose(cosmo.Otot(z), 1.0)
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy is not installed")
@pytest.mark.parametrize("z, exc", invalid_zs)
@pytest.mark.parametrize('method', FLRWSubclassTest._FLRW_redshift_methods - {"Otot"})
def test_redshift_method_bad_input(self, cosmo, method, z, exc):
"""Test all the redshift methods for bad input."""
super().test_redshift_method_bad_input(cosmo, method, z, exc)
# ---------------------------------------------------------------
def test_clone_to_nonflat_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_to_nonflat_change_param(cosmo)
# change Ode0, without non-flat
with pytest.raises(TypeError):
cosmo.clone(Ode0=1)
# change to non-flat
nc = cosmo.clone(to_nonflat=True, Ode0=cosmo.Ode0)
assert isinstance(nc, cosmo.__nonflatclass__)
assert nc == cosmo.nonflat
nc = cosmo.clone(to_nonflat=True, Ode0=1)
assert nc.Ode0 == 1.0
assert nc.name == cosmo.name + " (modified)"
# ---------------------------------------------------------------
def test_is_equivalent(self, cosmo, nonflatcosmo):
"""Test :meth:`astropy.cosmology.FLRW.is_equivalent`."""
super().test_is_equivalent(cosmo) # pass to TestFLRW
# against non-flat Cosmology
assert not cosmo.is_equivalent(nonflatcosmo)
assert not nonflatcosmo.is_equivalent(cosmo)
# non-flat version of class
nonflat_cosmo_cls = cosmo.__nonflatclass__
# keys check in `test_is_equivalent_nonflat_class_different_params`
# non-flat
nonflat = nonflat_cosmo_cls(*self.cls_args, Ode0=0.9, **self.cls_kwargs)
assert not nonflat.is_equivalent(cosmo)
assert not cosmo.is_equivalent(nonflat)
# flat, but not FlatFLRWMixin
flat = nonflat_cosmo_cls(*self.cls_args,
Ode0=1.0 - cosmo.Om0 - cosmo.Ogamma0 - cosmo.Onu0,
**self.cls_kwargs)
flat._Ok0 = 0.0
assert flat.is_equivalent(cosmo)
assert cosmo.is_equivalent(flat)
def test_repr(self, cosmo_cls, cosmo):
"""
Test method ``.__repr__()``. Skip non-flat superclass test.
        e.g. `TestFlatLambdaCDM` -> `FlatFLRWMixinTest`
        vs `TestFlatLambdaCDM` -> `TestLambdaCDM` -> `FlatFLRWMixinTest`
"""
FLRWSubclassTest.test_repr(self, cosmo_cls, cosmo)
# test eliminated Ode0 from parameters
assert "Ode0" not in repr(cosmo)
|
1ec0b43042806ee1a0f18785d1c4d07723239face6a93b86e5488c19227f3285 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
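Examples
--------
A minimal, illustrative sketch of the intended workflow (assuming the
`~astropy.modeling.models.Gaussian1D` model; not a definitive recipe)::
    import numpy as np
    from astropy.modeling import models, fitting
    x = np.linspace(-5., 5., 200)
    y = 3. * np.exp(-0.5 * (x - 0.3) ** 2 / 0.8 ** 2)
    fitter = fitting.TRFLSQFitter()
    fitted = fitter(models.Gaussian1D(amplitude=1., mean=0., stddev=1.), x, y)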
"""
# pylint: disable=invalid-name
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
from importlib.metadata import entry_points
import numpy as np
from astropy.units import Quantity
from astropy.utils.decorators import deprecated
from astropy.utils.exceptions import AstropyUserWarning
from .optimizers import DEFAULT_ACC, DEFAULT_EPS, DEFAULT_MAXITER, SLSQP, Simplex
from .spline import ( # noqa: F401
SplineExactKnotsFitter, SplineInterpolateFitter, SplineSmoothingFitter, SplineSplrepFitter)
from .statistic import leastsquare
from .utils import _combine_equivalency_dict, poly_map_domain
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'TRFLSQFitter',
'DogBoxLSQFitter', 'LMLSQFitter',
'FittingWithOutlierRemoval', 'SLSQPLSQFitter', 'SimplexLSQFitter',
'JointFitter', 'Fitter', 'ModelLinearityError', "ModelsError"]
# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]
class NonFiniteValueError(RuntimeError):
"""
    Error raised when the objective function encounters a non-finite value.
"""
class Covariance():
"""Class for covariance matrix calculated by fitter. """
def __init__(self, cov_matrix, param_names):
self.cov_matrix = cov_matrix
self.param_names = param_names
def pprint(self, max_lines, round_val):
# Print and label lower triangle of covariance matrix
# Print rows for params up to `max_lines`, round floats to 'round_val'
longest_name = max(len(x) for x in self.param_names)
ret_str = 'parameter variances / covariances \n'
fstring = f'{"": <{longest_name}}| {{0}}\n'
for i, row in enumerate(self.cov_matrix):
if i <= max_lines-1:
param = self.param_names[i]
ret_str += (fstring.replace(' '*len(param), param, 1)
.format(repr(np.round(row[:i+1], round_val))[7:-2]))
else:
ret_str += '...'
        return ret_str.rstrip()
def __repr__(self):
        return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, params):
# index covariance matrix by parameter names or indices
if len(params) != 2:
raise ValueError('Covariance must be indexed by two values.')
if all(isinstance(item, str) for item in params):
i1, i2 = self.param_names.index(params[0]), self.param_names.index(params[1])
elif all(isinstance(item, int) for item in params):
i1, i2 = params
else:
raise TypeError('Covariance can be indexed by two parameter names or integer indices.')
        return self.cov_matrix[i1][i2]
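# Illustrative sketch of ``Covariance`` indexing (hypothetical values):
#     cov = Covariance(np.array([[1.0, 0.1], [0.1, 2.0]]), ['amplitude', 'mean'])
#     cov['amplitude', 'mean']  # -> 0.1, equivalent to cov[0, 1]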
class StandardDeviations():
""" Class for fitting uncertainties."""
def __init__(self, cov_matrix, param_names):
self.param_names = param_names
self.stds = self._calc_stds(cov_matrix)
def _calc_stds(self, cov_matrix):
        # Sometimes scipy lstsq returns nonsensical negative values on the
        # diagonal of the cov_x it computes.
stds = [np.sqrt(x) if x > 0 else None for x in np.diag(cov_matrix)]
return stds
def pprint(self, max_lines, round_val):
longest_name = max(len(x) for x in self.param_names)
ret_str = 'standard deviations\n'
for i, std in enumerate(self.stds):
if i <= max_lines-1:
param = self.param_names[i]
ret_str += (f"{param}{' ' * (longest_name - len(param))}| "
f"{np.round(std, round_val)}\n")
else:
ret_str += '...'
        return ret_str.rstrip()
def __repr__(self):
        return self.pprint(max_lines=10, round_val=3)
def __getitem__(self, param):
if isinstance(param, str):
i = self.param_names.index(param)
elif isinstance(param, int):
i = param
else:
raise TypeError('Standard deviation can be indexed by parameter name or integer.')
        return self.stds[i]
class ModelsError(Exception):
"""Base class for model exceptions"""
class ModelLinearityError(ModelsError):
""" Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
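    Intended usage, as on the ``__call__`` methods of the fitters in this
    module (``MyFitter`` is a hypothetical example)::
        class MyFitter(Fitter):
            @fitter_unit_support
            def __call__(self, model, x, y, z=None, **kwargs):
                ...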
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop('equivalencies', None)
data_has_units = (isinstance(x, Quantity) or
isinstance(y, Quantity) or
isinstance(z, Quantity))
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(model.input_units[model.inputs[0]],
equivalencies=input_units_equivalencies[model.inputs[0]])
if isinstance(y, Quantity) and z is not None:
y = y.to(model.input_units[model.inputs[1]],
equivalencies=input_units_equivalencies[model.inputs[1]])
# Create a dictionary mapping the real model inputs and outputs
# names to the data. This remapping of names must be done here, after
# the input data is converted to the correct units.
rename_data = {model.inputs[0]: x}
if z is not None:
rename_data[model.outputs[0]] = z
rename_data[model.inputs[1]] = y
else:
rename_data[model.outputs[0]] = y
rename_data['z'] = None
                # We now strip away the units from the parameters, taking care
                # to first convert any parameters to the units that correspond
                # to the input units (to make sure that initial guesses on the
                # parameters are in the right unit system)
model = model.without_units_for_data(**rename_data)
if isinstance(model, tuple):
rename_data['_left_kwargs'] = model[1]
rename_data['_right_kwargs'] = model[2]
model = model[0]
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(**rename_data)
return model_new
else:
raise NotImplementedError("This model does not support being "
"fit to data with units.")
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
"""
supported_constraints = []
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
other_args may include weights or any other quantities specific for
a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@staticmethod
def _add_fitting_uncertainties(*args):
"""
        When available, calculates and sets the parameter covariance matrix
        (model.cov_matrix) and standard deviations (model.stds).
"""
return None
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
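    Examples
    --------
    An illustrative sketch (assuming `~astropy.modeling.models.Polynomial1D`)::
        import numpy as np
        from astropy.modeling import models, fitting
        x = np.arange(10.)
        y = 2. + 3. * x
        fit = fitting.LinearLSQFitter()
        fitted_line = fit(models.Polynomial1D(degree=1), x, y)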
"""
supported_constraints = ['fixed']
supports_masked_input = True
def __init__(self, calc_uncertainties=False):
self.fit_info = {'residuals': None,
'rank': None,
'singular_values': None,
'params': None
}
self._calc_uncertainties = calc_uncertainties
@staticmethod
def _is_invertible(m):
"""Check if inverse of matrix can be obtained."""
if m.shape[0] != m.shape[1]:
return False
if np.linalg.matrix_rank(m) < m.shape[0]:
return False
return True
def _add_fitting_uncertainties(self, model, a, n_coeff, x, y, z=None,
resids=None):
"""
        Calculate the parameter covariance matrix and standard deviations,
        and set the ``cov_matrix`` and ``stds`` attributes on the model.
"""
x_dot_x_prime = np.dot(a.T, a)
        masked = hasattr(y, 'mask')
# check if invertible. if not, can't calc covariance.
if not self._is_invertible(x_dot_x_prime):
            return model
inv_x_dot_x_prime = np.linalg.inv(x_dot_x_prime)
if z is None: # 1D models
if len(model) == 1: # single model
mask = None
if masked:
mask = y.mask
xx = np.ma.array(x, mask=mask)
RSS = [(1/(xx.count()-n_coeff)) * resids]
if len(model) > 1: # model sets
RSS = [] # collect sum residuals squared for each model in set
for j in range(len(model)):
mask = None
if masked:
mask = y.mask[..., j].flatten()
xx = np.ma.array(x, mask=mask)
eval_y = model(xx, model_set_axis=False)
eval_y = np.rollaxis(eval_y, model.model_set_axis)[j]
RSS.append((1/(xx.count()-n_coeff)) * np.sum((y[..., j] - eval_y)**2))
else: # 2D model
if len(model) == 1:
mask = None
if masked:
warnings.warn('Calculation of fitting uncertainties '
'for 2D models with masked values not '
'currently supported.\n',
AstropyUserWarning)
return
xx, _ = np.ma.array(x, mask=mask), np.ma.array(y, mask=mask)
# len(xx) instead of xx.count. this will break if values are masked?
RSS = [(1/(len(xx)-n_coeff)) * resids]
else:
RSS = []
for j in range(len(model)):
eval_z = model(x, y, model_set_axis=False)
mask = None # need to figure out how to deal w/ masking here.
if model.model_set_axis == 1:
# model_set_axis passed when evaluating only refers to input shapes
# so output must be reshaped for model_set_axis=1.
eval_z = np.rollaxis(eval_z, 1)
eval_z = eval_z[j]
RSS.append([(1/(len(x)-n_coeff)) * np.sum((z[j] - eval_z)**2)])
covs = [inv_x_dot_x_prime * r for r in RSS]
free_param_names = [x for x in model.fixed if (model.fixed[x] is False)
and (model.tied[x] is False)]
if len(covs) == 1:
model.cov_matrix = Covariance(covs[0], model.param_names)
model.stds = StandardDeviations(covs[0], free_param_names)
else:
model.cov_matrix = [Covariance(cov, model.param_names) for cov in covs]
model.stds = [StandardDeviations(cov, free_param_names) for cov in covs]
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, 'domain') and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, 'window') and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, 'x_domain') and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, 'y_domain') and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, 'x_window') and model.x_window is None:
model.x_window = [-1., 1.]
if hasattr(model, 'y_window') and model.y_window is None:
model.y_window = [-1., 1.]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array-like
Input coordinates
z : array-like, optional
Input coordinates.
If the dependent (``y`` or ``z``) coordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
coordinate grids differ.
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError('Model is not linear in parameters, '
'linear fit methods should not be used.')
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
model_copy.sync_constraints = False
_, fitparam_indices, _ = model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(x, y, z, n_models=len(model_copy),
model_set_axis=model_copy.model_set_axis)
has_fixed = any(model_copy.fixed.values())
        # This is also done by _convert_input, but we need it here to allow
# checking the array dimensionality before that gets called:
if weights is not None:
weights = np.asarray(weights, dtype=float)
if has_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [idx for idx in
range(len(model_copy.param_names))
if idx not in fitparam_indices]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray([getattr(model_copy,
model_copy.param_names[idx]).value
for idx in fixparam_indices])
if len(farg) == 2:
x, y = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, weights = _convert_input(
x, weights,
n_models=len(model_copy) if weights.ndim == y.ndim else 1,
model_set_axis=model_copy.model_set_axis
)
# map domain into window
if hasattr(model_copy, 'domain'):
x = self._map_domain_window(model_copy, x)
if has_fixed:
lhs = np.asarray(self._deriv_with_constraints(model_copy,
fitparam_indices,
x=x))
fixderivs = self._deriv_with_constraints(model_copy, fixparam_indices, x=x)
else:
lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
if weights is not None:
# If we have separate weights for each model, apply the same
# conversion as for the data, otherwise check common weights
# as if for a single model:
_, _, weights = _convert_input(
x, y, weights,
n_models=len(model_copy) if weights.ndim == z.ndim else 1,
model_set_axis=model_copy.model_set_axis
)
# map domain into window
if hasattr(model_copy, 'x_domain'):
x, y = self._map_domain_window(model_copy, x, y)
if has_fixed:
lhs = np.asarray(self._deriv_with_constraints(model_copy,
fitparam_indices, x=x, y=y))
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices,
x=x, y=y)
else:
lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
if weights is not None:
# Same for weights
if weights.ndim > 2:
# Separate 2D weights for each model:
weights = np.rollaxis(weights, model_axis, weights.ndim)
weights = weights.reshape(-1, weights.shape[-1])
elif weights.ndim == z.ndim:
# Separate, flattened weights for each model:
weights = weights.T if model_axis == 0 else weights
else:
# Common weights for all the models:
weights = weights.flatten()
else:
rhs = z.flatten()
if weights is not None:
weights = weights.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if np.asanyarray(lhs).ndim > 2:
raise ValueError(f"{type(model_copy).__name__} gives unsupported >2D "
"derivative matrix for this x/y")
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if has_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input coordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
if rhs.ndim == 2:
if weights.shape == rhs.shape:
# separate weights for multiple models case: broadcast
# lhs to have more dimension (for each model)
lhs = lhs[..., np.newaxis] * weights[:, np.newaxis]
rhs = rhs * weights
else:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original
# dependent variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if weights is not None and not masked and np.any(np.isnan(lhs)):
raise ValueError('Found NaNs in the coefficient matrix, which '
'should not happen and would crash the lapack '
'routine. Maybe check that weights are not null.')
        a = None  # needed for calculating the covariance
if ((masked and len(model_copy) > 1) or
(weights is not None and weights.ndim > 1)):
# Separate masks or weights for multiple models case: Numpy's
# lstsq supports multiple dimensions only for rhs, so we need to
# loop manually on the models. This may be fixed in the future
# with https://github.com/numpy/numpy/pull/15777.
# Initialize empty array of coefficients and populate it one model
# at a time. The shape matches the number of coefficients from the
# Vandermonde matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[1:2] + rhs.shape[-1:], dtype=rhs.dtype)
# Arrange the lhs as a stack of 2D matrices that we can iterate
# over to get the correctly-orientated lhs for each model:
if lhs.ndim > 2:
lhs_stack = np.rollaxis(lhs, -1, 0)
else:
lhs_stack = np.broadcast_to(lhs, rhs.shape[-1:] + lhs.shape)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_lhs, model_rhs, model_lacoef in zip(lhs_stack, rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask if masked else slice(None)
model_lhs = model_lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
a = model_lhs
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs,
model_rhs, rcond)
model_lacoef[:] = t_coef.T
else:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
a = lhs[good]
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good],
rhs[good], rcond)
self.fit_info['residuals'] = resids
self.fit_info['rank'] = rank
self.fit_info['singular_values'] = sval
lacoef /= scl[:, np.newaxis] if scl.ndim < rhs.ndim else scl
self.fit_info['params'] = lacoef
fitter_to_model_params(model_copy, lacoef.flatten())
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if (hasattr(model_copy, '_order') and
len(model_copy) == 1 and
not has_fixed and
rank != model_copy._order):
warnings.warn("The fit may be poorly conditioned\n",
AstropyUserWarning)
# calculate and set covariance matrix and standard devs. on model
if self._calc_uncertainties:
if len(y) > len(lacoef):
self._add_fitting_uncertainties(model_copy, a*scl,
len(lacoef), x, y, z, resids)
model_copy.sync_constraints = True
return model_copy
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a maximum number of iterations ``niter``, outliers are
removed and fitting is performed for each iteration, until no new outliers
are found or ``niter`` is reached.
Parameters
----------
fitter : `Fitter`
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : callable
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int, optional
Maximum number of iterations.
outlier_kwargs : dict, optional
Keyword arguments for outlier_func.
Attributes
----------
fit_info : dict
The ``fit_info`` (if any) from the last iteration of the wrapped
``fitter`` during the most recent fit. An entry is also added with the
keyword ``niter`` that records the actual number of fitting iterations
performed (as opposed to the user-specified maximum).
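    Examples
    --------
    An illustrative sketch (assuming `~astropy.stats.sigma_clip` as the
    outlier function)::
        import numpy as np
        from astropy.stats import sigma_clip
        from astropy.modeling import models, fitting
        x = np.linspace(0., 10., 50)
        y = 1. + 2. * x
        y[10] += 100.  # inject an outlier
        fit = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(),
                                                sigma_clip, niter=3, sigma=3.0)
        fitted_line, mask = fit(models.Polynomial1D(degree=1), x, y)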
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
self.fit_info = {'niter': None}
def __str__(self):
return (f"Fitter: {self.fitter.__class__.__name__}\n"
f"Outlier function: {self.outlier_func.__name__}\n"
f"Num. of iterations: {self.niter}\n"
f"Outlier func. args.: {self.outlier_kwargs}")
def __repr__(self):
return (f"{self.__class__.__name__}(fitter: {self.fitter.__class__.__name__}, "
f"outlier_func: {self.outlier_func.__name__},"
f" niter: {self.niter}, outlier_kwargs: {self.outlier_kwargs})")
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array-like
Input coordinates.
y : array-like
Data measurements (1D case) or input coordinates (2D case).
z : array-like, optional
Data measurements (2D case).
weights : array-like, optional
Weights to be passed to the fitter.
kwargs : dict, optional
Keyword arguments to be passed to the fitter.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behavior and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y coordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if (not hasattr(self.fitter, 'supports_masked_input') or
self.fitter.supports_masked_input is not True):
raise ValueError(f"{type(self.fitter).__name__} cannot fit model sets with masked "
"values")
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input coordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = (x, )
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if 'axis' not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs['axis'] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
last_n_masked = filtered_data.mask.sum()
n = 0 # (allow recording no. of iterations when 0)
# Perform the iterative fitting:
for n in range(1, self.niter + 1):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop('axis', None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask,
model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(data_T, mask_T,
model_vals_T):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn('outlier_func did not accept axis argument; '
'reverted to slow loop over models.',
AstropyUserWarning)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights, **kwargs)
else:
fitted_model = self.fitter(fitted_model, *coords,
filtered_data,
weights=filtered_weights, **kwargs)
# Stop iteration if the masked points are no longer changing (with
# cumulative rejection we only need to compare how many there are):
this_n_masked = filtered_data.mask.sum() # (minimal overhead)
if this_n_masked == last_n_masked:
break
last_n_masked = this_n_masked
self.fit_info = {'niter': n}
self.fit_info.update(getattr(self.fitter, 'fit_info', {}))
return fitted_model, filtered_data.mask
class _NonLinearLSQFitter(metaclass=_FitterMeta):
"""
Base class for Non-Linear least-squares fitters
Parameters
----------
    calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
        Default: False
    use_min_max_bounds : bool
        If the parameter bounds set on the model should be enforced for each
        parameter during fitting, via a simple min/max clipping condition.
        Default: True
"""
supported_constraints = ['fixed', 'tied', 'bounds']
"""
The constraint types supported by this fitter type.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=True):
self.fit_info = None
self._calc_uncertainties = calc_uncertainties
self._use_min_max_bounds = use_min_max_bounds
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
fitter_to_model_params(model, fps, self._use_min_max_bounds)
meas = args[-1]
if weights is None:
value = np.ravel(model(*args[2: -1]) - meas)
else:
value = np.ravel(weights * (model(*args[2: -1]) - meas))
if not np.all(np.isfinite(value)):
raise NonFiniteValueError("Objective function has encountered a non-finite value, "
"this will cause the fit to fail!\n"
"Please remove non-finite values from your input data before fitting to avoid this error.")
return value
@staticmethod
def _add_fitting_uncertainties(model, cov_matrix):
"""
Set ``cov_matrix`` and ``stds`` attributes on model with parameter
covariance matrix returned by ``optimize.leastsq``.
"""
free_param_names = [x for x in model.fixed if (model.fixed[x] is False)
and (model.tied[x] is False)]
model.cov_matrix = Covariance(cov_matrix, free_param_names)
model.stds = StandardDeviations(cov_matrix, free_param_names)
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)])
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars],
True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
try:
return np.array([np.ravel(_) for _ in np.array(weights) *
np.array(model.fit_deriv(x, *params))])
except ValueError:
return np.array([np.ravel(_) for _ in np.array(weights) *
np.moveaxis(
np.array(model.fit_deriv(x, *params)),
-1, 0)]).transpose()
else:
if not model.col_fit_deriv:
return [np.ravel(_) for _ in
(np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
return [np.ravel(_) for _ in weights * np.array(model.fit_deriv(x, y, *params))]
def _compute_param_cov(self, model, y, init_values, cov_x, fitparams, farg):
# now try to compute the true covariance matrix
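        # The ``cov_x`` from the optimizer is a relative covariance; scaling
        # it by the reduced chi-squared estimate (sum of squared residuals
        # divided by the degrees of freedom) gives the absolute parameter
        # covariance.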
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
dof = len(y) - len(init_values)
self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
else:
self.fit_info['param_cov'] = None
if self._calc_uncertainties is True:
if self.fit_info['param_cov'] is not None:
self._add_fitting_uncertainties(model,
self.fit_info['param_cov'])
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
return None, None, None
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None,
maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS, estimate_jacobian=False):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self.supported_constraints)
model_copy.sync_constraints = False
farg = (model_copy, weights, ) + _convert_input(x, y, z)
init_values, fitparams, cov_x = self._run_fitter(model_copy, farg,
maxiter, acc, epsilon, estimate_jacobian)
self._compute_param_cov(model_copy, y, init_values, cov_x, fitparams, farg)
        model_copy.sync_constraints = True
return model_copy
class LevMarLSQFitter(_NonLinearLSQFitter):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
Default: False
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
Additionally, one additional element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
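    Examples
    --------
    An illustrative sketch (assuming `~astropy.modeling.models.Gaussian1D`)::
        import numpy as np
        from astropy.modeling import models, fitting
        x = np.linspace(-5., 5., 200)
        y = 3. * np.exp(-0.5 * (x - 0.3) ** 2 / 0.8 ** 2)
        fitter = fitting.LevMarLSQFitter(calc_uncertainties=True)
        fitted = fitter(models.Gaussian1D(), x, y)
        cov = fitter.fit_info['param_cov']  # parameter covariance matrix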
"""
def __init__(self, calc_uncertainties=False):
super().__init__(calc_uncertainties)
self.fit_info = {'nfev': None,
'fvec': None,
'fjac': None,
'ipvt': None,
'qtf': None,
'message': None,
'ierr': None,
'param_jac': None,
'param_cov': None}
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
if model.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _, _ = model_to_fit_params(model)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function, init_values, args=farg, Dfun=dfunc,
col_deriv=model.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
xtol=acc, full_output=True)
fitter_to_model_params(model, fitparams)
self.fit_info.update(dinfo)
self.fit_info['cov_x'] = cov_x
self.fit_info['message'] = mess
self.fit_info['ierr'] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
return init_values, fitparams, cov_x
class _NLLSQFitter(_NonLinearLSQFitter):
"""
Wrapper class for `scipy.optimize.least_squares` method, which provides:
- Trust Region Reflective
- dogbox
        - Levenberg-Marquardt
algorithms using the least squares statistic.
Parameters
----------
    method : str
        'trf' : Trust Region Reflective algorithm, particularly suitable
            for large sparse problems with bounds. Generally a robust method.
        'dogbox' : dogleg algorithm with rectangular trust regions; the
            typical use case is small problems with bounds. Not recommended
            for problems with a rank-deficient Jacobian.
        'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
            Doesn't handle bounds or sparse Jacobians. Usually the most
            efficient method for small unconstrained problems.
    calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
        Default: False
    use_min_max_bounds : bool
        If the parameter bounds set on the model should be enforced for each
        parameter during fitting, via a simple min/max clipping condition. A
        True setting replicates how LevMarLSQFitter enforces bounds.
        Default: False
    Attributes
    ----------
    fit_info : `scipy.optimize.OptimizeResult`
        The `scipy.optimize.OptimizeResult` from the most recent fit,
        containing all of the fit information.
"""
def __init__(self, method, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__(calc_uncertainties, use_min_max_bounds)
self._method = method
def _run_fitter(self, model, farg, maxiter, acc, epsilon, estimate_jacobian):
from scipy import optimize
from scipy.linalg import svd
if model.fit_deriv is None or estimate_jacobian:
dfunc = '2-point'
else:
def _dfunc(params, model, weights, x, y, z=None):
if model.col_fit_deriv:
return np.transpose(self._wrap_deriv(params, model, weights, x, y, z))
else:
return self._wrap_deriv(params, model, weights, x, y, z)
dfunc = _dfunc
init_values, _, bounds = model_to_fit_params(model)
# Note, if use_min_max_bounds is True we are defaulting to enforcing bounds
# using the old method employed by LevMarLSQFitter, this is different
# from the method that optimize.least_squares employs to enforce bounds
# thus we override the bounds being passed to optimize.least_squares so
# that it will not enforce any bounding.
if self._use_min_max_bounds:
bounds = (-np.inf, np.inf)
self.fit_info = optimize.least_squares(
self.objective_function, init_values, args=farg, jac=dfunc,
max_nfev=maxiter, diff_step=np.sqrt(epsilon), xtol=acc,
method=self._method, bounds=bounds
)
# Adapted from ~scipy.optimize.minpack, see:
# https://github.com/scipy/scipy/blob/47bb6febaa10658c72962b9615d5d5aa2513fa3a/scipy/optimize/minpack.py#L795-L816
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(self.fit_info.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(self.fit_info.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
cov_x = np.dot(VT.T / s**2, VT)
fitter_to_model_params(model, self.fit_info.x, False)
if not self.fit_info.success:
warnings.warn("The fit may be unsuccessful; check: \n"
f" {self.fit_info.message}",
AstropyUserWarning)
return init_values, self.fit_info.x, cov_x
class TRFLSQFitter(_NLLSQFitter):
"""
Trust Region Reflective algorithm and least squares statistic.
Parameters
----------
    calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
        Default: False
    use_min_max_bounds : bool
        If the parameter bounds set on the model should be enforced for each
        parameter during fitting, via a simple min/max clipping condition. A
        True setting replicates how LevMarLSQFitter enforces bounds.
        Default: False
    Attributes
    ----------
    fit_info : `scipy.optimize.OptimizeResult`
        The `scipy.optimize.OptimizeResult` from the most recent fit,
        containing all of the fit information.
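    Examples
    --------
    An illustrative sketch; unlike `LevMarLSQFitter`, TRF natively supports
    parameter bounds (assuming `~astropy.modeling.models.Gaussian1D`)::
        import numpy as np
        from astropy.modeling import models, fitting
        x = np.linspace(-5., 5., 200)
        y = 3. * np.exp(-0.5 * (x - 0.3) ** 2 / 0.8 ** 2)
        g = models.Gaussian1D(amplitude=1., mean=0., stddev=1.,
                              bounds={'mean': (-1., 1.)})
        fitted = fitting.TRFLSQFitter()(g, x, y)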
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__('trf', calc_uncertainties, use_min_max_bounds)
class DogBoxLSQFitter(_NLLSQFitter):
"""
DogBox algorithm and least squares statistic.
Parameters
----------
    calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
        Default: False
    use_min_max_bounds : bool
        If the parameter bounds set on the model should be enforced for each
        parameter during fitting, via a simple min/max clipping condition. A
        True setting replicates how LevMarLSQFitter enforces bounds.
        Default: False
    Attributes
    ----------
    fit_info : `scipy.optimize.OptimizeResult`
        The `scipy.optimize.OptimizeResult` from the most recent fit,
        containing all of the fit information.
"""
def __init__(self, calc_uncertainties=False, use_min_max_bounds=False):
super().__init__('dogbox', calc_uncertainties, use_min_max_bounds)
class LMLSQFitter(_NLLSQFitter):
"""
`scipy.optimize.least_squares` Levenberg-Marquardt algorithm and least squares statistic.
Parameters
----------
    calc_uncertainties : bool
        If the covariance matrix should be computed and set in the fit_info.
        Default: False
    Attributes
    ----------
    fit_info : `scipy.optimize.OptimizeResult`
        The `scipy.optimize.OptimizeResult` from the most recent fit,
        containing all of the fit information.
"""
def __init__(self, calc_uncertainties=False):
super().__init__('lm', calc_uncertainties, True)
class SLSQPLSQFitter(Fitter):
"""
Sequential Least Squares Programming (SLSQP) optimization algorithm and
least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
Notes
-----
See also the `~astropy.modeling.optimizers.SLSQP` optimizer.
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
`ModelLinearityError`
A linear model is passed to a nonlinear fitter
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional, keyword-only
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model,
self._opt_method.supported_constraints)
model_copy.sync_constraints = False
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
init_values, _, _ = model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs)
fitter_to_model_params(model_copy, fitparams)
model_copy.sync_constraints = True
return model_copy
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self.model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = model.parameters.tolist()
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]['slice']
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
            the fitted parameters - result of one iteration of the
            fitting algorithm
        args : tuple
            tuple of measured and input coordinates;
            args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[:model.n_inputs + 1]
del lstsqargs[:model.n_inputs + 1]
            # separate out this model's individually fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
        if len(self.models) <= 1:
            raise TypeError(f"Expected >1 models, {len(self.models)} was given")
        if len(self.jointparams.keys()) < 2:
            raise TypeError("At least two parameters are expected, "
                            f"{len(self.jointparams.keys())} were given")
        for j in self.jointparams.keys():
            if len(self.jointparams[j]) != len(self.initvals):
                raise TypeError(f"{len(self.jointparams[j])} parameter(s) "
                                f"provided but {len(self.initvals)} expected")
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
        # each model contributes n_inputs coordinate arrays plus one array of
        # measured values
        n_expected = sum(self.modeldims) + len(self.models)
        if len(args) != n_expected:
            raise ValueError(f"Expected {n_expected} coordinates in args "
                             f"but {len(args)} provided")
self.fitparams[:], _ = optimize.leastsq(self.objective_function,
self.fitparams, args=args)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
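# Sketch (illustrative; the data values below are assumptions): fitting two
# Gaussians that share their amplitude, as described in the JointFitter
# docstring. After the call, both models are updated in place and end up with
# the same fitted amplitude.
def _example_joint_fit():
    import numpy as np
    from astropy.modeling import models
    g1 = models.Gaussian1D(10, mean=14.9, stddev=0.3)
    g2 = models.Gaussian1D(10, mean=13.0, stddev=0.4)
    # Keep 'amplitude' common to both models, starting it at 9.8.
    jf = JointFitter([g1, g2],
                     {g1: ['amplitude'], g2: ['amplitude']}, [9.8])
    x = np.arange(10, 20, 0.1)
    jf(x, g1(x), x, g2(x))
    return g1.amplitude.value, g2.amplitude.value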
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
data_ndim, data_shape = z.ndim, z.shape
else:
data_ndim, data_shape = y.ndim, y.shape
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1 or data_ndim > x.ndim:
if (model_set_axis or 0) >= data_ndim:
raise ValueError("model_set_axis out of range")
if data_shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y or z array) is expected to equal "
"the number of parameter sets"
)
if z is None:
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
data_shape = y.shape[:-1]
else:
# Shape of z excluding model_set_axis
data_shape = (z.shape[:model_set_axis] +
z.shape[model_set_axis + 1:])
if z is None:
if data_shape != x.shape:
raise ValueError("x and y should have the same shape")
farg = (x, y)
else:
if not (x.shape == y.shape == data_shape):
raise ValueError("x, y and z should have the same shape")
farg = (x, y, z)
return farg
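# Shape-convention sketch (illustrative; uses the private helper above): for
# a 1-D model set the dependent variable carries the model-set axis, so two
# models over ten x-values means y has shape (2, 10) on input; _convert_input
# then rolls the set axis to the end, giving shape (10, 2), to match the
# column convention of numpy.linalg.lstsq.
def _example_model_set_shapes():
    import numpy as np
    x = np.arange(10)
    y = np.stack([2 * x + 1, x - 2.0])  # shape (2, 10): two models
    farg = _convert_input(x, y, n_models=2, model_set_axis=0)
    return farg[1].shape  # (10, 2)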
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def fitter_to_model_params(model, fps, use_min_max_bounds=True):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
Parameters
----------
model :
The model being fit
fps :
The fit parameter values to be assigned
    use_min_max_bounds : bool
        If the set parameter bounds for the model will be enforced on each
        parameter with bounds, via a simple min/max condition.
        Default: True
"""
_, fit_param_indices, _ = model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
parameters = model.parameters
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]['slice']
shape = param_metrics[name]['shape']
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset:offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None) and use_min_max_bounds:
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
parameters[slice_] = values
offset += size
# Update model parameters before calling ``tied`` constraints.
model._array_to_parameters()
    # This has to be done in a separate loop due to how tied parameters are
    # currently evaluated (the fitted parameters need to actually be *set* on
    # the model first, for use in evaluating the "tied" expression--it might
    # be better to change this at some point).
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]['slice']
# To handle multiple tied constraints, model parameters
# need to be updated after each iteration.
parameters[slice_] = value
model._array_to_parameters()
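# Clamping sketch (illustrative; the parameter values are assumptions): with
# use_min_max_bounds=True, fitted values that stray outside a parameter's
# declared bounds are pushed back inside via np.fmax/np.fmin, replicating the
# historical LevMarLSQFitter behaviour.
def _example_bound_clamping():
    import numpy as np
    from astropy.modeling import models
    g = models.Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0,
                          bounds={'stddev': (0.5, 2.0)})
    # A step proposing stddev=5.0 gets clamped to the upper bound of 2.0.
    fitter_to_model_params(g, np.array([1.0, 0.0, 5.0]))
    return g.stddev.value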
@deprecated('5.1', 'private method: _fitter_to_model_params has been made public now')
def _fitter_to_model_params(model, fps):
return fitter_to_model_params(model, fps)
def model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
model_params = model.parameters
model_bounds = list(model.bounds.values())
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model_params)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]['slice']
del params[slice_]
del model_bounds[slice_]
del fitparam_indices[idx]
model_params = np.array(params)
for idx, bound in enumerate(model_bounds):
if bound[0] is None:
lower = -np.inf
else:
lower = bound[0]
if bound[1] is None:
upper = np.inf
else:
upper = bound[1]
model_bounds[idx] = (lower, upper)
model_bounds = tuple(zip(*model_bounds))
return model_params, fitparam_indices, model_bounds
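# Sketch (illustrative): a fixed parameter is dropped from the returned fit
# array and from the bounds, and its index disappears from the second return
# value, so the fitter never sees it.
def _example_model_to_fit_params():
    from astropy.modeling import models
    g = models.Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0)
    g.mean.fixed = True
    params, indices, bounds = model_to_fit_params(g)
    return params.tolist(), indices  # ([1.0, 1.0], [0, 2])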
@deprecated('5.1', 'private method: _model_to_fit_params has been made public now')
def _model_to_fit_params(model):
return model_to_fit_params(model)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = 'Optimizer cannot handle {0} constraints.'
if (any(model.fixed.values()) and
'fixed' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('fixed parameter'))
if any(model.tied.values()) and 'tied' not in supported_constraints:
raise UnsupportedConstraintError(
message.format('tied parameter'))
if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
'bounds' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('bound parameter'))
if model.eqcons and 'eqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('equality'))
if model.ineqcons and 'ineqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn('Model is linear in parameters; '
'consider using linear fitting methods.',
AstropyUserWarning)
elif len(model) != 1:
        # for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit "
"one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
    This provides a means of inserting a fitting routine without requiring it
    to be merged into astropy's core.
    Parameters
    ----------
    entry_points : list of `~importlib.metadata.EntryPoint`
        Entry points are objects which encapsulate importable objects and
        are defined when a package is installed.
Notes
-----
An explanation of entry points can be found `here
<http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(AstropyUserWarning(
f'{type(e).__name__} error occurred in entry point {name}.'))
else:
if not inspect.isclass(entry_point):
warnings.warn(AstropyUserWarning(
f'Modeling entry point {name} expected to be a Class.'))
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(AstropyUserWarning(
f"Modeling entry point {name} expected to extend "
"astropy.modeling.Fitter"))
def _populate_ep():
# TODO: Exclusively use select when Python minversion is 3.10
ep = entry_points()
if hasattr(ep, 'select'):
populate_entry_points(ep.select(group='astropy.modeling'))
else:
populate_entry_points(ep.get('astropy.modeling', []))
_populate_ep()
|
ac2d3923b11a7ef420e6c1de9da28f0186e9a0a40cb985e03eb593a46a9c808e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Module to test fitting routines
"""
# pylint: disable=invalid-name
import os.path
import unittest.mock as mk
from importlib.metadata import EntryPoint
from itertools import combinations
from unittest import mock
import numpy as np
import pytest
from numpy import linalg
from numpy.testing import assert_allclose, assert_almost_equal, assert_equal
from astropy.modeling import models
from astropy.modeling.core import Fittable2DModel, Parameter
from astropy.modeling.fitting import (
DogBoxLSQFitter, Fitter, FittingWithOutlierRemoval, JointFitter, LevMarLSQFitter,
LinearLSQFitter, LMLSQFitter, NonFiniteValueError, SimplexLSQFitter, SLSQPLSQFitter,
TRFLSQFitter, _NLLSQFitter, populate_entry_points)
from astropy.modeling.optimizers import Optimization
from astropy.stats import sigma_clip
from astropy.utils import NumpyRNGContext
from astropy.utils.compat.optional_deps import HAS_SCIPY
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyUserWarning
from . import irafutil
if HAS_SCIPY:
from scipy import optimize
fitters = [SimplexLSQFitter, SLSQPLSQFitter]
non_linear_fitters = [LevMarLSQFitter, TRFLSQFitter, LMLSQFitter, DogBoxLSQFitter]
_RANDOM_SEED = 0x1337
class TestPolynomial2D:
"""Tests for 2D polynomial fitting."""
def setup_class(self):
self.model = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
def poly2(x, y):
return 1 + 2 * x + 3 * x ** 2 + 4 * y + 5 * y ** 2 + 6 * x * y
self.z = poly2(self.x, self.y)
def test_poly2D_fitting(self):
fitter = LinearLSQFitter()
v = self.model.fit_deriv(x=self.x, y=self.y)
p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, p)
def test_eval(self):
fitter = LinearLSQFitter()
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model(self.x, self.y), self.z)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_nonlinear_fitting(self, fitter):
fitter = fitter()
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
new_model = fitter(self.model, self.x, self.y, self.z)
assert_allclose(new_model.parameters, [1, 2, 3, 4, 5, 6])
@pytest.mark.skipif('not HAS_SCIPY')
def test_compare_nonlinear_fitting(self):
self.model.parameters = [.6, 1.8, 2.9, 3.7, 4.9, 6.7]
fit_models = []
for fitter in non_linear_fitters:
fitter = fitter()
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_models.append(fitter(self.model, self.x, self.y, self.z))
for pair in combinations(fit_models, 2):
assert_allclose(pair[0].parameters, pair[1].parameters)
class TestICheb2D:
"""
Tests 2D Chebyshev polynomial fitting
    Create a 2D polynomial (z) using the Polynomial2D model with default
    coefficients, fit z using a Chebyshev2D model, then evaluate the fitted
    Chebyshev2D polynomial and compare with the initial z.
"""
def setup_class(self):
self.pmodel = models.Polynomial2D(2)
self.y, self.x = np.mgrid[:5, :5]
self.z = self.pmodel(self.x, self.y)
self.cheb2 = models.Chebyshev2D(2, 2)
self.fitter = LinearLSQFitter()
def test_default_params(self):
self.cheb2.parameters = np.arange(9)
p = np.array([1344., 1772., 400., 1860., 2448., 552., 432., 568.,
128.])
z = self.cheb2(self.x, self.y)
model = self.fitter(self.cheb2, self.x, self.y, z)
assert_almost_equal(model.parameters, p)
def test_poly2D_cheb2D(self):
model = self.fitter(self.cheb2, self.x, self.y, self.z)
z1 = model(self.x, self.y)
assert_almost_equal(self.z, z1)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = fitter(cheb2d, self.x, self.y, z)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_chebyshev2D_nonlinear_fitting_with_weights(self, fitter):
fitter = fitter()
cheb2d = models.Chebyshev2D(2, 2)
cheb2d.parameters = np.arange(9)
z = cheb2d(self.x, self.y)
cheb2d.parameters = [0.1, .6, 1.8, 2.9, 3.7, 4.9, 6.7, 7.5, 8.9]
weights = np.ones_like(self.y)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
model = fitter(cheb2d, self.x, self.y, z, weights=weights)
assert_allclose(model.parameters, [0, 1, 2, 3, 4, 5, 6, 7, 8],
atol=10**-9)
@pytest.mark.skipif('not HAS_SCIPY')
class TestJointFitter:
"""
Tests the joint fitting routine using 2 gaussian models
"""
def setup_class(self):
"""
Create 2 gaussian models and some data with noise.
Create a fitter for the two models keeping the amplitude parameter
common for the two models.
"""
self.g1 = models.Gaussian1D(10, mean=14.9, stddev=.3)
self.g2 = models.Gaussian1D(10, mean=13, stddev=.4)
self.jf = JointFitter([self.g1, self.g2],
{self.g1: ['amplitude'],
self.g2: ['amplitude']}, [9.8])
self.x = np.arange(10, 20, .1)
y1 = self.g1(self.x)
y2 = self.g2(self.x)
with NumpyRNGContext(_RANDOM_SEED):
n = np.random.randn(100)
self.ny1 = y1 + 2 * n
self.ny2 = y2 + 2 * n
self.jf(self.x, self.ny1, self.x, self.ny2)
def test_joint_parameter(self):
"""
Tests that the amplitude of the two models is the same
"""
assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])
assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])
def test_joint_fitter(self):
"""
        Tests the fitting routine against the equivalent direct use of
        `scipy.optimize.leastsq` and compares the fitted parameters.
"""
p1 = [14.9, .3]
p2 = [13, .4]
A = 9.8
p = np.r_[A, p1, p2]
def model(A, p, x):
return A * np.exp(-0.5 / p[1] ** 2 * (x - p[0]) ** 2)
def errfunc(p, x1, y1, x2, y2):
return np.ravel(np.r_[model(p[0], p[1:3], x1) - y1,
model(p[0], p[3:], x2) - y2])
coeff, _ = optimize.leastsq(errfunc, p,
args=(self.x, self.ny1, self.x, self.ny2))
assert_allclose(coeff, self.jf.fitparams, rtol=10 ** (-2))
class TestLinearLSQFitter:
def test_compound_model_raises_error(self):
"""Test that if an user tries to use a compound model, raises an error"""
with pytest.raises(ValueError) as excinfo:
init_model1 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model2 = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
init_model_comp = init_model1 + init_model2
x = np.arange(10)
y = init_model_comp(x, model_set_axis=False)
fitter = LinearLSQFitter()
_ = fitter(init_model_comp, x, y)
assert "Model must be simple, not compound" in str(excinfo.value)
def test_chebyshev1D(self):
"""Tests fitting a 1D Chebyshev polynomial to some real world data."""
test_file = get_pkg_data_filename(os.path.join('data',
'idcompspec.fits'))
with open(test_file) as f:
lines = f.read()
reclist = lines.split('begin')
record = irafutil.IdentifyRecord(reclist[1])
coeffs = record.coeff
order = int(record.fields['order'])
initial_model = models.Chebyshev1D(order - 1,
domain=record.get_range())
fitter = LinearLSQFitter()
fitted_model = fitter(initial_model, record.x, record.z)
assert_allclose(fitted_model.parameters, np.array(coeffs),
rtol=10e-2)
def test_linear_fit_model_set(self):
"""Tests fitting multiple models simultaneously."""
init_model = models.Polynomial1D(degree=2, c0=[1, 1], n_models=2)
x = np.arange(10)
y_expected = init_model(x, model_set_axis=False)
assert y_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
y = y_expected + np.random.normal(0, 0.01, size=y_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model(x, model_set_axis=False), y_expected,
rtol=1e-1)
def test_linear_fit_2d_model_set(self):
"""Tests fitted multiple 2-D models simultaneously."""
init_model = models.Polynomial2D(degree=2, c0_0=[1, 1], n_models=2)
x = np.arange(10)
y = np.arange(10)
z_expected = init_model(x, y, model_set_axis=False)
assert z_expected.shape == (2, 10)
# Add a bit of random noise
with NumpyRNGContext(_RANDOM_SEED):
z = z_expected + np.random.normal(0, 0.01, size=z_expected.shape)
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model(x, y, model_set_axis=False), z_expected,
rtol=1e-1)
def test_linear_fit_fixed_parameter(self):
"""
Tests fitting a polynomial model with a fixed parameter (issue #6135).
"""
init_model = models.Polynomial1D(degree=2, c1=1)
init_model.c1.fixed = True
x = np.arange(10)
y = 2 + x + 0.5*x*x
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [2., 1., 0.5], atol=1e-14)
def test_linear_fit_model_set_fixed_parameter(self):
"""
Tests fitting a polynomial model set with a fixed parameter (#6135).
"""
init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)
init_model.c1.fixed = True
x = np.arange(10)
yy = np.array([2 + x + 0.5*x*x, -2*x])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.c0, [2., 0.], atol=1e-14)
assert_allclose(fitted_model.c1, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c2, [0.5, 0.], atol=1e-14)
def test_linear_fit_2d_model_set_fixed_parameters(self):
"""
Tests fitting a 2d polynomial model set with fixed parameters (#6135).
"""
init_model = models.Polynomial2D(degree=2, c1_0=[1, 2], c0_1=[-0.5, 1],
n_models=2,
fixed={'c1_0': True, 'c0_1': True})
x, y = np.mgrid[0:5, 0:5]
zz = np.array([1+x-0.5*y+0.1*x*x, 2*x+y-0.2*y*y])
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, zz)
assert_allclose(fitted_model(x, y, model_set_axis=False), zz,
atol=1e-14)
def test_linear_fit_model_set_masked_values(self):
"""
Tests model set fitting with masked value(s) (#4824, #6819).
"""
# NB. For single models, there is an equivalent doctest.
init_model = models.Polynomial1D(degree=1, n_models=2)
x = np.arange(10)
y = np.ma.masked_array([2*x+1, x-2], mask=np.zeros_like([x, x]))
y[0, 7] = 100. # throw off fit coefficients if unmasked
y.mask[0, 7] = True
y[1, 1:3] = -100.
y.mask[1, 1:3] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.c0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1, [2., 1.], atol=1e-14)
def test_linear_fit_2d_model_set_masked_values(self):
"""
Tests 2D model set fitting with masked value(s) (#4824, #6819).
"""
init_model = models.Polynomial2D(1, n_models=2)
x, y = np.mgrid[0:5, 0:5]
z = np.ma.masked_array([2*x+3*y+1, x-0.5*y-2],
mask=np.zeros_like([x, x]))
z[0, 3, 1] = -1000. # throw off fit coefficients if unmasked
z.mask[0, 3, 1] = True
fitter = LinearLSQFitter()
fitted_model = fitter(init_model, x, y, z)
assert_allclose(fitted_model.c0_0, [1., -2.], atol=1e-14)
assert_allclose(fitted_model.c1_0, [2., 1.], atol=1e-14)
assert_allclose(fitted_model.c0_1, [3., -0.5], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestNonLinearFitters:
"""Tests non-linear least squares fitting and the SLSQP algorithm."""
def setup_class(self):
self.initial_values = [100, 5, 1]
self.xdata = np.arange(0, 10, 0.1)
sigma = 4. * np.ones_like(self.xdata)
with NumpyRNGContext(_RANDOM_SEED):
yerror = np.random.normal(0, sigma)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
self.ydata = func(self.initial_values, self.xdata) + yerror
self.gauss = models.Gaussian1D(100, 5, stddev=1)
@pytest.mark.parametrize('fitter0', non_linear_fitters)
@pytest.mark.parametrize('fitter1', non_linear_fitters)
def test_estimated_vs_analytic_deriv(self, fitter0, fitter1):
"""
        Runs each pair of non-linear fitters with estimated and analytic
        derivatives of a `Gaussian1D`.
"""
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(g1e, self.xdata, self.ydata, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize('fitter0', non_linear_fitters)
@pytest.mark.parametrize('fitter1', non_linear_fitters)
def test_estimated_vs_analytic_deriv_with_weights(self, fitter0, fitter1):
"""
        Runs each pair of non-linear fitters, with weights, using estimated
        and analytic derivatives of a `Gaussian1D`.
"""
weights = 1.0 / (self.ydata / 10.)
fitter0 = fitter0()
model = fitter0(self.gauss, self.xdata, self.ydata, weights=weights)
g1e = models.Gaussian1D(100, 5.0, stddev=1)
fitter1 = fitter1()
emodel = fitter1(g1e, self.xdata, self.ydata, weights=weights, estimate_jacobian=True)
assert_allclose(model.parameters, emodel.parameters, rtol=10 ** (-3))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_with_optimize(self, fitter):
"""
Tests results from `LevMarLSQFitter` and `TRFLSQFitter` against
`scipy.optimize.leastsq`.
"""
fitter = fitter()
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
def func(p, x):
return p[0] * np.exp(-0.5 / p[2] ** 2 * (x - p[1]) ** 2)
def errfunc(p, x, y):
return func(p, x) - y
result = optimize.leastsq(errfunc, self.initial_values,
args=(self.xdata, self.ydata))
assert_allclose(model.parameters, result[0], rtol=10 ** (-3))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_with_weights(self, fitter):
"""
Tests results from `LevMarLSQFitter` and `TRFLSQFitter` with weights.
"""
fitter = fitter()
# part 1: weights are equal to 1
model = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=np.ones_like(self.xdata))
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
# part 2: weights are 0 or 1 (effectively, they are a mask)
weights = np.zeros_like(self.xdata)
weights[::2] = 1.
mask = weights >= 1.
model = fitter(self.gauss, self.xdata[mask], self.ydata[mask],
estimate_jacobian=True)
withw = fitter(self.gauss, self.xdata, self.ydata,
estimate_jacobian=True, weights=weights)
assert_allclose(model.parameters, withw.parameters, rtol=10 ** (-4))
@pytest.mark.filterwarnings(r'ignore:.* Maximum number of iterations reached')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter_class', fitters)
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_fitter_against_LevMar(self, fitter_class, fitter):
"""
Tests results from non-linear fitters against `LevMarLSQFitter`
and `TRFLSQFitter`
"""
fitter = fitter()
fitter_cls = fitter_class()
# This emits a warning from fitter that we need to ignore with
# pytest.mark.filterwarnings above.
new_model = fitter_cls(self.gauss, self.xdata, self.ydata)
model = fitter(self.gauss, self.xdata, self.ydata)
assert_allclose(model.parameters, new_model.parameters,
rtol=10 ** (-4))
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_LSQ_SLSQP_with_constraints(self, fitter):
"""
Runs `LevMarLSQFitter`/`TRFLSQFitter` and `SLSQPLSQFitter` on a
model with constraints.
"""
fitter = fitter()
g1 = models.Gaussian1D(100, 5, stddev=1)
g1.mean.fixed = True
fslsqp = SLSQPLSQFitter()
slsqp_model = fslsqp(g1, self.xdata, self.ydata)
model = fitter(g1, self.xdata, self.ydata)
assert_allclose(model.parameters, slsqp_model.parameters,
rtol=10 ** (-4))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_non_linear_lsq_fitter_with_weights(self, fitter):
"""
Tests that issue #11581 has been solved.
"""
fitter = fitter()
np.random.seed(42)
norder = 2
fitter2 = LinearLSQFitter()
model = models.Polynomial1D(norder)
npts = 10000
c = [2.0, -10.0, 7.0]
tw = np.random.uniform(0.0, 10.0, npts)
tx = np.random.uniform(0.0, 10.0, npts)
ty = c[0] + c[1] * tx + c[2] * (tx ** 2)
ty += np.random.normal(0.0, 1.5, npts)
with pytest.warns(AstropyUserWarning, match=r'Model is linear in parameters'):
tf1 = fitter(model, tx, ty, weights=tw)
tf2 = fitter2(model, tx, ty, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters,
atol=10 ** (-16))
assert_allclose(tf1.parameters, c,
rtol=10 ** (-2), atol=10 ** (-2))
model = models.Gaussian1D()
if isinstance(fitter, TRFLSQFitter) or isinstance(fitter, LMLSQFitter):
with pytest.warns(AstropyUserWarning, match=r'The fit may be unsuccessful; *.'):
fitter(model, tx, ty, weights=tw)
else:
fitter(model, tx, ty, weights=tw)
model = models.Polynomial2D(norder)
nxpts = 100
nypts = 150
npts = nxpts * nypts
c = [1.0, 4.0, 7.0, -8.0, -9.0, -3.0]
tw = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tx = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
ty = np.random.uniform(0.0, 10.0, npts).reshape(nxpts, nypts)
tz = c[0] + c[1] * tx + c[2] * (tx ** 2) + c[3] * ty + c[4] * (ty ** 2) + c[5] * tx * ty
tz += np.random.normal(0.0, 1.5, npts).reshape(nxpts, nypts)
with pytest.warns(AstropyUserWarning, match=r'Model is linear in parameters'):
tf1 = fitter(model, tx, ty, tz, weights=tw)
tf2 = fitter2(model, tx, ty, tz, weights=tw)
assert_allclose(tf1.parameters, tf2.parameters,
atol=10 ** (-16))
assert_allclose(tf1.parameters, c,
rtol=10 ** (-2), atol=10 ** (-2))
def test_simplex_lsq_fitter(self):
"""A basic test for the `SimplexLSQ` fitter."""
class Rosenbrock(Fittable2DModel):
a = Parameter()
b = Parameter()
@staticmethod
def evaluate(x, y, a, b):
return (a - x) ** 2 + b * (y - x ** 2) ** 2
x = y = np.linspace(-3.0, 3.0, 100)
with NumpyRNGContext(_RANDOM_SEED):
z = Rosenbrock.evaluate(x, y, 1.0, 100.0)
z += np.random.normal(0., 0.1, size=z.shape)
fitter = SimplexLSQFitter()
r_i = Rosenbrock(1, 100)
r_f = fitter(r_i, x, y, z)
assert_allclose(r_f.parameters, [1.0, 100.0], rtol=1e-2)
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_param_cov(self, fitter):
"""
Tests that the 'param_cov' fit_info entry gets the right answer for
*linear* least squares, where the answer is exact
"""
fitter = fitter()
a = 2
b = 100
with NumpyRNGContext(_RANDOM_SEED):
x = np.linspace(0, 1, 100)
            # y scatter is amplitude ~1 to make sure covariance is
            # non-negligible
y = x*a + b + np.random.randn(len(x))
# first compute the ordinary least squares covariance matrix
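            # beta_hat = (X^T X)^{-1} X^T y and Cov(beta_hat) = s^2 (X^T X)^{-1},
            # where s^2 is the residual variance with (n - p) degrees of freedom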
X = np.vstack([x, np.ones(len(x))]).T
beta = np.matmul(np.matmul(np.linalg.inv(np.matmul(X.T, X)), X.T), y.T)
s2 = (np.sum((y - np.matmul(X, beta).ravel())**2) /
(len(y) - len(beta)))
olscov = np.linalg.inv(np.matmul(X.T, X)) * s2
# now do the non-linear least squares fit
mod = models.Linear1D(a, b)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fmod = fitter(mod, x, y)
assert_allclose(fmod.parameters, beta.ravel())
assert_allclose(olscov, fitter.fit_info['param_cov'])
class TestEntryPoint:
"""Tests population of fitting with entry point fitters"""
def successfulimport(self):
# This should work
class goodclass(Fitter):
__name__ = "GoodClass"
return goodclass
def raiseimporterror(self):
        # This should fail as it raises an ImportError
raise ImportError
def returnbadfunc(self):
def badfunc():
# This should import but it should fail type check
pass
return badfunc
def returnbadclass(self):
        # This should import but it should fail the subclass type check
class badclass:
pass
return badclass
def test_working(self):
"""This should work fine"""
mock_entry_working = mock.create_autospec(EntryPoint)
mock_entry_working.name = "Working"
mock_entry_working.load = self.successfulimport
populate_entry_points([mock_entry_working])
def test_import_error(self):
"""This raises an import error on load to test that it is handled correctly"""
mock_entry_importerror = mock.create_autospec(EntryPoint)
mock_entry_importerror.name = "IErr"
mock_entry_importerror.load = self.raiseimporterror
with pytest.warns(AstropyUserWarning, match=r".*ImportError.*"):
populate_entry_points([mock_entry_importerror])
def test_bad_func(self):
"""This returns a function which fails the type check"""
mock_entry_badfunc = mock.create_autospec(EntryPoint)
mock_entry_badfunc.name = "BadFunc"
mock_entry_badfunc.load = self.returnbadfunc
with pytest.warns(AstropyUserWarning, match=r".*Class.*"):
populate_entry_points([mock_entry_badfunc])
def test_bad_class(self):
"""This returns a class which doesn't inherient from fitter """
mock_entry_badclass = mock.create_autospec(EntryPoint)
mock_entry_badclass.name = "BadClass"
mock_entry_badclass.load = self.returnbadclass
with pytest.warns(AstropyUserWarning, match=r".*BadClass.*"):
populate_entry_points([mock_entry_badclass])
@pytest.mark.skipif('not HAS_SCIPY')
class Test1DFittingWithOutlierRemoval:
def setup_class(self):
self.x = np.linspace(-5., 5., 200)
self.model_params = (3.0, 1.3, 0.8)
def func(p, x):
return p[0]*np.exp(-0.5*(x - p[1])**2/p[2]**2)
self.y = func(self.model_params, self.x)
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter', non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.x.shape)
y = self.y + (np.random.normal(0., 0.2, self.x.shape) +
c*np.random.normal(3.0, 5.0, self.x.shape))
g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
fit = FittingWithOutlierRemoval(fitter, sigma_clip,
niter=3, sigma=3.0)
fitted_model, _ = fit(g_init, self.x, y)
assert_allclose(fitted_model.parameters, self.model_params, rtol=1e-1)
@pytest.mark.skipif('not HAS_SCIPY')
class Test2DFittingWithOutlierRemoval:
def setup_class(self):
self.y, self.x = np.mgrid[-3:3:128j, -3:3:128j]
self.model_params = (3.0, 1.0, 0.0, 0.8, 0.8)
def Gaussian_2D(p, pos):
return p[0]*np.exp(-0.5*(pos[0] - p[2])**2 / p[4]**2 -
0.5*(pos[1] - p[1])**2 / p[3]**2)
self.z = Gaussian_2D(self.model_params, np.array([self.y, self.x]))
    def initial_guess(self, data, pos):
        """Computes the centroid of the data as the initial guess for the
        center position."""
        y = pos[0]
        x = pos[1]
wx = x * data
wy = y * data
total_intensity = np.sum(data)
x_mean = np.sum(wx) / total_intensity
y_mean = np.sum(wy) / total_intensity
x_to_pixel = x[0].size / (x[x[0].size - 1][x[0].size - 1] - x[0][0])
y_to_pixel = y[0].size / (y[y[0].size - 1][y[0].size - 1] - y[0][0])
x_pos = np.around(x_mean * x_to_pixel + x[0].size / 2.).astype(int)
y_pos = np.around(y_mean * y_to_pixel + y[0].size / 2.).astype(int)
amplitude = data[y_pos][x_pos]
return amplitude, x_mean, y_mean
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.filterwarnings(r'ignore:Values in x were outside bounds during a minimize step, '
r'clipping to bounds')
@pytest.mark.parametrize('fitter', non_linear_fitters + fitters)
def test_with_fitters_and_sigma_clip(self, fitter):
import scipy.stats as stats
fitter = fitter()
np.random.seed(0)
c = stats.bernoulli.rvs(0.25, size=self.z.shape)
z = self.z + (np.random.normal(0., 0.2, self.z.shape) +
c*np.random.normal(self.z, 2.0, self.z.shape))
guess = self.initial_guess(self.z, np.array([self.y, self.x]))
g2_init = models.Gaussian2D(amplitude=guess[0], x_mean=guess[1],
y_mean=guess[2], x_stddev=0.75,
y_stddev=1.25)
fit = FittingWithOutlierRemoval(fitter, sigma_clip,
niter=3, sigma=3.)
fitted_model, _ = fit(g2_init, self.x, self.y, z)
assert_allclose(fitted_model.parameters[0:5], self.model_params,
atol=1e-1)
def test_1d_set_fitting_with_outlier_removal():
"""Test model set fitting with outlier removal (issue #6819)"""
poly_set = models.Polynomial1D(2, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
x = np.arange(10)
y = np.array([2.5*x - 4, 2*x*x + x + 10])
y[1, 5] = -1000 # outlier
poly_set, filt_y = fitter(poly_set, x, y)
assert_allclose(poly_set.c0, [-4., 10.], atol=1e-14)
assert_allclose(poly_set.c1, [2.5, 1.], atol=1e-14)
assert_allclose(poly_set.c2, [0., 2.], atol=1e-14)
def test_2d_set_axis_2_fitting_with_outlier_removal():
"""Test fitting 2D model set (axis 2) with outlier removal (issue #6819)"""
poly_set = models.Polynomial2D(1, n_models=2, model_set_axis=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(),
sigma_clip, sigma=2.5, niter=3,
cenfunc=np.ma.mean, stdfunc=np.ma.std)
y, x = np.mgrid[0:5, 0:5]
z = np.rollaxis(np.array([x+y, 1-0.1*x+0.2*y]), 0, 3)
z[3, 3:5, 0] = 100. # outliers
poly_set, filt_z = fitter(poly_set, x, y, z)
assert_allclose(poly_set.c0_0, [[[0., 1.]]], atol=1e-14)
assert_allclose(poly_set.c1_0, [[[1., -0.1]]], atol=1e-14)
assert_allclose(poly_set.c0_1, [[[1., 0.2]]], atol=1e-14)
@pytest.mark.skipif('not HAS_SCIPY')
class TestWeightedFittingWithOutlierRemoval:
"""Issue #7020 """
def setup_class(self):
# values of x,y not important as we fit y(x,y) = p0 model here
self.y, self.x = np.mgrid[0:20, 0:20]
self.z = np.mod(self.x + self.y, 2) * 2 - 1 # -1,1 chessboard
self.weights = np.mod(self.x + self.y, 2) * 2 + 1 # 1,3 chessboard
self.z[0, 0] = 1000.0 # outlier
self.z[0, 1] = 1000.0 # outlier
self.x1d = self.x.flatten()
self.z1d = self.z.flatten()
self.weights1d = self.weights.flatten()
def test_1d_without_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d)
assert_allclose(fit.parameters[0], self.z1d.mean(), atol=10**(-2))
def test_1d_without_weights_with_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x1d, self.z1d)
assert((~mask).sum() == self.z1d.size - 2)
assert(mask[0] and mask[1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2)) # with removed outliers mean is 0.0
def test_1d_with_weights_without_sigma_clip(self):
model = models.Polynomial1D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_1d_with_weights_with_sigma_clip(self):
"""
smoke test for #7020 - fails without fitting.py
patch because weights does not propagate
"""
model = models.Polynomial1D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, filtered = fitter(model, self.x1d, self.z1d, weights=self.weights1d)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert(fit.parameters[0] < 1.0)
def test_1d_set_with_common_weights_with_sigma_clip(self):
"""added for #6819 (1D model set with weights in common)"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=self.weights1d)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_1d_set_with_weights_with_sigma_clip(self):
"""1D model set with separate weights"""
model = models.Polynomial1D(0, n_models=2)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
z1d = np.array([self.z1d, self.z1d])
weights = np.array([self.weights1d, self.weights1d])
fit, filtered = fitter(model, self.x1d, z1d, weights=weights)
assert_allclose(fit.parameters, [0.8, 0.8], atol=1e-14)
def test_2d_without_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter()
fit = fitter(model, self.x, self.y, self.z)
assert_allclose(fit.parameters[0], self.z.mean(), atol=10**(-2))
def test_2d_without_weights_with_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, mask = fitter(model, self.x, self.y, self.z)
assert((~mask).sum() == self.z.size - 2)
assert(mask[0, 0] and mask[0, 1])
assert_allclose(fit.parameters[0], 0.0, atol=10**(-2))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_2d_with_weights_without_sigma_clip(self, fitter):
fitter = fitter()
model = models.Polynomial2D(0)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
def test_2d_linear_with_weights_without_sigma_clip(self):
model = models.Polynomial2D(0)
fitter = LinearLSQFitter() # LinearLSQFitter doesn't handle weights properly in 2D
fit = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 1.0) # outliers pulled it high
@pytest.mark.parametrize('base_fitter', non_linear_fitters)
def test_2d_with_weights_with_sigma_clip(self, base_fitter):
"""smoke test for #7020 - fails without fitting.py patch because
weights does not propagate"""
base_fitter = base_fitter()
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(base_fitter, sigma_clip,
niter=3, sigma=3.)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert(fit.parameters[0] < 1.0)
def test_2d_linear_with_weights_with_sigma_clip(self):
"""same as test above with a linear fitter."""
model = models.Polynomial2D(0)
fitter = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
niter=3, sigma=3.)
fit, _ = fitter(model, self.x, self.y, self.z, weights=self.weights)
assert(fit.parameters[0] > 10**(-2)) # weights pulled it > 0
# outliers didn't pull it out of [-1:1] because they had been removed
assert(fit.parameters[0] < 1.0)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_fitters_with_weights(fitter):
"""Issue #5737 """
fitter = fitter()
if isinstance(fitter, _NLLSQFitter):
pytest.xfail("This test is poorly designed and causes issues for "
"scipy.optimize.least_squares based fitters")
Xin, Yin = np.mgrid[0:21, 0:21]
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
# Non-linear model
g2 = models.Gaussian2D(10, 10, 9, 2, 3)
z = g2(Xin, Yin)
gmod = fitter(models.Gaussian2D(15, 7, 8, 1.3, 1.2), Xin, Yin, z + zsig)
assert_allclose(gmod.parameters, g2.parameters, atol=10 ** (-2))
# Linear model
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig)
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights():
"""Regression test for #7035"""
Xin, Yin = np.mgrid[0:21, 0:21]
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig**(-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
def test_linear_fitter_with_weights_flat():
"""Same as the above #7035 test but with flattened inputs"""
Xin, Yin = np.mgrid[0:21, 0:21]
Xin, Yin = Xin.flatten(), Yin.flatten()
fitter = LinearLSQFitter()
with NumpyRNGContext(_RANDOM_SEED):
zsig = np.random.normal(0, 0.01, size=Xin.shape)
p2 = models.Polynomial2D(3)
p2.parameters = np.arange(10)/1.2
z = p2(Xin, Yin)
pmod = fitter(models.Polynomial2D(3), Xin, Yin, z + zsig, weights=zsig**(-2))
assert_allclose(pmod.parameters, p2.parameters, atol=10 ** (-2))
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.filterwarnings('ignore:The fit may be unsuccessful')
@pytest.mark.parametrize('fitter', non_linear_fitters + fitters)
def test_fitters_interface(fitter):
"""
Test that ``**kwargs`` work with all optimizers.
This is a basic smoke test.
"""
fitter = fitter()
model = models.Gaussian1D(10, 4, .3)
x = np.arange(21)
y = model(x)
if isinstance(fitter, SimplexLSQFitter):
kwargs = {'maxiter': 79, 'verblevel': 1, 'acc': 1e-6}
else:
kwargs = {'maxiter': 77, 'verblevel': 1, 'epsilon': 1e-2, 'acc': 1e-6}
if isinstance(fitter, LevMarLSQFitter) or isinstance(fitter, _NLLSQFitter):
kwargs.pop('verblevel')
_ = fitter(model, x, y, **kwargs)
@pytest.mark.skipif('not HAS_SCIPY')
@pytest.mark.parametrize('fitter_class', [SLSQPLSQFitter, SimplexLSQFitter])
def test_optimizers(fitter_class):
fitter = fitter_class()
# Test maxiter
assert fitter._opt_method.maxiter == 100
fitter._opt_method.maxiter = 1000
assert fitter._opt_method.maxiter == 1000
# Test eps
assert fitter._opt_method.eps == np.sqrt(np.finfo(float).eps)
fitter._opt_method.eps = 1e-16
assert fitter._opt_method.eps == 1e-16
# Test acc
assert fitter._opt_method.acc == 1e-7
fitter._opt_method.acc = 1e-16
assert fitter._opt_method.acc == 1e-16
# Test repr
assert repr(fitter._opt_method) == f"{fitter._opt_method.__class__.__name__}()"
fitparams = mk.MagicMock()
final_func_val = mk.MagicMock()
numiter = mk.MagicMock()
funcalls = mk.MagicMock()
exit_mode = 1
mess = mk.MagicMock()
xtol = mk.MagicMock()
if fitter_class == SLSQPLSQFitter:
return_value = (fitparams, final_func_val, numiter, exit_mode, mess)
fit_info = {
'final_func_val': final_func_val,
'numiter': numiter,
'exit_mode': exit_mode,
'message': mess
}
else:
return_value = (fitparams, final_func_val, numiter, funcalls, exit_mode)
fit_info = {
'final_func_val': final_func_val,
'numiter': numiter,
'exit_mode': exit_mode,
'num_function_calls': funcalls
}
with mk.patch.object(fitter._opt_method.__class__, 'opt_method',
return_value=return_value):
with pytest.warns(AstropyUserWarning, match=r"The fit may be unsuccessful; .*"):
assert (fitparams, fit_info) == fitter._opt_method(mk.MagicMock(), mk.MagicMock(),
mk.MagicMock(), xtol=xtol)
assert fit_info == fitter._opt_method.fit_info
    if isinstance(fitter, SLSQPLSQFitter):
        assert fitter._opt_method.acc == 1e-16
    else:
        assert fitter._opt_method.acc == xtol
@mk.patch.multiple(Optimization, __abstractmethods__=set())
def test_Optimization_abstract_call():
optimization = Optimization(mk.MagicMock())
with pytest.raises(NotImplementedError) as err:
optimization()
assert str(err.value) == "Subclasses should implement this method"
def test_fitting_with_outlier_removal_niter():
"""
Test that FittingWithOutlierRemoval stops prior to reaching niter if the
set of masked points has converged and correctly reports the actual number
of iterations performed.
"""
# 2 rows with some noise around a constant level and 1 deviant point:
x = np.arange(25)
with NumpyRNGContext(_RANDOM_SEED):
y = np.random.normal(loc=10., scale=1., size=(2, 25))
y[0, 14] = 100.
# Fit 2 models with up to 5 iterations (should only take 2):
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=5,
sigma_lower=3., sigma_upper=3., maxiters=1
)
model, mask = fitter(models.Chebyshev1D(2, n_models=2), x, y)
# Confirm that only the deviant point was rejected, in 2 iterations:
assert_equal(np.where(mask), [[0], [14]])
assert fitter.fit_info['niter'] == 2
# Refit just the first row without any rejection iterations, to ensure
# there are no regressions for that special case:
fitter = FittingWithOutlierRemoval(
fitter=LinearLSQFitter(), outlier_func=sigma_clip, niter=0,
sigma_lower=3., sigma_upper=3., maxiters=1
)
model, mask = fitter(models.Chebyshev1D(2), x, y[0])
# Confirm that there were no iterations or rejected points:
assert mask.sum() == 0
assert fitter.fit_info['niter'] == 0
@pytest.mark.skipif('not HAS_SCIPY')
class TestFittingUncertainties:
"""
Test that parameter covariance is calculated correctly for the fitters
    that support it (the non-linear least-squares fitters and LinearLSQFitter).
"""
example_1D_models = [models.Polynomial1D(2), models.Linear1D()]
example_1D_sets = [models.Polynomial1D(2, n_models=2, model_set_axis=False),
models.Linear1D(n_models=2, slope=[1., 1.], intercept=[0, 0])]
def setup_class(self):
np.random.seed(619)
self.x = np.arange(10)
self.x_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.y_grid = np.random.randint(0, 100, size=100).reshape(10, 10)
self.rand_grid = np.random.random(100).reshape(10, 10)
self.rand = self.rand_grid[0]
@pytest.mark.parametrize(('single_model', 'model_set'),
list(zip(example_1D_models, example_1D_sets)))
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_1d_models(self, single_model, model_set, fitter):
""" Test that fitting uncertainties are computed correctly for 1D models
        and 1D model sets, comparing the covariance/stds from the non-linear
        fitters against those from LinearLSQFitter, which computes them
        analytically via numpy.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
# test 1D single models
# fit single model w/ nonlinear fitter
y = single_model(self.x) + self.rand
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_model = fitter(single_model, self.x, y)
cov_model = fit_model.cov_matrix.cov_matrix
# fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x, y)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
        # check that the covariance and stds are computed correctly
assert_allclose(cov_model_linlsq, cov_model)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),
fit_model_linlsq.stds.stds)
# now test 1D model sets
# fit set of models w/ linear fitter
y = model_set(self.x, model_set_axis=False) + np.array([self.rand, self.rand])
fit_1d_set_linlsq = linlsq_fitter(model_set, self.x, y)
cov_1d_set_linlsq = [j.cov_matrix for j in
fit_1d_set_linlsq.cov_matrix]
        # make sure the cov matrix from the single-model fit w/ the non-linear
        # fitter matches the cov matrix of the first model in the set
assert_allclose(cov_1d_set_linlsq[0], cov_model)
assert_allclose(np.sqrt(np.diag(cov_1d_set_linlsq[0])),
fit_1d_set_linlsq.stds[0].stds)
@pytest.mark.parametrize('fitter', non_linear_fitters)
def test_2d_models(self, fitter):
"""
Test that fitting uncertainties are computed correctly for 2D models
        and 2D model sets, comparing the covariance/stds from the non-linear
        fitters against those from LinearLSQFitter, which computes them
        analytically via numpy.
"""
fitter = fitter(calc_uncertainties=True)
linlsq_fitter = LinearLSQFitter(calc_uncertainties=True)
single_model = models.Polynomial2D(2, c0_0=2)
model_set = models.Polynomial2D(degree=2, n_models=2, c0_0=[2, 3],
model_set_axis=False)
# fit single model w/ nonlinear fitter
z_grid = single_model(self.x_grid, self.y_grid) + self.rand_grid
with pytest.warns(AstropyUserWarning,
match=r'Model is linear in parameters'):
fit_model = fitter(single_model, self.x_grid, self.y_grid, z_grid)
cov_model = fit_model.cov_matrix.cov_matrix
        # fit single model w/ linlsq fitter
fit_model_linlsq = linlsq_fitter(single_model, self.x_grid,
self.y_grid, z_grid)
cov_model_linlsq = fit_model_linlsq.cov_matrix.cov_matrix
assert_allclose(cov_model, cov_model_linlsq)
assert_allclose(np.sqrt(np.diag(cov_model_linlsq)),
fit_model_linlsq.stds.stds)
# fit 2d model set
z_grid = model_set(self.x_grid, self.y_grid) + np.array((self.rand_grid,
self.rand_grid))
fit_2d_set_linlsq = linlsq_fitter(model_set, self.x_grid, self.y_grid,
z_grid)
cov_2d_set_linlsq = [j.cov_matrix for j in fit_2d_set_linlsq.cov_matrix]
        # make sure the cov matrix from the single-model fit w/ the non-linear
        # fitter matches the cov matrix of the first model in the set
assert_allclose(cov_2d_set_linlsq[0], cov_model)
assert_allclose(np.sqrt(np.diag(cov_2d_set_linlsq[0])),
fit_2d_set_linlsq.stds[0].stds)
def test_covariance_std_printing_indexing(self, capsys):
"""
Test printing methods and indexing.
"""
# test str representation for Covariance/stds
fitter = LinearLSQFitter(calc_uncertainties=True)
mod = models.Linear1D()
fit_mod = fitter(mod, self.x, mod(self.x)+self.rand)
print(fit_mod.cov_matrix)
captured = capsys.readouterr()
assert "slope | 0.001" in captured.out
assert "intercept| -0.005, 0.03" in captured.out
print(fit_mod.stds)
captured = capsys.readouterr()
assert "slope | 0.032" in captured.out
assert "intercept| 0.173" in captured.out
# test 'pprint' for Covariance/stds
print(fit_mod.cov_matrix.pprint(round_val=5, max_lines=1))
captured = capsys.readouterr()
assert "slope | 0.00105" in captured.out
assert "intercept" not in captured.out
print(fit_mod.stds.pprint(max_lines=1, round_val=5))
captured = capsys.readouterr()
assert "slope | 0.03241" in captured.out
assert "intercept" not in captured.out
# test indexing for Covariance class.
assert fit_mod.cov_matrix[0, 0] == fit_mod.cov_matrix['slope', 'slope']
# test indexing for stds class.
assert fit_mod.stds[1] == fit_mod.stds['intercept']
@pytest.mark.skipif('not HAS_SCIPY')
def test_non_finite_filter():
"""Regression test filter introduced to solve issues #3575 and #12809"""
x = np.array([1, 2, 3, 4, 5, np.nan, 7, np.inf])
y = np.array([9, np.nan, 11, np.nan, 13, np.nan, 15, 16])
m_init = models.Gaussian1D()
fit = LevMarLSQFitter()
    # The fit raises an error because of the non-finite (nan/inf) values
with pytest.raises(NonFiniteValueError, match=r"Objective function has encountered.*"):
fit(m_init, x, y)
|
1739b93e5b2f1080edc21633f8b19015fc9e64b1e4e9ed533846a807ac4e2c0f | # Note that we test the main astropy.wcs.WCS class directly rather than testing
# the mix-in class on its own (since it's not functional without being used as
# a mix-in)
import warnings
from packaging.version import Version
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_allclose
from itertools import product
from astropy import units as u
from astropy.time import Time
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import Quantity
from astropy.coordinates import ICRS, FK5, Galactic, SkyCoord, SpectralCoord, ITRS, EarthLocation
from astropy.io.fits import Header
from astropy.io.fits.verify import VerifyWarning
from astropy.units.core import UnitsWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.wcs.wcs import WCS, FITSFixedWarning, Sip, NoConvergence
from astropy.wcs.wcsapi.fitswcs import custom_ctype_to_ucd_mapping, VELOCITY_FRAMES
from astropy.wcs._wcs import __version__ as wcsver
from astropy.utils import iers
from astropy.utils.exceptions import AstropyUserWarning
###############################################################################
# The following example is the simplest WCS with default values
###############################################################################
WCS_EMPTY = WCS(naxis=1)
WCS_EMPTY.wcs.crpix = [1]
def test_empty():
wcs = WCS_EMPTY
# Low-level API
assert wcs.pixel_n_dim == 1
assert wcs.world_n_dim == 1
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == [None]
assert wcs.world_axis_units == ['']
assert wcs.pixel_axis_names == ['']
assert wcs.world_axis_names == ['']
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [('world', 0, 'value')]
assert wcs.world_axis_object_classes['world'][0] is Quantity
assert wcs.world_axis_object_classes['world'][1] == ()
assert wcs.world_axis_object_classes['world'][2]['unit'] is u.one
assert_allclose(wcs.pixel_to_world_values(29), 29)
assert_allclose(wcs.array_index_to_world_values(29), 29)
assert np.ndim(wcs.pixel_to_world_values(29)) == 0
assert np.ndim(wcs.array_index_to_world_values(29)) == 0
assert_allclose(wcs.world_to_pixel_values(29), 29)
assert_equal(wcs.world_to_array_index_values(29), (29,))
assert np.ndim(wcs.world_to_pixel_values(29)) == 0
assert np.ndim(wcs.world_to_array_index_values(29)) == 0
# High-level API
coord = wcs.pixel_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = wcs.array_index_to_world(29)
assert_quantity_allclose(coord, 29 * u.one)
assert np.ndim(coord) == 0
coord = 15 * u.one
x = wcs.world_to_pixel(coord)
assert_allclose(x, 15.)
assert np.ndim(x) == 0
i = wcs.world_to_array_index(coord)
assert_equal(i, 15)
assert np.ndim(i) == 0
###############################################################################
# The following example is a simple 2D image with celestial coordinates
###############################################################################
HEADER_SIMPLE_CELESTIAL = """
WCSAXES = 2
CTYPE1 = RA---TAN
CTYPE2 = DEC--TAN
CRVAL1 = 10
CRVAL2 = 20
CRPIX1 = 30
CRPIX2 = 40
CDELT1 = -0.1
CDELT2 = 0.1
CROTA2 = 0.
CUNIT1 = deg
CUNIT2 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', VerifyWarning)
WCS_SIMPLE_CELESTIAL = WCS(Header.fromstring(
HEADER_SIMPLE_CELESTIAL, sep='\n'))
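# An equivalent, programmatic way to build the same WCS (a minimal sketch
# mirroring the header keywords above; the helper name is illustrative and
# not part of the test suite):
def _make_simple_celestial_wcs():
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    wcs.wcs.crval = [10, 20]
    wcs.wcs.crpix = [30, 40]
    wcs.wcs.cdelt = [-0.1, 0.1]
    wcs.wcs.cunit = ['deg', 'deg']
    return wcs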
def test_simple_celestial():
wcs = WCS_SIMPLE_CELESTIAL
# Low-level API
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ['pos.eq.ra', 'pos.eq.dec']
assert wcs.world_axis_units == ['deg', 'deg']
assert wcs.pixel_axis_names == ['', '']
assert wcs.world_axis_names == ['', '']
assert_equal(wcs.axis_correlation_matrix, True)
assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
('celestial', 1, 'spherical.lat.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert_allclose(wcs.pixel_to_world_values(29, 39), (10, 20))
assert_allclose(wcs.array_index_to_world_values(39, 29), (10, 20))
assert_allclose(wcs.world_to_pixel_values(10, 20), (29., 39.))
assert_equal(wcs.world_to_array_index_values(10, 20), (39, 29))
# High-level API
coord = wcs.pixel_to_world(29, 39)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 10)
assert_allclose(coord.dec.deg, 20)
coord = wcs.array_index_to_world(39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 10)
assert_allclose(coord.dec.deg, 20)
coord = SkyCoord(10, 20, unit='deg', frame='icrs')
x, y = wcs.world_to_pixel(coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
i, j = wcs.world_to_array_index(coord)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that if the coordinates are passed in a different frame things still
# work properly
coord_galactic = coord.galactic
x, y = wcs.world_to_pixel(coord_galactic)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
i, j = wcs.world_to_array_index(coord_galactic)
assert_equal(i, 39)
assert_equal(j, 29)
# Check that we can actually index the array
data = np.arange(3600).reshape((60, 60))
coord = SkyCoord(10, 20, unit='deg', frame='icrs')
index = wcs.world_to_array_index(coord)
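    # The reference pixel (CRPIX = 30, 40, 1-based) maps exactly to array
    # index (39, 29), and 39 * 60 + 29 = 2369 in the arange grid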
assert_equal(data[index], 2369)
coord = SkyCoord([10, 12], [20, 22], unit='deg', frame='icrs')
index = wcs.world_to_array_index(coord)
assert_equal(data[index], [2369, 3550])
###############################################################################
# The following example is a spectral cube with axes in an unusual order
###############################################################################
HEADER_SPECTRAL_CUBE = """
WCSAXES = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CNAME1 = Latitude
CNAME2 = Frequency
CNAME3 = Longitude
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', VerifyWarning)
WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))
def test_spectral_cube():
# Spectral cube with a weird axis ordering
wcs = WCS_SPECTRAL_CUBE
# Low-level API
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert wcs.pixel_axis_names == ['', '', '']
assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True],
[False, True, False],
[True, False, True]])
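    # The matrix above is indexed [world_axis, pixel_axis]: the two celestial
    # world axes (lat, lon) couple to both celestial pixel axes, while the
    # FREQ axis couples only to its own pixel axis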
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')
assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)
assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['spectral'][0] is Quantity
assert wcs.world_axis_object_classes['spectral'][1] == ()
assert wcs.world_axis_object_classes['spectral'][2] == {}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
# High-level API
coord, spec = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert_allclose(coord.l.deg, 25)
assert_allclose(coord.b.deg, 10)
assert isinstance(spec, SpectralCoord)
assert_allclose(spec.to_value(u.Hz), 20)
coord, spec = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, Galactic)
assert_allclose(coord.l.deg, 25)
assert_allclose(coord.b.deg, 10)
assert isinstance(spec, SpectralCoord)
assert_allclose(spec.to_value(u.Hz), 20)
coord = SkyCoord(25, 10, unit='deg', frame='galactic')
spec = 20 * u.Hz
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
x, y, z = wcs.world_to_pixel(coord, spec)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
# Order of world coordinates shouldn't matter
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
x, y, z = wcs.world_to_pixel(spec, coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
i, j, k = wcs.world_to_array_index(coord, spec)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
i, j, k = wcs.world_to_array_index(spec, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
HEADER_SPECTRAL_CUBE_NONALIGNED = HEADER_SPECTRAL_CUBE.strip() + '\n' + """
PC2_3 = -0.5
PC3_2 = +0.5
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', VerifyWarning)
WCS_SPECTRAL_CUBE_NONALIGNED = WCS(Header.fromstring(
HEADER_SPECTRAL_CUBE_NONALIGNED, sep='\n'))
def test_spectral_cube_nonaligned():
# Make sure that correlation matrix gets adjusted if there are non-identity
# CD matrix terms.
wcs = WCS_SPECTRAL_CUBE_NONALIGNED
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert wcs.pixel_axis_names == ['', '', '']
assert wcs.world_axis_names == ['Latitude', 'Frequency', 'Longitude']
assert_equal(wcs.axis_correlation_matrix, [[True, True, True],
[False, True, True],
[True, True, True]])
# NOTE: we check world_axis_object_components and world_axis_object_classes
# again here because in the past this failed when non-aligned axes were
# present, so this serves as a regression test.
assert len(wcs.world_axis_object_components) == 3
assert wcs.world_axis_object_components[0] == ('celestial', 1, 'spherical.lat.degree')
assert wcs.world_axis_object_components[1][:2] == ('spectral', 0)
assert wcs.world_axis_object_components[2] == ('celestial', 0, 'spherical.lon.degree')
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['spectral'][0] is Quantity
assert wcs.world_axis_object_classes['spectral'][1] == ()
assert wcs.world_axis_object_classes['spectral'][2] == {}
###############################################################################
# The following example is from Rots et al (2015), Table 5. It represents a
# cube with two spatial dimensions and one time dimension
###############################################################################
HEADER_TIME_CUBE = """
SIMPLE = T / Fits standard
BITPIX = -32 / Bits per pixel
NAXIS = 3 / Number of axes
NAXIS1 = 2048 / Axis length
NAXIS2 = 2048 / Axis length
NAXIS3 = 11 / Axis length
DATE = '2008-10-28T14:39:06' / Date FITS file was generated
OBJECT = '2008 TC3' / Name of the object observed
EXPTIME = 1.0011 / Integration time
MJD-OBS = 54746.02749237 / Obs start
DATE-OBS= '2008-10-07T00:39:35.3342' / Observing date
TELESCOP= 'VISTA' / ESO Telescope Name
INSTRUME= 'VIRCAM' / Instrument used.
TIMESYS = 'UTC' / From Observatory Time System
TREFPOS = 'TOPOCENT' / Topocentric
MJDREF = 54746.0 / Time reference point in MJD
RADESYS = 'ICRS' / Not equinoctal
CTYPE2 = 'RA---ZPN' / Zenithal Polynomial Projection
CRVAL2 = 2.01824372640628 / RA at ref pixel
CUNIT2 = 'deg' / Angles are degrees always
CRPIX2 = 2956.6 / Pixel coordinate at ref point
CTYPE1 = 'DEC--ZPN' / Zenithal Polynomial Projection
CRVAL1 = 14.8289418840003 / Dec at ref pixel
CUNIT1 = 'deg' / Angles are degrees always
CRPIX1 = -448.2 / Pixel coordinate at ref point
CTYPE3 = 'UTC' / linear time (UTC)
CRVAL3 = 2375.341 / Relative time of first frame
CUNIT3 = 's' / Time unit
CRPIX3 = 1.0 / Pixel coordinate at ref point
CTYPE3A = 'TT' / alternative linear time (TT)
CRVAL3A = 2440.525 / Relative time of first frame
CUNIT3A = 's' / Time unit
CRPIX3A = 1.0 / Pixel coordinate at ref point
OBSGEO-B= -24.6157 / [deg] Tel geodetic latitude (=North)+
OBSGEO-L= -70.3976 / [deg] Tel geodetic longitude (=East)+
OBSGEO-H= 2530.0000 / [m] Tel height above reference ellipsoid
CRDER3 = 0.0819 / random error in timings from fit
CSYER3 = 0.0100 / absolute time error
PC1_1 = 0.999999971570892 / WCS transform matrix element
PC1_2 = 0.000238449608932 / WCS transform matrix element
PC2_1 = -0.000621542859395 / WCS transform matrix element
PC2_2 = 0.999999806842218 / WCS transform matrix element
CDELT1 = -9.48575432499806E-5 / Axis scale at reference point
CDELT2 = 9.48683176211164E-5 / Axis scale at reference point
CDELT3 = 13.3629 / Axis scale at reference point
PV1_1 = 1. / ZPN linear term
PV1_3 = 42. / ZPN cubic term
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore', (VerifyWarning, FITSFixedWarning))
WCS_TIME_CUBE = WCS(Header.fromstring(HEADER_TIME_CUBE, sep='\n'))
def test_time_cube():
    # Time cube with an unusual axis ordering (dec, ra, time)
wcs = WCS_TIME_CUBE
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (11, 2048, 2048)
assert wcs.pixel_shape == (2048, 2048, 11)
assert wcs.world_axis_physical_types == ['pos.eq.dec', 'pos.eq.ra', 'time']
assert wcs.world_axis_units == ['deg', 'deg', 's']
assert wcs.pixel_axis_names == ['', '', '']
assert wcs.world_axis_names == ['', '', '']
assert_equal(wcs.axis_correlation_matrix, [[True, True, False],
[True, True, False],
[False, False, True]])
components = wcs.world_axis_object_components
assert components[0] == ('celestial', 1, 'spherical.lat.degree')
assert components[1] == ('celestial', 0, 'spherical.lon.degree')
assert components[2][:2] == ('time', 0)
assert callable(components[2][2])
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['time'][0] is Time
assert wcs.world_axis_object_classes['time'][1] == ()
assert wcs.world_axis_object_classes['time'][2] == {}
assert callable(wcs.world_axis_object_classes['time'][3])
assert_allclose(wcs.pixel_to_world_values(-449.2, 2955.6, 0),
(14.8289418840003, 2.01824372640628, 2375.341))
assert_allclose(wcs.array_index_to_world_values(0, 2955.6, -449.2),
(14.8289418840003, 2.01824372640628, 2375.341))
assert_allclose(wcs.world_to_pixel_values(14.8289418840003, 2.01824372640628, 2375.341),
(-449.2, 2955.6, 0))
assert_equal(wcs.world_to_array_index_values(14.8289418840003, 2.01824372640628, 2375.341),
(0, 2956, -449))
# High-level API
coord, time = wcs.pixel_to_world(29, 39, 44)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 1.7323356692202325)
assert_allclose(coord.dec.deg, 14.783516054817797)
assert isinstance(time, Time)
assert_allclose(time.mjd, 54746.03429755324)
coord, time = wcs.array_index_to_world(44, 39, 29)
assert isinstance(coord, SkyCoord)
assert isinstance(coord.frame, ICRS)
assert_allclose(coord.ra.deg, 1.7323356692202325)
assert_allclose(coord.dec.deg, 14.783516054817797)
assert isinstance(time, Time)
assert_allclose(time.mjd, 54746.03429755324)
x, y, z = wcs.world_to_pixel(coord, time)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
# Order of world coordinates shouldn't matter
x, y, z = wcs.world_to_pixel(time, coord)
assert_allclose(x, 29.)
assert_allclose(y, 39.)
assert_allclose(z, 44.)
i, j, k = wcs.world_to_array_index(coord, time)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
# Order of world coordinates shouldn't matter
i, j, k = wcs.world_to_array_index(time, coord)
assert_equal(i, 44)
assert_equal(j, 39)
assert_equal(k, 29)
###############################################################################
# The following tests are to make sure that Time objects are constructed
# correctly for a variety of combinations of WCS keywords
###############################################################################
HEADER_TIME_1D = """
SIMPLE = T
BITPIX = -32
NAXIS = 1
NAXIS1 = 2048
TIMESYS = 'UTC'
TREFPOS = 'TOPOCENT'
MJDREF = 50002.6
CTYPE1 = 'UTC'
CRVAL1 = 5
CUNIT1 = 's'
CRPIX1 = 1.0
CDELT1 = 2
OBSGEO-L= -20
OBSGEO-B= -70
OBSGEO-H= 2530
"""
if Version(wcsver) >= Version('7.1'):
HEADER_TIME_1D += "DATEREF = '1995-10-12T14:24:00'\n"
@pytest.fixture
def header_time_1d():
return Header.fromstring(HEADER_TIME_1D, sep='\n')
def assert_time_at(header, position, jd1, jd2, scale, format):
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
time = wcs.pixel_to_world(position)
assert_allclose(time.jd1, jd1, rtol=1e-10)
assert_allclose(time.jd2, jd2, rtol=1e-10)
assert time.format == format
assert time.scale == scale
@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc', 'local'))
def test_time_1d_values(header_time_1d, scale):
# Check that Time objects are instantiated with the correct values,
# scales, and formats.
header_time_1d['CTYPE1'] = scale.upper()
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, scale, 'mjd')
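# Sanity check for the expected value above (the helper below is a
# hypothetical illustration, not part of the test suite): with 0-based
# pixels, the FITS linear transform is
#     world = CRVAL1 + CDELT1 * (pixel - (CRPIX1 - 1))
# so pixel 1 maps to 5 + 2 * 1 = 7 seconds past MJDREF = 50002.6,
# i.e. JD 2450003.1 + 7 s, matching jd2 = 0.1 + 7 / 3600 / 24.
def _expected_time_offset(pixel, crval=5.0, cdelt=2.0, crpix=1.0):
    return crval + cdelt * (pixel - (crpix - 1))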
def test_time_1d_values_gps(header_time_1d):
# Special treatment for GPS scale
header_time_1d['CTYPE1'] = 'GPS'
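    # GPS time runs 19 s behind TAI (fixed offset), hence the extra 19 s
    # in the expected value below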
assert_time_at(header_time_1d, 1, 2450003, 0.1 + (7 + 19) / 3600 / 24, 'tai', 'mjd')
def test_time_1d_values_deprecated(header_time_1d):
# Deprecated (in FITS) scales
header_time_1d['CTYPE1'] = 'TDT'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')
header_time_1d['CTYPE1'] = 'IAT'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')
header_time_1d['CTYPE1'] = 'GMT'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')
header_time_1d['CTYPE1'] = 'ET'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tt', 'mjd')
def test_time_1d_values_time(header_time_1d):
header_time_1d['CTYPE1'] = 'TIME'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'utc', 'mjd')
header_time_1d['TIMESYS'] = 'TAI'
assert_time_at(header_time_1d, 1, 2450003, 0.1 + 7 / 3600 / 24, 'tai', 'mjd')
@pytest.mark.remote_data
@pytest.mark.parametrize('scale', ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc'))
def test_time_1d_roundtrip(header_time_1d, scale):
# Check that coordinates round-trip
pixel_in = np.arange(3, 10)
header_time_1d['CTYPE1'] = scale.upper()
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d)
# Simple test
time = wcs.pixel_to_world(pixel_in)
pixel_out = wcs.world_to_pixel(time)
assert_allclose(pixel_in, pixel_out)
# Test with an intermediate change to a different scale/format
time = wcs.pixel_to_world(pixel_in).tdb
time.format = 'isot'
pixel_out = wcs.world_to_pixel(time)
assert_allclose(pixel_in, pixel_out)
def test_time_1d_high_precision(header_time_1d):
# Case where the MJDREF is split into two for high precision
del header_time_1d['MJDREF']
header_time_1d['MJDREFI'] = 52000.
header_time_1d['MJDREFF'] = 1e-11
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d)
time = wcs.pixel_to_world(10)
# Here we have to use a very small rtol to really test that MJDREFF is
# taken into account
assert_allclose(time.jd1, 2452001.0, rtol=1e-12)
assert_allclose(time.jd2, -0.5 + 25 / 3600 / 24 + 1e-11, rtol=1e-13)
def test_time_1d_location_geodetic(header_time_1d):
# Make sure that the location is correctly returned (geodetic case)
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d)
time = wcs.pixel_to_world(10)
lon, lat, alt = time.location.to_geodetic()
# FIXME: alt won't work for now because ERFA doesn't implement the IAU 1976
# ellipsoid (https://github.com/astropy/astropy/issues/9420)
assert_allclose(lon.degree, -20)
assert_allclose(lat.degree, -70)
# assert_allclose(alt.to_value(u.m), 2530.)
@pytest.fixture
def header_time_1d_no_obs():
header = Header.fromstring(HEADER_TIME_1D, sep='\n')
del header['OBSGEO-L']
del header['OBSGEO-B']
del header['OBSGEO-H']
return header
def test_time_1d_location_geocentric(header_time_1d_no_obs):
# Make sure that the location is correctly returned (geocentric case)
header = header_time_1d_no_obs
header['OBSGEO-X'] = 10
header['OBSGEO-Y'] = -20
header['OBSGEO-Z'] = 30
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
time = wcs.pixel_to_world(10)
x, y, z = time.location.to_geocentric()
assert_allclose(x.to_value(u.m), 10)
assert_allclose(y.to_value(u.m), -20)
assert_allclose(z.to_value(u.m), 30)
def test_time_1d_location_geocenter(header_time_1d_no_obs):
header_time_1d_no_obs['TREFPOS'] = 'GEOCENTER'
wcs = WCS(header_time_1d_no_obs)
time = wcs.pixel_to_world(10)
x, y, z = time.location.to_geocentric()
assert_allclose(x.to_value(u.m), 0)
assert_allclose(y.to_value(u.m), 0)
assert_allclose(z.to_value(u.m), 0)
def test_time_1d_location_missing(header_time_1d_no_obs):
# Check what happens when no location is present
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match='Missing or incomplete observer location '
'information, setting location in Time to None'):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_location_incomplete(header_time_1d_no_obs):
# Check what happens when location information is incomplete
header_time_1d_no_obs['OBSGEO-L'] = 10.
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match='Missing or incomplete observer location '
'information, setting location in Time to None'):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_location_unsupported(header_time_1d_no_obs):
# Check what happens when TREFPOS is unsupported
header_time_1d_no_obs['TREFPOS'] = 'BARYCENTER'
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match="Observation location 'barycenter' is not "
"supported, setting location in Time to None"):
time = wcs.pixel_to_world(10)
assert time.location is None
def test_time_1d_unsupported_ctype(header_time_1d_no_obs):
    # For CTYPEs we don't support yet, e.g. UT(...), fall back to a plain Time
    # and drop the unsupported sub-scale
header_time_1d_no_obs['CTYPE1'] = 'UT(WWV)'
wcs = WCS(header_time_1d_no_obs)
with pytest.warns(UserWarning,
match="Dropping unsupported sub-scale WWV from scale UT"):
time = wcs.pixel_to_world(10)
assert isinstance(time, Time)
###############################################################################
# Extra corner cases
###############################################################################
def test_unrecognized_unit():
# TODO: Determine whether the following behavior is desirable
wcs = WCS(naxis=1)
with pytest.warns(UnitsWarning):
wcs.wcs.cunit = ['bananas // sekonds']
assert wcs.world_axis_units == ['bananas // sekonds']
def test_distortion_correlations():
filename = get_pkg_data_filename('../../tests/data/sip.fits')
with pytest.warns(FITSFixedWarning):
w = WCS(filename)
assert_equal(w.axis_correlation_matrix, True)
# Changing PC to an identity matrix doesn't change anything since
# distortions are still present.
w.wcs.pc = [[1, 0], [0, 1]]
assert_equal(w.axis_correlation_matrix, True)
# Nor does changing the name of the axes to make them non-celestial
w.wcs.ctype = ['X', 'Y']
assert_equal(w.axis_correlation_matrix, True)
# However once we turn off the distortions the matrix changes
w.sip = None
assert_equal(w.axis_correlation_matrix, [[True, False], [False, True]])
# If we go back to celestial coordinates then the matrix is all True again
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_equal(w.axis_correlation_matrix, True)
# Or if we change to X/Y but have a non-identity PC
w.wcs.pc = [[0.9, -0.1], [0.1, 0.9]]
w.wcs.ctype = ['X', 'Y']
assert_equal(w.axis_correlation_matrix, True)
def test_custom_ctype_to_ucd_mappings():
wcs = WCS(naxis=1)
wcs.wcs.ctype = ['SPAM']
assert wcs.world_axis_physical_types == [None]
# Check simple behavior
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
assert wcs.world_axis_physical_types == [None]
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit', 'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
# Check nesting
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
assert wcs.world_axis_physical_types == ['food.spam']
with custom_ctype_to_ucd_mapping({'APPLE': 'food.fruit'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
# Check priority in nesting
with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
assert wcs.world_axis_physical_types == ['food.spam']
with custom_ctype_to_ucd_mapping({'SPAM': 'food.spam'}):
with custom_ctype_to_ucd_mapping({'SPAM': 'notfood'}):
assert wcs.world_axis_physical_types == ['notfood']
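    # i.e. when the same CTYPE is mapped at several nesting levels, the
    # innermost mapping takes precedence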
def test_caching_components_and_classes():
# Make sure that when we change the WCS object, the classes and components
# are updated (we use a cache internally, so we need to make sure the cache
# is invalidated if needed)
wcs = WCS_SIMPLE_CELESTIAL.deepcopy()
assert wcs.world_axis_object_components == [('celestial', 0, 'spherical.lon.degree'),
('celestial', 1, 'spherical.lat.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], ICRS)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
wcs.wcs.radesys = 'FK5'
frame = wcs.world_axis_object_classes['celestial'][2]['frame']
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2000.
wcs.wcs.equinox = 2010
frame = wcs.world_axis_object_classes['celestial'][2]['frame']
assert isinstance(frame, FK5)
assert frame.equinox.jyear == 2010.
def test_sub_wcsapi_attributes():
# Regression test for a bug that caused some of the WCS attributes to be
# incorrect when using WCS.sub or WCS.celestial (which is an alias for sub
# with lon/lat types).
wcs = WCS_SPECTRAL_CUBE.deepcopy()
wcs.pixel_shape = (30, 40, 50)
wcs.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
# Use celestial shortcut
wcs_sub1 = wcs.celestial
assert wcs_sub1.pixel_n_dim == 2
assert wcs_sub1.world_n_dim == 2
assert wcs_sub1.array_shape == (50, 30)
assert wcs_sub1.pixel_shape == (30, 50)
assert wcs_sub1.pixel_bounds == [(-1, 11), (5, 15)]
assert wcs_sub1.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon']
assert wcs_sub1.world_axis_units == ['deg', 'deg']
assert wcs_sub1.world_axis_names == ['Latitude', 'Longitude']
# Try adding axes
wcs_sub2 = wcs.sub([0, 2, 0])
assert wcs_sub2.pixel_n_dim == 3
assert wcs_sub2.world_n_dim == 3
assert wcs_sub2.array_shape == (None, 40, None)
assert wcs_sub2.pixel_shape == (None, 40, None)
assert wcs_sub2.pixel_bounds == [None, (-2, 18), None]
assert wcs_sub2.world_axis_physical_types == [None, 'em.freq', None]
assert wcs_sub2.world_axis_units == ['', 'Hz', '']
assert wcs_sub2.world_axis_names == ['', 'Frequency', '']
# Use strings
wcs_sub3 = wcs.sub(['longitude', 'latitude'])
assert wcs_sub3.pixel_n_dim == 2
assert wcs_sub3.world_n_dim == 2
assert wcs_sub3.array_shape == (30, 50)
assert wcs_sub3.pixel_shape == (50, 30)
assert wcs_sub3.pixel_bounds == [(5, 15), (-1, 11)]
assert wcs_sub3.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']
assert wcs_sub3.world_axis_units == ['deg', 'deg']
assert wcs_sub3.world_axis_names == ['Longitude', 'Latitude']
# Now try without CNAME set
wcs.wcs.cname = [''] * wcs.wcs.naxis
wcs_sub4 = wcs.sub(['longitude', 'latitude'])
assert wcs_sub4.pixel_n_dim == 2
assert wcs_sub4.world_n_dim == 2
assert wcs_sub4.array_shape == (30, 50)
assert wcs_sub4.pixel_shape == (50, 30)
assert wcs_sub4.pixel_bounds == [(5, 15), (-1, 11)]
assert wcs_sub4.world_axis_physical_types == ['pos.galactic.lon', 'pos.galactic.lat']
assert wcs_sub4.world_axis_units == ['deg', 'deg']
assert wcs_sub4.world_axis_names == ['', '']
HEADER_POLARIZED = """
CTYPE1 = 'HPLT-TAN'
CTYPE2 = 'HPLN-TAN'
CTYPE3 = 'STOKES'
"""
@pytest.fixture
def header_polarized():
return Header.fromstring(HEADER_POLARIZED, sep='\n')
def test_phys_type_polarization(header_polarized):
w = WCS(header_polarized)
assert w.world_axis_physical_types[2] == 'phys.polarization.stokes'
###############################################################################
# Spectral transformations
###############################################################################
HEADER_SPECTRAL_FRAMES = """
BUNIT = 'Jy/beam'
EQUINOX = 2.000000000E+03
CTYPE1 = 'RA---SIN'
CRVAL1 = 2.60108333333E+02
CDELT1 = -2.777777845E-04
CRPIX1 = 1.0
CUNIT1 = 'deg'
CTYPE2 = 'DEC--SIN'
CRVAL2 = -9.75000000000E-01
CDELT2 = 2.777777845E-04
CRPIX2 = 1.0
CUNIT2 = 'deg'
CTYPE3 = 'FREQ'
CRVAL3 = 1.37835117405E+09
CDELT3 = 9.765625000E+04
CRPIX3 = 32.0
CUNIT3 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_frames():
return Header.fromstring(HEADER_SPECTRAL_FRAMES, sep='\n')
def test_spectralcoord_frame(header_spectral_frames):
# This is a test to check the numerical results of transformations between
# different velocity frames. We simply make sure that the returned
# SpectralCoords are in the right frame but don't check the transformations
# since this is already done in test_spectralcoord_accuracy
# in astropy.coordinates.
with iers.conf.set_temp('auto_download', False):
obstime = Time(f"2009-05-04T04:44:23", scale='utc')
header = header_spectral_frames.copy()
header['MJD-OBS'] = obstime.mjd
header['CRVAL1'] = 16.33211
header['CRVAL2'] = -34.2221
header['OBSGEO-L'] = 144.2
header['OBSGEO-B'] = -20.2
header['OBSGEO-H'] = 0.
# We start off with a WCS defined in topocentric frequency
with pytest.warns(FITSFixedWarning):
wcs_topo = WCS(header)
# We convert a single pixel coordinate to world coordinates and keep only
# the second high level object - a SpectralCoord:
sc_topo = wcs_topo.pixel_to_world(0, 0, 31)[1]
# We check that this is in topocentric frame with zero velocities
assert isinstance(sc_topo, SpectralCoord)
assert isinstance(sc_topo.observer, ITRS)
assert sc_topo.observer.obstime.isot == obstime.isot
assert_equal(sc_topo.observer.data.differentials['s'].d_xyz.value, 0)
observatory = EarthLocation.from_geodetic(144.2, -20.2).get_itrs(obstime=obstime).transform_to(ICRS())
assert observatory.separation_3d(sc_topo.observer.transform_to(ICRS())) < 1 * u.km
for specsys, expected_frame in VELOCITY_FRAMES.items():
header['SPECSYS'] = specsys
with pytest.warns(FITSFixedWarning):
wcs = WCS(header)
sc = wcs.pixel_to_world(0, 0, 31)[1]
# Now transform to the expected velocity frame, which should leave
# the spectral coordinate unchanged
sc_check = sc.with_observer_stationary_relative_to(expected_frame)
assert_quantity_allclose(sc.quantity, sc_check.quantity)
@pytest.mark.parametrize(('ctype3', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True]))
def test_different_ctypes(header_spectral_frames, ctype3, observer):
header = header_spectral_frames.copy()
header['CTYPE3'] = ctype3
header['CRVAL3'] = 0.1
header['CDELT3'] = 0.001
if ctype3[0] == 'V':
header['CUNIT3'] = 'm s-1'
else:
header['CUNIT3'] = ''
header['RESTWAV'] = 1.420405752E+09
header['MJD-OBS'] = 55197
if observer:
header['OBSGEO-L'] = 144.2
header['OBSGEO-B'] = -20.2
header['OBSGEO-H'] = 0.
header['SPECSYS'] = 'BARYCENT'
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
skycoord, spectralcoord = wcs.pixel_to_world(0, 0, 31)
assert isinstance(spectralcoord, SpectralCoord)
if observer:
pix = wcs.world_to_pixel(skycoord, spectralcoord)
else:
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
pix = wcs.world_to_pixel(skycoord, spectralcoord)
assert_allclose(pix, [0, 0, 31], rtol=1e-6, atol=1e-9)
def test_non_convergence_warning():
"""Test case for issue #11446
    Since we can't define a target accuracy when plotting a WCS,
    `all_world2pix` should not error but only warn when the default accuracy
    can't be reached.
"""
# define a minimal WCS where convergence fails for certain image positions
wcs = WCS(naxis=2)
crpix = [0, 0]
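    # Note that a, b, ap and bp all alias the *same* zero array here, so the
    # cubic term set below ends up in every SIP polynomial; these coefficients
    # make the default-accuracy inverse iteration fail for the test positions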
a = b = ap = bp = np.zeros((4, 4))
a[3, 0] = -1.20116753e-07
test_pos_x = [1000, 1]
test_pos_y = [0, 2]
wcs.sip = Sip(a, b, ap, bp, crpix)
# first make sure the WCS works when using a low accuracy
expected = wcs.all_world2pix(test_pos_x, test_pos_y, 0, tolerance=1e-3)
# then check that it fails when using the default accuracy
with pytest.raises(NoConvergence):
wcs.all_world2pix(test_pos_x, test_pos_y, 0)
    # finally, check that world_to_pixel_values raises a warning but returns
    # the same low-accuracy result
with pytest.warns(UserWarning):
assert_allclose(wcs.world_to_pixel_values(test_pos_x, test_pos_y),
expected)
HEADER_SPECTRAL_1D = """
CTYPE1 = 'FREQ'
CRVAL1 = 1.37835117405E+09
CDELT1 = 9.765625000E+04
CRPIX1 = 32.0
CUNIT1 = 'Hz'
SPECSYS = 'TOPOCENT'
RESTFRQ = 1.420405752E+09 / [Hz]
RADESYS = 'FK5'
"""
@pytest.fixture
def header_spectral_1d():
return Header.fromstring(HEADER_SPECTRAL_1D, sep='\n')
@pytest.mark.parametrize(('ctype1', 'observer'), product(['ZOPT', 'BETA', 'VELO', 'VRAD', 'VOPT'], [False, True]))
def test_spectral_1d(header_spectral_1d, ctype1, observer):
# This is a regression test for issues that happened with 1-d WCS
# where the target is not defined but observer is.
header = header_spectral_1d.copy()
header['CTYPE1'] = ctype1
header['CRVAL1'] = 0.1
header['CDELT1'] = 0.001
if ctype1[0] == 'V':
header['CUNIT1'] = 'm s-1'
else:
header['CUNIT1'] = ''
header['RESTWAV'] = 1.420405752E+09
header['MJD-OBS'] = 55197
if observer:
header['OBSGEO-L'] = 144.2
header['OBSGEO-B'] = -20.2
header['OBSGEO-H'] = 0.
header['SPECSYS'] = 'BARYCENT'
with warnings.catch_warnings():
warnings.simplefilter('ignore', FITSFixedWarning)
wcs = WCS(header)
# First ensure that transformations round-trip
spectralcoord = wcs.pixel_to_world(31)
assert isinstance(spectralcoord, SpectralCoord)
assert spectralcoord.target is None
assert (spectralcoord.observer is not None) is observer
if observer:
expected_message = 'No target defined on SpectralCoord'
else:
expected_message = 'No observer defined on WCS'
with pytest.warns(AstropyUserWarning, match=expected_message):
pix = wcs.world_to_pixel(spectralcoord)
assert_allclose(pix, [31], rtol=1e-6)
# Also make sure that we can convert a SpectralCoord on which the observer
# is not defined but the target is.
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
spectralcoord_no_obs = SpectralCoord(spectralcoord.quantity,
doppler_rest=spectralcoord.doppler_rest,
doppler_convention=spectralcoord.doppler_convention,
target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc))
if observer:
expected_message = 'No observer defined on SpectralCoord'
else:
expected_message = 'No observer defined on WCS'
with pytest.warns(AstropyUserWarning, match=expected_message):
pix2 = wcs.world_to_pixel(spectralcoord_no_obs)
assert_allclose(pix2, [31], rtol=1e-6)
# And finally check case when both observer and target are defined on the
# SpectralCoord
with pytest.warns(AstropyUserWarning, match='No velocity defined on frame'):
spectralcoord_no_obs = SpectralCoord(spectralcoord.quantity,
doppler_rest=spectralcoord.doppler_rest,
doppler_convention=spectralcoord.doppler_convention,
observer=ICRS(10 * u.deg, 20 * u.deg, distance=0 * u.kpc),
target=ICRS(10 * u.deg, 20 * u.deg, distance=1 * u.kpc))
if observer:
pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
else:
with pytest.warns(AstropyUserWarning, match='No observer defined on WCS'):
pix3 = wcs.world_to_pixel(spectralcoord_no_obs)
assert_allclose(pix3, [31], rtol=1e-6)
|
633e7be86a4c03acab794d3e1fc79eb1d3dd6b2d8a81e4d7c4939e0831bf08c2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
from astropy.coordinates.attributes import (TimeAttribute,
QuantityAttribute,
EarthLocationAttribute)
__all__ = ['AltAz']
_90DEG = 90*u.deg
doc_components = """
az : `~astropy.coordinates.Angle`, optional, keyword-only
The Azimuth for this object (``alt`` must also be given and
``representation`` must be None).
alt : `~astropy.coordinates.Angle`, optional, keyword-only
The Altitude for this object (``az`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
pm_az_cosalt : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in azimuth (including the ``cos(alt)`` factor) for
this object (``pm_alt`` must also be given).
pm_alt : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in altitude for this object (``pm_az_cosalt`` must
also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object."""
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position and orientation of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame.
pressure : `~astropy.units.Quantity` ['pressure']
The atmospheric pressure as an `~astropy.units.Quantity` with pressure
units. This is necessary for performing refraction corrections.
Setting this to 0 (the default) will disable refraction calculations
when transforming to/from this frame.
temperature : `~astropy.units.Quantity` ['temperature']
The ground-level temperature as an `~astropy.units.Quantity` in
deg C. This is necessary for performing refraction corrections.
relative_humidity : `~astropy.units.Quantity` ['dimensionless'] or number
    The relative humidity as a dimensionless quantity between 0 and 1.
This is necessary for performing refraction corrections.
obswl : `~astropy.units.Quantity` ['length']
The average wavelength of observations as an `~astropy.units.Quantity`
with length units. This is necessary for performing refraction
corrections.
Notes
-----
The refraction model is based on that implemented in ERFA, which is fast
but becomes inaccurate for altitudes below about 5 degrees. Near and below
altitudes of 0, it can even give meaningless answers, and in this case
transforming to AltAz and back to another frame can give highly discrepant
results. For much better numerical stability, leave the ``pressure`` at
``0`` (the default), thereby disabling the refraction correction and
yielding "topocentric" horizontal coordinates.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class AltAz(BaseCoordinateFrame):
"""
A coordinate or frame in the Altitude-Azimuth system (Horizontal
coordinates) with respect to the WGS84 ellipsoid. Azimuth is oriented
East of North (i.e., N=0, E=90 degrees). Altitude is also known as
elevation angle, so this frame is also in the Azimuth-Elevation system.
This frame is assumed to *include* refraction effects if the ``pressure``
frame attribute is non-zero.
The frame attributes are listed under **Other Parameters**, which are
necessary for transforming from AltAz to some other system.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'az'),
RepresentationMapping('lat', 'alt')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
obstime = TimeAttribute(default=None)
location = EarthLocationAttribute(default=None)
pressure = QuantityAttribute(default=0, unit=u.hPa)
temperature = QuantityAttribute(default=0, unit=u.deg_C)
relative_humidity = QuantityAttribute(default=0, unit=u.dimensionless_unscaled)
obswl = QuantityAttribute(default=1*u.micron, unit=u.micron)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def secz(self):
"""
Secant of the zenith angle for this coordinate, a common estimate of
the airmass.
"""
return 1/np.sin(self.alt)
@property
def zen(self):
"""
The zenith angle (or zenith distance / co-altitude) for this coordinate.
"""
return _90DEG.to(self.alt.unit) - self.alt
# self-transform defined in cirs_observed_transforms.py
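# A minimal usage sketch (the site, time, and target below are illustrative
# assumptions; transforming ICRS -> AltAz may download IERS data on first use):
if __name__ == '__main__':
    from astropy.coordinates import EarthLocation, SkyCoord
    from astropy.time import Time

    site = EarthLocation.from_geodetic(lon=-70.4 * u.deg, lat=-24.6 * u.deg,
                                       height=2500 * u.m)
    frame = AltAz(obstime=Time('2020-01-01T03:00:00'), location=site)
    coord = SkyCoord(10 * u.deg, -20 * u.deg, frame='icrs')
    altaz = coord.transform_to(frame)
    print(altaz.alt, altaz.az, altaz.secz)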
|
734813195f38c7fb08624145bf42806818dec113462bcd524f2cf9bae263b49d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import units as u
from astropy.utils.decorators import format_doc
from astropy.coordinates import representation as r
from astropy.coordinates.baseframe import BaseCoordinateFrame, RepresentationMapping, base_doc
from astropy.coordinates.attributes import (TimeAttribute,
QuantityAttribute,
EarthLocationAttribute)
__all__ = ['HADec']
doc_components = """
ha : `~astropy.coordinates.Angle`, optional, keyword-only
The Hour Angle for this object (``dec`` must also be given and
``representation`` must be None).
dec : `~astropy.coordinates.Angle`, optional, keyword-only
The Declination for this object (``ha`` must also be given and
``representation`` must be None).
distance : `~astropy.units.Quantity` ['length'], optional, keyword-only
The Distance for this object along the line-of-sight.
pm_ha_cosdec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in hour angle (including the ``cos(dec)`` factor) for
this object (``pm_dec`` must also be given).
pm_dec : `~astropy.units.Quantity` ['angular speed'], optional, keyword-only
The proper motion in declination for this object (``pm_ha_cosdec`` must
also be given).
radial_velocity : `~astropy.units.Quantity` ['speed'], optional, keyword-only
The radial velocity of this object."""
doc_footer = """
Other parameters
----------------
obstime : `~astropy.time.Time`
The time at which the observation is taken. Used for determining the
position and orientation of the Earth.
location : `~astropy.coordinates.EarthLocation`
The location on the Earth. This can be specified either as an
`~astropy.coordinates.EarthLocation` object or as anything that can be
transformed to an `~astropy.coordinates.ITRS` frame.
pressure : `~astropy.units.Quantity` ['pressure']
The atmospheric pressure as an `~astropy.units.Quantity` with pressure
units. This is necessary for performing refraction corrections.
Setting this to 0 (the default) will disable refraction calculations
when transforming to/from this frame.
temperature : `~astropy.units.Quantity` ['temperature']
The ground-level temperature as an `~astropy.units.Quantity` in
deg C. This is necessary for performing refraction corrections.
relative_humidity : `~astropy.units.Quantity` ['dimensionless'] or number
    The relative humidity as a dimensionless quantity between 0 and 1.
This is necessary for performing refraction corrections.
obswl : `~astropy.units.Quantity` ['length']
The average wavelength of observations as an `~astropy.units.Quantity`
with length units. This is necessary for performing refraction
corrections.
Notes
-----
The refraction model is based on that implemented in ERFA, which is fast
but becomes inaccurate for altitudes below about 5 degrees. Near and below
altitudes of 0, it can even give meaningless answers, and in this case
transforming to HADec and back to another frame can give highly discrepant
results. For much better numerical stability, leave the ``pressure`` at
``0`` (the default), thereby disabling the refraction correction and
yielding "topocentric" equatorial coordinates.
"""
@format_doc(base_doc, components=doc_components, footer=doc_footer)
class HADec(BaseCoordinateFrame):
"""
A coordinate or frame in the Hour Angle-Declination system (Equatorial
coordinates) with respect to the WGS84 ellipsoid. Hour Angle is oriented
with respect to upper culmination such that the hour angle is negative to
the East and positive to the West.
This frame is assumed to *include* refraction effects if the ``pressure``
frame attribute is non-zero.
The frame attributes are listed under **Other Parameters**, which are
necessary for transforming from HADec to some other system.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'ha', u.hourangle),
RepresentationMapping('lat', 'dec')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
obstime = TimeAttribute(default=None)
location = EarthLocationAttribute(default=None)
pressure = QuantityAttribute(default=0, unit=u.hPa)
temperature = QuantityAttribute(default=0, unit=u.deg_C)
relative_humidity = QuantityAttribute(default=0, unit=u.dimensionless_unscaled)
obswl = QuantityAttribute(default=1*u.micron, unit=u.micron)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.has_data:
self._set_data_lon_wrap_angle(self.data)
@staticmethod
def _set_data_lon_wrap_angle(data):
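        # Hour angle is conventionally quoted between -12h and +12h, so wrap
        # the longitude component of the data at 180 deg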
if hasattr(data, 'lon'):
data.lon.wrap_angle = 180. * u.deg
return data
def represent_as(self, base, s='base', in_frame_units=False):
"""
        Ensure the longitude wrap angle is set for any spherical
        representation before returning it.
"""
data = super().represent_as(base, s, in_frame_units=in_frame_units)
self._set_data_lon_wrap_angle(data)
return data
# self-transform defined in cirs_observed_transforms.py
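# A minimal sketch (illustrative values): the 'ha' component is reported in
# hourangle units and wrapped into the -12h..+12h range by __init__ above.
if __name__ == '__main__':
    hadec = HADec(ha=190 * u.deg, dec=10 * u.deg)
    print(hadec.ha)   # 190 deg wraps past +12h to roughly -11h20m
    print(hadec.dec)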
|
35eb3244c46a81c539794b3cab839f3b049671dd5a89a05e4e8a947f29254800 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from contextlib import ExitStack
import pytest
import numpy as np
from numpy import testing as npt
from astropy import units as u
from astropy.time import Time
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.utils.compat import NUMPY_LT_1_19
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.coordinates import (Angle, ICRS, FK4, FK5, Galactic, SkyCoord,
CartesianRepresentation)
from astropy.coordinates.angle_formats import dms_to_degrees, hms_to_hours
def test_angle_arrays():
"""
Test arrays values with Angle objects.
"""
# Tests incomplete
a1 = Angle([0, 45, 90, 180, 270, 360, 720.], unit=u.degree)
npt.assert_almost_equal([0., 45., 90., 180., 270., 360., 720.], a1.value)
a2 = Angle(np.array([-90, -45, 0, 45, 90, 180, 270, 360]), unit=u.degree)
npt.assert_almost_equal([-90, -45, 0, 45, 90, 180, 270, 360],
a2.value)
a3 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"])
npt.assert_almost_equal([12., 45., 5., 229.18311805],
a3.value)
assert a3.unit == u.degree
a4 = Angle(["12 degrees", "3 hours", "5 deg", "4rad"], u.radian)
npt.assert_almost_equal(a4.degree, a3.value)
assert a4.unit == u.radian
a5 = Angle([0, 45, 90, 180, 270, 360], unit=u.degree)
a6 = a5.sum()
npt.assert_almost_equal(a6.value, 945.0)
assert a6.unit is u.degree
with ExitStack() as stack:
stack.enter_context(pytest.raises(TypeError))
# Arrays where the elements are Angle objects are not supported -- it's
# really tricky to do correctly, if at all, due to the possibility of
# nesting.
if not NUMPY_LT_1_19:
stack.enter_context(
pytest.warns(DeprecationWarning,
match='automatic object dtype is deprecated'))
a7 = Angle([a1, a2, a3], unit=u.degree)
a8 = Angle(["04:02:02", "03:02:01", "06:02:01"], unit=u.degree)
npt.assert_almost_equal(a8.value, [4.03388889, 3.03361111, 6.03361111])
a9 = Angle(np.array(["04:02:02", "03:02:01", "06:02:01"]), unit=u.degree)
npt.assert_almost_equal(a9.value, a8.value)
with pytest.raises(u.UnitsError):
a10 = Angle(["04:02:02", "03:02:01", "06:02:01"])
def test_dms():
a1 = Angle([0, 45.5, -45.5], unit=u.degree)
d, m, s = a1.dms
npt.assert_almost_equal(d, [0, 45, -45])
npt.assert_almost_equal(m, [0, 30, -30])
npt.assert_almost_equal(s, [0, 0, -0])
def test_hms():
a1 = Angle([0, 11.5, -11.5], unit=u.hour)
h, m, s = a1.hms
npt.assert_almost_equal(h, [0, 11, -11])
npt.assert_almost_equal(m, [0, 30, -30])
npt.assert_almost_equal(s, [0, 0, -0])
hms = a1.hms
hours = hms[0] + hms[1] / 60. + hms[2] / 3600.
npt.assert_almost_equal(a1.hour, hours)
with pytest.warns(AstropyDeprecationWarning, match='hms_to_hours'):
a2 = Angle(hms, unit=u.hour)
npt.assert_almost_equal(a2.radian, a1.radian)
def test_array_coordinates_creation():
"""
Test creating coordinates from arrays.
"""
c = ICRS(np.array([1, 2])*u.deg, np.array([3, 4])*u.deg)
assert not c.ra.isscalar
with pytest.raises(ValueError):
c = ICRS(np.array([1, 2])*u.deg, np.array([3, 4, 5])*u.deg)
with pytest.raises(ValueError):
c = ICRS(np.array([1, 2, 4, 5])*u.deg, np.array([[3, 4], [5, 6]])*u.deg)
# make sure cartesian initialization also works
cart = CartesianRepresentation(x=[1., 2.]*u.kpc, y=[3., 4.]*u.kpc, z=[5., 6.]*u.kpc)
c = ICRS(cart)
# also ensure strings can be arrays
c = SkyCoord(['1d0m0s', '2h02m00.3s'], ['3d', '4d'])
# but invalid strings cannot
with pytest.raises(ValueError):
c = SkyCoord(Angle(['10m0s', '2h02m00.3s']), Angle(['3d', '4d']))
with pytest.raises(ValueError):
c = SkyCoord(Angle(['1d0m0s', '2h02m00.3s']), Angle(['3x', '4d']))
def test_array_coordinates_distances():
"""
Test creating coordinates from arrays and distances.
"""
# correct way
ICRS(ra=np.array([1, 2])*u.deg, dec=np.array([3, 4])*u.deg, distance=[.1, .2] * u.kpc)
with pytest.raises(ValueError):
# scalar distance and mismatched array coordinates
ICRS(ra=np.array([1, 2, 3])*u.deg, dec=np.array([[3, 4], [5, 6]])*u.deg, distance=2. * u.kpc)
with pytest.raises(ValueError):
# more distance values than coordinates
ICRS(ra=np.array([1, 2])*u.deg, dec=np.array([3, 4])*u.deg, distance=[.1, .2, 3.] * u.kpc)
@pytest.mark.parametrize(('arrshape', 'distance'), [((2, ), None), ((4, 2, 5), None), ((4, 2, 5), 2 * u.kpc)])
def test_array_coordinates_transformations(arrshape, distance):
"""
Test transformation on coordinates with array content (first length-2 1D, then a 3D array)
"""
# M31 coordinates from test_transformations
raarr = np.ones(arrshape) * 10.6847929
decarr = np.ones(arrshape) * 41.2690650
if distance is not None:
distance = np.ones(arrshape) * distance
print(raarr, decarr, distance)
c = ICRS(ra=raarr*u.deg, dec=decarr*u.deg, distance=distance)
g = c.transform_to(Galactic())
assert g.l.shape == arrshape
npt.assert_array_almost_equal(g.l.degree, 121.17440967)
npt.assert_array_almost_equal(g.b.degree, -21.57299631)
if distance is not None:
assert g.distance.unit == c.distance.unit
# now make sure round-tripping works through FK5
c2 = c.transform_to(FK5()).transform_to(ICRS())
npt.assert_array_almost_equal(c.ra.radian, c2.ra.radian)
npt.assert_array_almost_equal(c.dec.radian, c2.dec.radian)
assert c2.ra.shape == arrshape
if distance is not None:
assert c2.distance.unit == c.distance.unit
# also make sure it's possible to get to FK4, which uses a direct transform function.
fk4 = c.transform_to(FK4())
npt.assert_array_almost_equal(fk4.ra.degree, 10.0004, decimal=4)
npt.assert_array_almost_equal(fk4.dec.degree, 40.9953, decimal=4)
assert fk4.ra.shape == arrshape
if distance is not None:
assert fk4.distance.unit == c.distance.unit
# now check the reverse transforms run
cfk4 = fk4.transform_to(ICRS())
assert cfk4.ra.shape == arrshape
def test_array_precession():
"""
Ensures that FK5 coordinates as arrays precess their equinoxes
"""
j2000 = Time('J2000')
j1975 = Time('J1975')
fk5 = FK5([1, 1.1]*u.radian, [0.5, 0.6]*u.radian)
assert fk5.equinox.jyear == j2000.jyear
fk5_2 = fk5.transform_to(FK5(equinox=j1975))
assert fk5_2.equinox.jyear == j1975.jyear
npt.assert_array_less(0.05, np.abs(fk5.ra.degree - fk5_2.ra.degree))
npt.assert_array_less(0.05, np.abs(fk5.dec.degree - fk5_2.dec.degree))
def test_array_separation():
c1 = ICRS([0, 0]*u.deg, [0, 0]*u.deg)
c2 = ICRS([1, 2]*u.deg, [0, 0]*u.deg)
npt.assert_array_almost_equal(c1.separation(c2).degree, [1, 2])
c3 = ICRS([0, 3.]*u.deg, [0., 0]*u.deg, distance=[1, 1.] * u.kpc)
c4 = ICRS([1, 1.]*u.deg, [0., 0]*u.deg, distance=[1, 1.] * u.kpc)
    # the 3-1 separation should be about twice the 0-1 separation, but not
    # *exactly* twice: separation_3d returns chord lengths, 2*d*sin(theta/2),
    # which is not linear in the angle
sep = c3.separation_3d(c4)
sepdiff = sep[1] - (2 * sep[0])
assert abs(sepdiff.value) < 1e-5
assert sepdiff != 0
def test_array_indexing():
ra = np.linspace(0, 360, 10)
dec = np.linspace(-90, 90, 10)
j1975 = Time(1975, format='jyear')
c1 = FK5(ra*u.deg, dec*u.deg, equinox=j1975)
c2 = c1[4]
assert c2.ra.degree == 160
assert c2.dec.degree == -10
c3 = c1[2:5]
assert_allclose(c3.ra, [80, 120, 160] * u.deg)
assert_allclose(c3.dec, [-50, -30, -10] * u.deg)
c4 = c1[np.array([2, 5, 8])]
assert_allclose(c4.ra, [80, 200, 320] * u.deg)
assert_allclose(c4.dec, [-50, 10, 70] * u.deg)
# now make sure the equinox is preserved
assert c2.equinox == c1.equinox
assert c3.equinox == c1.equinox
assert c4.equinox == c1.equinox
def test_array_len():
input_length = [1, 5]
for length in input_length:
ra = np.linspace(0, 360, length)
dec = np.linspace(0, 90, length)
c = ICRS(ra*u.deg, dec*u.deg)
assert len(c) == length
assert c.shape == (length,)
with pytest.raises(TypeError):
c = ICRS(0*u.deg, 0*u.deg)
len(c)
assert c.shape == tuple()
def test_array_eq():
c1 = ICRS([1, 2]*u.deg, [3, 4]*u.deg)
c2 = ICRS([1, 2]*u.deg, [3, 5]*u.deg)
c3 = ICRS([1, 3]*u.deg, [3, 4]*u.deg)
c4 = ICRS([1, 2]*u.deg, [3, 4.2]*u.deg)
assert np.all(c1 == c1)
assert np.any(c1 != c2)
assert np.any(c1 != c3)
assert np.any(c1 != c4)
|
d53b59c42a40b47b3496359d744f581958eda4cf73a3f03c9ac642770c0701d5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``HTML``
reader/writer and aims to document its functionality.
Requires `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
to be installed.
"""
from io import StringIO
from astropy.io.ascii import html
from astropy.io.ascii import core
from astropy.table import Table
import pytest
import numpy as np
from .common import setup_function, teardown_function # noqa
from astropy.io import ascii
from astropy.utils.compat.optional_deps import HAS_BLEACH, HAS_BS4 # noqa
if HAS_BS4:
from bs4 import BeautifulSoup, FeatureNotFound
@pytest.mark.skipif('not HAS_BS4')
def test_soupstring():
"""
Test to make sure the class SoupString behaves properly.
"""
soup = BeautifulSoup('<html><head></head><body><p>foo</p></body></html>',
'html.parser')
soup_str = html.SoupString(soup)
assert isinstance(soup_str, str)
assert isinstance(soup_str, html.SoupString)
assert soup_str == '<html><head></head><body><p>foo</p></body></html>'
assert soup_str.soup is soup
def test_listwriter():
"""
Test to make sure the class ListWriter behaves properly.
"""
lst = []
writer = html.ListWriter(lst)
for i in range(5):
writer.write(i)
for ch in 'abcde':
writer.write(ch)
assert lst == [0, 1, 2, 3, 4, 'a', 'b', 'c', 'd', 'e']
@pytest.mark.skipif('not HAS_BS4')
def test_identify_table():
"""
Test to make sure that identify_table() returns whether the
given BeautifulSoup tag is the correct table to process.
"""
    # Should return False for non-<table> tags and for None input
soup = BeautifulSoup('<html><body></body></html>', 'html.parser')
assert html.identify_table(soup, {}, 0) is False
assert html.identify_table(None, {}, 0) is False
soup = BeautifulSoup('<table id="foo"><tr><th>A</th></tr><tr>'
'<td>B</td></tr></table>', 'html.parser').table
assert html.identify_table(soup, {}, 2) is False
assert html.identify_table(soup, {}, 1) is True # Default index of 1
# Same tests, but with explicit parameter
assert html.identify_table(soup, {'table_id': 2}, 1) is False
assert html.identify_table(soup, {'table_id': 1}, 1) is True
# Test identification by string ID
assert html.identify_table(soup, {'table_id': 'bar'}, 1) is False
assert html.identify_table(soup, {'table_id': 'foo'}, 1) is True
@pytest.mark.skipif('not HAS_BS4')
def test_missing_data():
"""
Test reading a table with missing data
"""
# First with default where blank => '0'
table_in = ['<table>',
'<tr><th>A</th></tr>',
'<tr><td></td></tr>',
'<tr><td>1</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert dat.masked is False
assert np.all(dat['A'].mask == [True, False])
assert dat['A'].dtype.kind == 'i'
# Now with a specific value '...' => missing
table_in = ['<table>',
'<tr><th>A</th></tr>',
'<tr><td>...</td></tr>',
'<tr><td>1</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html', fill_values=[('...', '0')])
assert dat.masked is False
assert np.all(dat['A'].mask == [True, False])
assert dat['A'].dtype.kind == 'i'
@pytest.mark.skipif('not HAS_BS4')
def test_rename_cols():
"""
Test reading a table and renaming cols
"""
table_in = ['<table>',
'<tr><th>A</th> <th>B</th></tr>',
'<tr><td>1</td><td>2</td></tr>',
'</table>']
# Swap column names
dat = Table.read(table_in, format='ascii.html', names=['B', 'A'])
assert dat.colnames == ['B', 'A']
assert len(dat) == 1
# Swap column names and only include A (the renamed version)
dat = Table.read(table_in, format='ascii.html', names=['B', 'A'], include_names=['A'])
assert dat.colnames == ['A']
assert len(dat) == 1
assert np.all(dat['A'] == 2)
@pytest.mark.skipif('not HAS_BS4')
def test_no_names():
"""
Test reading a table with no column header
"""
table_in = ['<table>',
'<tr><td>1</td></tr>',
'<tr><td>2</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert dat.colnames == ['col1']
assert len(dat) == 2
dat = Table.read(table_in, format='ascii.html', names=['a'])
assert dat.colnames == ['a']
assert len(dat) == 2
@pytest.mark.skipif('not HAS_BS4')
def test_identify_table_fail():
"""
Raise an exception with an informative error message if table_id
is not found.
"""
table_in = ['<table id="foo"><tr><th>A</th></tr>',
'<tr><td>B</td></tr></table>']
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'},
guess=False)
assert err.match("ERROR: HTML table id 'bad_id' not found$")
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format='ascii.html', htmldict={'table_id': 3},
guess=False)
assert err.match("ERROR: HTML table number 3 not found$")
@pytest.mark.skipif('not HAS_BS4')
def test_backend_parsers():
"""
Make sure the user can specify which back-end parser to use
and that an error is raised if the parser is invalid.
"""
for parser in ('lxml', 'xml', 'html.parser', 'html5lib'):
try:
Table.read('data/html2.html', format='ascii.html',
htmldict={'parser': parser}, guess=False)
except FeatureNotFound:
if parser == 'html.parser':
raise
# otherwise ignore if the dependency isn't present
# reading should fail if the parser is invalid
with pytest.raises(FeatureNotFound):
Table.read('data/html2.html', format='ascii.html',
htmldict={'parser': 'foo'}, guess=False)
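# A minimal sketch of an explicit backend choice, assuming only the stdlib
# 'html.parser' backend, which is always available once bs4 is installed:
@pytest.mark.skipif('not HAS_BS4')
def test_explicit_stdlib_parser():
    table_in = ['<table><tr><th>A</th></tr><tr><td>1</td></tr></table>']
    dat = Table.read(table_in, format='ascii.html',
                     htmldict={'parser': 'html.parser'}, guess=False)
    assert dat.colnames == ['A']
    assert np.all(dat['A'] == [1])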
@pytest.mark.skipif('HAS_BS4')
def test_htmlinputter_no_bs4():
"""
    This should raise an OptionalTableImportError if BeautifulSoup
is not installed.
"""
inputter = html.HTMLInputter()
with pytest.raises(core.OptionalTableImportError):
inputter.process_lines([])
@pytest.mark.skipif('not HAS_BS4')
def test_htmlinputter():
"""
Test to ensure that HTMLInputter correctly converts input
into a list of SoupStrings representing table elements.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
    # In the absence of table_id, defaults to the first table
expected = ['<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>',
'<tr><td>1</td><td>a</td><td>1.05</td></tr>',
'<tr><td>2</td><td>b</td><td>2.75</td></tr>',
'<tr><td>3</td><td>c</td><td>-1.25</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
# Should raise an InconsistentTableError if the table is not found
inputter.html = {'table_id': 4}
with pytest.raises(core.InconsistentTableError):
inputter.get_lines(table)
# Identification by string ID
inputter.html['table_id'] = 'second'
expected = ['<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>',
'<tr><td>4</td><td>d</td><td>10.5</td></tr>',
'<tr><td>5</td><td>e</td><td>27.5</td></tr>',
'<tr><td>6</td><td>f</td><td>-12.5</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
# Identification by integer index
inputter.html['table_id'] = 3
expected = ['<tr><th>C1</th><th>C2</th><th>C3</th></tr>',
'<tr><td>7</td><td>g</td><td>105.0</td></tr>',
'<tr><td>8</td><td>h</td><td>275.0</td></tr>',
'<tr><td>9</td><td>i</td><td>-125.0</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
@pytest.mark.skipif('not HAS_BS4')
def test_htmlsplitter():
"""
    Test to make sure that HTMLSplitter correctly processes lines
    of type SoupString, returning a generator that yields all
    header and data elements.
"""
splitter = html.HTMLSplitter()
lines = [html.SoupString(BeautifulSoup('<table><tr><th>Col 1</th><th>Col 2</th></tr></table>',
'html.parser').tr),
html.SoupString(BeautifulSoup('<table><tr><td>Data 1</td><td>Data 2</td></tr></table>',
'html.parser').tr)]
expected_data = [['Col 1', 'Col 2'], ['Data 1', 'Data 2']]
assert list(splitter(lines)) == expected_data
# Make sure the presence of a non-SoupString triggers a TypeError
lines.append('<tr><td>Data 3</td><td>Data 4</td></tr>')
with pytest.raises(TypeError):
list(splitter(lines))
# Make sure that passing an empty list triggers an error
with pytest.raises(core.InconsistentTableError):
list(splitter([]))
@pytest.mark.skipif('not HAS_BS4')
def test_htmlheader_start():
"""
Test to ensure that the start_line method of HTMLHeader
    returns the first line of header data. Uses data/html.html
for sample input.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
header = html.HTMLHeader()
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>'
inputter.html['table_id'] = 'second'
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>'
inputter.html['table_id'] = 3
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>C1</th><th>C2</th><th>C3</th></tr>'
# start_line should return None if no valid header is found
lines = [html.SoupString(BeautifulSoup('<table><tr><td>Data</td></tr></table>',
'html.parser').tr),
html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)]
assert header.start_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append('<tr><th>Header</th></tr>')
with pytest.raises(TypeError):
header.start_line(lines)
@pytest.mark.skipif('not HAS_BS4')
def test_htmldata():
"""
    Test to ensure that the start_line and end_line methods
    of HTMLData locate the first and last lines of table data.
    Uses data/html.html for sample input.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
data = html.HTMLData()
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>1</td><td>a</td><td>1.05</td></tr>'
# end_line returns the index of the last data element + 1
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>3</td><td>c</td><td>-1.25</td></tr>'
inputter.html['table_id'] = 'second'
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>4</td><td>d</td><td>10.5</td></tr>'
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>6</td><td>f</td><td>-12.5</td></tr>'
inputter.html['table_id'] = 3
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>7</td><td>g</td><td>105.0</td></tr>'
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>9</td><td>i</td><td>-125.0</td></tr>'
# start_line should raise an error if no table data exists
lines = [html.SoupString(BeautifulSoup('<div></div>', 'html.parser').div),
html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)]
with pytest.raises(core.InconsistentTableError):
data.start_line(lines)
# end_line should return None if no table data exists
assert data.end_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append('<tr><td>Data</td></tr>')
with pytest.raises(TypeError):
data.start_line(lines)
with pytest.raises(TypeError):
data.end_line(lines)
def test_multicolumn_write():
"""
Test to make sure that the HTML writer writes multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td>a</td>
<td>a</td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td>b</td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
out = html.HTML().write(table)[0].strip()
assert out == expected.strip()
@pytest.mark.skipif('not HAS_BLEACH')
def test_multicolumn_write_escape():
"""
    Test to make sure that the HTML writer passes raw HTML through
    un-escaped in multidimensional columns when the column is listed
    in ``raw_html_cols``.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('<a></a>', '<a></a>', 'a'), ('<b></b>', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td><a></a></td>
<td><a></a></td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td><b></b></td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
out = html.HTML(htmldict={'raw_html_cols': 'C3'}).write(table)[0].strip()
assert out == expected.strip()
def test_write_no_multicols():
"""
Test to make sure that the HTML writer will not use
multi-dimensional columns if the multicol parameter
is False.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th>C2</th>
<th>C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0 .. 1.0</td>
<td>a .. a</td>
</tr>
<tr>
<td>2</td>
<td>2.0 .. 2.0</td>
<td>b .. b</td>
</tr>
<tr>
<td>3</td>
<td>3.0 .. 3.0</td>
<td>c .. c</td>
</tr>
</table>
</body>
</html>
"""
assert html.HTML({'multicol': False}).write(table)[0].strip() == \
expected.strip()
@pytest.mark.skipif('not HAS_BS4')
def test_multicolumn_read():
"""
Test to make sure that the HTML reader inputs multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
Ensure that any string element within a multidimensional column
casts all elements to string prior to type conversion operations.
"""
table = Table.read('data/html2.html', format='ascii.html')
str_type = np.dtype((str, 21))
expected = Table(np.array([(['1', '2.5000000000000000001'], 3),
(['1a', '1'], 3.5)],
dtype=[('A', str_type, (2,)), ('B', '<f8')]))
assert np.all(table == expected)
@pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write():
"""
Test that columns can contain raw HTML which is not escaped.
"""
t = Table([['<em>x</em>'], ['<em>y</em>']], names=['a', 'b'])
# One column contains raw HTML (string input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'})
expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
# One column contains raw HTML (list input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']})
assert expected in out.getvalue()
    # Two columns contain raw HTML (list input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']})
expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
@pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write_clean():
"""
    Test that raw HTML columns are cleaned with bleach, so that
    disallowed tags such as <script> are escaped while allowed tags
    pass through.
"""
import bleach # noqa
t = Table([['<script>x</script>'], ['<p>y</p>'], ['<em>y</em>']], names=['a', 'b', 'c'])
# Confirm that <script> and <p> get escaped but not <em>
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': t.colnames})
expected = """\
<tr>
<td><script>x</script></td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
# Confirm that we can whitelist <p>
out = StringIO()
t.write(out, format='ascii.html',
htmldict={'raw_html_cols': t.colnames,
'raw_html_clean_kwargs': {'tags': bleach.ALLOWED_TAGS + ['p']}})
expected = """\
<tr>
<td><script>x</script></td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
def test_write_table_html_fill_values():
"""
    Test that passing fill_values replaces any matching values
"""
buffer_output = StringIO()
t = Table([[1], [2]], names=('a', 'b'))
ascii.write(t, buffer_output, fill_values=('1', 'Hello world'),
format='html')
t_expected = Table([['Hello world'], [2]], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_html_fill_values_optional_columns():
"""
    Test that passing an optional column in fill_values only replaces
    values in matching columns
"""
buffer_output = StringIO()
t = Table([[1], [1]], names=('a', 'b'))
ascii.write(t, buffer_output, fill_values=('1', 'Hello world', 'b'),
format='html')
t_expected = Table([[1], ['Hello world']], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_html_fill_values_masked():
"""
    Test that passing masked values in fill_values only replaces
    masked columns or values
"""
buffer_output = StringIO()
t = Table([[1], [1]], names=('a', 'b'), masked=True, dtype=('i4', 'i8'))
t['a'] = np.ma.masked
ascii.write(t, buffer_output, fill_values=(ascii.masked, 'TEST'),
format='html')
t_expected = Table([['TEST'], [1]], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_multicolumn_table_html_fill_values():
"""
Test to make sure that the HTML writer writes multidimensional
columns with correctly replaced fill_values.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
buffer_output = StringIO()
t = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
ascii.write(t, buffer_output, fill_values=('a', 'z'),
format='html')
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('z', 'z', 'z'), ('b', 'b', 'b'), ('c', 'c', 'c')]
buffer_expected = StringIO()
t_expected = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_multi_column_write_table_html_fill_values_masked():
"""
    Test that passing masked values in fill_values only replaces
    masked columns or values for multidimensional tables
"""
buffer_output = StringIO()
t = Table([[1, 2, 3, 4], ['--', 'a', '--', 'b']], names=('a', 'b'), masked=True)
t['a'][0:2] = np.ma.masked
t['b'][0:2] = np.ma.masked
ascii.write(t, buffer_output, fill_values=[(ascii.masked, 'MASKED')],
format='html')
t_expected = Table([['MASKED', 'MASKED', 3, 4], [
'MASKED', 'MASKED', '--', 'b']], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
    assert buffer_output.getvalue() == buffer_expected.getvalue()
@pytest.mark.skipif('not HAS_BS4')
def test_read_html_unicode():
"""
Test reading an HTML table with unicode values
"""
table_in = ['<table>',
'<tr><td>Δ</td></tr>',
'<tr><td>Δ</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert np.all(dat['col1'] == ['Δ', 'Δ'])
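# A minimal write/read round-trip sketch, assuming only the writer and the
# BeautifulSoup-based reader exercised above:
@pytest.mark.skipif('not HAS_BS4')
def test_write_read_roundtrip():
    t = Table([[1, 2], ['x', 'y']], names=('a', 'b'))
    out = StringIO()
    t.write(out, format='ascii.html')
    dat = Table.read(out.getvalue(), format='ascii.html')
    assert dat.colnames == ['a', 'b']
    assert len(dat) == 2
    assert np.all(dat['a'] == [1, 2])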
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for model selection.
"""
import numpy as np
__all__ = ['bayesian_info_criterion', 'bayesian_info_criterion_lsq',
'akaike_info_criterion', 'akaike_info_criterion_lsq']
__doctest_requires__ = {'bayesian_info_criterion_lsq': ['scipy'],
'akaike_info_criterion_lsq': ['scipy']}
def bayesian_info_criterion(log_likelihood, n_params, n_samples):
r""" Computes the Bayesian Information Criterion (BIC) given the log of the
likelihood function evaluated at the estimated (or analytically derived)
parameters, the number of parameters, and the number of samples.
The BIC is usually applied to decide whether increasing the number of free
parameters (hence, increasing the model complexity) yields significantly
better fittings. The decision is in favor of the model with the lowest
BIC.
BIC is given as
.. math::
\mathrm{BIC} = k \ln(n) - 2L,
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
    evaluated at the maximum likelihood estimate (i.e., the parameters for
which L is maximized).
When comparing two models define
:math:`\Delta \mathrm{BIC} = \mathrm{BIC}_h - \mathrm{BIC}_l`, in which
:math:`\mathrm{BIC}_h` is the higher BIC, and :math:`\mathrm{BIC}_l` is
    the lower BIC. The higher :math:`\Delta \mathrm{BIC}` is, the stronger
    the evidence against the model with the higher BIC.
The general rule of thumb is:
:math:`0 < \Delta\mathrm{BIC} \leq 2`: weak evidence that model low is
better
:math:`2 < \Delta\mathrm{BIC} \leq 6`: moderate evidence that model low is
better
:math:`6 < \Delta\mathrm{BIC} \leq 10`: strong evidence that model low is
better
:math:`\Delta\mathrm{BIC} > 10`: very strong evidence that model low is
better
For a detailed explanation, see [1]_ - [5]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Bayesian Information Criterion.
Examples
--------
The following example was originally presented in [1]_. Consider a
Gaussian model (mu, sigma) and a t-Student model (mu, sigma, delta).
    In addition, assume that the t model yields a higher likelihood.
The question that the BIC is proposed to answer is: "Is the increase in
    likelihood due to the larger number of parameters?"
>>> from astropy.stats.info_theory import bayesian_info_criterion
>>> lnL_g = -176.4
>>> lnL_t = -173.0
>>> n_params_g = 2
>>> n_params_t = 3
>>> n_samples = 100
>>> bic_g = bayesian_info_criterion(lnL_g, n_params_g, n_samples)
>>> bic_t = bayesian_info_criterion(lnL_t, n_params_t, n_samples)
>>> bic_g - bic_t # doctest: +FLOAT_CMP
2.1948298140119391
    Therefore, there exists moderate evidence that the increase in
    likelihood for the t-Student model is due to its larger number of
    parameters.
References
----------
.. [1] Richards, D. Maximum Likelihood Estimation and the Bayesian
Information Criterion.
<https://hea-www.harvard.edu/astrostat/Stat310_0910/dr_20100323_mle.pdf>
.. [2] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [3] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [4] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [5] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
return n_params*np.log(n_samples) - 2.0*log_likelihood
# NOTE: bic_t - bic_g doctest is skipped because it produced slightly
# different result in arm64 and big-endian s390x CI jobs.
def bayesian_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Bayesian Information Criterion (BIC) assuming that the
observations come from a Gaussian distribution.
In this case, BIC is given as
.. math::
\mathrm{BIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + k\ln(n)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic. See [1]_ and [2]_.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Examples
--------
Consider the simple 1-D fitting example presented in the Astropy
modeling webpage [3]_. There, two models (Box and Gaussian) were fitted to
a source flux using the least squares statistic. However, the fittings
themselves do not tell much about which model better represents this
    hypothetical source. Therefore, we are going to apply the BIC in order to
decide in favor of a model.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import bayesian_info_criterion_lsq
>>> # Generate fake data
>>> np.random.seed(0)
>>> x = np.linspace(-5., 5., 200)
>>> y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
>>> y += np.random.normal(0., 0.2, x.shape)
>>> # Fit the data using a Box model.
>>> # Bounds are not really needed but included here to demonstrate usage.
>>> t_init = models.Trapezoid1D(amplitude=1., x_0=0., width=1., slope=0.5,
... bounds={"x_0": (-5., 5.)})
>>> fit_t = fitting.LevMarLSQFitter()
>>> t = fit_t(t_init, x, y)
>>> # Fit the data using a Gaussian
>>> g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
>>> fit_g = fitting.LevMarLSQFitter()
>>> g = fit_g(g_init, x, y)
    >>> # Compute the sums of squared residuals
>>> ssr_t = np.sum((t(x) - y)*(t(x) - y))
>>> ssr_g = np.sum((g(x) - y)*(g(x) - y))
>>> # Compute the bics
>>> bic_t = bayesian_info_criterion_lsq(ssr_t, 4, x.shape[0])
>>> bic_g = bayesian_info_criterion_lsq(ssr_g, 3, x.shape[0])
>>> bic_t - bic_g # doctest: +SKIP
30.644474706065466
    Hence, there is very strong evidence that the Gaussian model provides a
    significantly better representation of the data than the Box model. This
    is, of course, expected since the true model is Gaussian.
References
----------
.. [1] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [3] Astropy Models and Fitting
<https://docs.astropy.org/en/stable/modeling>
"""
return bayesian_info_criterion(-0.5 * n_samples * np.log(ssr / n_samples),
n_params, n_samples)
def akaike_info_criterion(log_likelihood, n_params, n_samples):
r"""
Computes the Akaike Information Criterion (AIC).
Like the Bayesian Information Criterion, the AIC is a measure of
relative fitting quality which is used for fitting evaluation and model
selection. The decision is in favor of the model with the lowest AIC.
AIC is given as
.. math::
\mathrm{AIC} = 2(k - L)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
    evaluated at the maximum likelihood estimate (i.e., the parameters for
which L is maximized).
In case that the sample size is not "large enough" a correction is
applied, i.e.
.. math::
\mathrm{AIC} = 2(k - L) + \dfrac{2k(k+1)}{n - k - 1}
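    For instance, with :math:`k = 2` free parameters the correction is
    applied whenever :math:`n < 80`, i.e., when :math:`n/k < 40`:
    >>> akaike_info_criterion(-100.0, 2, 80)  # doctest: +FLOAT_CMP
    204.0
    >>> akaike_info_criterion(-100.0, 2, 79)  # doctest: +FLOAT_CMP
    204.15789473684211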
Rule of thumb [1]_:
:math:`\Delta\mathrm{AIC}_i = \mathrm{AIC}_i - \mathrm{AIC}_{min}`
:math:`\Delta\mathrm{AIC}_i < 2`: substantial support for model i
:math:`3 < \Delta\mathrm{AIC}_i < 7`: considerably less support for model i
    :math:`\Delta\mathrm{AIC}_i > 10`: essentially no support for model i
in which :math:`\mathrm{AIC}_{min}` stands for the lower AIC among the
models which are being compared.
For detailed explanations see [1]_-[6]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
The following example was originally presented in [2]_. Basically, two
    models are being compared: one with six parameters (model 1) and another
    with five parameters (model 2). Despite the fact that model 2 has a
lower AIC, we could decide in favor of model 1 since the difference (in
AIC) between them is only about 1.0.
>>> n_samples = 121
>>> lnL1 = -3.54
>>> n1_params = 6
>>> lnL2 = -4.17
>>> n2_params = 5
>>> aic1 = akaike_info_criterion(lnL1, n1_params, n_samples)
>>> aic2 = akaike_info_criterion(lnL2, n2_params, n_samples)
>>> aic1 - aic2 # doctest: +FLOAT_CMP
0.9551029748283746
    Therefore, model 1 retains substantial support even though it has more
    free parameters.
References
----------
.. [1] Cavanaugh, J. E. Model Selection Lecture II: The Akaike
Information Criterion.
<http://machinelearning102.pbworks.com/w/file/fetch/47699383/ms_lec_2_ho.pdf>
.. [2] Mazerolle, M. J. Making sense out of Akaike's Information
Criterion (AIC): its use and interpretation in model selection and
inference from ecological data.
.. [3] Wikipedia. Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [4] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [5] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [6] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
# Correction in case of small number of observations
if n_samples/float(n_params) >= 40.0:
aic = 2.0 * (n_params - log_likelihood)
else:
aic = (2.0 * (n_params - log_likelihood) +
2.0 * n_params * (n_params + 1.0) /
(n_samples - n_params - 1.0))
return aic
def akaike_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Akaike Information Criterion assuming that the observations
are Gaussian distributed.
In this case, AIC is given as
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k
In case that the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k +
\dfrac{2k(k+1)}{n-k-1}
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., the dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
    This example is based on the Astropy Modeling webpage, Compound models
section.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import akaike_info_criterion_lsq
>>> np.random.seed(42)
>>> # Generate fake data
    >>> g1 = models.Gaussian1D(.1, 0, 0.2) # a small Gaussian near the noise level
    >>> g2 = models.Gaussian1D(.1, 0.3, 0.2) # a second small Gaussian
>>> g3 = models.Gaussian1D(2.5, 0.5, 0.1)
>>> x = np.linspace(-1, 1, 200)
>>> y = g1(x) + g2(x) + g3(x) + np.random.normal(0., 0.2, x.shape)
>>> # Fit with three Gaussians
>>> g3_init = (models.Gaussian1D(.1, 0, 0.1)
... + models.Gaussian1D(.1, 0.2, 0.15)
... + models.Gaussian1D(2.4, .4, 0.1))
>>> fitter = fitting.LevMarLSQFitter()
>>> g3_fit = fitter(g3_init, x, y)
>>> # Fit with two Gaussians
>>> g2_init = (models.Gaussian1D(.1, 0, 0.1) +
... models.Gaussian1D(2, 0.5, 0.1))
>>> g2_fit = fitter(g2_init, x, y)
>>> # Fit with only one Gaussian
>>> g1_init = models.Gaussian1D(amplitude=2., mean=0.3, stddev=.5)
>>> g1_fit = fitter(g1_init, x, y)
    >>> # Compute the sums of squared residuals
>>> ssr_g3 = np.sum((g3_fit(x) - y)**2.0)
>>> ssr_g2 = np.sum((g2_fit(x) - y)**2.0)
>>> ssr_g1 = np.sum((g1_fit(x) - y)**2.0)
>>> akaike_info_criterion_lsq(ssr_g3, 9, x.shape[0]) # doctest: +FLOAT_CMP
-634.5257517810961
>>> akaike_info_criterion_lsq(ssr_g2, 6, x.shape[0]) # doctest: +FLOAT_CMP
-662.83834510232043
>>> akaike_info_criterion_lsq(ssr_g1, 3, x.shape[0]) # doctest: +FLOAT_CMP
-647.47312032659499
    Hence, from the AIC values, we would prefer the model g2_fit, which has
    the lowest AIC. Relative to g2_fit, the difference in AIC is about 28
    for g3_fit and about 15 for g1_fit, so by the rule of thumb in
    `akaike_info_criterion` there is essentially no support for either of
    those models.
References
----------
.. [1] Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<https://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
"""
return akaike_info_criterion(-0.5 * n_samples * np.log(ssr / n_samples),
n_params, n_samples)
import difflib
import functools
import sys
import numbers
import numpy as np
from .misc import indent
__all__ = ['fixed_width_indent', 'diff_values', 'report_diff_values',
'where_not_allclose']
# Smaller default shift-width for indent
fixed_width_indent = functools.partial(indent, width=2)
def diff_values(a, b, rtol=0.0, atol=0.0):
"""
Diff two scalar values. If both values are floats, they are compared to
within the given absolute and relative tolerance.
Parameters
----------
a, b : int, float, str
Scalar values to compare.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
is_different : bool
`True` if they are different, else `False`.
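    Examples
    --------
    Two NaNs compare as equal here, unlike with the ``!=`` operator:
    >>> diff_values(float('nan'), float('nan'))
    False
    >>> diff_values(1.0, 1.000001, rtol=1e-3)
    False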
"""
if isinstance(a, float) and isinstance(b, float):
if np.isnan(a) and np.isnan(b):
return False
return not np.allclose(a, b, rtol=rtol, atol=atol)
else:
return a != b
def report_diff_values(a, b, fileobj=sys.stdout, indent_width=0, rtol=0.0, atol=0.0):
"""
Write a diff report between two values to the specified file-like object.
Parameters
----------
a, b
Values to compare. Anything that can be turned into strings
and compared using :py:mod:`difflib` should work.
fileobj : object
File-like object to write to.
The default is ``sys.stdout``, which writes to terminal.
indent_width : int
Character column(s) to indent.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
identical : bool
`True` if no diff, else `False`.
"""
if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
if a.shape != b.shape:
fileobj.write(
fixed_width_indent(' Different array shapes:\n',
indent_width))
report_diff_values(str(a.shape), str(b.shape), fileobj=fileobj,
indent_width=indent_width + 1)
return False
if (np.issubdtype(a.dtype, np.floating) and
np.issubdtype(b.dtype, np.floating)):
diff_indices = np.transpose(where_not_allclose(a, b, rtol=rtol, atol=atol))
else:
diff_indices = np.transpose(np.where(a != b))
num_diffs = diff_indices.shape[0]
for idx in diff_indices[:3]:
lidx = idx.tolist()
fileobj.write(fixed_width_indent(f' at {lidx!r}:\n', indent_width))
report_diff_values(a[tuple(idx)], b[tuple(idx)], fileobj=fileobj,
indent_width=indent_width + 1, rtol=rtol, atol=atol)
if num_diffs > 3:
fileobj.write(fixed_width_indent(
f' ...and at {num_diffs - 3:d} more indices.\n',
indent_width))
return False
return num_diffs == 0
typea = type(a)
typeb = type(b)
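    # Values of the same type are diffed as-is; for mismatched types, each
    # side is prefixed with its type name so that, e.g., the string '1'
    # and the integer 1 show up as different in the report.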
if typea == typeb:
lnpad = ' '
sign_a = 'a>'
sign_b = 'b>'
if isinstance(a, numbers.Number):
a = repr(a)
b = repr(b)
else:
a = str(a)
b = str(b)
else:
padding = max(len(typea.__name__), len(typeb.__name__)) + 3
lnpad = (padding + 1) * ' '
sign_a = ('(' + typea.__name__ + ') ').rjust(padding) + 'a>'
sign_b = ('(' + typeb.__name__ + ') ').rjust(padding) + 'b>'
is_a_str = isinstance(a, str)
is_b_str = isinstance(b, str)
a = (repr(a) if ((is_a_str and not is_b_str) or
(not is_a_str and isinstance(a, numbers.Number)))
else str(a))
b = (repr(b) if ((is_b_str and not is_a_str) or
(not is_b_str and isinstance(b, numbers.Number)))
else str(b))
identical = True
for line in difflib.ndiff(a.splitlines(), b.splitlines()):
if line[0] == '-':
identical = False
line = sign_a + line[1:]
elif line[0] == '+':
identical = False
line = sign_b + line[1:]
else:
line = lnpad + line
fileobj.write(fixed_width_indent(
' {}\n'.format(line.rstrip('\n')), indent_width))
return identical
def where_not_allclose(a, b, rtol=1e-5, atol=1e-8):
"""
A version of :func:`numpy.allclose` that returns the indices
where the two arrays differ, instead of just a boolean value.
Parameters
----------
a, b : array-like
Input arrays to compare.
rtol, atol : float
Relative and absolute tolerances as accepted by
:func:`numpy.allclose`.
Returns
-------
idx : tuple of array
Indices where the two arrays differ.
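    Examples
    --------
    >>> import numpy as np
    >>> where_not_allclose(np.array([1., 2., 3.]), np.array([1., 2.1, 3.]))
    (array([1]),)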
"""
# Create fixed mask arrays to handle INF and NaN; currently INF and NaN
# are handled as equivalent
if not np.all(np.isfinite(a)):
a = np.ma.fix_invalid(a).data
if not np.all(np.isfinite(b)):
b = np.ma.fix_invalid(b).data
if atol == 0.0 and rtol == 0.0:
# Use a faster comparison for the most simple (and common) case
return np.where(a != b)
return np.where(np.abs(a - b) > (atol + rtol * np.abs(b)))
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Time utilities.
In particular, routines to do basic arithmetic on numbers represented by two
doubles, using the procedure of Shewchuk, 1997, Discrete & Computational
Geometry 18(3):305-363 -- http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Furthermore, some helper routines to turn strings and other types of
objects into two values, and vice versa.
"""
import decimal
import numpy as np
import astropy.units as u
def day_frac(val1, val2, factor=None, divisor=None):
"""Return the sum of ``val1`` and ``val2`` as two float64s.
The returned floats are an integer part and the fractional remainder,
with the latter guaranteed to be within -0.5 and 0.5 (inclusive on
either side, as the integer is rounded to even).
The arithmetic is all done with exact floating point operations so no
precision is lost to rounding error. It is assumed the sum is less
than about 1e16, otherwise the remainder will be greater than 1.0.
Parameters
----------
val1, val2 : array of float
Values to be summed.
factor : float, optional
If given, multiply the sum by it.
divisor : float, optional
If given, divide the sum by it.
Returns
-------
day, frac : float64
Integer and fractional part of val1 + val2.
"""
# Add val1 and val2 exactly, returning the result as two float64s.
# The first is the approximate sum (with some floating point error)
# and the second is the error of the float64 sum.
sum12, err12 = two_sum(val1, val2)
if factor is not None:
sum12, carry = two_product(sum12, factor)
carry += err12 * factor
sum12, err12 = two_sum(sum12, carry)
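    # Divide as accurately as possible: take a first-guess quotient q1,
    # reconstruct q1 * divisor exactly with two_product, and fold the
    # residual (including the carried error err12) into a correction q2.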
if divisor is not None:
q1 = sum12 / divisor
p1, p2 = two_product(q1, divisor)
d1, d2 = two_sum(sum12, -p1)
d2 += err12
d2 -= p2
q2 = (d1 + d2) / divisor # 3-part float fine here; nothing can be lost
sum12, err12 = two_sum(q1, q2)
# get integer fraction
day = np.round(sum12)
# Calculate remaining fraction. This can have gotten >0.5 or <-0.5, which means
# we would lose one bit of precision. So, correct for that. Here, we need
# particular care for the case that frac=0.5 and check>0 or frac=-0.5 and check<0,
# since in that case if check is large enough, rounding was done the wrong way.
frac, check = two_sum(sum12 - day, err12)
excess = np.where(frac * np.sign(check) != 0.5, np.round(frac),
np.round(frac+2*check))
day += excess
frac = sum12 - day
frac += err12
return day, frac
def quantity_day_frac(val1, val2=None):
"""Like ``day_frac``, but for quantities with units of time.
The quantities are separately converted to days. Here, we need to take
care with the conversion since while the routines here can do accurate
multiplication, the conversion factor itself may not be accurate. For
instance, if the quantity is in seconds, the conversion factor is
1./86400., which is not exactly representable as a float.
To work around this, for conversion factors less than unity, rather than
multiply by that possibly inaccurate factor, the value is divided by the
conversion factor of a day to that unit (i.e., by 86400. for seconds). For
conversion factors larger than 1, such as 365.25 for years, we do just
multiply. With this scheme, one has precise conversion factors for all
regular time units that astropy defines. Note, however, that it does not
necessarily work for all custom time units, and cannot work when conversion
to time is via an equivalency. For those cases, one remains limited by the
fact that Quantity calculations are done in double precision, not in
quadruple precision as for time.
"""
if val2 is not None:
res11, res12 = quantity_day_frac(val1)
res21, res22 = quantity_day_frac(val2)
        # This summation can at most lose 1 ULP in the second number.
return res11 + res21, res12 + res22
try:
factor = val1.unit.to(u.day)
except Exception:
# Not a simple scaling, so cannot do the full-precision one.
# But at least try normal conversion, since equivalencies may be set.
return val1.to_value(u.day), 0.
if factor == 1.:
return day_frac(val1.value, 0.)
if factor > 1:
return day_frac(val1.value, 0., factor=factor)
else:
divisor = u.day.to(val1.unit)
return day_frac(val1.value, 0., divisor=divisor)
def two_sum(a, b):
"""
Add ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate sum (with some floating point error)
and the second is the error of the float64 sum.
Using the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
sum, err : float64
Approximate sum of a + b and the exact floating point error
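    Examples
    --------
    Adding 1 to 1e16 is below float64 resolution, but the part lost to
    rounding is returned exactly in the error term:
    >>> two_sum(1e16, 1.)
    (1e+16, 1.0)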
"""
x = a + b
eb = x - a # bvirtual in Shewchuk
ea = x - eb # avirtual in Shewchuk
eb = b - eb # broundoff in Shewchuk
ea = a - ea # aroundoff in Shewchuk
return x, ea + eb
def two_product(a, b):
"""
    Multiply ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate product (with some floating point error)
and the second is the error of the float64 product.
Uses the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
prod, err : float64
Approximate product a * b and the exact floating point error
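    Examples
    --------
    The exact product of the two float64 closest to 0.1 needs more than
    53 bits, so the rounded product carries a non-zero error term:
    >>> x, err = two_product(0.1, 0.1)
    >>> x == 0.1 * 0.1
    True
    >>> err != 0.
    True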
"""
x = a * b
ah, al = split(a)
bh, bl = split(b)
y1 = ah * bh
y = x - y1
y2 = al * bh
y -= y2
y3 = ah * bl
y -= y3
y4 = al * bl
y = y4 - y
return x, y
def split(a):
"""
Split float64 in two aligned parts.
Uses the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
"""
c = 134217729. * a # 2**27+1.
abig = c - a
ah = c - abig
al = a - ah
return ah, al
_enough_decimal_places = 34 # to represent two doubles
def longdouble_to_twoval(val1, val2=None):
if val2 is None:
val2 = val1.dtype.type(0.)
else:
best_type = np.result_type(val1.dtype, val2.dtype)
val1 = val1.astype(best_type, copy=False)
val2 = val2.astype(best_type, copy=False)
    # day_frac is independent of dtype, as long as the dtypes
# are the same and no factor or divisor is given.
i, f = day_frac(val1, val2)
return i.astype(float, copy=False), f.astype(float, copy=False)
def decimal_to_twoval1(val1, val2=None):
with decimal.localcontext() as ctx:
ctx.prec = _enough_decimal_places
d = decimal.Decimal(val1)
i = round(d)
f = d - i
return float(i), float(f)
def bytes_to_twoval1(val1, val2=None):
return decimal_to_twoval1(val1.decode('ascii'))
def twoval_to_longdouble(val1, val2):
return val1.astype(np.longdouble) + val2.astype(np.longdouble)
def twoval_to_decimal1(val1, val2):
with decimal.localcontext() as ctx:
ctx.prec = _enough_decimal_places
return decimal.Decimal(val1) + decimal.Decimal(val2)
def twoval_to_string1(val1, val2, fmt):
if val2 == 0.:
# For some formats, only a single float is really used.
# For those, let numpy take care of correct number of digits.
return str(val1)
result = format(twoval_to_decimal1(val1, val2), fmt).strip('0')
if result[-1] == '.':
result += '0'
return result
def twoval_to_bytes1(val1, val2, fmt):
return twoval_to_string1(val1, val2, fmt).encode('ascii')
decimal_to_twoval = np.vectorize(decimal_to_twoval1)
bytes_to_twoval = np.vectorize(bytes_to_twoval1)
twoval_to_decimal = np.vectorize(twoval_to_decimal1)
twoval_to_string = np.vectorize(twoval_to_string1, excluded='fmt')
twoval_to_bytes = np.vectorize(twoval_to_bytes1, excluded='fmt')
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Core units classes and functions
"""
import inspect
import operator
import textwrap
import warnings
import numpy as np
from astropy.utils.decorators import lazyproperty
from astropy.utils.exceptions import AstropyWarning
from astropy.utils.misc import isiterable
from . import format as unit_format
from .utils import is_effectively_unity, resolve_fractions, sanitize_scale, validate_power
__all__ = [
'UnitsError', 'UnitsWarning', 'UnitConversionError', 'UnitTypeError',
'UnitBase', 'NamedUnit', 'IrreducibleUnit', 'Unit', 'CompositeUnit',
'PrefixUnit', 'UnrecognizedUnit', 'def_unit', 'get_current_unit_registry',
'set_enabled_units', 'add_enabled_units',
'set_enabled_equivalencies', 'add_enabled_equivalencies',
'set_enabled_aliases', 'add_enabled_aliases',
'dimensionless_unscaled', 'one',
]
UNITY = 1.0
def _flatten_units_collection(items):
"""
Given a list of sequences, modules or dictionaries of units, or
single units, return a flat set of all the units found.
"""
if not isinstance(items, list):
items = [items]
result = set()
for item in items:
if isinstance(item, UnitBase):
result.add(item)
else:
if isinstance(item, dict):
units = item.values()
elif inspect.ismodule(item):
units = vars(item).values()
elif isiterable(item):
units = item
else:
continue
for unit in units:
if isinstance(unit, UnitBase):
result.add(unit)
return result
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs
Raises
------
ValueError if an equivalency cannot be interpreted
"""
if equivalencies is None:
return []
normalized = []
for i, equiv in enumerate(equivalencies):
if len(equiv) == 2:
funit, tunit = equiv
a = b = lambda x: x
elif len(equiv) == 3:
funit, tunit, a = equiv
b = a
elif len(equiv) == 4:
funit, tunit, a, b = equiv
else:
raise ValueError(
f"Invalid equivalence entry {i}: {equiv!r}")
if not (funit is Unit(funit) and
(tunit is None or tunit is Unit(tunit)) and
callable(a) and
callable(b)):
raise ValueError(
f"Invalid equivalence entry {i}: {equiv!r}")
normalized.append((funit, tunit, a, b))
return normalized
class _UnitRegistry:
"""
Manages a registry of the enabled units.
"""
def __init__(self, init=[], equivalencies=[], aliases={}):
if isinstance(init, _UnitRegistry):
            # If passed another registry we don't need to rebuild everything,
            # but because these are mutable types we don't want to create
            # conflicts, so everything needs to be copied.
self._equivalencies = init._equivalencies.copy()
self._aliases = init._aliases.copy()
self._all_units = init._all_units.copy()
self._registry = init._registry.copy()
self._non_prefix_units = init._non_prefix_units.copy()
# The physical type is a dictionary containing sets as values.
# All of these must be copied otherwise we could alter the old
# registry.
self._by_physical_type = {k: v.copy() for k, v in
init._by_physical_type.items()}
else:
self._reset_units()
self._reset_equivalencies()
self._reset_aliases()
self.add_enabled_units(init)
self.add_enabled_equivalencies(equivalencies)
self.add_enabled_aliases(aliases)
def _reset_units(self):
self._all_units = set()
self._non_prefix_units = set()
self._registry = {}
self._by_physical_type = {}
def _reset_equivalencies(self):
self._equivalencies = set()
def _reset_aliases(self):
self._aliases = {}
@property
def registry(self):
return self._registry
@property
def all_units(self):
return self._all_units
@property
def non_prefix_units(self):
return self._non_prefix_units
def set_enabled_units(self, units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by
methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
"""
self._reset_units()
return self.add_enabled_units(units)
def add_enabled_units(self, units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for
searching through by methods like
`UnitBase.find_equivalent_units` and `UnitBase.compose`.
"""
units = _flatten_units_collection(units)
for unit in units:
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for st in unit._names:
if (st in self._registry and unit != self._registry[st]):
raise ValueError(
"Object with name {!r} already exists in namespace. "
"Filter the set of units to avoid name clashes before "
"enabling them.".format(st))
for st in unit._names:
self._registry[st] = unit
self._all_units.add(unit)
if not isinstance(unit, PrefixUnit):
self._non_prefix_units.add(unit)
hash = unit._get_physical_type_id()
self._by_physical_type.setdefault(hash, set()).add(unit)
def get_units_with_physical_type(self, unit):
"""
Get all units in the registry with the same physical type as
the given unit.
Parameters
----------
unit : UnitBase instance
"""
return self._by_physical_type.get(unit._get_physical_type_id(), set())
@property
def equivalencies(self):
return list(self._equivalencies)
def set_enabled_equivalencies(self, equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
self._reset_equivalencies()
return self.add_enabled_equivalencies(equivalencies)
def add_enabled_equivalencies(self, equivalencies):
"""
Adds to the set of equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
List of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# pre-normalize list to help catch mistakes
equivalencies = _normalize_equivalencies(equivalencies)
self._equivalencies |= set(equivalencies)
@property
def aliases(self):
return self._aliases
def set_enabled_aliases(self, aliases):
"""
Set aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
self._reset_aliases()
self.add_enabled_aliases(aliases)
def add_enabled_aliases(self, aliases):
"""
Add aliases for units.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
"""
for alias, unit in aliases.items():
if alias in self._registry and unit != self._registry[alias]:
raise ValueError(
f"{alias} already means {self._registry[alias]}, so "
f"cannot be used as an alias for {unit}.")
if alias in self._aliases and unit != self._aliases[alias]:
raise ValueError(
f"{alias} already is an alias for {self._aliases[alias]}, so "
f"cannot be used as an alias for {unit}.")
for alias, unit in aliases.items():
if alias not in self._registry and alias not in self._aliases:
self._aliases[alias] = unit
class _UnitContext:
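    # The push happens eagerly in __init__ rather than in __enter__, which is
    # what lets set_enabled_units() and friends take effect immediately when
    # called bare, while still unwinding via the pop in __exit__ when used as
    # a context manager.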
def __init__(self, init=[], equivalencies=[]):
_unit_registries.append(
_UnitRegistry(init=init, equivalencies=equivalencies))
def __enter__(self):
pass
def __exit__(self, type, value, tb):
_unit_registries.pop()
_unit_registries = [_UnitRegistry()]
def get_current_unit_registry():
return _unit_registries[-1]
def set_enabled_units(units):
"""
Sets the units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be "enabled" for searching through by methods
like `UnitBase.find_equivalent_units` and `UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> with u.set_enabled_units([u.pc]):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
pc | 3.08568e+16 m | parsec ,
]
>>> u.m.find_equivalent_units()
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
micron | 1e-06 m | ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
]
"""
# get a context with a new registry, using equivalencies of the current one
context = _UnitContext(
equivalencies=get_current_unit_registry().equivalencies)
# in this new current registry, enable the units requested
get_current_unit_registry().set_enabled_units(units)
return context
def add_enabled_units(units):
"""
Adds to the set of units enabled in the unit registry.
These units are searched when using
`UnitBase.find_equivalent_units`, for example.
This may be used either permanently, or as a context manager using
the ``with`` statement (see example below).
Parameters
----------
units : list of sequence, dict, or module
This is a list of things in which units may be found
(sequences, dicts or modules), or units themselves. The
entire set will be added to the "enabled" set for searching
through by methods like `UnitBase.find_equivalent_units` and
`UnitBase.compose`.
Examples
--------
>>> from astropy import units as u
>>> from astropy.units import imperial
>>> with u.add_enabled_units(imperial):
... u.m.find_equivalent_units()
...
Primary name | Unit definition | Aliases
[
AU | 1.49598e+11 m | au, astronomical_unit ,
Angstrom | 1e-10 m | AA, angstrom ,
cm | 0.01 m | centimeter ,
earthRad | 6.3781e+06 m | R_earth, Rearth ,
ft | 0.3048 m | foot ,
fur | 201.168 m | furlong ,
inch | 0.0254 m | ,
jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter ,
lsec | 2.99792e+08 m | lightsecond ,
lyr | 9.46073e+15 m | lightyear ,
m | irreducible | meter ,
mi | 1609.34 m | mile ,
micron | 1e-06 m | ,
mil | 2.54e-05 m | thou ,
nmi | 1852 m | nauticalmile, NM ,
pc | 3.08568e+16 m | parsec ,
solRad | 6.957e+08 m | R_sun, Rsun ,
yd | 0.9144 m | yard ,
]
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further units requested
get_current_unit_registry().add_enabled_units(units)
return context
def set_enabled_equivalencies(equivalencies):
"""
Sets the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Use with care.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
Examples
--------
Exponentiation normally requires dimensionless quantities. To avoid
problems with complex phases::
        >>> import numpy as np
        >>> from astropy import units as u
>>> with u.set_enabled_equivalencies(u.dimensionless_angles()):
... phase = 0.5 * u.cycle
... np.exp(1j*phase) # doctest: +FLOAT_CMP
<Quantity -1.+1.2246468e-16j>
"""
# get a context with a new registry, using all units of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the equivalencies requested
get_current_unit_registry().set_enabled_equivalencies(equivalencies)
return context
def add_enabled_equivalencies(equivalencies):
"""
Adds to the equivalencies enabled in the unit registry.
These equivalencies are used if no explicit equivalencies are given,
both in unit conversion and in finding equivalent units.
This is meant in particular for allowing angles to be dimensionless.
Since no equivalencies are enabled by default, generally it is recommended
to use `set_enabled_equivalencies`.
Parameters
----------
equivalencies : list of tuple
list of equivalent pairs, e.g., as returned by
`~astropy.units.equivalencies.dimensionless_angles`.
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
# in this new current registry, enable the further equivalencies requested
get_current_unit_registry().add_enabled_equivalencies(equivalencies)
return context
def set_enabled_aliases(aliases):
"""
Set aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Parameters
----------
aliases : dict of str, Unit
The aliases to set. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
    # in this new current registry, set the aliases requested
get_current_unit_registry().set_enabled_aliases(aliases)
return context
def add_enabled_aliases(aliases):
"""
Add aliases for units.
This is useful for handling alternate spellings for units, or
misspelled units in files one is trying to read.
Since no aliases are enabled by default, generally it is recommended
to use `set_enabled_aliases`.
Parameters
----------
aliases : dict of str, Unit
The aliases to add. The keys must be the string aliases, and values
must be the `astropy.units.Unit` that the alias will be mapped to.
Raises
------
ValueError
If the alias already defines a different unit.
Examples
--------
To temporarily allow for a misspelled 'Angstroem' unit::
>>> from astropy import units as u
>>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}):
... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom)
True
"""
# get a context with a new registry, which is a copy of the current one
context = _UnitContext(get_current_unit_registry())
    # in this new current registry, add the further aliases requested
get_current_unit_registry().add_enabled_aliases(aliases)
return context
class UnitsError(Exception):
"""
The base class for unit-specific exceptions.
"""
class UnitScaleError(UnitsError, ValueError):
"""
Used to catch the errors involving scaled units,
which are not recognized by FITS format.
"""
pass
class UnitConversionError(UnitsError, ValueError):
"""
Used specifically for errors related to converting between units or
interpreting units in terms of other units.
"""
class UnitTypeError(UnitsError, TypeError):
"""
Used specifically for errors in setting to units not allowed by a class.
E.g., would be raised if the unit of an `~astropy.coordinates.Angle`
    instance were set to a non-angular unit.
"""
class UnitsWarning(AstropyWarning):
"""
The base class for unit-specific warnings.
"""
class UnitBase:
"""
Abstract base class for units.
Most of the arithmetic operations on units are defined in this
base class.
Should not be instantiated by users directly.
"""
# Make sure that __rmul__ of units gets called over the __mul__ of Numpy
# arrays to avoid element-wise multiplication.
__array_priority__ = 1000
_hash = None
_type_id = None
def __deepcopy__(self, memo):
# This may look odd, but the units conversion will be very
# broken after deep-copying if we don't guarantee that a given
# physical unit corresponds to only one instance
return self
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return unit_format.Latex.to_string(self)
def __bytes__(self):
"""Return string representation for unit"""
return unit_format.Generic.to_string(self).encode('unicode_escape')
def __str__(self):
"""Return string representation for unit"""
return unit_format.Generic.to_string(self)
def __repr__(self):
string = unit_format.Generic.to_string(self)
return f'Unit("{string}")'
def _get_physical_type_id(self):
"""
Returns an identifier that uniquely identifies the physical
        type of this unit. It comprises the bases and powers of
this unit, without the scale. Since it is hashable, it is
useful as a dictionary key.
"""
if self._type_id is None:
unit = self.decompose()
self._type_id = tuple(zip((base.name for base in unit.bases), unit.powers))
return self._type_id
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. "
"Perhaps you meant to_string()?")
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
raise AttributeError(
"Can not get names from unnamed units. "
"Perhaps you meant to_string()?")
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
raise AttributeError(
"Can not get aliases from unnamed units. "
"Perhaps you meant to_string()?")
@property
def scale(self):
"""
Return the scale of the unit.
"""
return 1.0
@property
def bases(self):
"""
Return the bases of the unit.
"""
return [self]
@property
def powers(self):
"""
Return the powers of the unit.
"""
return [1]
def to_string(self, format=unit_format.Generic):
"""
Output the unit in the given format as a string.
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
f = unit_format.get_format(format)
return f.to_string(self)
def __format__(self, format_spec):
"""Try to format units using a formatter."""
try:
return self.to_string(format=format_spec)
except ValueError:
return format(str(self), format_spec)
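    # A minimal usage sketch (an illustrative comment, not library code):
    # ``__format__`` means a format spec can name a unit formatter, while
    # ordinary specs fall back to formatting of the plain string form.
    # Representative output, assuming ``from astropy import units as u``:
    #
    #     >>> f"{u.m / u.s:latex}"
    #     '$\\mathrm{\\frac{m}{s}}$'
    #     >>> f"{u.m / u.s:>8}"
    #     '   m / s'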
@staticmethod
def _normalize_equivalencies(equivalencies):
"""
Normalizes equivalencies, ensuring each is a 4-tuple of the form::
(from_unit, to_unit, forward_func, backward_func)
Parameters
----------
equivalencies : list of equivalency pairs, or None
Returns
-------
A normalized list, including possible global defaults set by, e.g.,
`set_enabled_equivalencies`, except when `equivalencies`=`None`,
in which case the returned list is always empty.
Raises
------
        ValueError
            If an equivalency cannot be interpreted.
"""
normalized = _normalize_equivalencies(equivalencies)
if equivalencies is not None:
normalized += get_current_unit_registry().equivalencies
return normalized
def __pow__(self, p):
p = validate_power(p)
return CompositeUnit(1, [self], [p], _error_check=False)
def __truediv__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
return CompositeUnit(1, [self, m], [1, -1], _error_check=False)
try:
# Cannot handle this as Unit, re-try as Quantity
from .quantity import Quantity
return Quantity(1, self) / m
except TypeError:
return NotImplemented
def __rtruediv__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) / self
try:
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a
# unit, for the common case of <array> / <unit>.
from .quantity import Quantity
if hasattr(m, 'unit'):
result = Quantity(m)
result /= self
return result
else:
return Quantity(m, self**(-1))
except TypeError:
return NotImplemented
def __mul__(self, m):
if isinstance(m, (bytes, str)):
m = Unit(m)
if isinstance(m, UnitBase):
if m.is_unity():
return self
elif self.is_unity():
return m
return CompositeUnit(1, [self, m], [1, 1], _error_check=False)
# Cannot handle this as Unit, re-try as Quantity.
try:
from .quantity import Quantity
return Quantity(1, unit=self) * m
except TypeError:
return NotImplemented
def __rmul__(self, m):
if isinstance(m, (bytes, str)):
return Unit(m) * self
# Cannot handle this as Unit. Here, m cannot be a Quantity,
# so we make it into one, fasttracking when it does not have a unit
# for the common case of <array> * <unit>.
try:
from .quantity import Quantity
if hasattr(m, 'unit'):
result = Quantity(m)
result *= self
return result
else:
return Quantity(m, unit=self)
except TypeError:
return NotImplemented
def __rlshift__(self, m):
try:
from .quantity import Quantity
return Quantity(m, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __rrshift__(self, m):
warnings.warn(">> is not implemented. Did you mean to convert "
"to a Quantity with unit {} using '<<'?".format(self),
AstropyWarning)
return NotImplemented
def __hash__(self):
if self._hash is None:
parts = ([str(self.scale)] +
[x.name for x in self.bases] +
[str(x) for x in self.powers])
self._hash = hash(tuple(parts))
return self._hash
def __getstate__(self):
# If we get pickled, we should *not* store the memoized members since
# hashes of strings vary between sessions.
state = self.__dict__.copy()
state.pop('_hash', None)
state.pop('_type_id', None)
return state
def __eq__(self, other):
if self is other:
return True
try:
other = Unit(other, parse_strict='silent')
except (ValueError, UnitsError, TypeError):
return NotImplemented
# Other is unit-like, but the test below requires it is a UnitBase
# instance; if it is not, give up (so that other can try).
if not isinstance(other, UnitBase):
return NotImplemented
try:
return is_effectively_unity(self._to(other))
except UnitsError:
return False
def __ne__(self, other):
return not (self == other)
def __le__(self, other):
scale = self._to(Unit(other))
return scale <= 1. or is_effectively_unity(scale)
def __ge__(self, other):
scale = self._to(Unit(other))
return scale >= 1. or is_effectively_unity(scale)
def __lt__(self, other):
return not (self >= other)
def __gt__(self, other):
return not (self <= other)
def __neg__(self):
return self * -1.
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : `~astropy.units.Unit`, str, or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
bool
"""
equivalencies = self._normalize_equivalencies(equivalencies)
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies=equivalencies)
for u in other)
other = Unit(other, parse_strict='silent')
return self._is_equivalent(other, equivalencies)
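    # A minimal usage sketch (an illustrative comment, not library code),
    # assuming ``from astropy import units as u``:
    #
    #     >>> u.m.is_equivalent(u.km)
    #     True
    #     >>> u.m.is_equivalent(u.s)
    #     False
    #     >>> u.nm.is_equivalent(u.Hz, equivalencies=u.spectral())
    #     True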
def _is_equivalent(self, other, equivalencies=[]):
"""Returns `True` if this unit is equivalent to `other`.
See `is_equivalent`, except that a proper Unit object should be
given (i.e., no string) and that the equivalency list should be
normalized using `_normalize_equivalencies`.
"""
if isinstance(other, UnrecognizedUnit):
return False
if (self._get_physical_type_id() ==
other._get_physical_type_id()):
return True
elif len(equivalencies):
unit = self.decompose()
other = other.decompose()
for a, b, forward, backward in equivalencies:
if b is None:
# after canceling, is what's left convertible
# to dimensionless (according to the equivalency)?
try:
(other/unit).decompose([a])
return True
except Exception:
pass
else:
if(a._is_equivalent(unit) and b._is_equivalent(other) or
b._is_equivalent(unit) and a._is_equivalent(other)):
return True
return False
def _apply_equivalencies(self, unit, other, equivalencies):
"""
Internal function (used from `_get_converter`) to apply
equivalence pairs.
"""
def make_converter(scale1, func, scale2):
def convert(v):
return func(_condition_arg(v) / scale1) * scale2
return convert
for funit, tunit, a, b in equivalencies:
if tunit is None:
try:
ratio_in_funit = (other.decompose() /
unit.decompose()).decompose([funit])
return make_converter(ratio_in_funit.scale, a, 1.)
except UnitsError:
pass
else:
try:
scale1 = funit._to(unit)
scale2 = tunit._to(other)
return make_converter(scale1, a, scale2)
except UnitsError:
pass
try:
scale1 = tunit._to(unit)
scale2 = funit._to(other)
return make_converter(scale1, b, scale2)
except UnitsError:
pass
def get_err_str(unit):
unit_str = unit.to_string('unscaled')
physical_type = unit.physical_type
if physical_type != 'unknown':
unit_str = f"'{unit_str}' ({physical_type})"
else:
unit_str = f"'{unit_str}'"
return unit_str
unit_str = get_err_str(unit)
other_str = get_err_str(other)
raise UnitConversionError(
f"{unit_str} and {other_str} are not convertible")
def _get_converter(self, other, equivalencies=[]):
"""Get a converter for values in ``self`` to ``other``.
If no conversion is necessary, returns ``unit_scale_converter``
(which is used as a check in quantity helpers).
"""
# First see if it is just a scaling.
try:
scale = self._to(other)
except UnitsError:
pass
else:
if scale == 1.:
return unit_scale_converter
else:
return lambda val: scale * _condition_arg(val)
# if that doesn't work, maybe we can do it with equivalencies?
try:
return self._apply_equivalencies(
self, other, self._normalize_equivalencies(equivalencies))
except UnitsError as exc:
# Last hope: maybe other knows how to do it?
# We assume the equivalencies have the unit itself as first item.
# TODO: maybe better for other to have a `_back_converter` method?
if hasattr(other, 'equivalencies'):
for funit, tunit, a, b in other.equivalencies:
if other is funit:
try:
return lambda v: b(self._get_converter(
tunit, equivalencies=equivalencies)(v))
except Exception:
pass
raise exc
def _to(self, other):
"""
Returns the scale to the specified unit.
See `to`, except that a Unit object should be given (i.e., no
string), and that all defaults are used, i.e., no
equivalencies and value=1.
"""
# There are many cases where we just want to ensure a Quantity is
# of a particular unit, without checking whether it's already in
# a particular unit. If we're being asked to convert from a unit
# to itself, we can short-circuit all of this.
if self is other:
return 1.0
# Don't presume decomposition is possible; e.g.,
# conversion to function units is through equivalencies.
if isinstance(other, UnitBase):
self_decomposed = self.decompose()
other_decomposed = other.decompose()
# Check quickly whether equivalent. This is faster than
# `is_equivalent`, because it doesn't generate the entire
# physical type list of both units. In other words it "fails
# fast".
if(self_decomposed.powers == other_decomposed.powers and
all(self_base is other_base for (self_base, other_base)
in zip(self_decomposed.bases, other_decomposed.bases))):
return self_decomposed.scale / other_decomposed.scale
raise UnitConversionError(
f"'{self!r}' is not a scaled version of '{other!r}'")
def to(self, other, value=UNITY, equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : unit-like
The unit to convert to.
value : int, float, or scalar array-like, optional
Value(s) in the current unit to be converted to the
specified unit. If not provided, defaults to 1.0
equivalencies : list of tuple
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent
"""
if other is self and value is UNITY:
return UNITY
else:
return self._get_converter(Unit(other),
equivalencies=equivalencies)(value)
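    # A minimal usage sketch (an illustrative comment, not library code),
    # assuming ``from astropy import units as u``:
    #
    #     >>> u.km.to(u.m)
    #     1000.0
    #     >>> u.km.to(u.m, [1, 2, 3])
    #     array([1000., 2000., 3000.])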
def in_units(self, other, value=1.0, equivalencies=[]):
"""
Alias for `to` for backward compatibility with pynbody.
"""
return self.to(
other, value=value, equivalencies=equivalencies)
def decompose(self, bases=set()):
"""
Return a unit object composed of only irreducible units.
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `UnitsError` if it is not possible
to do so.
Returns
-------
unit : `~astropy.units.CompositeUnit`
New object containing only irreducible unit objects.
"""
raise NotImplementedError()
def _compose(self, equivalencies=[], namespace=[], max_depth=2, depth=0,
cached_results=None):
def is_final_result(unit):
# Returns True if this result contains only the expected
# units
for base in unit.bases:
if base not in namespace:
return False
return True
unit = self.decompose()
key = hash(unit)
cached = cached_results.get(key)
if cached is not None:
if isinstance(cached, Exception):
raise cached
return cached
# Prevent too many levels of recursion
# And special case for dimensionless unit
if depth >= max_depth:
cached_results[key] = [unit]
return [unit]
# Make a list including all of the equivalent units
units = [unit]
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self._is_equivalent(funit):
scale = funit.decompose().scale / unit.scale
units.append(Unit(a(1.0 / scale) * tunit).decompose())
elif self._is_equivalent(tunit):
scale = tunit.decompose().scale / unit.scale
units.append(Unit(b(1.0 / scale) * funit).decompose())
else:
if self._is_equivalent(funit):
units.append(Unit(unit.scale))
# Store partial results
partial_results = []
# Store final results that reduce to a single unit or pair of
# units
if len(unit.bases) == 0:
final_results = [{unit}, set()]
else:
final_results = [set(), set()]
for tunit in namespace:
tunit_decomposed = tunit.decompose()
for u in units:
# If the unit is a base unit, look for an exact match
# to one of the bases of the target unit. If found,
# factor by the same power as the target unit's base.
# This allows us to factor out fractional powers
# without needing to do an exhaustive search.
if len(tunit_decomposed.bases) == 1:
for base, power in zip(u.bases, u.powers):
if tunit_decomposed._is_equivalent(base):
tunit = tunit ** power
tunit_decomposed = tunit_decomposed ** power
break
composed = (u / tunit_decomposed).decompose()
factored = composed * tunit
len_bases = len(composed.bases)
if is_final_result(factored) and len_bases <= 1:
final_results[len_bases].add(factored)
else:
partial_results.append(
(len_bases, composed, tunit))
# Do we have any minimal results?
for final_result in final_results:
if len(final_result):
results = final_results[0].union(final_results[1])
cached_results[key] = results
return results
partial_results.sort(key=operator.itemgetter(0))
# ...we have to recurse and try to further compose
results = []
for len_bases, composed, tunit in partial_results:
try:
composed_list = composed._compose(
equivalencies=equivalencies,
namespace=namespace,
max_depth=max_depth, depth=depth + 1,
cached_results=cached_results)
except UnitsError:
composed_list = []
for subcomposed in composed_list:
results.append(
(len(subcomposed.bases), subcomposed, tunit))
if len(results):
results.sort(key=operator.itemgetter(0))
min_length = results[0][0]
subresults = set()
for len_bases, composed, tunit in results:
if len_bases > min_length:
break
else:
factored = composed * tunit
if is_final_result(factored):
subresults.add(factored)
if len(subresults):
cached_results[key] = subresults
return subresults
if not is_final_result(self):
result = UnitsError(
f"Cannot represent unit {self} in terms of the given units")
cached_results[key] = result
raise result
cached_results[key] = [self]
return [self]
def compose(self, equivalencies=[], units=None, max_depth=2,
include_prefix_units=None):
"""
Return the simplest possible composite unit(s) that represent
the given unit. Since there may be multiple equally simple
compositions of the unit, a list of units is always returned.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
This list is in addition to possible global defaults set by, e.g.,
`set_enabled_equivalencies`.
Use `None` to turn off all equivalencies.
units : set of `~astropy.units.Unit`, optional
If not provided, any known units may be used to compose
into. Otherwise, ``units`` is a dict, module or sequence
containing the units to compose into.
max_depth : int, optional
The maximum recursion depth to use when composing into
composite units.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `True` if a sequence is passed in to ``units``,
`False` otherwise.
Returns
-------
units : list of `CompositeUnit`
A list of candidate compositions. These will all be
equally simple, but it may not be possible to
automatically determine which of the candidates are
better.
"""
# if units parameter is specified and is a sequence (list|tuple),
# include_prefix_units is turned on by default. Ex: units=[u.kpc]
if include_prefix_units is None:
include_prefix_units = isinstance(units, (list, tuple))
# Pre-normalize the equivalencies list
equivalencies = self._normalize_equivalencies(equivalencies)
# The namespace of units to compose into should be filtered to
# only include units with bases in common with self, otherwise
# they can't possibly provide useful results. Having too many
# destination units greatly increases the search space.
def has_bases_in_common(a, b):
if len(a.bases) == 0 and len(b.bases) == 0:
return True
for ab in a.bases:
for bb in b.bases:
if ab == bb:
return True
return False
def has_bases_in_common_with_equiv(unit, other):
if has_bases_in_common(unit, other):
return True
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if unit._is_equivalent(funit):
if has_bases_in_common(tunit.decompose(), other):
return True
elif unit._is_equivalent(tunit):
if has_bases_in_common(funit.decompose(), other):
return True
else:
if unit._is_equivalent(funit):
if has_bases_in_common(dimensionless_unscaled, other):
return True
return False
def filter_units(units):
filtered_namespace = set()
for tunit in units:
if (isinstance(tunit, UnitBase) and
(include_prefix_units or
not isinstance(tunit, PrefixUnit)) and
has_bases_in_common_with_equiv(
decomposed, tunit.decompose())):
filtered_namespace.add(tunit)
return filtered_namespace
decomposed = self.decompose()
if units is None:
units = filter_units(self._get_units_with_same_physical_type(
equivalencies=equivalencies))
if len(units) == 0:
units = get_current_unit_registry().non_prefix_units
elif isinstance(units, dict):
units = set(filter_units(units.values()))
elif inspect.ismodule(units):
units = filter_units(vars(units).values())
else:
units = filter_units(_flatten_units_collection(units))
def sort_results(results):
if not len(results):
return []
# Sort the results so the simplest ones appear first.
# Simplest is defined as "the minimum sum of absolute
# powers" (i.e. the fewest bases), and preference should
# be given to results where the sum of powers is positive
# and the scale is exactly equal to 1.0
results = list(results)
results.sort(key=lambda x: np.abs(x.scale))
results.sort(key=lambda x: np.sum(np.abs(x.powers)))
results.sort(key=lambda x: np.sum(x.powers) < 0.0)
results.sort(key=lambda x: not is_effectively_unity(x.scale))
last_result = results[0]
filtered = [last_result]
for result in results[1:]:
if str(result) != str(last_result):
filtered.append(result)
last_result = result
return filtered
return sort_results(self._compose(
equivalencies=equivalencies, namespace=units,
max_depth=max_depth, depth=0, cached_results={}))
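    # A minimal usage sketch (an illustrative comment, not library code).
    # The exact candidate list depends on the enabled unit registry, but
    # with the SI defaults the newton is typically found:
    #
    #     >>> from astropy import units as u
    #     >>> (u.kg * u.m / u.s ** 2).compose()   # doctest: +SKIP
    #     [Unit("N")]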
def to_system(self, system):
"""
Converts this unit into ones belonging to the given system.
Since more than one result may be possible, a list is always
returned.
Parameters
----------
system : module
The module that defines the unit system. Commonly used
ones include `astropy.units.si` and `astropy.units.cgs`.
To use your own module it must contain unit objects and a
sequence member named ``bases`` containing the base units of
the system.
Returns
-------
units : list of `CompositeUnit`
The list is ranked so that units containing only the base
units of that system will appear first.
"""
bases = set(system.bases)
def score(compose):
# In case that compose._bases has no elements we return
# 'np.inf' as 'score value'. It does not really matter which
# number we would return. This case occurs for instance for
# dimensionless quantities:
compose_bases = compose.bases
if len(compose_bases) == 0:
return np.inf
else:
sum = 0
for base in compose_bases:
if base in bases:
sum += 1
return sum / float(len(compose_bases))
x = self.decompose(bases=bases)
composed = x.compose(units=system)
composed = sorted(composed, key=score, reverse=True)
return composed
@lazyproperty
def si(self):
"""
Returns a copy of the current `Unit` instance in SI units.
"""
from . import si
return self.to_system(si)[0]
@lazyproperty
def cgs(self):
"""
Returns a copy of the current `Unit` instance with CGS units.
"""
from . import cgs
return self.to_system(cgs)[0]
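    # A minimal usage sketch (an illustrative comment, not library code;
    # representative output), assuming ``from astropy import units as u``:
    #
    #     >>> u.Pa.cgs
    #     Unit("10 Ba")
    #     >>> u.g.si
    #     Unit("0.001 kg")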
@property
def physical_type(self):
"""
Physical type(s) dimensionally compatible with the unit.
Returns
-------
`~astropy.units.physical.PhysicalType`
A representation of the physical type(s) of a unit.
Examples
--------
>>> from astropy import units as u
>>> u.m.physical_type
PhysicalType('length')
>>> (u.m ** 2 / u.s).physical_type
PhysicalType({'diffusivity', 'kinematic viscosity'})
Physical types can be compared to other physical types
(recommended in packages) or to strings.
>>> area = (u.m ** 2).physical_type
>>> area == u.m.physical_type ** 2
True
>>> area == "area"
True
`~astropy.units.physical.PhysicalType` objects can be used for
dimensional analysis.
>>> number_density = u.m.physical_type ** -3
>>> velocity = (u.m / u.s).physical_type
>>> number_density * velocity
PhysicalType('particle flux')
"""
from . import physical
return physical.get_physical_type(self)
def _get_units_with_same_physical_type(self, equivalencies=[]):
"""
Return a list of registered units with the same physical type
as this unit.
This function is used by Quantity to add its built-in
conversions to equivalent units.
This is a private method, since end users should be encouraged
to use the more powerful `compose` and `find_equivalent_units`
methods (which use this under the hood).
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also pull options from.
See :ref:`astropy:unit_equivalencies`. It must already be
normalized using `_normalize_equivalencies`.
"""
unit_registry = get_current_unit_registry()
units = set(unit_registry.get_units_with_physical_type(self))
for funit, tunit, a, b in equivalencies:
if tunit is not None:
if self.is_equivalent(funit) and tunit not in units:
units.update(
unit_registry.get_units_with_physical_type(tunit))
if self._is_equivalent(tunit) and funit not in units:
units.update(
unit_registry.get_units_with_physical_type(funit))
else:
if self.is_equivalent(funit):
units.add(dimensionless_unscaled)
return units
class EquivalentUnitsList(list):
"""
A class to handle pretty-printing the result of
`find_equivalent_units`.
"""
HEADING_NAMES = ('Primary name', 'Unit definition', 'Aliases')
ROW_LEN = 3 # len(HEADING_NAMES), but hard-code since it is constant
NO_EQUIV_UNITS_MSG = 'There are no equivalent units'
def __repr__(self):
if len(self) == 0:
return self.NO_EQUIV_UNITS_MSG
else:
lines = self._process_equivalent_units(self)
lines.insert(0, self.HEADING_NAMES)
widths = [0] * self.ROW_LEN
for line in lines:
for i, col in enumerate(line):
widths[i] = max(widths[i], len(col))
f = " {{0:<{0}s}} | {{1:<{1}s}} | {{2:<{2}s}}".format(*widths)
lines = [f.format(*line) for line in lines]
lines = (lines[0:1] +
['['] +
[f'{x} ,' for x in lines[1:]] +
[']'])
return '\n'.join(lines)
def _repr_html_(self):
"""
Outputs a HTML table representation within Jupyter notebooks.
"""
if len(self) == 0:
return f"<p>{self.NO_EQUIV_UNITS_MSG}</p>"
else:
# HTML tags to use to compose the table in HTML
blank_table = '<table style="width:50%">{}</table>'
blank_row_container = "<tr>{}</tr>"
heading_row_content = "<th>{}</th>" * self.ROW_LEN
data_row_content = "<td>{}</td>" * self.ROW_LEN
# The HTML will be rendered & the table is simple, so don't
# bother to include newlines & indentation for the HTML code.
heading_row = blank_row_container.format(
heading_row_content.format(*self.HEADING_NAMES))
data_rows = self._process_equivalent_units(self)
all_rows = heading_row
for row in data_rows:
html_row = blank_row_container.format(
data_row_content.format(*row))
all_rows += html_row
return blank_table.format(all_rows)
@staticmethod
def _process_equivalent_units(equiv_units_data):
"""
            Extract the attributes of the equivalent units and sort them
            prior to formatting.
"""
processed_equiv_units = []
for u in equiv_units_data:
irred = u.decompose().to_string()
if irred == u.name:
irred = 'irreducible'
processed_equiv_units.append(
(u.name, irred, ', '.join(u.aliases)))
processed_equiv_units.sort()
return processed_equiv_units
def find_equivalent_units(self, equivalencies=[], units=None,
include_prefix_units=False):
"""
Return a list of all the units that are the same type as ``self``.
Parameters
----------
equivalencies : list of tuple
A list of equivalence pairs to also list. See
:ref:`astropy:unit_equivalencies`.
Any list given, including an empty one, supersedes global defaults
that may be in effect (as set by `set_enabled_equivalencies`)
units : set of `~astropy.units.Unit`, optional
If not provided, all defined units will be searched for
equivalencies. Otherwise, may be a dict, module or
sequence containing the units to search for equivalencies.
include_prefix_units : bool, optional
When `True`, include prefixed units in the result.
Default is `False`.
Returns
-------
units : list of `UnitBase`
A list of unit objects that match ``u``. A subclass of
`list` (``EquivalentUnitsList``) is returned that
pretty-prints the list of units when output.
"""
results = self.compose(
equivalencies=equivalencies, units=units, max_depth=1,
include_prefix_units=include_prefix_units)
results = {x.bases[0] for x in results if len(x.bases) == 1}
return self.EquivalentUnitsList(results)
def is_unity(self):
"""
Returns `True` if the unit is unscaled and dimensionless.
"""
return False
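# A minimal usage sketch (an illustrative comment, not library code),
# showing the arithmetic that ``UnitBase`` defines above, assuming
# ``from astropy import units as u``:
#
#     >>> u.m / u.s
#     Unit("m / s")
#     >>> (u.m / u.m).is_unity()
#     True
#     >>> 3.0 << u.m   # __rlshift__ attaches a unit to a value
#     <Quantity 3. m>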
class NamedUnit(UnitBase):
"""
The base class of units that have a name.
Parameters
----------
st : str, list of str, 2-tuple
The name of the unit. If a list of strings, the first element
is the canonical (short) name, and the rest of the elements
are aliases. If a tuple of lists, the first element is a list
of short names, and the second element is a list of long
names; all but the first short name are considered "aliases".
Each name *should* be a valid Python identifier to make it
easy to access, but this is not required.
namespace : dict, optional
When provided, inject the unit, and all of its aliases, in the
given namespace dictionary. If a unit by the same name is
already in the namespace, a ValueError is raised.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, doc=None, format=None, namespace=None):
UnitBase.__init__(self)
if isinstance(st, (bytes, str)):
self._names = [st]
self._short_names = [st]
self._long_names = []
elif isinstance(st, tuple):
if not len(st) == 2:
raise ValueError("st must be string, list or 2-tuple")
self._names = st[0] + [n for n in st[1] if n not in st[0]]
if not len(self._names):
raise ValueError("must provide at least one name")
self._short_names = st[0][:]
self._long_names = st[1][:]
else:
if len(st) == 0:
raise ValueError(
"st list must have at least one entry")
self._names = st[:]
self._short_names = [st[0]]
self._long_names = st[1:]
if format is None:
format = {}
self._format = format
if doc is None:
doc = self._generate_doc()
else:
doc = textwrap.dedent(doc)
doc = textwrap.fill(doc)
self.__doc__ = doc
self._inject(namespace)
def _generate_doc(self):
"""
Generate a docstring for the unit if the user didn't supply
one. This is only used from the constructor and may be
overridden in subclasses.
"""
names = self.names
if len(self.names) > 1:
return "{1} ({0})".format(*names[:2])
else:
return names[0]
def get_format_name(self, format):
"""
Get a name for this unit that is specific to a particular
format.
Uses the dictionary passed into the `format` kwarg in the
constructor.
Parameters
----------
format : str
The name of the format
Returns
-------
name : str
The name of the unit for the given format.
"""
return self._format.get(format, self.name)
@property
def names(self):
"""
Returns all of the names associated with this unit.
"""
return self._names
@property
def name(self):
"""
Returns the canonical (short) name associated with this unit.
"""
return self._names[0]
@property
def aliases(self):
"""
Returns the alias (long) names for this unit.
"""
return self._names[1:]
@property
def short_names(self):
"""
Returns all of the short names associated with this unit.
"""
return self._short_names
@property
def long_names(self):
"""
Returns all of the long names associated with this unit.
"""
return self._long_names
def _inject(self, namespace=None):
"""
Injects the unit, and all of its aliases, in the given
namespace dictionary.
"""
if namespace is None:
return
# Loop through all of the names first, to ensure all of them
# are new, then add them all as a single "transaction" below.
for name in self._names:
if name in namespace and self != namespace[name]:
raise ValueError(
"Object with name {!r} already exists in "
"given namespace ({!r}).".format(
name, namespace[name]))
for name in self._names:
namespace[name] = self
def _recreate_irreducible_unit(cls, names, registered):
"""
This is used to reconstruct units when passed around by
multiprocessing.
"""
registry = get_current_unit_registry().registry
if names[0] in registry:
# If in local registry return that object.
return registry[names[0]]
else:
# otherwise, recreate the unit.
unit = cls(names)
if registered:
# If not in local registry but registered in origin registry,
# enable unit in local registry.
get_current_unit_registry().add_enabled_units([unit])
return unit
class IrreducibleUnit(NamedUnit):
"""
Irreducible units are the units that all other units are defined
in terms of.
Examples are meters, seconds, kilograms, amperes, etc. There is
    only one instance of such a unit per type.
"""
def __reduce__(self):
# When IrreducibleUnit objects are passed to other processes
# over multiprocessing, they need to be recreated to be the
# ones already in the subprocesses' namespace, not new
# objects, or they will be considered "unconvertible".
# Therefore, we have a custom pickler/unpickler that
# understands how to recreate the Unit on the other side.
registry = get_current_unit_registry().registry
return (_recreate_irreducible_unit,
(self.__class__, list(self.names), self.name in registry),
self.__getstate__())
@property
def represents(self):
"""The unit that this named unit represents.
For an irreducible unit, that is always itself.
"""
return self
def decompose(self, bases=set()):
if len(bases) and self not in bases:
for base in bases:
try:
scale = self._to(base)
except UnitsError:
pass
else:
if is_effectively_unity(scale):
return base
else:
return CompositeUnit(scale, [base], [1],
_error_check=False)
raise UnitConversionError(
f"Unit {self} can not be decomposed into the requested bases")
return self
class UnrecognizedUnit(IrreducibleUnit):
"""
A unit that did not parse correctly. This allows for
round-tripping it as a string, but no unit operations actually work
on it.
Parameters
----------
st : str
The name of the unit.
"""
# For UnrecognizedUnits, we want to use "standard" Python
# pickling, not the special case that is used for
# IrreducibleUnits.
__reduce__ = object.__reduce__
def __repr__(self):
return f"UnrecognizedUnit({str(self)})"
def __bytes__(self):
return self.name.encode('ascii', 'replace')
def __str__(self):
return self.name
def to_string(self, format=None):
return self.name
def _unrecognized_operator(self, *args, **kwargs):
raise ValueError(
"The unit {!r} is unrecognized, so all arithmetic operations "
"with it are invalid.".format(self.name))
__pow__ = __truediv__ = __rtruediv__ = __mul__ = __rmul__ = __lt__ = \
__gt__ = __le__ = __ge__ = __neg__ = _unrecognized_operator
def __eq__(self, other):
try:
other = Unit(other, parse_strict='silent')
except (ValueError, UnitsError, TypeError):
return NotImplemented
return isinstance(other, type(self)) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def is_equivalent(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
return self == other
def _get_converter(self, other, equivalencies=None):
self._normalize_equivalencies(equivalencies)
raise ValueError(
"The unit {!r} is unrecognized. It can not be converted "
"to other units.".format(self.name))
def get_format_name(self, format):
return self.name
def is_unity(self):
return False
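# A minimal usage sketch (an illustrative comment, not library code;
# 'Bogon' is a made-up unit name used purely for illustration):
#
#     >>> from astropy import units as u
#     >>> bad = u.Unit('Bogon', parse_strict='silent')
#     >>> bad
#     UnrecognizedUnit(Bogon)
#     >>> bad == u.Unit('Bogon', parse_strict='silent')
#     True
#     >>> bad * u.m   # doctest: +SKIP
#     ValueError: The unit 'Bogon' is unrecognized, so all arithmetic ...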
class _UnitMetaClass(type):
"""
This metaclass exists because the Unit constructor should
sometimes return instances that already exist. This "overrides"
the constructor before the new instance is actually created, so we
can return an existing one.
"""
def __call__(self, s="", represents=None, format=None, namespace=None,
doc=None, parse_strict='raise'):
# Short-circuit if we're already a unit
if hasattr(s, '_get_physical_type_id'):
return s
# turn possible Quantity input for s or represents into a Unit
from .quantity import Quantity
if isinstance(represents, Quantity):
if is_effectively_unity(represents.value):
represents = represents.unit
else:
represents = CompositeUnit(represents.value *
represents.unit.scale,
bases=represents.unit.bases,
powers=represents.unit.powers,
_error_check=False)
if isinstance(s, Quantity):
if is_effectively_unity(s.value):
s = s.unit
else:
s = CompositeUnit(s.value * s.unit.scale,
bases=s.unit.bases,
powers=s.unit.powers,
_error_check=False)
# now decide what we really need to do; define derived Unit?
if isinstance(represents, UnitBase):
# This has the effect of calling the real __new__ and
# __init__ on the Unit class.
return super().__call__(
s, represents, format=format, namespace=namespace, doc=doc)
# or interpret a Quantity (now became unit), string or number?
if isinstance(s, UnitBase):
return s
elif isinstance(s, (bytes, str)):
if len(s.strip()) == 0:
# Return the NULL unit
return dimensionless_unscaled
if format is None:
format = unit_format.Generic
f = unit_format.get_format(format)
if isinstance(s, bytes):
s = s.decode('ascii')
try:
return f.parse(s)
except NotImplementedError:
raise
except Exception as e:
if parse_strict == 'silent':
pass
else:
# Deliberately not issubclass here. Subclasses
# should use their name.
if f is not unit_format.Generic:
format_clause = f.name + ' '
else:
format_clause = ''
msg = ("'{}' did not parse as {}unit: {} "
"If this is meant to be a custom unit, "
"define it with 'u.def_unit'. To have it "
"recognized inside a file reader or other code, "
"enable it with 'u.add_enabled_units'. "
"For details, see "
"https://docs.astropy.org/en/latest/units/combining_and_defining.html"
.format(s, format_clause, str(e)))
if parse_strict == 'raise':
raise ValueError(msg)
elif parse_strict == 'warn':
warnings.warn(msg, UnitsWarning)
else:
raise ValueError("'parse_strict' must be 'warn', "
"'raise' or 'silent'")
return UnrecognizedUnit(s)
elif isinstance(s, (int, float, np.floating, np.integer)):
return CompositeUnit(s, [], [], _error_check=False)
elif isinstance(s, tuple):
from .structured import StructuredUnit
return StructuredUnit(s)
elif s is None:
raise TypeError("None is not a valid Unit")
else:
raise TypeError(f"{s} can not be converted to a Unit")
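# A minimal usage sketch (an illustrative comment, not library code),
# showing the effect of the metaclass: constructing a Unit from an
# existing unit, or from its string name, returns the already-existing
# instance rather than a new object:
#
#     >>> from astropy import units as u
#     >>> u.Unit('m') is u.m
#     True
#     >>> u.Unit(u.m) is u.m
#     True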
class Unit(NamedUnit, metaclass=_UnitMetaClass):
"""
The main unit class.
    There are a number of different ways to construct a Unit, but the
    constructor always returns a `UnitBase` instance.  If the arguments refer to
an already-existing unit, that existing unit instance is returned,
rather than a new one.
- From a string::
        Unit(s, format=None, parse_strict='raise')
Construct from a string representing a (possibly compound) unit.
The optional `format` keyword argument specifies the format the
string is in, by default ``"generic"``. For a description of
the available formats, see `astropy.units.format`.
The optional ``parse_strict`` keyword controls what happens when an
unrecognized unit string is passed in. It may be one of the following:
- ``'raise'``: (default) raise a ValueError exception.
- ``'warn'``: emit a Warning, and return an
`UnrecognizedUnit` instance.
- ``'silent'``: return an `UnrecognizedUnit` instance.
- From a number::
Unit(number)
Creates a dimensionless unit.
- From a `UnitBase` instance::
Unit(unit)
Returns the given unit unchanged.
- From no arguments::
Unit()
Returns the dimensionless unit.
    - The last form, which creates a new `Unit`, is described in detail
      below.
See also: https://docs.astropy.org/en/stable/units/
Parameters
----------
st : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance
The unit that this named unit represents.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to have it
displayed as ``\\Omega`` by the ``latex`` formatter. In that
case, `format` argument should be set to::
{'latex': r'\\Omega'}
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace.
Raises
------
ValueError
If any of the given unit names are already in the registry.
ValueError
If any of the given unit names are not valid Python tokens.
"""
def __init__(self, st, represents=None, doc=None,
format=None, namespace=None):
represents = Unit(represents)
self._represents = represents
NamedUnit.__init__(self, st, namespace=namespace, doc=doc,
format=format)
@property
def represents(self):
"""The unit that this named unit represents."""
return self._represents
def decompose(self, bases=set()):
return self._represents.decompose(bases=bases)
def is_unity(self):
return self._represents.is_unity()
def __hash__(self):
if self._hash is None:
self._hash = hash((self.name, self._represents))
return self._hash
@classmethod
def _from_physical_type_id(cls, physical_type_id):
# get string bases and powers from the ID tuple
bases = [cls(base) for base, _ in physical_type_id]
powers = [power for _, power in physical_type_id]
if len(physical_type_id) == 1 and powers[0] == 1:
unit = bases[0]
else:
unit = CompositeUnit(1, bases, powers,
_error_check=False)
return unit
class PrefixUnit(Unit):
"""
A unit that is simply a SI-prefixed version of another unit.
For example, ``mm`` is a `PrefixUnit` of ``.001 * m``.
The constructor is the same as for `Unit`.
"""
class CompositeUnit(UnitBase):
"""
Create a composite unit using expressions of previously defined
units.
Direct use of this class is not recommended. Instead use the
factory function `Unit` and arithmetic operators to compose
units.
Parameters
----------
scale : number
A scaling factor for the unit.
bases : sequence of `UnitBase`
A sequence of units this unit is composed of.
powers : sequence of numbers
A sequence of powers (in parallel with ``bases``) for each
of the base units.
"""
_decomposed_cache = None
def __init__(self, scale, bases, powers, decompose=False,
decompose_bases=set(), _error_check=True):
# There are many cases internal to astropy.units where we
# already know that all the bases are Unit objects, and the
# powers have been validated. In those cases, we can skip the
# error checking for performance reasons. When the private
# kwarg `_error_check` is False, the error checking is turned
# off.
if _error_check:
for base in bases:
if not isinstance(base, UnitBase):
raise TypeError(
"bases must be sequence of UnitBase instances")
powers = [validate_power(p) for p in powers]
if not decompose and len(bases) == 1 and powers[0] >= 0:
# Short-cut; with one unit there's nothing to expand and gather,
# as that has happened already when creating the unit. But do only
# positive powers, since for negative powers we need to re-sort.
unit = bases[0]
power = powers[0]
if power == 1:
scale *= unit.scale
self._bases = unit.bases
self._powers = unit.powers
elif power == 0:
self._bases = []
self._powers = []
else:
scale *= unit.scale ** power
self._bases = unit.bases
self._powers = [operator.mul(*resolve_fractions(p, power))
for p in unit.powers]
self._scale = sanitize_scale(scale)
else:
# Regular case: use inputs as preliminary scale, bases, and powers,
# then "expand and gather" identical bases, sanitize the scale, &c.
self._scale = scale
self._bases = bases
self._powers = powers
self._expand_and_gather(decompose=decompose,
bases=decompose_bases)
def __repr__(self):
if len(self._bases):
return super().__repr__()
else:
if self._scale != 1.0:
return f'Unit(dimensionless with a scale of {self._scale})'
else:
return 'Unit(dimensionless)'
@property
def scale(self):
"""
Return the scale of the composite unit.
"""
return self._scale
@property
def bases(self):
"""
Return the bases of the composite unit.
"""
return self._bases
@property
def powers(self):
"""
Return the powers of the composite unit.
"""
return self._powers
def _expand_and_gather(self, decompose=False, bases=set()):
def add_unit(unit, power, scale):
if bases and unit not in bases:
for base in bases:
try:
scale *= unit._to(base) ** power
except UnitsError:
pass
else:
unit = base
break
if unit in new_parts:
a, b = resolve_fractions(new_parts[unit], power)
new_parts[unit] = a + b
else:
new_parts[unit] = power
return scale
new_parts = {}
scale = self._scale
for b, p in zip(self._bases, self._powers):
if decompose and b not in bases:
b = b.decompose(bases=bases)
if isinstance(b, CompositeUnit):
scale *= b._scale ** p
for b_sub, p_sub in zip(b._bases, b._powers):
a, b = resolve_fractions(p_sub, p)
scale = add_unit(b_sub, a * b, scale)
else:
scale = add_unit(b, p, scale)
new_parts = [x for x in new_parts.items() if x[1] != 0]
new_parts.sort(key=lambda x: (-x[1], getattr(x[0], 'name', '')))
self._bases = [x[0] for x in new_parts]
self._powers = [x[1] for x in new_parts]
self._scale = sanitize_scale(scale)
def __copy__(self):
"""
For compatibility with python copy module.
"""
return CompositeUnit(self._scale, self._bases[:], self._powers[:])
def decompose(self, bases=set()):
if len(bases) == 0 and self._decomposed_cache is not None:
return self._decomposed_cache
for base in self.bases:
if (not isinstance(base, IrreducibleUnit) or
(len(bases) and base not in bases)):
break
else:
if len(bases) == 0:
self._decomposed_cache = self
return self
x = CompositeUnit(self.scale, self.bases, self.powers, decompose=True,
decompose_bases=bases)
if len(bases) == 0:
self._decomposed_cache = x
return x
def is_unity(self):
unit = self.decompose()
return len(unit.bases) == 0 and unit.scale == 1.0
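# A minimal usage sketch (an illustrative comment, not library code),
# showing the scale/bases/powers decomposition that ``CompositeUnit``
# maintains, assuming ``from astropy import units as u``:
#
#     >>> q = u.km / u.s
#     >>> q.scale, q.bases, q.powers
#     (1.0, [Unit("km"), Unit("s")], [1, -1])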
si_prefixes = [
(['Y'], ['yotta'], 1e24),
(['Z'], ['zetta'], 1e21),
(['E'], ['exa'], 1e18),
(['P'], ['peta'], 1e15),
(['T'], ['tera'], 1e12),
(['G'], ['giga'], 1e9),
(['M'], ['mega'], 1e6),
(['k'], ['kilo'], 1e3),
(['h'], ['hecto'], 1e2),
(['da'], ['deka', 'deca'], 1e1),
(['d'], ['deci'], 1e-1),
(['c'], ['centi'], 1e-2),
(['m'], ['milli'], 1e-3),
(['u'], ['micro'], 1e-6),
(['n'], ['nano'], 1e-9),
(['p'], ['pico'], 1e-12),
(['f'], ['femto'], 1e-15),
(['a'], ['atto'], 1e-18),
(['z'], ['zepto'], 1e-21),
(['y'], ['yocto'], 1e-24)
]
binary_prefixes = [
(['Ki'], ['kibi'], 2. ** 10),
(['Mi'], ['mebi'], 2. ** 20),
(['Gi'], ['gibi'], 2. ** 30),
(['Ti'], ['tebi'], 2. ** 40),
(['Pi'], ['pebi'], 2. ** 50),
(['Ei'], ['exbi'], 2. ** 60)
]
def _add_prefixes(u, excludes=[], namespace=None, prefixes=False):
"""
Set up all of the standard metric prefixes for a unit. This
function should not be used directly, but instead use the
`prefixes` kwarg on `def_unit`.
Parameters
----------
excludes : list of str, optional
Any prefixes to exclude from creation to avoid namespace
collisions.
namespace : dict, optional
When provided, inject the unit (and all of its aliases) into
the given namespace dictionary.
prefixes : list, optional
When provided, it is a list of prefix definitions of the form:
            (short_names, long_names, factor)
"""
if prefixes is True:
prefixes = si_prefixes
elif prefixes is False:
prefixes = []
for short, full, factor in prefixes:
names = []
format = {}
for prefix in short:
if prefix in excludes:
continue
for alias in u.short_names:
names.append(prefix + alias)
# This is a hack to use Greek mu as a prefix
# for some formatters.
if prefix == 'u':
format['latex'] = r'\mu ' + u.get_format_name('latex')
format['unicode'] = '\N{MICRO SIGN}' + u.get_format_name('unicode')
for key, val in u._format.items():
format.setdefault(key, prefix + val)
for prefix in full:
if prefix in excludes:
continue
for alias in u.long_names:
names.append(prefix + alias)
if len(names):
PrefixUnit(names, CompositeUnit(factor, [u], [1],
_error_check=False),
namespace=namespace, format=format)
def def_unit(s, represents=None, doc=None, format=None, prefixes=False,
exclude_prefixes=[], namespace=None):
"""
Factory function for defining new units.
Parameters
----------
s : str or list of str
The name of the unit. If a list, the first element is the
canonical (short) name, and the rest of the elements are
aliases.
represents : UnitBase instance, optional
The unit that this named unit represents. If not provided,
a new `IrreducibleUnit` is created.
doc : str, optional
A docstring describing the unit.
format : dict, optional
A mapping to format-specific representations of this unit.
For example, for the ``Ohm`` unit, it might be nice to
have it displayed as ``\\Omega`` by the ``latex``
formatter. In that case, `format` argument should be set
to::
{'latex': r'\\Omega'}
prefixes : bool or list, optional
When `True`, generate all of the SI prefixed versions of the
unit as well. For example, for a given unit ``m``, will
generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of
prefix definitions of the form:
            (short_names, long_names, factor)
Default is `False`. This function always returns the base
unit object, even if multiple scaled versions of the unit were
created.
exclude_prefixes : list of str, optional
If any of the SI prefixes need to be excluded, they may be
listed here. For example, ``Pa`` can be interpreted either as
"petaannum" or "Pascal". Therefore, when defining the
prefixes for ``a``, ``exclude_prefixes`` should be set to
``["P"]``.
namespace : dict, optional
When provided, inject the unit (and all of its aliases and
prefixes), into the given namespace dictionary.
Returns
-------
unit : `~astropy.units.UnitBase`
The newly-defined unit, or a matching unit that was already
defined.
"""
if represents is not None:
result = Unit(s, represents, namespace=namespace, doc=doc,
format=format)
else:
result = IrreducibleUnit(
s, namespace=namespace, doc=doc, format=format)
if prefixes:
_add_prefixes(result, excludes=exclude_prefixes, namespace=namespace,
prefixes=prefixes)
return result
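# A minimal usage sketch (an illustrative comment, not library code;
# ``bakers_fortnight`` is a made-up unit used purely for illustration):
#
#     >>> from astropy import units as u
#     >>> bakers_fortnight = u.def_unit('bakers_fortnight', 13 * u.day)
#     >>> (3 * bakers_fortnight).to(u.day)
#     <Quantity 39. day>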
def _condition_arg(value):
"""
    Validate that the value is acceptable for conversion purposes.
    The value is converted into an array if it is not a scalar but can
    be converted into one.
Parameters
----------
value : int or float value, or sequence of such values
Returns
-------
Scalar value or numpy array
Raises
------
ValueError
If value is not as expected
"""
if isinstance(value, (np.ndarray, float, int, complex, np.void)):
return value
avalue = np.array(value)
if avalue.dtype.kind not in ['i', 'f', 'c']:
raise ValueError("Value not scalar compatible or convertible to "
"an int, float, or complex array")
return avalue
def unit_scale_converter(val):
"""Function that just multiplies the value by unity.
This is a separate function so it can be recognized and
discarded in unit conversion.
"""
return 1. * _condition_arg(val)
dimensionless_unscaled = CompositeUnit(1, [], [], _error_check=False)
# Abbreviation of the above, see #1980
one = dimensionless_unscaled
# Maintain error in old location for backward compatibility
# TODO: Is this still needed? Should there be a deprecation warning?
unit_format.fits.UnitScaleError = UnitScaleError
|
af18797ee2348ae8e489114f96680e652ebd1cdbfe04a46c7471e8091a9532a8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains the fundamental classes used for representing
coordinates in astropy.
"""
import warnings
from collections import namedtuple
import numpy as np
from . import angle_formats as form
from astropy import units as u
from astropy.utils import isiterable
__all__ = ['Angle', 'Latitude', 'Longitude']
# these are used by the `hms` and `dms` attributes
hms_tuple = namedtuple('hms_tuple', ('h', 'm', 's'))
dms_tuple = namedtuple('dms_tuple', ('d', 'm', 's'))
signed_dms_tuple = namedtuple('signed_dms_tuple', ('sign', 'd', 'm', 's'))
class Angle(u.SpecificTypeQuantity):
"""
One or more angular value(s) with units equivalent to radians or degrees.
An angle can be specified either as an array, scalar, tuple (see
below), string, `~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports a variety of formats.
The examples below illustrate common ways of initializing an `Angle`
object. First some imports::
>>> from astropy.coordinates import Angle
>>> from astropy import units as u
The angle values can now be provided::
>>> Angle('10.2345d')
<Angle 10.2345 deg>
>>> Angle(['10.2345d', '-20d'])
<Angle [ 10.2345, -20. ] deg>
>>> Angle('1:2:30.43 degrees')
<Angle 1.04178611 deg>
>>> Angle('1 2 0 hours')
<Angle 1.03333333 hourangle>
>>> Angle(np.arange(1, 8), unit=u.deg)
<Angle [1., 2., 3., 4., 5., 6., 7.] deg>
>>> Angle('1°2′3″')
<Angle 1.03416667 deg>
>>> Angle('1°2′3″N')
<Angle 1.03416667 deg>
>>> Angle('1d2m3.4s')
<Angle 1.03427778 deg>
>>> Angle('1d2m3.4sS')
<Angle -1.03427778 deg>
>>> Angle('-1h2m3s')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2m3sE')
<Angle -1.03416667 hourangle>
>>> Angle('-1h2.5m')
<Angle -1.04166667 hourangle>
>>> Angle('-1h2.5mW')
<Angle 1.04166667 hourangle>
>>> Angle('-1:2.5', unit=u.deg)
<Angle -1.04166667 deg>
>>> Angle(10.2345 * u.deg)
<Angle 10.2345 deg>
>>> Angle(Angle(10.2345 * u.deg))
<Angle 10.2345 deg>
Parameters
----------
angle : `~numpy.array`, scalar, `~astropy.units.Quantity`, :class:`~astropy.coordinates.Angle`
The angle value. If a tuple, will be interpreted as ``(h, m,
s)`` or ``(d, m, s)`` depending on ``unit``. If a string, it
will be interpreted following the rules described above.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
dtype : `~numpy.dtype`, optional
See `~astropy.units.Quantity`.
copy : bool, optional
See `~astropy.units.Quantity`.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
"""
_equivalent_unit = u.radian
_include_easy_conversion_members = True
def __new__(cls, angle, unit=None, dtype=np.inexact, copy=True, **kwargs):
if not isinstance(angle, u.Quantity):
if unit is not None:
unit = cls._convert_unit_to_angle_unit(u.Unit(unit))
if isinstance(angle, tuple):
angle = cls._tuple_to_float(angle, unit)
elif isinstance(angle, str):
angle, angle_unit = form.parse_angle(angle, unit)
if angle_unit is None:
angle_unit = unit
if isinstance(angle, tuple):
if angle_unit == u.hourangle:
form._check_hour_range(angle[0])
form._check_minute_range(angle[1])
a = np.abs(angle[0]) + angle[1] / 60.
if len(angle) == 3:
form._check_second_range(angle[2])
a += angle[2] / 3600.
angle = np.copysign(a, angle[0])
if angle_unit is not unit:
# Possible conversion to `unit` will be done below.
angle = u.Quantity(angle, angle_unit, copy=False)
elif (isiterable(angle) and
not (isinstance(angle, np.ndarray) and
angle.dtype.kind not in 'SUVO')):
angle = [Angle(x, unit, copy=False) for x in angle]
return super().__new__(cls, angle, unit, dtype=dtype, copy=copy,
**kwargs)
@staticmethod
def _tuple_to_float(angle, unit):
"""
Converts an angle represented as a 3-tuple or 2-tuple into a floating
point number in the given unit.
"""
# TODO: Numpy array of tuples?
if unit == u.hourangle:
return form.hms_to_hours(*angle)
elif unit == u.degree:
return form.dms_to_degrees(*angle)
else:
raise u.UnitsError(f"Can not parse '{angle}' as unit '{unit}'")
@staticmethod
def _convert_unit_to_angle_unit(unit):
return u.hourangle if unit is u.hour else unit
def _set_unit(self, unit):
super()._set_unit(self._convert_unit_to_angle_unit(unit))
@property
def hour(self):
"""
The angle's value in hours (read-only property).
"""
return self.hourangle
@property
def hms(self):
"""
The angle's value in hours, as a named tuple with ``(h, m, s)``
members. (This is a read-only property.)
"""
return hms_tuple(*form.hours_to_hms(self.hourangle))
@property
def dms(self):
"""
The angle's value in degrees, as a named tuple with ``(d, m, s)``
members. (This is a read-only property.)
"""
return dms_tuple(*form.degrees_to_dms(self.degree))
@property
def signed_dms(self):
"""
The angle's value in degrees, as a named tuple with ``(sign, d, m, s)``
members. The ``d``, ``m``, ``s`` are thus always positive, and the sign of
the angle is given by ``sign``. (This is a read-only property.)
This is primarily intended for use with `dms` to generate string
representations of coordinates that are correct for negative angles.
"""
return signed_dms_tuple(np.sign(self.degree),
*form.degrees_to_dms(np.abs(self.degree)))
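    # A minimal usage sketch (an illustrative comment, not library code;
    # representative output) for the sexagesimal properties above:
    #
    #     >>> from astropy.coordinates import Angle
    #     >>> from astropy import units as u
    #     >>> Angle('1h30m').hms
    #     hms_tuple(h=1.0, m=30.0, s=0.0)
    #     >>> Angle(-1.5, u.deg).signed_dms
    #     signed_dms_tuple(sign=-1.0, d=1.0, m=30.0, s=0.0)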
def to_string(self, unit=None, decimal=False, sep='fromunit',
precision=None, alwayssign=False, pad=False,
fields=3, format=None):
""" A string representation of the angle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. Must be an angular unit. If not
provided, the unit used to initialize the angle will be
used.
decimal : bool, optional
If `True`, a decimal representation will be used, otherwise
the returned string will be in sexagesimal form.
sep : str, optional
The separator between numbers in a sexagesimal
representation. E.g., if it is ':', the result is
``'12:41:11.1241'``. Also accepts 2 or 3 separators. E.g.,
``sep='hms'`` would give the result ``'12h41m11.1241s'``, or
            ``sep='-:'`` would yield ``'11-21:17.124'``. Alternatively, the
special string 'fromunit' means 'dms' if the unit is
degrees, or 'hms' if the unit is hours.
precision : int, optional
The level of decimal precision. If ``decimal`` is `True`,
this is the raw precision, otherwise it gives the
precision of the last place of the sexagesimal
representation (seconds). If `None`, or not provided, the
number of decimal places is determined by the value, and
will be between 0-8 decimal places as required.
alwayssign : bool, optional
If `True`, include the sign no matter what. If `False`,
only include the sign if it is negative.
pad : bool, optional
If `True`, include leading zeros when needed to ensure a
fixed number of characters for sexagesimal representation.
fields : int, optional
Specifies the number of fields to display when outputting
sexagesimal notation. For example:
- fields == 1: ``'5d'``
- fields == 2: ``'5d45m'``
- fields == 3: ``'5d45m32.5s'``
By default, all fields are displayed.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
- 'latex_inline': Return a LaTeX-formatted string which is the
same as with ``format='latex'`` for |Angle| instances
- 'unicode': Return a string containing non-ASCII unicode
characters, such as the degree symbol
Returns
-------
strrepr : str or array
A string representation of the angle. If the angle is an array, this
will be an array with a unicode dtype.
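        Examples
        --------
        A brief sketch; both outputs are exact for this input::
            >>> from astropy.coordinates import Angle
            >>> import astropy.units as u
            >>> Angle(10.2345 * u.deg).to_string()
            '10d14m04.2s'
            >>> Angle(10.2345 * u.deg).to_string(decimal=True, precision=2)
            '10.23'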
"""
if unit is None:
unit = self.unit
else:
unit = self._convert_unit_to_angle_unit(u.Unit(unit))
separators = {
None: {
u.degree: 'dms',
u.hourangle: 'hms'},
'latex': {
u.degree: [r'^\circ', r'{}^\prime', r'{}^{\prime\prime}'],
u.hourangle: [r'^{\mathrm{h}}', r'^{\mathrm{m}}', r'^{\mathrm{s}}']},
'unicode': {
u.degree: '°′″',
u.hourangle: 'ʰᵐˢ'}
}
# 'latex_inline' provides no functionality beyond what 'latex' offers,
# but it should be implemented to avoid ValueErrors in user code.
separators['latex_inline'] = separators['latex']
if sep == 'fromunit':
if format not in separators:
raise ValueError(f"Unknown format '{format}'")
seps = separators[format]
if unit in seps:
sep = seps[unit]
# Create an iterator so we can format each element of what
# might be an array.
if unit is u.degree:
if decimal:
values = self.degree
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{:g}'.format
else:
if sep == 'fromunit':
sep = 'dms'
values = self.degree
func = lambda x: form.degrees_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit is u.hourangle:
if decimal:
values = self.hour
if precision is not None:
func = ("{0:0." + str(precision) + "f}").format
else:
func = '{:g}'.format
else:
if sep == 'fromunit':
sep = 'hms'
values = self.hour
func = lambda x: form.hours_to_string(
x, precision=precision, sep=sep, pad=pad,
fields=fields)
elif unit.is_equivalent(u.radian):
if decimal:
values = self.to_value(unit)
if precision is not None:
func = ("{0:1." + str(precision) + "f}").format
else:
func = "{:g}".format
elif sep == 'fromunit':
values = self.to_value(unit)
unit_string = unit.to_string(format=format)
if format == 'latex' or format == 'latex_inline':
unit_string = unit_string[1:-1]
if precision is not None:
def plain_unit_format(val):
return ("{0:0." + str(precision) + "f}{1}").format(
val, unit_string)
func = plain_unit_format
else:
def plain_unit_format(val):
return f"{val:g}{unit_string}"
func = plain_unit_format
else:
raise ValueError(
f"'{unit.name}' can not be represented in sexagesimal notation")
else:
raise u.UnitsError(
"The unit value provided is not an angular unit.")
def do_format(val):
# Check if value is not nan to avoid ValueErrors when turning it into
            # a sexagesimal string.
if not np.isnan(val):
s = func(float(val))
if alwayssign and not s.startswith('-'):
s = '+' + s
if format == 'latex' or format == 'latex_inline':
s = f'${s}$'
return s
s = f"{val}"
return s
format_ufunc = np.vectorize(do_format, otypes=['U'])
result = format_ufunc(values)
if result.ndim == 0:
result = result[()]
return result
def _wrap_at(self, wrap_angle):
"""
        Implementation that assumes ``wrap_angle`` is already validated
and that wrapping is inplace.
"""
# Convert the wrap angle and 360 degrees to the native unit of
# this Angle, then do all the math on raw Numpy arrays rather
# than Quantity objects for speed.
a360 = u.degree.to(self.unit, 360.0)
wrap_angle = wrap_angle.to_value(self.unit)
wrap_angle_floor = wrap_angle - a360
self_angle = self.view(np.ndarray)
# Do the wrapping, but only if any angles need to be wrapped
#
# This invalid catch block is needed both for the floor division
# and for the comparisons later on (latter not really needed
# any more for >= 1.19 (NUMPY_LT_1_19), but former is).
with np.errstate(invalid='ignore'):
wraps = (self_angle - wrap_angle_floor) // a360
valid = np.isfinite(wraps) & (wraps != 0)
if np.any(valid):
self_angle -= wraps * a360
# Rounding errors can cause problems.
self_angle[self_angle >= wrap_angle] -= a360
self_angle[self_angle < wrap_angle_floor] += a360
def wrap_at(self, wrap_angle, inplace=False):
"""
Wrap the `~astropy.coordinates.Angle` object at the given ``wrap_angle``.
This method forces all the angle values to be within a contiguous
360 degree range so that ``wrap_angle - 360d <= angle <
wrap_angle``. By default a new Angle object is returned, but if the
``inplace`` argument is `True` then the `~astropy.coordinates.Angle`
object is wrapped in place and nothing is returned.
For instance::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20.0, 150.0, 350.0] * u.deg)
>>> a.wrap_at(360 * u.deg).degree # Wrap into range 0 to 360 degrees # doctest: +FLOAT_CMP
array([340., 150., 350.])
>>> a.wrap_at('180d', inplace=True) # Wrap into range -180 to 180 degrees # doctest: +FLOAT_CMP
>>> a.degree # doctest: +FLOAT_CMP
array([-20., 150., -10.])
Parameters
----------
wrap_angle : angle-like
Specifies a single value for the wrap angle. This can be any
object that can initialize an `~astropy.coordinates.Angle` object,
e.g. ``'180d'``, ``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
inplace : bool
If `True` then wrap the object in place instead of returning
a new `~astropy.coordinates.Angle`
Returns
-------
out : Angle or None
If ``inplace is False`` (default), return new
`~astropy.coordinates.Angle` object with angles wrapped accordingly.
Otherwise wrap in place and return `None`.
"""
wrap_angle = Angle(wrap_angle, copy=False) # Convert to an Angle
if not inplace:
self = self.copy()
self._wrap_at(wrap_angle)
return None if inplace else self
def is_within_bounds(self, lower=None, upper=None):
"""
Check if all angle(s) satisfy ``lower <= angle < upper``
If ``lower`` is not specified (or `None`) then no lower bounds check is
performed. Likewise ``upper`` can be left unspecified. For example::
>>> from astropy.coordinates import Angle
>>> import astropy.units as u
>>> a = Angle([-20, 150, 350] * u.deg)
>>> a.is_within_bounds('0d', '360d')
False
>>> a.is_within_bounds(None, '360d')
True
>>> a.is_within_bounds(-30 * u.deg, None)
True
Parameters
----------
lower : angle-like or None
Specifies lower bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
upper : angle-like or None
Specifies upper bound for checking. This can be any object
that can initialize an `~astropy.coordinates.Angle` object, e.g. ``'180d'``,
``180 * u.deg``, or ``Angle(180, unit=u.deg)``.
Returns
-------
is_within_bounds : bool
`True` if all angles satisfy ``lower <= angle < upper``
"""
ok = True
if lower is not None:
ok &= np.all(Angle(lower) <= self)
if ok and upper is not None:
ok &= np.all(self < Angle(upper))
return bool(ok)
def _str_helper(self, format=None):
if self.isscalar:
return self.to_string(format=format)
def formatter(x):
return x.to_string(format=format)
return np.array2string(self, formatter={'all': formatter})
def __str__(self):
return self._str_helper()
def _repr_latex_(self):
return self._str_helper(format='latex')
def _no_angle_subclass(obj):
"""Return any Angle subclass objects as an Angle objects.
This is used to ensure that Latitude and Longitude change to Angle
objects when they are used in calculations (such as lon/2.)
"""
if isinstance(obj, tuple):
return tuple(_no_angle_subclass(_obj) for _obj in obj)
return obj.view(Angle) if isinstance(obj, (Latitude, Longitude)) else obj
class Latitude(Angle):
"""
Latitude-like angle(s) which must be in the range -90 to +90 deg.
A Latitude object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of being constrained
so that::
-90.0 * u.deg <= angle(s) <= +90.0 * u.deg
Any attempt to set a value outside that range will result in a
`ValueError`.
The input angle(s) can be specified either as an array, list,
scalar, tuple (see below), string,
:class:`~astropy.units.Quantity` or another
:class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : array, list, scalar, `~astropy.units.Quantity`, `~astropy.coordinates.Angle`
The angle value(s). If a tuple, will be interpreted as ``(h, m, s)``
or ``(d, m, s)`` depending on ``unit``. If a string, it will be
interpreted following the rules described for
:class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like, optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Longitude`.
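    Examples
    --------
    A brief sketch of the bounds checking; out-of-range values raise
    `ValueError`::
        >>> from astropy.coordinates import Latitude
        >>> import astropy.units as u
        >>> Latitude(45 * u.deg)
        <Latitude 45. deg>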
"""
def __new__(cls, angle, unit=None, **kwargs):
# Forbid creating a Lat from a Long.
if isinstance(angle, Longitude):
raise TypeError("A Latitude angle cannot be created from a Longitude angle")
self = super().__new__(cls, angle, unit=unit, **kwargs)
self._validate_angles()
return self
def _validate_angles(self, angles=None):
"""Check that angles are between -90 and 90 degrees.
        If not given, the check is done on the object itself."""
# Convert the lower and upper bounds to the "native" unit of
# this angle. This limits multiplication to two values,
# rather than the N values in `self.value`. Also, the
# comparison is performed on raw arrays, rather than Quantity
# objects, for speed.
if angles is None:
angles = self
if angles.unit is u.deg:
limit = 90
elif angles.unit is u.rad:
limit = 0.5 * np.pi
else:
limit = u.degree.to(angles.unit, 90.0)
# This invalid catch block can be removed when the minimum numpy
# version is >= 1.19 (NUMPY_LT_1_19)
with np.errstate(invalid='ignore'):
invalid_angles = (np.any(angles.value < -limit) or
np.any(angles.value > limit))
if invalid_angles:
raise ValueError('Latitude angle(s) must be within -90 deg <= angle <= 90 deg, '
'got {}'.format(angles.to(u.degree)))
def __setitem__(self, item, value):
# Forbid assigning a Long to a Lat.
if isinstance(value, Longitude):
raise TypeError("A Longitude angle cannot be assigned to a Latitude angle")
# first check bounds
if value is not np.ma.masked:
self._validate_angles(value)
super().__setitem__(item, value)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
class LongitudeInfo(u.QuantityInfo):
_represent_as_dict_attrs = u.QuantityInfo._represent_as_dict_attrs + ('wrap_angle',)
class Longitude(Angle):
"""
Longitude-like angle(s) which are wrapped within a contiguous 360 degree range.
A ``Longitude`` object is distinguished from a pure
:class:`~astropy.coordinates.Angle` by virtue of a ``wrap_angle``
property. The ``wrap_angle`` specifies that all angle values
represented by the object will be in the range::
wrap_angle - 360 * u.deg <= angle(s) < wrap_angle
The default ``wrap_angle`` is 360 deg. Setting ``wrap_angle=180 *
u.deg`` would instead result in values between -180 and +180 deg.
Setting the ``wrap_angle`` attribute of an existing ``Longitude``
object will result in re-wrapping the angle values in-place.
The input angle(s) can be specified either as an array, list,
scalar, tuple, string, :class:`~astropy.units.Quantity`
or another :class:`~astropy.coordinates.Angle`.
The input parser is flexible and supports all of the input formats
supported by :class:`~astropy.coordinates.Angle`.
Parameters
----------
angle : tuple or angle-like
        The angle value(s). If a tuple, will be interpreted as ``(h, m, s)`` or
``(d, m, s)`` depending on ``unit``. If a string, it will be interpreted
following the rules described for :class:`~astropy.coordinates.Angle`.
If ``angle`` is a sequence or array of strings, the resulting
values will be in the given ``unit``, or if `None` is provided,
the unit will be taken from the first given value.
unit : unit-like ['angle'], optional
The unit of the value specified for the angle. This may be
any string that `~astropy.units.Unit` understands, but it is
better to give an actual unit object. Must be an angular
unit.
wrap_angle : angle-like or None, optional
Angle at which to wrap back to ``wrap_angle - 360 deg``.
If ``None`` (default), it will be taken to be 360 deg unless ``angle``
has a ``wrap_angle`` attribute already (i.e., is a ``Longitude``),
in which case it will be taken from there.
Raises
------
`~astropy.units.UnitsError`
If a unit is not provided or it is not an angular unit.
`TypeError`
If the angle parameter is an instance of :class:`~astropy.coordinates.Latitude`.
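    Examples
    --------
    A brief sketch of the wrapping behavior::
        >>> from astropy.coordinates import Longitude
        >>> import astropy.units as u
        >>> Longitude(370 * u.deg)
        <Longitude 10. deg>
        >>> Longitude(-10 * u.deg, wrap_angle=180 * u.deg)
        <Longitude -10. deg>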
"""
_wrap_angle = None
_default_wrap_angle = Angle(360 * u.deg)
info = LongitudeInfo()
def __new__(cls, angle, unit=None, wrap_angle=None, **kwargs):
# Forbid creating a Long from a Lat.
if isinstance(angle, Latitude):
raise TypeError("A Longitude angle cannot be created from "
"a Latitude angle.")
self = super().__new__(cls, angle, unit=unit, **kwargs)
if wrap_angle is None:
wrap_angle = getattr(angle, 'wrap_angle', self._default_wrap_angle)
self.wrap_angle = wrap_angle # angle-like b/c property setter
return self
def __setitem__(self, item, value):
# Forbid assigning a Lat to a Long.
if isinstance(value, Latitude):
raise TypeError("A Latitude angle cannot be assigned to a Longitude angle")
super().__setitem__(item, value)
self._wrap_at(self.wrap_angle)
@property
def wrap_angle(self):
return self._wrap_angle
@wrap_angle.setter
def wrap_angle(self, value):
self._wrap_angle = Angle(value, copy=False)
self._wrap_at(self.wrap_angle)
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._wrap_angle = getattr(obj, '_wrap_angle',
self._default_wrap_angle)
# Any calculation should drop to Angle
def __array_ufunc__(self, *args, **kwargs):
results = super().__array_ufunc__(*args, **kwargs)
return _no_angle_subclass(results)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Dependencies
import numpy as np
# Project
from astropy import units as u
from astropy.utils import ShapedLikeNDArray
__all__ = ['Attribute', 'TimeAttribute', 'QuantityAttribute',
'EarthLocationAttribute', 'CoordinateAttribute',
'CartesianRepresentationAttribute',
'DifferentialAttribute']
class Attribute:
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeAttribute(default=_EQUINOX_B1950)
obstime = TimeAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
    and use the ``TimeAttribute`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
name = '<unbound>'
def __init__(self, default=None, secondary_attribute=''):
self.default = default
self.secondary_attribute = secondary_attribute
super().__init__()
def __set_name__(self, owner, name):
self.name = name
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value : object
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if instance is None:
out = self.default
else:
out = getattr(instance, '_' + self.name, self.default)
if out is None:
out = getattr(instance, self.secondary_attribute, self.default)
out, converted = self.convert_input(out)
if instance is not None:
instance_shape = getattr(instance, 'shape', None) # None if instance (frame) has no data!
if instance_shape is not None and (getattr(out, 'shape', ()) and
out.shape != instance_shape):
# If the shapes do not match, try broadcasting.
try:
if isinstance(out, ShapedLikeNDArray):
out = out._apply(np.broadcast_to, shape=instance_shape,
subok=True)
else:
out = np.broadcast_to(out, instance_shape, subok=True)
except ValueError:
# raise more informative exception.
raise ValueError(
"attribute {} should be scalar or have shape {}, "
"but is has shape {} and could not be broadcast."
.format(self.name, instance_shape, out.shape))
converted = True
if converted:
setattr(instance, '_' + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError('Cannot set frame attribute')
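# A minimal usage sketch (``MyFrame`` is hypothetical, for illustration only):
#
#     class MyFrame(BaseCoordinateFrame):
#         obstime = TimeAttribute(default=None)
#
# ``MyFrame(obstime='2020-01-01').obstime`` would then return the validated
# ``Time`` object produced by ``TimeAttribute.convert_input``.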
class TimeAttribute(Attribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Convert input value to a Time object and validate by running through
        the Time constructor. Array values are also set read-only.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(
f'Invalid time input {self.name}={value!r}.') from err
converted = True
# Set attribute as read-only for arrays (not allowed by numpy
# for array scalars)
if out.shape:
out.writeable = False
return out, converted
class CartesianRepresentationAttribute(Attribute):
"""
A frame attribute that is a CartesianRepresentation with specified units.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit-like or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
"""
def __init__(self, default=None, secondary_attribute='', unit=None):
super().__init__(default, secondary_attribute)
self.unit = unit
def convert_input(self, value):
"""
Checks that the input is a CartesianRepresentation with the correct
unit, or the special value ``[0, 0, 0]``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out : object
The correctly-typed object.
converted : boolean
A boolean which indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if (isinstance(value, list) and len(value) == 3 and
all(v == 0 for v in value) and self.unit is not None):
return CartesianRepresentation(np.zeros(3) * self.unit), True
else:
# is it a CartesianRepresentation with correct unit?
if hasattr(value, 'xyz') and value.xyz.unit == self.unit:
return value, False
converted = True
# if it's a CartesianRepresentation, get the xyz Quantity
value = getattr(value, 'xyz', value)
if not hasattr(value, 'unit'):
raise TypeError('tried to set a {} with something that does '
'not have a unit.'
.format(self.__class__.__name__))
value = value.to(self.unit)
# now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False)
return cartrep, converted
class QuantityAttribute(Attribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Can be `None`, which should be used for special cases in associated
frame transformations like "this quantity should be ignored" or similar.
Parameters
----------
default : number or `~astropy.units.Quantity` or None, optional
Default value for the attribute if the user does not supply one. If a
Quantity, it must be consistent with ``unit``, or if a value, ``unit``
cannot be None.
secondary_attribute : str, optional
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit-like or None, optional
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None, optional
If given, specifies the shape the attribute must be
"""
def __init__(self, default=None, secondary_attribute='', unit=None,
shape=None):
if default is None and unit is None:
raise ValueError('Either a default quantity value must be '
'provided, or a unit must be provided to define a '
'QuantityAttribute.')
if default is not None and unit is None:
unit = default.unit
self.unit = unit
self.shape = shape
default = self.convert_input(default)[0]
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if (not hasattr(value, 'unit') and self.unit != u.dimensionless_unscaled
and np.any(value != 0)):
raise TypeError('Tried to set a QuantityAttribute with '
'something that does not have a unit.')
oldvalue = value
value = u.Quantity(oldvalue, self.unit, copy=False)
if self.shape is not None and value.shape != self.shape:
if value.shape == () and oldvalue == 0:
# Allow a single 0 to fill whatever shape is needed.
value = np.broadcast_to(value, self.shape, subok=True)
else:
raise ValueError(
f'The provided value has shape "{value.shape}", but '
f'should have shape "{self.shape}"')
converted = oldvalue is not value
return value, converted
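# Sketch of typical use (hypothetical frame and attribute names):
#
#     class MyFrame(BaseCoordinateFrame):
#         rotation = QuantityAttribute(default=0 * u.deg, unit=u.deg)
#
# Plain zeros are accepted and broadcast to ``shape`` if given; any other
# unitless input makes ``convert_input`` raise a TypeError.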
class EarthLocationAttribute(Attribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
        Checks that the input is an `~astropy.coordinates.EarthLocation`, or
        something that can be transformed to the `~astropy.coordinates.ITRS`
        frame (or the special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, EarthLocation):
return value, False
else:
# we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, 'transform_to'):
raise ValueError('"{}" was passed into an '
'EarthLocationAttribute, but it does not have '
'"transform_to" method'.format(value))
itrsobj = value.transform_to(ITRS())
return itrsobj.earth_location, True
class CoordinateAttribute(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
`~astropy.coordinates.SkyCoord` or a low-level frame instance. If a
low-level frame instance is provided, it will always be upgraded to be a
`~astropy.coordinates.SkyCoord` to ensure consistent transformation
behavior. The coordinate object will always be returned as a low-level
frame instance when accessed.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, frame, default=None, secondary_attribute=''):
self._frame = frame
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
        Checks that the input is a SkyCoord or frame instance of the expected
        frame class (or the special value ``None``), transforming it if needed.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.coordinates import SkyCoord
if value is None:
return None, False
elif isinstance(value, SkyCoord) and isinstance(value.frame, self._frame):
return value.frame, True
elif isinstance(value, self._frame):
return value, False
else:
value = SkyCoord(value) # always make the value a SkyCoord
transformedobj = value.transform_to(self._frame)
return transformedobj.frame, True
class DifferentialAttribute(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, default=None, allowed_classes=None,
secondary_attribute=''):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
self.allowed_classes = BaseDifferential
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if not isinstance(value, self.allowed_classes):
if len(self.allowed_classes) == 1:
value = self.allowed_classes[0](value)
else:
raise TypeError('Tried to set a DifferentialAttribute with '
'an unsupported Differential type {}. Allowed '
'classes are: {}'
.format(value.__class__,
self.allowed_classes))
return value, True
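# Sketch of typical use (hypothetical names; the default value is elided):
#
#     class MyFrame(BaseCoordinateFrame):
#         v_sun = DifferentialAttribute(
#             default=..., allowed_classes=[CartesianDifferential])
#
# Inputs are then coerced to the single allowed differential class.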
# do this here to prevent a series of complicated circular imports
from .earth import EarthLocation
from .representation import CartesianRepresentation, BaseDifferential
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Cosmological units and equivalencies.
""" # (newline needed for unit summary)
import astropy.units as u
from astropy.units.utils import generate_unit_summary as _generate_unit_summary
__all__ = ["littleh", "redshift",
# redshift equivalencies
"dimensionless_redshift", "with_redshift",
"redshift_distance", "redshift_hubble", "redshift_temperature",
# other equivalencies
"with_H0"]
__doctest_requires__ = {('with_redshift', 'redshift_distance'): ['scipy']}
_ns = globals()
###############################################################################
# Cosmological Units
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit.
redshift = u.def_unit(['redshift'], prefixes=False, namespace=_ns,
doc="Cosmological redshift.", format={'latex': r''})
u.def_physical_type(redshift, "redshift")
# This is not formally a unit, but is used in that way in many contexts, and
# an appropriate equivalency is only possible if it's treated as a unit (see
# https://arxiv.org/pdf/1308.4150.pdf for more)
# Also note that h or h100 or h_100 would be a better name, but they either
# conflict or have numbers in them, which is disallowed
littleh = u.def_unit(['littleh'], namespace=_ns, prefixes=False,
doc='Reduced/"dimensionless" Hubble constant',
format={'latex': r'h_{100}'})
###############################################################################
# Equivalencies
def dimensionless_redshift():
"""Allow redshift to be 1-to-1 equivalent to dimensionless.
It is special compared to other equivalency pairs in that it
allows this independent of the power to which the redshift is raised,
and independent of whether it is part of a more complicated unit.
It is similar to u.dimensionless_angles() in this respect.
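    For example (a short sketch)::
        >>> import astropy.units as u
        >>> import astropy.cosmology.units as cu
        >>> (3 * cu.redshift).to(u.dimensionless_unscaled, cu.dimensionless_redshift())
        <Quantity 3.>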
"""
return u.Equivalency([(redshift, None)], "dimensionless_redshift")
def redshift_distance(cosmology=None, kind="comoving", **atzkw):
"""Convert quantities between redshift and distance.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
kind : {'comoving', 'lookback', 'luminosity'} or None, optional
The distance type for the Equivalency.
Note this does NOT include the angular diameter distance as this
distance measure is not monotonic.
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
        Equivalency between redshift and distance.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> z.to(u.Mpc, cu.redshift_distance(WMAP9, kind="comoving")) # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
allowed_kinds = ('comoving', 'lookback', 'luminosity')
if kind not in allowed_kinds:
raise ValueError(f"`kind` is not one of {allowed_kinds}")
method = getattr(cosmology, kind + "_distance")
def z_to_distance(z):
"""Redshift to distance."""
return method(z)
def distance_to_z(d):
"""Distance to redshift."""
return z_at_value(method, d << u.Mpc, **atzkw)
return u.Equivalency([(redshift, u.Mpc, z_to_distance, distance_to_z)],
"redshift_distance",
{'cosmology': cosmology, "distance": kind})
def redshift_hubble(cosmology=None, **atzkw):
"""Convert quantities between redshift and Hubble parameter and little-h.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and Hubble parameter and little-h unit.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> equivalency = cu.redshift_hubble(WMAP9) # construct equivalency
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
def z_to_hubble(z):
"""Redshift to Hubble parameter."""
return cosmology.H(z)
def hubble_to_z(H):
"""Hubble parameter to redshift."""
return z_at_value(cosmology.H, H << (u.km / u.s / u.Mpc), **atzkw)
def z_to_littleh(z):
"""Redshift to :math:`h`-unit Quantity."""
return z_to_hubble(z).to_value(u.km / u.s / u.Mpc) / 100 * littleh
def littleh_to_z(h):
""":math:`h`-unit Quantity to redshift."""
return hubble_to_z(h * 100)
return u.Equivalency([(redshift, u.km / u.s / u.Mpc, z_to_hubble, hubble_to_z),
(redshift, littleh, z_to_littleh, littleh_to_z)],
"redshift_hubble",
{'cosmology': cosmology})
def redshift_temperature(cosmology=None, **atzkw):
"""Convert quantities between redshift and CMB temperature.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If None, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
**atzkw
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
Equivalency between redshift and temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> z = 1100 * cu.redshift
>>> z.to(u.K, cu.redshift_temperature(WMAP9))
<Quantity 3000.225 K>
"""
from astropy.cosmology import default_cosmology, z_at_value
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
def z_to_Tcmb(z):
return cosmology.Tcmb(z)
def Tcmb_to_z(T):
return z_at_value(cosmology.Tcmb, T << u.K, **atzkw)
return u.Equivalency([(redshift, u.K, z_to_Tcmb, Tcmb_to_z)],
"redshift_temperature",
{'cosmology': cosmology})
def with_redshift(cosmology=None, *,
distance="comoving", hubble=True, Tcmb=True,
atzkw=None):
"""Convert quantities between measures of cosmological distance.
Note: by default all equivalencies are on and must be explicitly turned off.
Care should be taken to not misinterpret a relativistic, gravitational, etc
redshift as a cosmological one.
Parameters
----------
cosmology : `~astropy.cosmology.Cosmology`, str, or None, optional
A cosmology realization or built-in cosmology's name (e.g. 'Planck18').
If `None`, will use the default cosmology
(controlled by :class:`~astropy.cosmology.default_cosmology`).
distance : {'comoving', 'lookback', 'luminosity'} or None (optional, keyword-only)
The type of distance equivalency to create or `None`.
Default is 'comoving'.
hubble : bool (optional, keyword-only)
Whether to create a Hubble parameter <-> redshift equivalency, using
``Cosmology.H``. Default is `True`.
Tcmb : bool (optional, keyword-only)
Whether to create a CMB temperature <-> redshift equivalency, using
``Cosmology.Tcmb``. Default is `True`.
atzkw : dict or None (optional, keyword-only)
keyword arguments for :func:`~astropy.cosmology.z_at_value`
Returns
-------
`~astropy.units.equivalencies.Equivalency`
With equivalencies between redshift and distance / Hubble / temperature.
Examples
--------
>>> import astropy.units as u
>>> import astropy.cosmology.units as cu
>>> from astropy.cosmology import WMAP9
>>> equivalency = cu.with_redshift(WMAP9)
>>> z = 1100 * cu.redshift
Redshift to (comoving) distance:
>>> z.to(u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 14004.03157418 Mpc>
Redshift to the Hubble parameter:
>>> z.to(u.km / u.s / u.Mpc, equivalency) # doctest: +FLOAT_CMP
<Quantity 1565637.40154275 km / (Mpc s)>
>>> z.to(cu.littleh, equivalency) # doctest: +FLOAT_CMP
<Quantity 15656.37401543 littleh>
Redshift to CMB temperature:
>>> z.to(u.K, equivalency)
<Quantity 3000.225 K>
"""
from astropy.cosmology import default_cosmology
# get cosmology: None -> default and process str / class
cosmology = cosmology if cosmology is not None else default_cosmology.get()
with default_cosmology.set(cosmology): # if already cosmo, passes through
cosmology = default_cosmology.get()
atzkw = atzkw if atzkw is not None else {}
equivs = [] # will append as built
# Hubble <-> Redshift
if hubble:
equivs.extend(redshift_hubble(cosmology, **atzkw))
# CMB Temperature <-> Redshift
if Tcmb:
equivs.extend(redshift_temperature(cosmology, **atzkw))
# Distance <-> Redshift, but need to choose which distance
if distance is not None:
equivs.extend(redshift_distance(cosmology, kind=distance, **atzkw))
# -----------
return u.Equivalency(equivs, "with_redshift",
{'cosmology': cosmology,
'distance': distance, 'hubble': hubble, 'Tcmb': Tcmb})
# ===================================================================
def with_H0(H0=None):
"""
Convert between quantities with little-h and the equivalent physical units.
Parameters
----------
H0 : None or `~astropy.units.Quantity` ['frequency']
The value of the Hubble constant to assume. If a
`~astropy.units.Quantity`, will assume the quantity *is* ``H0``. If
`None` (default), use the ``H0`` attribute from
:mod:`~astropy.cosmology.default_cosmology`.
References
----------
For an illuminating discussion on why you may or may not want to use
little-h at all, see https://arxiv.org/pdf/1308.4150.pdf
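    Examples
    --------
    A small sketch, assuming a Hubble constant of 70 km/s/Mpc::
        >>> import astropy.units as u
        >>> import astropy.cosmology.units as cu
        >>> H0 = 70 * (u.km / u.s / u.Mpc)
        >>> (100 * u.Mpc / cu.littleh).to(u.Mpc, cu.with_H0(H0))  # doctest: +FLOAT_CMP
        <Quantity 142.85714286 Mpc>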
"""
if H0 is None:
from .realizations import default_cosmology
H0 = default_cosmology.get().H0
h100_val_unit = u.Unit(100 / (H0.to_value((u.km / u.s) / u.Mpc)) * littleh)
return u.Equivalency([(h100_val_unit, None)], "with_H0", kwargs={"H0": H0})
# ===================================================================
# Enable the set of default equivalencies.
# If the cosmology package is imported, this is added to the list astropy-wide.
u.add_enabled_equivalencies(dimensionless_redshift())
# =============================================================================
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
if __doc__ is not None:
__doc__ += _generate_unit_summary(_ns)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from astropy.visualization.mpl_normalize import simple_norm
from astropy import log
from astropy.io.fits import getdata
__all__ = ['fits2bitmap', 'main']
def fits2bitmap(filename, ext=0, out_fn=None, stretch='linear',
power=1.0, asinh_a=0.1, min_cut=None, max_cut=None,
min_percent=None, max_percent=None, percent=None,
cmap='Greys_r'):
"""
Create a bitmap file from a FITS image, applying a stretching
transform between minimum and maximum cut levels and a matplotlib
colormap.
Parameters
----------
filename : str
The filename of the FITS file.
ext : int
FITS extension name or number of the image to convert. The
default is 0.
out_fn : str
The filename of the output bitmap image. The type of bitmap
is determined by the filename extension (e.g. '.jpg', '.png').
The default is a PNG file with the same name as the FITS file.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}
The stretching function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
min_cut : float, optional
The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the image.
The default is the image minimum. ``min_cut`` overrides
``min_percent``.
max_cut : float, optional
The pixel value of the maximum cut level. Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching the
image. The default is the image maximum. ``max_cut`` overrides
``max_percent``.
min_percent : float, optional
The percentile value used to determine the pixel value of
minimum cut level. The default is 0.0. ``min_percent``
overrides ``percent``.
max_percent : float, optional
The percentile value used to determine the pixel value of
maximum cut level. The default is 100.0. ``max_percent``
overrides ``percent``.
percent : float, optional
The percentage of the image values used to determine the pixel
values of the minimum and maximum cut levels. The lower cut
level will set at the ``(100 - percent) / 2`` percentile, while
the upper cut level will be set at the ``(100 + percent) / 2``
percentile. The default is 100.0. ``percent`` is ignored if
either ``min_percent`` or ``max_percent`` is input.
cmap : str
The matplotlib color map name. The default is 'Greys_r'.
"""
import matplotlib
import matplotlib.image as mimg
from astropy.utils.introspection import minversion
# __main__ gives ext as a string
try:
ext = int(ext)
except ValueError:
pass
try:
image = getdata(filename, ext)
except Exception as e:
log.critical(e)
return 1
if image.ndim != 2:
        log.critical(f'data in FITS extension {ext} is not a 2D array')
        return 1
if out_fn is None:
out_fn = os.path.splitext(filename)[0]
if out_fn.endswith('.fits'):
out_fn = os.path.splitext(out_fn)[0]
out_fn += '.png'
# explicitly define the output format
out_format = os.path.splitext(out_fn)[1][1:]
try:
if minversion(matplotlib, '3.5'):
matplotlib.colormaps[cmap]
else:
from matplotlib import cm
cm.get_cmap(cmap)
except (ValueError, KeyError):
log.critical(f'{cmap} is not a valid matplotlib colormap name.')
return 1
norm = simple_norm(image, stretch=stretch, power=power, asinh_a=asinh_a,
min_cut=min_cut, max_cut=max_cut,
min_percent=min_percent, max_percent=max_percent,
percent=percent)
mimg.imsave(out_fn, norm(image), cmap=cmap, origin='lower',
format=out_format)
log.info(f'Saved file to {out_fn}.')
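# Example invocation (hypothetical filename, shown for illustration):
#
#     fits2bitmap('image.fits', stretch='log', percent=99.0)
#
# would write 'image.png' using a log stretch between the 0.5 and 99.5
# percentile cut levels.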
def main(args=None):
import argparse
parser = argparse.ArgumentParser(
description='Create a bitmap file from a FITS image.')
parser.add_argument('-e', '--ext', metavar='hdu', default=0,
help='Specify the HDU extension number or name '
'(Default is 0).')
parser.add_argument('-o', metavar='filename', type=str, default=None,
help='Filename for the output image (Default is a '
'PNG file with the same name as the FITS file).')
parser.add_argument('--stretch', type=str, default='linear',
help='Type of image stretching ("linear", "sqrt", '
'"power", "log", or "asinh") (Default is "linear").')
parser.add_argument('--power', type=float, default=1.0,
help='Power index for "power" stretching (Default is '
'1.0).')
parser.add_argument('--asinh_a', type=float, default=0.1,
help='The value in normalized image where the asinh '
'curve transitions from linear to logarithmic '
'behavior (used only for "asinh" stretch) '
'(Default is 0.1).')
parser.add_argument('--min_cut', type=float, default=None,
help='The pixel value of the minimum cut level '
'(Default is the image minimum).')
parser.add_argument('--max_cut', type=float, default=None,
help='The pixel value of the maximum cut level '
'(Default is the image maximum).')
parser.add_argument('--min_percent', type=float, default=None,
help='The percentile value used to determine the '
'minimum cut level (Default is 0).')
parser.add_argument('--max_percent', type=float, default=None,
help='The percentile value used to determine the '
'maximum cut level (Default is 100).')
parser.add_argument('--percent', type=float, default=None,
help='The percentage of the image values used to '
'determine the pixel values of the minimum and '
'maximum cut levels (Default is 100).')
parser.add_argument('--cmap', metavar='colormap_name', type=str,
default='Greys_r', help='matplotlib color map name '
'(Default is "Greys_r").')
parser.add_argument('filename', nargs='+',
help='Path to one or more FITS files to convert')
args = parser.parse_args(args)
for filename in args.filename:
fits2bitmap(filename, ext=args.ext, out_fn=args.o,
stretch=args.stretch, min_cut=args.min_cut,
max_cut=args.max_cut, min_percent=args.min_percent,
max_percent=args.max_percent, percent=args.percent,
power=args.power, asinh_a=args.asinh_a, cmap=args.cmap)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import matplotlib.pyplot as plt
from matplotlib.backend_bases import KeyEvent
import numpy as np
import astropy.units as u
from astropy.coordinates import FK5, SkyCoord
from astropy.time import Time
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.wcs import WCS
from astropy.coordinates import galactocentric_frame_defaults
from .test_images import BaseImageTests
class TestDisplayWorldCoordinate(BaseImageTests):
def teardown_method(self, method):
plt.close('all')
def test_overlay_coords(self, ignore_matplotlibrc, tmpdir):
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs)
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test1.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '0\xb029\'45" -0\xb029\'20" (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.callbacks.process('key_press_event', event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
event3 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.callbacks.process('key_press_event', event3)
# Test that it still displays world coords when there are no overlay coords
string_world2 = ax._display_world_coords(0.523412, 0.518311)
assert string_world2 == '0\xb029\'45" -0\xb029\'20" (world)'
overlay = ax.get_coords_overlay('fk5')
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test2.png').strpath)
event4 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.callbacks.process('key_press_event', event4)
# Test that it displays the overlay world coordinates
string_world3 = ax._display_world_coords(0.523412, 0.518311)
assert string_world3 == '267.176\xb0 -28\xb045\'56" (world, overlay 1)'
overlay = ax.get_coords_overlay(FK5())
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test3.png').strpath)
event5 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.callbacks.process('key_press_event', event5)
# Test that it displays the overlay world coordinates
string_world4 = ax._display_world_coords(0.523412, 0.518311)
assert string_world4 == '267.176\xb0 -28\xb045\'56" (world, overlay 2)'
overlay = ax.get_coords_overlay(FK5(equinox=Time("J2030")))
# Regression test for bug that caused format to always be taken from
# main world coordinates.
overlay[0].set_major_formatter('d.ddd')
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test4.png').strpath)
event6 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.callbacks.process('key_press_event', event6)
# Test that it displays the overlay world coordinates
string_world5 = ax._display_world_coords(0.523412, 0.518311)
assert string_world5 == '267.652\xb0 -28\xb046\'23" (world, overlay 3)'
def test_cube_coords(self, ignore_matplotlibrc, tmpdir):
wcs = WCS(self.cube_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=('y', 50, 'x'))
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '3h26m52.0s 30\xb037\'17\" 2563 (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.callbacks.process('key_press_event', event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
def test_cube_coords_uncorr_slicing(self, ignore_matplotlibrc, tmpdir):
# Regression test for a bug that occurred with coordinate formatting if
# some dimensions were uncorrelated and sliced out.
wcs = WCS(self.cube_header)
fig = plt.figure(figsize=(4, 4))
canvas = fig.canvas
ax = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=wcs, slices=('x', 'y', 2))
fig.add_axes(ax)
# On some systems, fig.canvas.draw is not enough to force a draw, so we
# save to a temporary file.
fig.savefig(tmpdir.join('test.png').strpath)
# Testing default displayed world coordinates
string_world = ax._display_world_coords(0.523412, 0.518311)
assert string_world == '3h26m56.6s 30\xb018\'19\" (world)'
# Test pixel coordinates
event1 = KeyEvent('test_pixel_coords', canvas, 'w')
fig.canvas.callbacks.process('key_press_event', event1)
string_pixel = ax._display_world_coords(0.523412, 0.523412)
assert string_pixel == "0.523412 0.523412 (pixel)"
def test_plot_coord_3d_transform(self):
wcs = WCS(self.msx_header)
with galactocentric_frame_defaults.set('latest'):
coord = SkyCoord(0 * u.kpc, 0 * u.kpc, 0 * u.kpc, frame='galactocentric')
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=wcs)
point, = ax.plot_coord(coord, 'ro')
np.testing.assert_allclose(point.get_xydata()[0], [0, 0], atol=1e-4)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import copy
import functools
import datetime
from copy import deepcopy
from decimal import Decimal, localcontext
from io import StringIO
import numpy as np
import pytest
from numpy.testing import assert_allclose
import erfa
from erfa import ErfaWarning
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils import isiterable, iers
from astropy.time import (Time, TimeDelta, ScaleValueError, STANDARD_TIME_SCALES,
TimeString, TimezoneInfo, TIME_FORMATS)
from astropy.coordinates import EarthLocation
from astropy import units as u
from astropy.table import Column, Table
from astropy.utils.compat.optional_deps import HAS_PYTZ, HAS_H5PY # noqa
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps * 24 * 3600)
allclose_year = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=0.) # 14 microsec at current epoch
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic:
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t = Time(times, format='iso', scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5 + 1.4288980208333335e-06,
-0.50000000e+00]))
# Set scale to TAI
t = t.tai
assert (repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5 + 0.00037179926839122024,
-0.5 + 0.00039351851851851852]))
# Get a new ``Time`` object which is referenced to the TT scale
        # (internal jd1 and jd2 are now with respect to the TT scale)
assert (repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>")
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
        # array, depending on whether the input was a scalar or array.
assert allclose_sec(t.cxcsec, np.array([31536064.307456788, 378691266.18400002]))
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format='jd')
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000., 2450010.)
t2 = Time(val, format='jd')
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.
t3 = Time(val, val2, format='jd')
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.) / 10.).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format='jd')
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize('format_', Time.FORMATS)
def test_empty_value(self, format_):
t = Time([], format=format_)
assert t.size == 0
assert t.shape == (0,)
assert t.format == format_
t_value = t.value
assert t_value.size == 0
assert t_value.shape == (0,)
t2 = Time(t_value, format=format_)
assert t2.size == 0
assert t2.shape == (0,)
assert t2.format == format_
t3 = t2.tai
assert t3.size == 0
assert t3.shape == (0,)
assert t3.format == format_
assert t3.scale == 'tai'
@pytest.mark.parametrize('value', [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format='jd', scale='utc')
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format='iso', scale='tai', precision=1)
assert t2.value == '2010-01-01 00:00:34.0'
t2 = Time(t, format='iso', scale='tai', out_subfmt='date')
assert t2.value == '2010-01-01'
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format='mjd', scale='utc', location=('45d', '50d'))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format='mjd', scale='utc')
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.arange(len(mjd)), np.arange(len(mjd))))
t5a = t4[3]
assert t5a.location == t4.location[3]
assert t5a.location.shape == ()
t5b = t4[3:4]
assert t5b.location.shape == (1,)
# Check that indexing a size-1 array returns a scalar location as well;
# see gh-10113.
t5c = t5b[0]
assert t5c.location.shape == ()
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
# (via ndarray, since quantity setter problematic for structured array)
allzeros = np.array((0., 0., 0.), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0., 0.999, 0.2)
t7 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=(np.arange(len(frac)), np.arange(len(frac))))
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == '2010-01-01 00:00:00.000'
assert t.tt.iso == '2010-01-01 00:01:06.184'
assert t.tai.fits == '2010-01-01T00:00:34.000'
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == '2010-01-01T00:01:06.910'
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
# Uses initial class-defined precision=3
assert t.iso == '2010-01-01 00:00:00.000'
# Set instance precision to 9
t.precision = 9
assert t.iso == '2010-01-01 00:00:00.000000000'
assert t.tai.utc.iso == '2010-01-01 00:00:00.000000000'
def test_precision_input(self):
"""Verifies that precision can only be 0-9 (inclusive). Any other
value should raise a ValueError exception."""
err_message = 'precision attribute must be an int'
with pytest.raises(ValueError, match=err_message):
t = Time('2010-01-01 00:00:00', format='iso', scale='utc',
precision=10)
with pytest.raises(ValueError, match=err_message):
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.precision = -1
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=7, location=(lon, lat))
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.5000000'
assert t.ut1.iso == '2006-01-15 21:24:37.8341000'
assert t.tai.iso == '2006-01-15 21:25:10.5000000'
assert t.tt.iso == '2006-01-15 21:25:42.6840000'
assert t.tcg.iso == '2006-01-15 21:25:43.3226905'
assert t.tdb.iso == '2006-01-15 21:25:42.6843728'
assert t.tcb.iso == '2006-01-15 21:25:56.8939523'
def test_transforms_no_location(self):
"""Location should default to geocenter (relevant for TDB, TCB)."""
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=7)
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.5000000'
assert t.ut1.iso == '2006-01-15 21:24:37.8341000'
assert t.tai.iso == '2006-01-15 21:25:10.5000000'
assert t.tt.iso == '2006-01-15 21:25:42.6840000'
assert t.tcg.iso == '2006-01-15 21:25:43.3226905'
assert t.tdb.iso == '2006-01-15 21:25:42.6843725'
assert t.tcb.iso == '2006-01-15 21:25:56.8939519'
# Check we get the same result with an explicit geocenter location
t2 = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
location=(0*u.m, 0*u.m, 0*u.m))
assert t == t2
assert t.tdb == t2.tdb
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(lon, lat))
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=location)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(location.x, location.y, location.z))
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'] * 2, format='iso', scale='utc',
precision=6, location=(lon, lat))
assert np.all(t.utc.iso == '2006-01-15 21:24:37.500000')
assert np.all(t.tdb.iso[0] == '2006-01-15 21:25:42.684373')
t2 = Time(['2006-01-15 21:24:37.5'] * 2, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
assert np.all(t2.utc.iso == '2006-01-15 21:24:37.500000')
assert t2.tdb.iso[0] == '2006-01-15 21:25:42.684373'
assert t2.tdb.iso[1] != '2006-01-15 21:25:42.684373'
with pytest.raises(ValueError): # 1 time, but two locations
Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
with pytest.raises(ValueError): # 3 times, but two locations
Time(['2006-01-15 21:24:37.5'] * 3, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
# multidimensional
mjd = np.arange(50000., 50008.).reshape(4, 2)
t3 = Time(mjd, format='mjd', scale='utc', location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.array([lon, 0]), np.array([lat, 0])))
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(mjd, format='mjd', scale='utc',
location=(np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]])))
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales."""
lat = 19.48125
lon = -155.933222
with iers.conf.set_temp('auto_download', False):
for scale1 in STANDARD_TIME_SCALES:
t1 = Time('2006-01-15 21:24:37.5', format='iso', scale=scale1,
location=(lon, lat))
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = 'local'
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format='decimalyear')
Time(100.0, format='cxcsec')
Time(100.0, format='unix')
Time(100.0, format='gps')
Time(1950.0, format='byear', scale='tai')
Time(2000.0, format='jyear', scale='tai')
Time('B1950.0', format='byear_str', scale='tai')
Time('J2000.0', format='jyear_str', scale='tai')
Time('2000-01-01 12:23:34.0', format='iso', scale='tai')
Time('2000-01-01 12:23:34.0Z', format='iso', scale='utc')
Time('2000-01-01T12:23:34.0', format='isot', scale='tai')
Time('2000-01-01T12:23:34.0Z', format='isot', scale='utc')
Time('2000-01-01T12:23:34.0', format='fits')
Time('2000-01-01T12:23:34.0', format='fits', scale='tdb')
Time(2400000.5, 51544.0333981, format='jd', scale='tai')
Time(0.0, 51544.0333981, format='mjd', scale='tai')
Time('2000:001:12:23:34.0', format='yday', scale='tai')
Time('2000:001:12:23:34.0Z', format='yday', scale='utc')
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format='datetime', scale='tai')
Time([dt, dt], format='datetime', scale='tai')
dt64 = np.datetime64('2012-06-18T02:00:05.453000000')
Time(dt64, format='datetime64', scale='tai')
Time([dt64, dt64], format='datetime64', scale='tai')
def test_local_format_transforms(self):
"""
Test transformation of local time to different formats.
Transformation to formats with a reference time should raise
ScaleValueError.
"""
t = Time('2006-01-15 21:24:37.5', scale='local')
assert_allclose(t.jd, 2453751.3921006946, atol=0.001 / 3600. / 24., rtol=0.)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001 / 3600. / 24., rtol=0.)
assert_allclose(t.decimalyear, 2006.0408002758752, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == '2006-01-15T21:24:37.500'
assert t.yday == '2006:015:21:24:37.500'
assert t.fits == '2006-01-15T21:24:37.500'
assert_allclose(t.byear, 2006.04217888831, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert_allclose(t.jyear, 2006.0407723496082, atol=0.001 / 3600. / 24. / 365., rtol=0.)
assert t.byear_str == 'B2006.042'
assert t.jyear_str == 'J2006.041'
# epochTimeFormats
with pytest.raises(ScaleValueError):
t.gps
with pytest.raises(ScaleValueError):
t.unix
with pytest.raises(ScaleValueError):
t.cxcsec
with pytest.raises(ScaleValueError):
t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456000'
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale='utc')
assert t2.datetime == dt
t = Time([dt, dt2], scale='utc')
assert np.all(t.value == [dt, dt2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
# broadcasting
dt3 = (dt + (dt2-dt) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_datetime64(self):
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
dt64_2 = np.datetime64('2000-01-02')
t = Time(dt64, scale='utc', precision=9, format='datetime64')
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64
t = Time(dt64_2, scale='utc', precision=3, format='datetime64')
assert t.iso == '2000-01-02 00:00:00.000'
assert t.datetime64 == dt64_2
assert t.value == dt64_2
t2 = Time(t.iso, scale='utc')
assert t2.datetime64 == dt64_2
t = Time([dt64, dt64_2], scale='utc', format='datetime64')
assert np.all(t.value == [dt64, dt64_2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime64 == np.datetime64('2000-01-01T01:01:01.123456789')
# broadcasting
dt3 = (dt64 + (dt64_2-dt64) * np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc', format='datetime64')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1], format='datetime64')
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2], format='datetime64'))
assert Time(t3[2, 0], format='datetime64') == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format='jd', scale='tai', precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == 'B2015.136594'
assert t.jyear_str == 'J2015.134993'
t2 = Time(t.byear, format='byear', scale='tai')
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format='jyear', scale='tai')
assert allclose_jd(t2.jd, jd)
t = Time('J2015.134993', scale='tai', precision=6)
assert np.allclose(t.jd, jd, rtol=1e-10, atol=0) # J2015.134993 has 10 digit precision
assert t.byear_str == 'B2015.136594'
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format='iso', scale='utc')
with pytest.raises(ValueError):
Time('2000:001', format='jd', scale='utc')
with pytest.raises(ValueError): # unguessable
Time([])
with pytest.raises(ValueError):
Time([50000.0], ['bad'], format='mjd', scale='tai')
with pytest.raises(ValueError):
Time(50000.0, 'bad', format='mjd', scale='tai')
with pytest.raises(ValueError):
Time('2005-08-04T00:01:02.000Z', scale='tai')
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format='jd', scale='utc')
with pytest.raises(ValueError):
with pytest.warns(AstropyDeprecationWarning):
Time('2000-01-02T03:04:05(TAI)', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(UT(NIST)')
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = f'{year:04d}-{month:02d}'
yyyy_mm_dd = f'{year:04d}-{month:02d}-{day:02d}'
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm + '-01 23:59:60.0', scale='utc')
assert t1.iso == yyyy_mm + '-02 00:00:00.000'
# Leap second is different
t1 = Time(yyyy_mm_dd + ' 23:59:59.900', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:59.900'
t1 = Time(yyyy_mm_dd + ' 23:59:60.000', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.000'
t1 = Time(yyyy_mm_dd + ' 23:59:60.999', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.999'
if month == 6:
yyyy_mm_dd_plus1 = f'{year:04d}-07-01'
else:
yyyy_mm_dd_plus1 = f'{year + 1:04d}-01-01'
with pytest.warns(ErfaWarning):
t1 = Time(yyyy_mm_dd + ' 23:59:61.0', scale='utc')
assert t1.iso == yyyy_mm_dd_plus1 + ' 00:00:00.000'
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + ' 23:59:59', scale='utc')
t1 = Time(yyyy_mm_dd_plus1 + ' 00:00:00', scale='utc')
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time('2007:001', scale='tai')
t2 = Time(['2007-01-02', '2007-01-03'], scale='utc')
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
# and that scale and format are same as first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale='utc')
assert t3.scale == 'utc'
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale='tt')
assert t3.scale == 'tt'
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000., 50006.)
frac = np.arange(0., 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
Time(t1, scale='local')
class TestVal2:
"""Tests related to val2"""
@pytest.mark.parametrize("d", [
dict(val="2001:001", val2="ignored", scale="utc"),
dict(val={'year': 2015, 'month': 2, 'day': 3,
'hour': 12, 'minute': 13, 'second': 14.567},
val2="ignored", scale="utc"),
dict(val=np.datetime64('2005-02-25'), val2="ignored", scale="utc"),
dict(val=datetime.datetime(2000, 1, 2, 12, 0, 0),
val2="ignored", scale="utc"),
])
def test_unused_val2_raises(self, d):
"""Test that providing val2 is for string input lets user know we won't use it"""
with pytest.raises(ValueError):
Time(**d)
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format='mjd', scale='tai')
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
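# val with shape (7, 1) broadcasts against val2 with shape (5,)
# to give a Time of shape (7, 5).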
mjd = np.arange(50000., 50007.)
frac = np.arange(0., 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format='mjd', scale='utc')
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format='mjd', scale='tai')
def test_broadcast_not_writable(self):
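# val (3, 1) broadcast against val2 (4,) may leave read-only internal
# arrays; item assignment must still work and match an explicitly
# pre-broadcast Time.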
val = (2458000 + np.arange(3))[:, None]
val2 = np.linspace(0, 1, 4, endpoint=False)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1, 2] = t_i
t[1, 2] = t_i
assert t_b[1, 2] == t[1, 2], "writing worked"
assert t_b[0, 2] == t[0, 2], "broadcasting didn't cause problems"
assert t_b[1, 1] == t[1, 1], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
def test_broadcast_one_not_writable(self):
val = (2458000 + np.arange(3))
val2 = np.arange(1)
t = Time(val=val, val2=val2, format="jd", scale="tai")
t_b = Time(val=val + 0 * val2, val2=0 * val + val2, format="jd", scale="tai")
t_i = Time(val=57990, val2=0.3, format="jd", scale="tai")
t_b[1] = t_i
t[1] = t_i
assert t_b[1] == t[1], "writing worked"
assert t_b[0] == t[0], "broadcasting didn't cause problems"
assert np.all(t_b == t), "behaved as expected"
class TestSubFormat:
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
assert np.all(t.iso == np.array(['2000-01-01 00:00:00.000',
'2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
# Heterogeneous input formats with in_subfmt='date_*'
times = ['2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
in_subfmt='date_*')
assert np.all(t.iso == np.array(['2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='date')
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='doesnt exist')
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
out_subfmt='date_hm')
assert np.all(t.iso == np.array(['2000-01-01 00:00',
'2000-01-01 01:01',
'2000-01-01 01:01',
'2000-01-01 01:01']))
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01T01:01:01', '2000-01-01T01:01:01.123']
t = Time(times, format='fits', scale='tai')
assert np.all(t.fits == np.array(['2000-01-01T00:00:00.000',
'2000-01-01T01:01:01.000',
'2000-01-01T01:01:01.123']))
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format='fits', out_subfmt='long*')
assert np.all(t2.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+02000-01-01T01:01:01.123']))
# Implicit long format for output, because of negative year.
times[2] = '-00594-01-01'
t3 = Time(times, format='fits', scale='tai')
assert np.all(t3.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'-00594-01-01T00:00:00.000']))
# Implicit long format for output, because of large positive year.
times[2] = '+10594-01-01'
t4 = Time(times, format='fits', scale='tai')
assert np.all(t4.fits == np.array(['+02000-01-01T00:00:00.000',
'+02000-01-01T01:01:01.000',
'+10594-01-01T00:00:00.000']))
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-12-01', '2001-12-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
t.out_subfmt = 'date_hm'
assert np.all(t.yday == np.array(['2000:336:00:00',
'2001:335:01:01']))
t.out_subfmt = '*'
assert np.all(t.yday == np.array(['2000:336:00:00:00.000',
'2001:335:01:01:01.123']))
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format='cxcsec', scale='utc')
assert t.scale == 'utc'
t = Time(100.0, format='unix', scale='tai')
assert t.scale == 'tai'
t = Time(100.0, format='gps', scale='utc')
assert t.scale == 'utc'
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format='byear', scale='bad scale')
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time('2000:001:00:00:00', scale='bad scale')
def test_fits_scale(self):
"""Test that the previous FITS-string formatting can still be handled
but with a DeprecationWarning."""
for inputs in (("2000-01-02(TAI)", "tai"),
("1999-01-01T00:00:00.123(ET(NIST))", "tt"),
("2014-12-12T01:00:44.1(UTC)", "utc")):
with pytest.warns(AstropyDeprecationWarning):
t = Time(inputs[0])
assert t.scale == inputs[1]
# Create Time using normal ISOT syntax and compare with FITS
t2 = Time(inputs[0][:inputs[0].index("(")], format="isot",
scale=inputs[1])
assert t == t2
# Explicit check that conversions still work despite warning
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:00.123456789(UTC)')
t = t.tai
assert t.isot == '1999-01-01T00:00:32.123'
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)')
t = t.utc
assert t.isot == '1999-01-01T00:00:00.123'
# Check scale consistency
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="tai")
assert t.scale == "tai"
with pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(ET)', scale="tt")
assert t.scale == "tt"
with pytest.raises(ValueError), pytest.warns(AstropyDeprecationWarning):
t = Time('1999-01-01T00:00:32.123456789(TAI)', scale="utc")
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format='cxcsec')
assert t.scale == 'tt'
t = Time(100.0, format='unix')
assert t.scale == 'utc'
t = Time(100.0, format='gps')
assert t.scale == 'tai'
for date in ('2000:001', '2000-01-01T00:00:00'):
t = Time(date)
assert t.scale == 'utc'
t = Time(2000.1, format='byear')
assert t.scale == 'tt'
t = Time('J2000')
assert t.scale == 'tt'
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format='cxcsec', scale='tai')
assert t.tt.iso == '1998-01-01 00:00:00.000'
# Create new time object from this one and change scale, format
t2 = Time(t, scale='tt', format='iso')
assert t2.value == '1998-01-01 00:00:00.000'
# Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format='cxcsec', scale='utc')
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == '2010:001:00:00:00.000'
t = Time('2010:001:00:00:00.000', scale='utc')
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Round trip through epoch time
for scale in ('utc', 'tt'):
t = Time('2000:001', scale=scale)
t2 = Time(t.unix, scale=scale, format='unix')
assert getattr(t2, scale).iso == '2000-01-01 00:00:00.000'
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time('2013-05-20 21:18:46', scale='utc')
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time('2004-09-16T23:59:59', scale='utc')
assert allclose_sec(t.unix, 1095379199.0)
def test_plot_date(self):
"""Test the plot_date format.
Depending on the situation with matplotlib, this can give different
results because the plot date epoch time changed in matplotlib 3.3. This
test tries to use the matplotlib date2num function to make the test
independent of version, but if matplotlib isn't available then the code
(and test) use the pre-3.3 epoch.
"""
try:
from matplotlib.dates import date2num
except ImportError:
# No matplotlib, in which case this uses the epoch 0000-12-31
# as per matplotlib < 3.3.
# Value from:
# matplotlib.dates.set_epoch('0000-12-31')
# val = matplotlib.dates.date2num('2000-01-01')
val = 730120.0
else:
val = date2num(datetime.datetime(2000, 1, 1))
t = Time('2000-01-01 00:00:00', scale='utc')
assert np.allclose(t.plot_date, val, atol=1e-5, rtol=0)
class TestNumericalSubFormat:
def test_explicit_example(self):
t = Time('54321.000000000001', format='mjd')
assert t == Time(54321, 1e-12, format='mjd')
assert t.mjd == 54321. # Lost precision!
assert t.value == 54321. # Lost precision!
assert t.to_value('mjd') == 54321. # Lost precision!
assert t.to_value('mjd', subfmt='str') == '54321.000000000001'
assert t.to_value('mjd', 'bytes') == b'54321.000000000001'
expected_long = np.longdouble(54321.) + np.longdouble(1e-12)
# Check we're the same to within the double holding jd2
# (which is less precise than longdouble on arm64).
assert np.allclose(t.to_value('mjd', subfmt='long'),
expected_long, rtol=0, atol=np.finfo(float).eps)
t.out_subfmt = 'str'
assert t.value == '54321.000000000001'
assert t.to_value('mjd') == 54321. # Lost precision!
assert t.mjd == '54321.000000000001'
assert t.to_value('mjd', subfmt='bytes') == b'54321.000000000001'
assert t.to_value('mjd', subfmt='float') == 54321. # Lost precision!
t.out_subfmt = 'long'
assert np.allclose(t.value, expected_long,
rtol=0., atol=np.finfo(float).eps)
assert np.allclose(t.to_value('mjd', subfmt=None), expected_long,
rtol=0., atol=np.finfo(float).eps)
assert np.allclose(t.mjd, expected_long,
rtol=0., atol=np.finfo(float).eps)
assert t.to_value('mjd', subfmt='str') == '54321.000000000001'
assert t.to_value('mjd', subfmt='float') == 54321. # Lost precision!
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
def test_explicit_longdouble(self):
i = 54321
# Create a different long double (which will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
f = max(2.**(-np.finfo(np.longdouble).nmant) * 65536,
np.finfo(float).eps)
mjd_long = np.longdouble(i) + np.longdouble(f)
assert mjd_long != i, "longdouble failure!"
t = Time(mjd_long, format='mjd')
expected = Time(i, f, format='mjd')
assert abs(t - expected) <= 20. * u.ps
t_float = Time(i + f, format='mjd')
assert t_float == Time(i, format='mjd')
assert t_float != t
assert t.value == 54321. # Lost precision!
assert np.allclose(t.to_value('mjd', subfmt='long'), mjd_long,
rtol=0., atol=np.finfo(float).eps)
t2 = Time(mjd_long, format='mjd', out_subfmt='long')
assert np.allclose(t2.value, mjd_long,
rtol=0., atol=np.finfo(float).eps)
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
def test_explicit_longdouble_one_val(self):
"""Ensure either val1 or val2 being longdouble is possible.
Regression test for issue gh-10033.
"""
i = 54321
f = max(2.**(-np.finfo(np.longdouble).nmant) * 65536,
np.finfo(float).eps)
t1 = Time(i, f, format='mjd')
t2 = Time(np.longdouble(i), f, format='mjd')
t3 = Time(i, np.longdouble(f), format='mjd')
t4 = Time(np.longdouble(i), np.longdouble(f), format='mjd')
assert t1 == t2 == t3 == t4
@pytest.mark.skipif(np.finfo(np.longdouble).eps >= np.finfo(float).eps,
reason="long double is the same as float")
@pytest.mark.parametrize("fmt", ["mjd", "unix", "cxcsec"])
def test_longdouble_for_other_types(self, fmt):
t_fmt = getattr(Time(58000, format="mjd"), fmt) # Get regular float
t_fmt_long = np.longdouble(t_fmt)
# Create a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
atol = np.finfo(float).eps * (1. if fmt == 'mjd' else 24. * 3600.)
t_fmt_long2 = t_fmt_long + max(
t_fmt_long * np.finfo(np.longdouble).eps * 2, atol)
assert t_fmt_long != t_fmt_long2, "longdouble weird!"
tm = Time(t_fmt_long, format=fmt)
tm2 = Time(t_fmt_long2, format=fmt)
assert tm != tm2
tm_long2 = tm2.to_value(fmt, subfmt='long')
assert np.allclose(tm_long2, t_fmt_long2, rtol=0., atol=atol)
def test_subformat_input(self):
s = '54321.01234567890123456789'
i, f = s.split('.') # Note, OK only for fraction < 0.5
t = Time(float(i), float('.' + f), format='mjd')
t_str = Time(s, format='mjd')
t_bytes = Time(s.encode('ascii'), format='mjd')
t_decimal = Time(Decimal(s), format='mjd')
assert t_str == t
assert t_bytes == t
assert t_decimal == t
@pytest.mark.parametrize('out_subfmt', ('str', 'bytes'))
def test_subformat_output(self, out_subfmt):
i = 54321
f = np.array([0., 1e-9, 1e-12])
t = Time(i, f, format='mjd', out_subfmt=out_subfmt)
t_value = t.value
expected = np.array(['54321.0',
'54321.000000001',
'54321.000000000001'], dtype=out_subfmt)
assert np.all(t_value == expected)
assert np.all(Time(expected, format='mjd') == t)
# Explicit sub-format.
t = Time(i, f, format='mjd')
t_mjd_subfmt = t.to_value('mjd', subfmt=out_subfmt)
assert np.all(t_mjd_subfmt == expected)
@pytest.mark.parametrize('fmt,string,val1,val2', [
('jd', '2451544.5333981', 2451544.5, .0333981),
('decimalyear', '2000.54321', 2000., .54321),
('cxcsec', '100.0123456', 100.0123456, None),
('unix', '100.0123456', 100.0123456, None),
('gps', '100.0123456', 100.0123456, None),
('byear', '1950.1', 1950.1, None),
('jyear', '2000.1', 2000.1, None)])
def test_explicit_string_other_formats(self, fmt, string, val1, val2):
t = Time(string, format=fmt)
assert t == Time(val1, val2, format=fmt)
assert t.to_value(fmt, subfmt='str') == string
def test_basic_subformat_setting(self):
t = Time('2001', format='jyear', scale='tai')
t.format = "mjd"
t.out_subfmt = "str"
assert t.value.startswith("5")
def test_basic_subformat_cache_does_not_crash(self):
t = Time('2001', format='jyear', scale='tai')
t.to_value('mjd', subfmt='str')
assert ('mjd', 'str') in t.cache['format']
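# A second call with positional subfmt should hit the cache without error.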
t.to_value('mjd', 'str')
@pytest.mark.parametrize("fmt", ["jd", "mjd", "cxcsec", "unix", "gps", "jyear"])
def test_decimal_context_does_not_affect_string(self, fmt):
t = Time('2001', format='jyear', scale='tai')
t.format = fmt
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value(fmt, "str")
t2 = Time('2001', format='jyear', scale='tai')
t2.format = fmt
with localcontext() as ctx:
ctx.prec = 40
t2_s_40 = t.to_value(fmt, "str")
assert t_s_2 == t2_s_40, "String representation should not depend on Decimal context"
def test_decimal_context_caching(self):
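# The active decimal.localcontext precision should not change the
# (possibly cached) 'decimal' sub-format result.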
t = Time(val=58000, val2=1e-14, format='mjd', scale='tai')
with localcontext() as ctx:
ctx.prec = 2
t_s_2 = t.to_value('mjd', subfmt='decimal')
t2 = Time(val=58000, val2=1e-14, format='mjd', scale='tai')
with localcontext() as ctx:
ctx.prec = 40
t_s_40 = t.to_value('mjd', subfmt='decimal')
t2_s_40 = t2.to_value('mjd', subfmt='decimal')
assert t_s_2 == t_s_40, "Should be the same but cache might make this automatic"
assert t_s_2 == t2_s_40, "Different precision should produce the same results"
@pytest.mark.parametrize("f, s, t", [("sec", "long", np.longdouble),
("sec", "decimal", Decimal),
("sec", "str", str)])
def test_timedelta_basic(self, f, s, t):
dt = (Time("58000", format="mjd", scale="tai")
- Time("58001", format="mjd", scale="tai"))
value = dt.to_value(f, s)
assert isinstance(value, t)
dt.format = f
dt.out_subfmt = s
assert isinstance(dt.value, t)
assert isinstance(dt.to_value(f, None), t)
def test_need_format_argument(self):
t = Time('J2000')
with pytest.raises(TypeError, match="missing.*required.*'format'"):
t.to_value()
with pytest.raises(ValueError, match='format must be one of'):
t.to_value('julian')
def test_wrong_in_subfmt(self):
with pytest.raises(ValueError, match='not among selected'):
Time("58000", format='mjd', in_subfmt='float')
with pytest.raises(ValueError, match='not among selected'):
Time(np.longdouble(58000), format='mjd', in_subfmt='float')
with pytest.raises(ValueError, match='not among selected'):
Time(58000., format='mjd', in_subfmt='str')
with pytest.raises(ValueError, match='not among selected'):
Time(58000., format='mjd', in_subfmt='long')
def test_wrong_subfmt(self):
t = Time(58000., format='mjd')
with pytest.raises(ValueError, match='must match one'):
t.to_value('mjd', subfmt='parrot')
with pytest.raises(ValueError, match='must match one'):
t.out_subfmt = 'parrot'
with pytest.raises(ValueError, match='must match one'):
t.in_subfmt = 'parrot'
def test_not_allowed_subfmt(self):
"""Test case where format has no defined subfmts"""
t = Time('J2000')
match = 'subformat not allowed for format jyear_str'
with pytest.raises(ValueError, match=match):
t.to_value('jyear_str', subfmt='parrot')
with pytest.raises(ValueError, match=match):
t.out_subfmt = 'parrot'
with pytest.raises(ValueError, match=match):
Time('J2000', out_subfmt='parrot')
with pytest.raises(ValueError, match=match):
t.in_subfmt = 'parrot'
with pytest.raises(ValueError, match=match):
Time('J2000', format='jyear_str', in_subfmt='parrot')
def test_switch_to_format_with_no_out_subfmt(self):
t = Time('2001-01-01', out_subfmt='date_hm')
assert t.out_subfmt == 'date_hm'
# Now do an in-place switch to format 'jyear_str' that has no subfmts
# where out_subfmt is changed to '*'.
t.format = 'jyear_str'
assert t.out_subfmt == '*'
assert t.value == 'J2001.001'
class TestSofaErrors:
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with pytest.warns(ErfaWarning, match=r'bad day \(JD computed\)') as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.])
class TestCopyReplicate:
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format='jd', scale='tai')
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format='mjd', scale='tai')
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(['2000:001'], format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
def test_copy(self):
"""Test copy method"""
t = Time('2000:001', format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
assert t.location.x == t_loc_x # prove that it did not change
class TestStardate:
"""Sync chronometers with Starfleet Command"""
def test_iso_to_stardate(self):
assert str(Time('2320-01-01', scale='tai').stardate)[:7] == '1368.99'
assert str(Time('2330-01-01', scale='tai').stardate)[:8] == '10552.76'
assert str(Time('2340-01-01', scale='tai').stardate)[:8] == '19734.02'
@pytest.mark.parametrize('dates',
[(10000, '2329-05-26 03:02'),
(20000, '2340-04-15 19:05'),
(30000, '2351-03-07 11:08')])
def test_stardate_to_iso(self, dates):
stardate, iso = dates
t_star = Time(stardate, format='stardate')
t_iso = Time(t_star, format='iso', out_subfmt='date_hm')
assert t_iso.value == iso
def test_python_builtin_copy():
t = Time('2000:001', format='yday', scale='tai')
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == 'datetime'
assert t.scale == 'utc'
dt = t.datetime - now # a datetime.timedelta object
# this gives a .1 second margin between the `utcnow` call and the `Time`
# initializer, which is really way more generous than necessary - typical
# times are more like microseconds. But it seems safer in case some
# platforms have slow clock calls or something.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time('2001:001', format='yday')
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format='decimalyear')
assert np.all(t.value == [2000.5, 2000.75])
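# Fractional decimal years interpolate linearly in JD between
# successive Jan 1 boundaries, as checked below.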
jd0 = Time('2000:001').jd
jd1 = Time('2001:001').jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd,
jd0 + 0.75 * d_jd])
def test_fits_year0():
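# JD 1721425.5 is 0001-01-01; year 0 is a leap year in the proleptic
# Gregorian calendar (hence the 366-day step), and years outside 1-9999
# switch to the signed five-digit FITS form.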
t = Time(1721425.5, format='jd', scale='tai')
assert t.fits == '0001-01-01T00:00:00.000'
t = Time(1721425.5 - 366., format='jd', scale='tai')
assert t.fits == '+00000-01-01T00:00:00.000'
t = Time(1721425.5 - 366. - 365., format='jd', scale='tai')
assert t.fits == '-00001-01-01T00:00:00.000'
def test_fits_year10000():
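# JD 5373484.5 is +10000-01-01, the first date that needs the long FITS
# form; 9999 is not a leap year, hence the 365-day step back.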
t = Time(5373484.5, format='jd', scale='tai')
assert t.fits == '+10000-01-01T00:00:00.000'
t = Time(5373484.5 - 365., format='jd', scale='tai')
assert t.fits == '9999-01-01T00:00:00.000'
t = Time(5373484.5, -1. / 24. / 3600., format='jd', scale='tai')
assert t.fits == '9999-12-31T23:59:59.000'
def test_dir():
t = Time('2000:001', format='yday', scale='tai')
assert 'utc' in dir(t)
def test_time_from_epoch_jds():
"""Test that jd1/jd2 in a TimeFromEpoch format is always well-formed:
jd1 is an integral value and abs(jd2) <= 0.5.
"""
# From 1999:001 00:00 to 1999:002 12:00 by a non-round step. This will
# catch jd2 == 0 and a case of abs(jd2) == 0.5.
cxcsecs = np.linspace(0, 86400 * 1.5, 49)
for cxcsec in cxcsecs:
t = Time(cxcsec, format='cxcsec')
assert np.round(t.jd1) == t.jd1
assert np.abs(t.jd2) <= 0.5
t = Time(cxcsecs, format='cxcsec')
assert np.all(np.round(t.jd1) == t.jd1)
assert np.all(np.abs(t.jd2) <= 0.5)
assert np.any(np.abs(t.jd2) == 0.5) # At least one exactly 0.5
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format='mjd', scale='utc')
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert 'Time' in str(err.value)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time('1900-01-01', scale='ut1')
t.delta_ut1_utc = 0.0
with pytest.warns(ErfaWarning):
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion(monkeypatch):
# Check that if we have internet, and downloading is allowed, we
# can get conversion to UT1 for the present, since we will download
# IERS_A in IERS_Auto.
monkeypatch.setattr('astropy.utils.iers.conf.auto_download', True)
Time(Time.now().cxcsec, format='cxcsec', scale='ut1')
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype('>f8')
little_endian = mjd.astype('<f8')
time_mjd = Time(mjd, format='mjd')
time_big = Time(big_endian, format='mjd')
time_little = Time(little_endian, format='mjd')
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = 'longyear'
subfmts = (('date',
r'(?P<year>[+-]\d{5})-%m-%d', # hybrid
'{year:+06d}-{mon:02d}-{day:02d}'),)
t = Time('+02000-02-03', format='longyear')
assert t.value == '+02000-02-03'
assert t.jd == Time('2000-02-03').jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (('jd', 2451577.5),
('mjd', 51577.0),
('cxcsec', 65923264.184), # confirmed with Chandra.Time
('datetime', datetime.datetime(2000, 2, 3, 0, 0)),
('iso', '2000-02-03 00:00:00.000')):
t = Time('+02000-02-03', format='fits')
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_unix_tai_format():
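# TAI leads UTC by 37 s after the 2017-01-01 leap second; at the
# 1970-01-01 epoch the offset was about 8.000082 s.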
t = Time('2020-01-01', scale='utc')
assert allclose_sec(t.unix_tai - t.unix, 37.0)
t = Time('1970-01-01', scale='utc')
assert allclose_sec(t.unix_tai - t.unix, 8 + 8.2e-05)
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='date_hms', precision=5)
tc = t.copy()
t.format = 'isot'
assert t.precision == 5
assert t.out_subfmt == 'date_hms'
assert t.value == '2000-02-03T00:00:00.00000'
t.format = 'fits'
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='longdate')
t.format = 'isot'
assert t.out_subfmt == '*' # longdate_hms not there, goes to default
assert t.value == '2000-02-03T00:00:00.000'
t.format = 'fits'
assert t.out_subfmt == '*'
assert t.value == '2000-02-03T00:00:00.000' # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time('2007:001', scale='tai')
with pytest.raises(ValueError) as err:
t1.replicate(format='definitely_not_a_valid_format')
assert 'format must be one of' in str(err.value)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time('2007:001', scale='tai')
assert 'astropy_time' not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format='astropy_time')
assert 'format must be one of' in str(err.value)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'],
format='iso', scale='utc')
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10 * u.hour, tzname='US/Hawaii')
# The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError, match=r'does not support leap seconds'):
Time('2015-06-30 23:59:60.000').to_datetime()
@pytest.mark.skipif('not HAS_PYTZ')
def test_to_datetime_pytz():
import pytz
tz = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time('2010-09-03 00:00:00')
t2 = Time('2010-09-03 00:00:00')
# Time starts out without a cache
assert 'cache' not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache['format']['iso'] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache['scale']['tai'] == t2.tai
# New Time object after scale transform does not have a cache yet
assert 'cache' not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert 'cache' not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert 'cache' in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [[[f'{y:04d}-{m:02d}-{d:02d}' for d in range(1, 3)]
for m in range(5, 7)] for y in range(2012, 2014)]
cutf32 = Column(times)
cbytes = cutf32.astype('S')
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(['B1950']))
tbytes = Time(Column([b'B1950']))
assert tutf32 == tbytes
# Regression tests for arrays with entries with unequal length. gh-6903.
times = Column([b'2012-01-01', b'2012-01-01T00:00:00'])
assert np.all(Time(times) == Time(['2012-01-01', '2012-01-01T00:00:00']))
def test_bytes_input():
tstring = '2011-01-02T03:04:05'
tbytes = b'2011-01-02T03:04:05'
assert tbytes.decode('ascii') == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == 'S'
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format='cxcsec')
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err.value)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is writeable because it gets boxed into a zero-d array
t = Time('2000:001', scale='utc')
t[()] = '2000:002'
assert t.value.startswith('2000:002')
# Transformed attribute is not writeable
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = '2005:001'
assert 'Time object is read-only. Make a copy()' in str(err.value)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format='cxcsec')
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location=None'.format(loc[0])) in str(err.value)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format='cxcsec', location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location={} and '
'got location={}'.format(loc[0], loc[1])) in str(err.value)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format='cxcsec')
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location=None and '
'got location={}'.format(loc[1])) in str(err.value)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
t[0, :] = Time([-3, -4], format='cxcsec', location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format='cxcsec')
assert t.cache == {}
t.iso
assert 'iso' in t.cache['format']
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:00:02.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[3, 4]])
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:01:40.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[200, 200]])
# Array of strings in yday format
t[:, 1] = ['1998:002', '1998:003']
assert allclose_sec(t.value, [[1, 86400 * 1],
[200, 86400 * 2]])
# Incompatible numeric value
t = Time(['2000:001', '2000:002'])
t[0] = '2001:001'
with pytest.raises(ValueError) as err:
t[0] = 100
assert 'cannot convert value to a compatible Time object' in str(err.value)
def test_setitem_from_time_objects():
"""Set from existing Time object.
"""
# Set from time object with different scale
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = Time(['2000:010'], scale='tai')
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(['2000:001', '2000:002'], scale='utc')
t2.format = 'jyear'
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format='cxcsec')
with pytest.raises(IndexError):
t['asdf'] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format='cxcsec')
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, '_delta_tdb_tt')
assert not hasattr(t, '_delta_ut1_utc')
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-7449 and PR gh-7453.
class _Time(Time):
pass
t1 = Time('1999-01-01T01:01:01')
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
def test_strftime_scalar():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strftime_array():
tstrings = ['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1995-12-31 23:59:60']
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S').tolist() == tstrings
def test_strftime_array_2():
tstrings = [['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1995-12-31 23:59:60']]
tstrings = np.array(tstrings)
t = Time(tstrings)
for format in t.FORMATS:
t.format = format
assert np.all(t.strftime('%Y-%m-%d %H:%M:%S') == tstrings)
assert t.strftime('%Y-%m-%d %H:%M:%S').shape == tstrings.shape
def test_strftime_leapsecond():
time_string = '1995-12-31 23:59:60'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S') == time_string
def test_strptime_scalar():
"""Test of Time.strptime
"""
time_string = '2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01', '1998-Jan-01 00:00:02'],
['1998-Jan-01 00:00:03', '1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_badinput():
tstrings = [1, 2, 3]
with pytest.raises(TypeError):
Time.strptime(tstrings, '%S')
def test_strptime_input_bytes_scalar():
time_string = b'2007-May-04 21:08:12'
time_object = Time('2007-05-04 21:08:12')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S')
assert t == time_object
def test_strptime_input_bytes_array():
tstrings = [[b'1998-Jan-01 00:00:01', b'1998-Jan-01 00:00:02'],
[b'1998-Jan-01 00:00:03', b'1998-Jan-01 00:00:04']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01', '1998-01-01 00:00:02'],
['1998-01-01 00:00:03', '1998-01-01 00:00:04']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strptime_leapsecond():
time_obj1 = Time('1995-12-31T23:59:60', format='isot')
time_obj2 = Time.strptime('1995-Dec-31 23:59:60', '%Y-%b-%d %H:%M:%S')
assert time_obj1 == time_obj2
def test_strptime_3_digit_year():
time_obj1 = Time('0995-12-31T00:00:00', format='isot', scale='tai')
time_obj2 = Time.strptime('0995-Dec-31 00:00:00', '%Y-%b-%d %H:%M:%S',
scale='tai')
assert time_obj1 == time_obj2
def test_strptime_fracsec_scalar():
time_string = '2007-May-04 21:08:12.123'
time_object = Time('2007-05-04 21:08:12.123')
t = Time.strptime(time_string, '%Y-%b-%d %H:%M:%S.%f')
assert t == time_object
def test_strptime_fracsec_array():
"""Test of Time.strptime
"""
tstrings = [['1998-Jan-01 00:00:01.123', '1998-Jan-01 00:00:02.000001'],
['1998-Jan-01 00:00:03.000900', '1998-Jan-01 00:00:04.123456']]
tstrings = np.array(tstrings)
time_object = Time([['1998-01-01 00:00:01.123', '1998-01-01 00:00:02.000001'],
['1998-01-01 00:00:03.000900', '1998-01-01 00:00:04.123456']])
t = Time.strptime(tstrings, '%Y-%b-%d %H:%M:%S.%f')
assert np.all(t == time_object)
assert t.shape == tstrings.shape
def test_strftime_scalar_fracsec():
"""Test of Time.strftime
"""
time_string = '2010-09-03 06:00:00.123'
t = Time(time_string)
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == time_string
def test_strftime_scalar_fracsec_precision():
time_string = '2010-09-03 06:00:00.123123123'
t = Time(time_string)
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123'
t.precision = 9
assert t.strftime('%Y-%m-%d %H:%M:%S.%f') == '2010-09-03 06:00:00.123123123'
def test_strftime_array_fracsec():
tstrings = ['2010-09-03 00:00:00.123000', '2005-09-03 06:00:00.000001',
'1995-12-31 23:59:60.000900']
t = Time(tstrings)
t.precision = 6
for format in t.FORMATS:
t.format = format
assert t.strftime('%Y-%m-%d %H:%M:%S.%f').tolist() == tstrings
def test_insert_time():
tm = Time([1, 2], format='unix')
# Insert a scalar using an auto-parsed string
tm2 = tm.insert(1, '1970-01-01 00:01:00')
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert scalar using a Time value
tm2 = tm.insert(1, Time('1970-01-01 00:01:00'))
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=1 array with a Time value
tm2 = tm.insert(1, [Time('1970-01-01 00:01:00')])
assert np.all(tm2 == Time([1, 60, 2], format='unix'))
# Insert length=2 list with float values matching unix format.
# Also actually provide axis=0 unlike all other tests.
tm2 = tm.insert(1, [10, 20], axis=0)
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values matching unix format
tm2 = tm.insert(1, np.array([10, 20]))
assert np.all(tm2 == Time([1, 10, 20, 2], format='unix'))
# Insert length=2 np.array with float values at the end
tm2 = tm.insert(2, np.array([10, 20]))
assert np.all(tm2 == Time([1, 2, 10, 20], format='unix'))
# Insert length=2 np.array with float values at the beginning
# with a negative index
tm2 = tm.insert(-2, np.array([10, 20]))
assert np.all(tm2 == Time([10, 20, 1, 2], format='unix'))
def test_insert_exceptions():
tm = Time(1, format='unix')
with pytest.raises(TypeError) as err:
tm.insert(0, 50)
assert 'cannot insert into scalar' in str(err.value)
tm = Time([1, 2], format='unix')
with pytest.raises(ValueError) as err:
tm.insert(0, 50, axis=1)
assert 'axis must be 0' in str(err.value)
with pytest.raises(TypeError) as err:
tm.insert(slice(None), 50)
assert 'obj arg must be an integer' in str(err.value)
with pytest.raises(IndexError) as err:
tm.insert(-100, 50)
assert 'index -100 is out of bounds for axis 0 with size 2' in str(err.value)
def test_datetime64_no_format():
dt64 = np.datetime64('2000-01-02T03:04:05.123456789')
t = Time(dt64, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456789'
assert t.datetime64 == dt64
assert t.value == dt64
def test_hash_time():
loc1 = EarthLocation(1 * u.m, 2 * u.m, 3 * u.m)
for loc in None, loc1:
t = Time([1, 1, 2, 3], format='cxcsec', location=loc)
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'Time' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'Time' (value is masked)"
t = Time(1, format='cxcsec', location=loc)
t2 = Time(1, format='cxcsec')
assert hash(t) != hash(t2)
t = Time('2000:180', scale='utc')
t2 = Time(t, scale='tai')
assert t == t2
assert hash(t) != hash(t2)
def test_hash_time_delta():
t = TimeDelta([1, 1, 2, 3], format='sec')
t[3] = np.ma.masked
h1 = hash(t[0])
h2 = hash(t[1])
h3 = hash(t[2])
assert h1 == h2
assert h1 != h3
with pytest.raises(TypeError) as exc:
hash(t)
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (must be scalar)"
with pytest.raises(TypeError) as exc:
hash(t[3])
assert exc.value.args[0] == "unhashable type: 'TimeDelta' (value is masked)"
def test_get_time_fmt_exception_messages():
with pytest.raises(ValueError) as err:
Time(10)
assert "No time format was given, and the input is" in str(err.value)
with pytest.raises(ValueError) as err:
Time('2000:001', format='not-a-format')
assert "Format 'not-a-format' is not one of the allowed" in str(err.value)
with pytest.raises(ValueError) as err:
Time('200')
assert 'Input values did not match any of the formats where' in str(err.value)
with pytest.raises(ValueError) as err:
Time('200', format='iso')
assert ('Input values did not match the format class iso:' + os.linesep
+ 'ValueError: Time 200 does not match iso format') == str(err.value)
with pytest.raises(ValueError) as err:
Time(200, format='iso')
assert ('Input values did not match the format class iso:' + os.linesep
+ 'TypeError: Input values for iso class must be strings') == str(err.value)
def test_ymdhms_defaults():
t1 = Time({'year': 2001}, format='ymdhms')
assert t1 == Time('2001-01-01')
times_dict_ns = {
'year': [2001, 2002],
'month': [2, 3],
'day': [4, 5],
'hour': [6, 7],
'minute': [8, 9],
'second': [10, 11]
}
table_ns = Table(times_dict_ns)
struct_array_ns = table_ns.as_array()
rec_array_ns = struct_array_ns.view(np.recarray)
ymdhms_names = ('year', 'month', 'day', 'hour', 'minute', 'second')
@pytest.mark.parametrize('tm_input', [table_ns, struct_array_ns, rec_array_ns])
@pytest.mark.parametrize('kwargs', [{}, {'format': 'ymdhms'}])
@pytest.mark.parametrize('as_row', [False, True])
def test_ymdhms_init_from_table_like(tm_input, kwargs, as_row):
time_ns = Time(['2001-02-04 06:08:10', '2002-03-05 07:09:11'])
if as_row:
tm_input = tm_input[0]
time_ns = time_ns[0]
tm = Time(tm_input, **kwargs)
assert np.all(tm == time_ns)
assert tm.value.dtype.names == ymdhms_names
def test_ymdhms_init_from_dict_array():
times_dict_shape = {
'year': [[2001, 2002],
[2003, 2004]],
'month': [2, 3],
'day': 4
}
time_shape = Time(
[['2001-02-04', '2002-03-04'],
['2003-02-04', '2004-03-04']]
)
time = Time(times_dict_shape, format='ymdhms')
assert np.all(time == time_shape)
assert time.ymdhms.shape == time_shape.shape
@pytest.mark.parametrize('kwargs', [{}, {'format': 'ymdhms'}])
def test_ymdhms_init_from_dict_scalar(kwargs):
"""
Test YMDHMS functionality for a dict input. This includes ensuring that
key and attribute access work. For extra fun use a time within a leap
second.
"""
time_dict = {
'year': 2016,
'month': 12,
'day': 31,
'hour': 23,
'minute': 59,
'second': 60.123456789}
tm = Time(time_dict, **kwargs)
assert tm == Time('2016-12-31T23:59:60.123456789')
for attr in time_dict:
for value in (tm.value[attr], getattr(tm.value, attr)):
if attr == 'second':
assert allclose_sec(time_dict[attr], value)
else:
assert time_dict[attr] == value
# Now test initializing from a YMDHMS format time using the object
tm_rt = Time(tm)
assert tm_rt == tm
assert tm_rt.format == 'ymdhms'
# Test initializing from a YMDHMS value (np.void, i.e. recarray row)
# without specified format.
tm_rt = Time(tm.ymdhms)
assert tm_rt == tm
assert tm_rt.format == 'ymdhms'
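# A minimal illustrative sketch of the ymdhms usage exercised above (an
# addition for clarity, not part of the original test suite):
#
# >>> from astropy.time import Time
# >>> t = Time({'year': 2016, 'month': 12, 'day': 31,
# ...           'hour': 23, 'minute': 59, 'second': 60.123456789})
# >>> t.isot
# '2016-12-31T23:59:60.123'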
def test_ymdhms_exceptions():
with pytest.raises(ValueError, match='input must be dict or table-like'):
Time(10, format='ymdhms')
match = "'wrong' not allowed as YMDHMS key name(s)"
# NB: using match=match in pytest.raises() fails because the message contains
# regex metacharacters (the parentheses in "name(s)"), so we fall back to
# old school ``match in str(err.value)``.
with pytest.raises(ValueError) as err:
Time({'year': 2019, 'wrong': 1}, format='ymdhms')
assert match in str(err.value)
match = "for 2 input key names you must supply 'year', 'month'"
with pytest.raises(ValueError, match=match):
Time({'year': 2019, 'minute': 1}, format='ymdhms')
def test_ymdhms_masked():
tm = Time({'year': [2000, 2001]}, format='ymdhms')
tm[0] = np.ma.masked
assert isinstance(tm.value[0], np.ma.core.mvoid)
for name in ymdhms_names:
assert tm.value[0][name] is np.ma.masked
# Converted from doctest in astropy/test/formats.py for debugging
def test_ymdhms_output():
t = Time({'year': 2015, 'month': 2, 'day': 3,
'hour': 12, 'minute': 13, 'second': 14.567},
scale='utc')
# NOTE: actually comes back as np.void for some reason
# NOTE: not necessarily a python int; might be an int32
assert t.ymdhms.year == 2015
@pytest.mark.parametrize('fmt', TIME_FORMATS)
def test_write_every_format_to_ecsv(fmt):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = (Time('2020-01-01')
+ [[1, 1 / 7],
[3, 4.5]] * u.s)
tm.format = fmt
t['a'] = tm
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t['a'].format == t2['a'].format
# Some loss of precision in the serialization
assert not np.all(t['a'] == t2['a'])
# But no loss in the format representation
assert np.all(t['a'].value == t2['a'].value)
@pytest.mark.parametrize('fmt', TIME_FORMATS)
def test_write_every_format_to_fits(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = (Time('2020-01-01')
+ [[1, 1 / 7],
[3, 4.5]] * u.s)
tm.format = fmt
t['a'] = tm
out = tmp_path / 'out.fits'
t.write(out, format='fits')
t2 = Table.read(out, format='fits', astropy_native=True)
# Currently the format is lost in FITS so set it back
t2['a'].format = fmt
# No loss of precision in the serialization or representation
assert np.all(t['a'] == t2['a'])
assert np.all(t['a'].value == t2['a'].value)
@pytest.mark.skipif(not HAS_H5PY, reason='Needs h5py')
@pytest.mark.parametrize('fmt', TIME_FORMATS)
def test_write_every_format_to_hdf5(fmt, tmp_path):
"""Test special-case serialization of certain Time formats"""
t = Table()
# Use a time that tests the default serialization of the time format
tm = (Time('2020-01-01')
+ [[1, 1 / 7],
[3, 4.5]] * u.s)
tm.format = fmt
t['a'] = tm
out = tmp_path / 'out.h5'
t.write(str(out), format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(str(out), format='hdf5', path='root')
assert t['a'].format == t2['a'].format
# No loss of precision in the serialization or representation
assert np.all(t['a'] == t2['a'])
assert np.all(t['a'].value == t2['a'].value)
# There are two stages of validation now - one on input into a format, so that
# the format conversion code has tidy matched arrays to work with, and the
# other when object construction does not go through a format object, or at
# least when the format object is constructed with "from_jd=True". In that
# case the normal input validation does not happen, but the new input
# validation does, and ensures that strange broadcasting anomalies cannot occur.
# This form of construction uses from_jd=True.
def test_broadcasting_writeable():
t = Time('J2015') + np.linspace(-1, 1, 10) * u.day
t[2] = Time(58000, format="mjd")
def test_format_subformat_compatibility():
"""Test that changing format with out_subfmt defined is not a problem.
See #9812, #9810."""
t = Time('2019-12-20', out_subfmt='date_??')
assert t.mjd == 58837.0
assert t.yday == '2019:354:00:00' # Preserves out_subfmt
t2 = t.replicate(format='mjd')
assert t2.out_subfmt == '*' # Changes to default
t2 = t.copy(format='mjd')
assert t2.out_subfmt == '*'
t2 = Time(t, format='mjd')
assert t2.out_subfmt == '*'
t2 = t.copy(format='yday')
assert t2.out_subfmt == 'date_??'
assert t2.value == '2019:354:00:00'
t.format = 'yday'
assert t.value == '2019:354:00:00'
assert t.out_subfmt == 'date_??'
t = Time('2019-12-20', out_subfmt='date')
assert t.mjd == 58837.0
assert t.yday == '2019:354'
@pytest.mark.parametrize('fmt_name,fmt_class', TIME_FORMATS.items())
def test_to_value_with_subfmt_for_every_format(fmt_name, fmt_class):
"""From a starting Time value, test that every valid combination of
to_value(format, subfmt) works. See #9812, #9361.
"""
t = Time('2000-01-01')
subfmts = list(subfmt[0] for subfmt in fmt_class.subfmts) + [None, '*']
for subfmt in subfmts:
t.to_value(fmt_name, subfmt)
@pytest.mark.parametrize('location', [None, (45, 45)])
def test_location_init(location):
"""Test fix in #9969 for issue #9962 where the location attribute is
lost when initializing Time from an existing Time instance or list of
Time instances.
"""
tm = Time('J2010', location=location)
# Init from a scalar Time
tm2 = Time(tm)
assert np.all(tm.location == tm2.location)
assert type(tm.location) is type(tm2.location) # noqa
# From a list of Times
tm2 = Time([tm, tm])
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location) # noqa
# Effectively the same as a list of Times, but just to be sure that
# Table mixin initialization is working as expected.
tm2 = Table([[tm, tm]])['col0']
if location is None:
assert tm2.location is None
else:
for loc in tm2.location:
assert loc == tm.location
assert type(tm.location) is type(tm2.location) # noqa
def test_location_init_fail():
"""Test fix in #9969 for issue #9962 where the location attribute is
lost when initializing Time from an existing Time instance or list of
Time instances. Make sure exception is correct.
"""
tm = Time('J2010', location=(45, 45))
tm2 = Time('J2010')
with pytest.raises(ValueError,
match='cannot concatenate times unless all locations'):
Time([tm, tm2])
def test_linspace():
"""Test `np.linspace` `__array_func__` implementation for scalar and arrays.
"""
t1 = Time(['2021-01-01 00:00:00', '2021-01-02 00:00:00'])
t2 = Time(['2021-01-01 01:00:00', '2021-12-28 00:00:00'])
atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()
ts = np.linspace(t1[0], t2[0], 3)
assert ts[0].isclose(Time('2021-01-01 00:00:00'), atol=atol)
assert ts[1].isclose(Time('2021-01-01 00:30:00'), atol=atol)
assert ts[2].isclose(Time('2021-01-01 01:00:00'), atol=atol)
ts = np.linspace(t1, t2[0], 2, endpoint=False)
assert ts.shape == (2, 2)
assert all(ts[0].isclose(Time(['2021-01-01 00:00:00', '2021-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2021-01-01 00:30:00', '2021-01-01 12:30:00']), atol=atol))
ts = np.linspace(t1, t2, 7)
assert ts.shape == (7, 2)
assert all(ts[0].isclose(Time(['2021-01-01 00:00:00', '2021-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2021-01-01 00:10:00', '2021-03-03 00:00:00']), atol=atol))
assert all(ts[5].isclose(Time(['2021-01-01 00:50:00', '2021-10-29 00:00:00']), atol=atol))
assert all(ts[6].isclose(Time(['2021-01-01 01:00:00', '2021-12-28 00:00:00']), atol=atol))
def test_linspace_steps():
"""Test `np.linspace` `retstep` option.
"""
t1 = Time(['2021-01-01 00:00:00', '2021-01-01 12:00:00'])
t2 = Time('2021-01-02 00:00:00')
atol = 2 * np.finfo(float).eps * abs(t1 - t2).max()
ts, st = np.linspace(t1, t2, 7, retstep=True)
assert ts.shape == (7, 2)
assert st.shape == (2,)
assert all(ts[1].isclose(ts[0] + st, atol=atol))
assert all(ts[6].isclose(ts[0] + 6 * st, atol=atol))
assert all(st.isclose(TimeDelta([14400, 7200], format='sec'), atol=atol))
def test_linspace_fmts():
"""Test `np.linspace` `__array_func__` implementation for start/endpoints
from different formats/systems.
"""
t1 = Time(['2020-01-01 00:00:00', '2020-01-02 00:00:00'])
t2 = Time(2458850, format='jd')
t3 = Time(1578009600, format='unix')
atol = 2 * np.finfo(float).eps * abs(t1 - Time([t2, t3])).max()
ts = np.linspace(t1, t2, 3)
assert ts.shape == (3, 2)
assert all(ts[0].isclose(Time(['2020-01-01 00:00:00', '2020-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2020-01-01 06:00:00', '2020-01-01 18:00:00']), atol=atol))
assert all(ts[2].isclose(Time(['2020-01-01 12:00:00', '2020-01-01 12:00:00']), atol=atol))
ts = np.linspace(t1, Time([t2, t3]), 3)
assert ts.shape == (3, 2)
assert all(ts[0].isclose(Time(['2020-01-01 00:00:00', '2020-01-02 00:00:00']), atol=atol))
assert all(ts[1].isclose(Time(['2020-01-01 06:00:00', '2020-01-02 12:00:00']), atol=atol))
assert all(ts[2].isclose(Time(['2020-01-01 12:00:00', '2020-01-03 00:00:00']), atol=atol))
|
1074294d3634d51f50dd35ce20c6c514c36de4ca434944819b4bef22807b18ce | import decimal
import warnings
import functools
import contextlib
from decimal import Decimal
from datetime import datetime, timedelta
import pytest
from hypothesis import assume, example, given, target
from hypothesis.extra.numpy import array_shapes, arrays
from hypothesis.strategies import (composite, datetimes, floats, integers,
one_of, sampled_from, timedeltas, tuples)
import numpy as np
import erfa
from erfa import ErfaError, ErfaWarning
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import STANDARD_TIME_SCALES, Time, TimeDelta
from astropy.time.utils import day_frac, two_sum
from astropy.utils import iers
allclose_jd = functools.partial(np.allclose, rtol=np.finfo(float).eps, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=np.finfo(float).eps,
atol=np.finfo(float).eps * 24 * 3600)
tiny = np.finfo(float).eps
dt_tiny = TimeDelta(tiny, format='jd')
def setup_module():
# Pre-load leap seconds table to avoid flakiness in hypothesis runs.
# See https://github.com/astropy/astropy/issues/11030
Time('2020-01-01').ut1
@pytest.fixture(scope='module')
def iers_b():
"""This is an expensive operation, so we share it between tests using a
module-scoped fixture instead of using the context manager form. This
is particularly important for Hypothesis, which invokes the decorated
test function many times (100 by default; see conftest.py for details).
"""
with iers.earth_orientation_table.set(iers.IERS_B.open(iers.IERS_B_FILE)):
yield "<using IERS-B orientation table>"
@contextlib.contextmanager
def quiet_erfa():
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ErfaWarning)
yield
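# Illustrative usage sketch for quiet_erfa (added for clarity; the call below
# is an assumed example, not original code): suppress the expected "dubious
# year" ErfaWarning for a far-past date while converting scales.
#
# >>> with quiet_erfa():
# ...     _ = Time(-1e6, format='jd', scale='utc').tai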
def assert_almost_equal(a, b, *, rtol=None, atol=None, label=''):
"""Assert numbers are almost equal.
This version also lets hypothesis know how far apart the inputs are, so
that it can work towards a failure and present the worst failure ever seen
as well as the simplest, which often just barely exceeds the threshold.
"""
__tracebackhide__ = True
if rtol is None or rtol == 0:
thresh = atol
elif atol is None:
thresh = rtol * (abs(a) + abs(b)) / 2
else:
thresh = atol + rtol * (abs(a) + abs(b)) / 2
amb = (a - b)
if isinstance(amb, TimeDelta):
ambv = amb.to_value(u.s)
target(ambv, label=label + " (a-b).to_value(u.s), from TimeDelta")
target(-ambv, label=label + " (b-a).to_value(u.s), from TimeDelta")
if isinstance(thresh, u.Quantity):
amb = amb.to(thresh.unit)
else:
try:
target_value = float(amb)
except TypeError:
pass
else:
target(target_value, label=label + " float(a-b)")
target(-target_value, label=label + " float(b-a)")
assert abs(amb) < thresh
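# Sketch of the intended call pattern (an assumption based on the tests below,
# not original text). Note that ``target`` may only be called inside a
# Hypothesis-driven test, so assert_almost_equal is meant for use under @given:
#
# @given(reasonable_jd())
# def test_addition_roundtrip_example(jds):
#     t = Time(*jds, format='jd', scale='tai')
#     assert_almost_equal(t + dt_tiny - dt_tiny, t, atol=2 * dt_tiny, rtol=0)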
# Days that end with leap seconds
# Some time scales use a so-called "leap smear" to cope with these, others
# have times they can't represent or can represent two different ways.
# In any case these days are liable to cause trouble in time conversions.
# Note that from_erfa includes some weird non-integer steps before 1970.
leap_second_table = iers.LeapSeconds.from_iers_leap_seconds()
# Days that contain leap_seconds
leap_second_days = leap_second_table["mjd"] - 1
leap_second_deltas = list(zip(leap_second_days[1:],
np.diff(leap_second_table["tai_utc"])))
today = Time.now()
mjd0 = Time(0, format="mjd")
def reasonable_ordinary_jd():
return tuples(floats(2440000, 2470000), floats(-0.5, 0.5))
@composite
def leap_second_tricky(draw):
mjd = draw(one_of(sampled_from(leap_second_days),
sampled_from(leap_second_days + 1),
sampled_from(leap_second_days - 1)))
return mjd + mjd0.jd1 + mjd0.jd2, draw(floats(0, 1))
def reasonable_jd():
"""Pick a reasonable JD.
These should be not too far in the past or future (so that date conversion
routines don't have to deal with anything too exotic), but they should
include leap second days as a special case, and they should include several
particularly simple cases (today, the beginning of the MJD scale, a
reasonable date) so that hypothesis' example simplification produces
obviously simple examples when they trigger problems.
"""
moments = [(2455000., 0.), (mjd0.jd1, mjd0.jd2), (today.jd1, today.jd2)]
return one_of(sampled_from(moments),
reasonable_ordinary_jd(),
leap_second_tricky())
def unreasonable_ordinary_jd():
"""JD pair that might be unordered or far away"""
return tuples(floats(-1e7, 1e7), floats(-1e7, 1e7))
def ordered_jd():
"""JD pair that is ordered but not necessarily near now"""
return tuples(floats(-1e7, 1e7), floats(-0.5, 0.5))
def unreasonable_jd():
return one_of(reasonable_jd(), ordered_jd(), unreasonable_ordinary_jd())
@composite
def jd_arrays(draw, jd_values):
s = draw(array_shapes())
d = np.dtype([("jd1", float), ("jd2", float)])
jdv = jd_values.map(lambda x: np.array(x, dtype=d))
a = draw(arrays(d, s, elements=jdv))
return a["jd1"], a["jd2"]
def unreasonable_delta():
return tuples(floats(-1e7, 1e7), floats(-1e7, 1e7))
def reasonable_delta():
return tuples(floats(-1e4, 1e4), floats(-0.5, 0.5))
# redundant?
def test_abs_jd2_always_less_than_half():
"""Make jd2 approach +/-0.5, and check that it doesn't go over."""
t1 = Time(2400000.5, [-tiny, +tiny], format='jd')
assert np.all(t1.jd1 % 1 == 0)
assert np.all(abs(t1.jd2) < 0.5)
t2 = Time(2400000., [[0.5 - tiny, 0.5 + tiny],
[-0.5 - tiny, -0.5 + tiny]], format='jd')
assert np.all(t2.jd1 % 1 == 0)
assert np.all(abs(t2.jd2) < 0.5)
@given(jd_arrays(unreasonable_jd()))
def test_abs_jd2_always_less_than_half_on_construction(jds):
jd1, jd2 = jds
t = Time(jd1, jd2, format="jd")
target(np.amax(np.abs(t.jd2)))
assert np.all(t.jd1 % 1 == 0)
assert np.all(abs(t.jd2) <= 0.5)
assert np.all((abs(t.jd2) < 0.5) | (t.jd1 % 2 == 0))
@given(integers(-10**8, 10**8), sampled_from([-0.5, 0.5]))
def test_round_to_even(jd1, jd2):
t = Time(jd1, jd2, format="jd")
assert (abs(t.jd2) == 0.5) and (t.jd1 % 2 == 0)
def test_addition():
"""Check that an addition at the limit of precision (2^-52) is seen"""
t = Time(2455555., 0.5, format='jd', scale='utc')
t_dt = t + dt_tiny
assert t_dt.jd1 == t.jd1 and t_dt.jd2 != t.jd2
# Check that the addition is exactly reversed by the corresponding
# subtraction
t2 = t_dt - dt_tiny
assert t2.jd1 == t.jd1 and t2.jd2 == t.jd2
def test_mult_div():
"""Test precision with multiply and divide"""
dt_small = 6 * dt_tiny
# pick a number that will leave remainder if divided by 6.
dt_big = TimeDelta(20000., format='jd')
dt_big_small_by_6 = (dt_big + dt_small) / 6.
dt_frac = dt_big_small_by_6 - TimeDelta(3333., format='jd')
assert allclose_jd2(dt_frac.jd2, 0.33333333333333354)
def test_init_variations():
"""Check that 3 ways of specifying a time + small offset are equivalent"""
dt_tiny_sec = dt_tiny.jd2 * 86400.
t1 = Time(1e11, format='cxcsec') + dt_tiny
t2 = Time(1e11, dt_tiny_sec, format='cxcsec')
t3 = Time(dt_tiny_sec, 1e11, format='cxcsec')
assert t1.jd1 == t2.jd1
assert t1.jd2 == t2.jd2
assert t1.jd1 == t3.jd1
assert t1.jd2 == t3.jd2
def test_precision_exceeds_64bit():
"""
Check that Time object really holds more precision than float64 by looking
at the (naively) summed 64-bit result and asserting equality at the
bit level.
"""
t1 = Time(1.23456789e11, format='cxcsec')
t2 = t1 + dt_tiny
assert t1.jd == t2.jd
def test_through_scale_change():
"""Check that precision holds through scale change (cxcsec is TT)"""
t0 = Time(1.0, format='cxcsec')
t1 = Time(1.23456789e11, format='cxcsec')
dt_tt = t1 - t0
dt_tai = t1.tai - t0.tai
assert allclose_jd(dt_tt.jd1, dt_tai.jd1)
assert allclose_jd2(dt_tt.jd2, dt_tai.jd2)
def test_iso_init():
"""Check when initializing from ISO date"""
t1 = Time('2000:001:00:00:00.00000001', scale='tai')
t2 = Time('3000:001:13:00:00.00000002', scale='tai')
dt = t2 - t1
assert allclose_jd2(dt.jd2, 13. / 24. + 1e-8 / 86400. - 1.0)
def test_jd1_is_mult_of_one():
"""
Check that jd1 is a multiple of 1.
"""
t1 = Time('2000:001:00:00:00.00000001', scale='tai')
assert np.round(t1.jd1) == t1.jd1
t1 = Time(1.23456789, 12345678.90123456, format='jd', scale='tai')
assert np.round(t1.jd1) == t1.jd1
def test_precision_neg():
"""
Check precision when jd1 is negative. This used to fail because ERFA
routines use a test like jd1 > jd2 to decide which component to update.
It was updated to abs(jd1) > abs(jd2) in erfa 1.6 (sofa 20190722).
"""
t1 = Time(-100000.123456, format='jd', scale='tt')
assert np.round(t1.jd1) == t1.jd1
t1_tai = t1.tai
assert np.round(t1_tai.jd1) == t1_tai.jd1
def test_precision_epoch():
"""
Check that input via epoch also has full precision, i.e., against
regression on https://github.com/astropy/astropy/pull/366
"""
t_utc = Time(range(1980, 2001), format='jyear', scale='utc')
t_tai = Time(range(1980, 2001), format='jyear', scale='tai')
dt = t_utc - t_tai
assert allclose_sec(dt.sec, np.round(dt.sec))
def test_leap_seconds_rounded_correctly():
"""Regression tests against #2083, where a leap second was rounded
incorrectly by the underlying ERFA routine."""
with iers.conf.set_temp('auto_download', False):
t = Time(['2012-06-30 23:59:59.413',
'2012-07-01 00:00:00.413'], scale='ut1', precision=3).utc
assert np.all(t.iso == np.array(['2012-06-30 23:59:60.000',
'2012-07-01 00:00:00.000']))
# with the bug, both yielded '2012-06-30 23:59:60.000'
@given(integers(-2**52+2, 2**52-2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_two_sum(i, f):
with decimal.localcontext(decimal.Context(prec=40)):
a = Decimal(i) + Decimal(f)
s, r = two_sum(i, f)
b = Decimal(s) + Decimal(r)
assert_almost_equal(a, b, atol=Decimal(tiny), rtol=Decimal(0))
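# Worked example of what two_sum guarantees (added for illustration; values
# can be checked by hand): 1.0 + 2**-53 rounds to 1.0 in float64, and two_sum
# returns the rounded sum together with the exact low-order remainder.
#
# >>> two_sum(1.0, 2**-53)
# (1.0, 1.1102230246251565e-16)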
# The bounds are here since we want to be sure the sum does not go to
# infinity; when it overflows, the result does not have to be completely
# symmetric. E.g., this used to fail:
# @example(f1=-3.089785075544792e307, f2=1.7976931348623157e308)
# See https://github.com/astropy/astropy/issues/12955#issuecomment-1186293703
@given(floats(min_value=np.finfo(float).min/2, max_value=np.finfo(float).max/2),
floats(min_value=np.finfo(float).min/2, max_value=np.finfo(float).max/2))
def test_two_sum_symmetric(f1, f2):
np.testing.assert_equal(two_sum(f1, f2), two_sum(f2, f1))
@given(floats(allow_nan=False, allow_infinity=False),
floats(allow_nan=False, allow_infinity=False))
@example(f1=8.988465674311579e+307, f2=8.98846567431158e+307)
@example(f1=8.988465674311579e+307, f2=-8.98846567431158e+307)
@example(f1=-8.988465674311579e+307, f2=-8.98846567431158e+307)
def test_two_sum_size(f1, f2):
r1, r2 = two_sum(f1, f2)
assert (abs(r1) > abs(r2) / np.finfo(float).eps
or r1 == r2 == 0
or not np.isfinite(f1 + f2))
@given(integers(-2**52+2, 2**52-2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_day_frac_harmless(i, f):
with decimal.localcontext(decimal.Context(prec=40)):
a = Decimal(i) + Decimal(f)
i_d, f_d = day_frac(i, f)
a_d = Decimal(i_d) + Decimal(f_d)
assert_almost_equal(a, a_d, atol=Decimal(tiny), rtol=Decimal(0))
@given(integers(-2**52+2, 2**52-2), floats(-0.5, 0.5))
@example(i=65536, f=3.637978807091714e-12)
@example(i=1, f=0.49999999999999994)
def test_day_frac_exact(i, f):
assume(abs(f) < 0.5 or i % 2 == 0)
i_d, f_d = day_frac(i, f)
assert i == i_d
assert f == f_d
@given(integers(-2**52+2, 2**52-2), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_day_frac_idempotent(i, f):
i_d, f_d = day_frac(i, f)
assert (i_d, f_d) == day_frac(i_d, f_d)
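# Worked example of the day_frac normalization exercised above (added for
# illustration): the pair is rearranged into an integral day and a fraction
# with abs(frac) <= 0.5.
#
# >>> day_frac(2400000.5, 0.75)
# (2400001.0, 0.25)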
@given(integers(-2**52+2, 2**52-int(erfa.DJM0)-3), floats(-1, 1))
@example(i=65536, f=3.637978807091714e-12)
def test_mjd_initialization_precise(i, f):
t = Time(val=i, val2=f, format="mjd", scale="tai")
jd1, jd2 = day_frac(i + erfa.DJM0, f)
jd1_t, jd2_t = day_frac(t.jd1, t.jd2)
assert (abs((jd1 - jd1_t) + (jd2 - jd2_t)) * u.day).to(u.ns) < 1 * u.ns
@given(jd_arrays(unreasonable_jd()))
def test_day_frac_always_less_than_half(jds):
jd1, jd2 = jds
t_jd1, t_jd2 = day_frac(jd1, jd2)
assert np.all(t_jd1 % 1 == 0)
assert np.all(abs(t_jd2) <= 0.5)
assert np.all((abs(t_jd2) < 0.5) | (t_jd1 % 2 == 0))
@given(integers(-10**8, 10**8), sampled_from([-0.5, 0.5]))
def test_day_frac_round_to_even(jd1, jd2):
t_jd1, t_jd2 = day_frac(jd1, jd2)
assert (abs(t_jd2) == 0.5) and (t_jd1 % 2 == 0)
@given(scale=sampled_from([sc for sc in STANDARD_TIME_SCALES if sc != 'utc']),
jds=unreasonable_jd())
@example(scale="tai", jds=(0.0, 0.0))
@example(scale="tai", jds=(0.0, -31738.500000000346))
def test_resolution_never_decreases(scale, jds):
jd1, jd2 = jds
t = Time(jd1, jd2, format="jd", scale=scale)
with quiet_erfa():
assert t != t + dt_tiny
@given(reasonable_jd())
@example(jds=(2442777.5, 0.9999999999999999))
def test_resolution_never_decreases_utc(jds):
"""UTC is very unhappy with unreasonable times,
Unlike for the other timescales, in which addition is done
directly, here the time is transformed to TAI before addition, and
then back to UTC. Hence, some rounding errors can occur and only
a change of 2*dt_tiny is guaranteed to give a different time.
"""
jd1, jd2 = jds
t = Time(jd1, jd2, format="jd", scale="utc")
with quiet_erfa():
assert t != t + 2*dt_tiny
@given(scale1=sampled_from(STANDARD_TIME_SCALES),
scale2=sampled_from(STANDARD_TIME_SCALES),
jds=unreasonable_jd())
@example(scale1='tcg', scale2='ut1', jds=(2445149.5, 0.47187700984387526))
@example(scale1='tai', scale2='tcb', jds=(2441316.5, 0.0))
@example(scale1='tai', scale2='tcb', jds=(0.0, 0.0))
def test_conversion_preserves_jd1_jd2_invariant(iers_b, scale1, scale2, jds):
jd1, jd2 = jds
t = Time(jd1, jd2, scale=scale1, format="jd")
try:
with quiet_erfa():
t2 = getattr(t, scale2)
except iers.IERSRangeError: # UT1 conversion needs IERS data
assume(False)
except ErfaError:
assume(False)
assert t2.jd1 % 1 == 0
assert abs(t2.jd2) <= 0.5
assert abs(t2.jd2) < 0.5 or t2.jd1 % 2 == 0
@given(scale1=sampled_from(STANDARD_TIME_SCALES),
scale2=sampled_from(STANDARD_TIME_SCALES),
jds=unreasonable_jd())
@example(scale1='tai', scale2='utc', jds=(0.0, 0.0))
@example(scale1='utc', scale2='ut1', jds=(2441316.5, 0.9999999999999991))
@example(scale1='ut1', scale2='tai', jds=(2441498.5, 0.9999999999999999))
def test_conversion_never_loses_precision(iers_b, scale1, scale2, jds):
"""Check that time ordering remains if we convert to another scale.
Here, since scale differences can involve multiplication, we allow
for losing one ULP, i.e., we test that two times that differ by
two ULP will keep the same order if changed to another scale.
"""
jd1, jd2 = jds
t = Time(jd1, jd2, scale=scale1, format="jd")
# Near-zero UTC JDs degrade accuracy; not clear why,
# but also not so relevant, so ignoring.
if (scale1 == 'utc' or scale2 == 'utc') and abs(jd1+jd2) < 1:
tiny = 100*u.us
else:
tiny = 2*dt_tiny
try:
with quiet_erfa():
t2 = t + tiny
t_scale2 = getattr(t, scale2)
t2_scale2 = getattr(t2, scale2)
assert t_scale2 < t2_scale2
except iers.IERSRangeError: # UT1 conversion needs IERS data
assume(scale1 != 'ut1' or 2440000 < jd1 + jd2 < 2458000)
assume(scale2 != 'ut1' or 2440000 < jd1 + jd2 < 2458000)
raise
except ErfaError:
# If the generated date is too early to compute a UTC julian date,
# and we're not converting between scales which are known to be safe,
# tell Hypothesis that this example is invalid and to try another.
# See https://docs.astropy.org/en/latest/time/index.html#time-scale
barycentric = {scale1, scale2}.issubset({'tcb', 'tdb'})
geocentric = {scale1, scale2}.issubset({'tai', 'tt', 'tcg'})
assume(jd1 + jd2 >= -31738.5 or geocentric or barycentric)
raise
except AssertionError:
# Before 1972, TAI-UTC changed smoothly but not always very
# consistently; this can cause trouble on day boundaries for UTC to
# UT1; it is not clear whether this will ever be resolved (and is
# unlikely ever to matter).
# Furthermore, exactly at leap-second boundaries, it is possible to
# get the wrong leap-second correction due to rounding errors.
# The latter is xfail'd for now, but should be fixed; see gh-13517.
if 'ut1' in (scale1, scale2):
if abs(t_scale2 - t2_scale2 - 1 * u.s) < 1*u.ms:
pytest.xfail()
assume(t.jd > 2441317.5 or t.jd2 < 0.4999999)
raise
@given(sampled_from(leap_second_deltas), floats(0.1, 0.9))
def test_leap_stretch_mjd(d, f):
mjd, delta = d
t0 = Time(mjd, format="mjd", scale="utc")
th = Time(mjd + f, format="mjd", scale="utc")
t1 = Time(mjd + 1, format="mjd", scale="utc")
assert_quantity_allclose((t1 - t0).to(u.s), (1 * u.day + delta * u.s))
assert_quantity_allclose((th - t0).to(u.s), f * (1 * u.day + delta * u.s))
assert_quantity_allclose((t1 - th).to(u.s), (1 - f) * (1 * u.day + delta * u.s))
@given(scale=sampled_from(STANDARD_TIME_SCALES),
jds=unreasonable_jd(),
delta=floats(-10000, 10000))
@example(scale='utc',
jds=(0.0, 2.2204460492503136e-13),
delta=6.661338147750941e-13)
@example(scale='utc',
jds=(2441682.5, 2.2204460492503136e-16),
delta=7.327471962526035e-12)
@example(scale='utc', jds=(0.0, 5.787592627370942e-13), delta=0.0)
@example(scale='utc', jds=(1.0, 0.25000000023283064), delta=-1.0)
@example(scale='utc', jds=(0.0, 0.0), delta=2*2.220446049250313e-16)
@example(scale='utc', jds=(2442778.5, 0.0), delta=-2.220446049250313e-16)
def test_jd_add_subtract_round_trip(scale, jds, delta):
jd1, jd2 = jds
minimum_for_change = np.finfo(float).eps
thresh = 2*dt_tiny
if scale == 'utc':
if jd1+jd2 < 1 or jd1+jd2+delta < 1:
# Near-zero UTC JDs degrade accuracy; not clear why,
# but also not so relevant, so ignoring.
minimum_for_change = 1e-9
thresh = minimum_for_change * u.day
else:
# UTC goes via TAI, so one can lose an extra bit.
minimum_for_change *= 2
t = Time(jd1, jd2, scale=scale, format="jd")
try:
with quiet_erfa():
t2 = t + delta*u.day
if abs(delta) >= minimum_for_change:
assert t2 != t
t3 = t2 - delta*u.day
assert_almost_equal(t3, t, atol=thresh, rtol=0)
except ErfaError:
assume(scale != 'utc' or 2440000 < jd1+jd2 < 2460000)
raise
@given(scale=sampled_from(TimeDelta.SCALES),
jds=reasonable_jd(),
delta=floats(-3*tiny, 3*tiny))
@example(scale='tai', jds=(0.0, 3.5762786865234384), delta=2.220446049250313e-16)
@example(scale='tai', jds=(2441316.5, 0.0), delta=6.938893903907228e-17)
@example(scale='tai', jds=(2441317.5, 0.0), delta=-6.938893903907228e-17)
@example(scale='tai', jds=(2440001.0, 0.49999999999999994), delta=5.551115123125783e-17)
def test_time_argminmaxsort(scale, jds, delta):
jd1, jd2 = jds
t = (Time(jd1, jd2, scale=scale, format="jd")
+ TimeDelta([0, delta], scale=scale, format='jd'))
imin = t.argmin()
imax = t.argmax()
isort = t.argsort()
# Be careful in constructing diff, for case that abs(jd2[1]-jd2[0]) ~ 1.
# and that is compensated by jd1[1]-jd1[0] (see example above).
diff, extra = two_sum(t.jd2[1], -t.jd2[0])
diff += t.jd1[1]-t.jd1[0]
diff += extra
if diff < 0: # item 1 smaller
assert delta < 0
assert imin == 1 and imax == 0 and np.all(isort == [1, 0])
elif diff == 0: # identical within precision
assert abs(delta) <= tiny
assert imin == 0 and imax == 0 and np.all(isort == [0, 1])
else:
assert delta > 0
assert imin == 0 and imax == 1 and np.all(isort == [0, 1])
@given(sampled_from(STANDARD_TIME_SCALES), unreasonable_jd(), unreasonable_jd())
@example(scale='utc',
jds_a=(2455000.0, 0.0),
jds_b=(2443144.5, 0.5000462962962965))
@example(scale='utc',
jds_a=(2459003.0, 0.267502885949074),
jds_b=(2454657.001045462, 0.49895453779026877))
def test_timedelta_full_precision(scale, jds_a, jds_b):
jd1_a, jd2_a = jds_a
jd1_b, jd2_b = jds_b
assume(scale != 'utc'
or (2440000 < jd1_a+jd2_a < 2460000
and 2440000 < jd1_b+jd2_b < 2460000))
if scale == 'utc':
# UTC subtraction implies a scale change, so possible rounding errors.
tiny = 2 * dt_tiny
else:
tiny = dt_tiny
t_a = Time(jd1_a, jd2_a, scale=scale, format="jd")
t_b = Time(jd1_b, jd2_b, scale=scale, format="jd")
dt = t_b - t_a
assert dt != (t_b + tiny) - t_a
with quiet_erfa():
assert_almost_equal(t_b-dt/2, t_a+dt/2, atol=2*dt_tiny, rtol=0,
label="midpoint")
assert_almost_equal(t_b+dt, t_a+2*dt, atol=2*dt_tiny, rtol=0, label="up")
assert_almost_equal(t_b-2*dt, t_a-dt, atol=2*dt_tiny, rtol=0, label="down")
@given(scale=sampled_from(STANDARD_TIME_SCALES),
jds_a=unreasonable_jd(),
jds_b=unreasonable_jd(),
x=integers(1, 100),
y=integers(1, 100))
def test_timedelta_full_precision_arithmetic(scale, jds_a, jds_b, x, y):
jd1_a, jd2_a = jds_a
jd1_b, jd2_b = jds_b
t_a = Time(jd1_a, jd2_a, scale=scale, format="jd")
t_b = Time(jd1_b, jd2_b, scale=scale, format="jd")
with quiet_erfa():
try:
dt = t_b - t_a
dt_x = x*dt/(x+y)
dt_y = y*dt/(x+y)
assert_almost_equal(dt_x + dt_y, dt, atol=(x+y)*dt_tiny, rtol=0)
except ErfaError:
assume(scale != 'utc'
or (2440000 < jd1_a+jd2_a < 2460000
and 2440000 < jd1_b+jd2_b < 2460000))
raise
@given(scale1=sampled_from(STANDARD_TIME_SCALES),
scale2=sampled_from(STANDARD_TIME_SCALES),
jds_a=reasonable_jd(),
jds_b=reasonable_jd())
def test_timedelta_conversion(scale1, scale2, jds_a, jds_b):
jd1_a, jd2_a = jds_a
jd1_b, jd2_b = jds_b
# not translation invariant so can't convert TimeDelta
assume('utc' not in [scale1, scale2])
# Conversions a problem but within UT1 it should work
assume(('ut1' not in [scale1, scale2]) or scale1 == scale2)
t_a = Time(jd1_a, jd2_a, scale=scale1, format="jd")
t_b = Time(jd1_b, jd2_b, scale=scale2, format="jd")
with quiet_erfa():
dt = t_b - t_a
t_a_2 = getattr(t_a, scale2)
t_b_2 = getattr(t_b, scale2)
dt_2 = getattr(dt, scale2)
assert_almost_equal(t_b_2 - t_a_2, dt_2, atol=dt_tiny, rtol=0,
label="converted")
# Implicit conversion
assert_almost_equal(t_b_2 - t_a_2, dt, atol=dt_tiny, rtol=0,
label="not converted")
# UTC disagrees when there are leap seconds
_utc_bad = [(pytest.param(s, marks=pytest.mark.xfail) if s == 'utc' else s)
for s in STANDARD_TIME_SCALES]
@given(datetimes(), datetimes()) # datetimes have microsecond resolution
@example(dt1=datetime(1235, 1, 1, 0, 0),
dt2=datetime(9950, 1, 1, 0, 0, 0, 890773))
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_difference_agrees_with_timedelta(scale, dt1, dt2):
t1 = Time(dt1, scale=scale)
t2 = Time(dt2, scale=scale)
assert_almost_equal(t2-t1,
TimeDelta(dt2-dt1,
scale=None if scale == 'utc' else scale),
atol=2*u.us)
@given(days=integers(-3000*365, 3000*365),
microseconds=integers(0, 24*60*60*1000000))
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_to_timedelta(scale, days, microseconds):
td = timedelta(days=days, microseconds=microseconds)
assert (TimeDelta(td, scale=scale)
== TimeDelta(days, microseconds/(86400*1e6), scale=scale, format="jd"))
@given(days=integers(-3000*365, 3000*365),
microseconds=integers(0, 24*60*60*1000000))
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_timedelta_roundtrip(scale, days, microseconds):
td = timedelta(days=days, microseconds=microseconds)
assert td == TimeDelta(td, scale=scale).value
@given(days=integers(-3000*365, 3000*365), day_frac=floats(0, 1))
@example(days=262144, day_frac=2.314815006343452e-11)
@example(days=1048576, day_frac=1.157407503171726e-10)
@pytest.mark.parametrize("scale", _utc_bad)
def test_timedelta_datetime_roundtrip(scale, days, day_frac):
td = TimeDelta(days, day_frac, format="jd", scale=scale)
td.format = "datetime"
assert_almost_equal(td, TimeDelta(td.value, scale=scale), atol=2*u.us)
@given(integers(-3000*365, 3000*365), floats(0, 1))
@example(days=262144, day_frac=2.314815006343452e-11)
@pytest.mark.parametrize("scale", _utc_bad)
def test_timedelta_from_parts(scale, days, day_frac):
kwargs = dict(format="jd", scale=scale)
whole = TimeDelta(days, day_frac, **kwargs)
from_parts = TimeDelta(days, **kwargs) + TimeDelta(day_frac, **kwargs)
assert whole == from_parts
def test_datetime_difference_agrees_with_timedelta_no_hypothesis():
scale = "tai"
dt1 = datetime(1235, 1, 1, 0, 0)
dt2 = datetime(9950, 1, 1, 0, 0, 0, 890773)
t1 = Time(dt1, scale=scale)
t2 = Time(dt2, scale=scale)
assert abs((t2 - t1) - TimeDelta(dt2 - dt1, scale=scale)) < 1 * u.us
# datetimes have microsecond resolution
@given(datetimes(), timedeltas())
@example(dt=datetime(2000, 1, 1, 0, 0),
td=timedelta(days=-397683, microseconds=2))
@example(dt=datetime(2179, 1, 1, 0, 0),
td=timedelta(days=-795365, microseconds=53))
@example(dt=datetime(2000, 1, 1, 0, 0),
td=timedelta(days=1590729, microseconds=10))
@example(dt=datetime(4357, 1, 1, 0, 0),
td=timedelta(days=-1590729, microseconds=107770))
@example(dt=datetime(4357, 1, 1, 0, 0, 0, 29),
td=timedelta(days=-1590729, microseconds=746292))
@pytest.mark.parametrize("scale", _utc_bad)
def test_datetime_timedelta_sum(scale, dt, td):
try:
dt + td
except OverflowError:
assume(False)
dt_a = Time(dt, scale=scale)
td_a = TimeDelta(td, scale=None if scale == 'utc' else scale)
assert_almost_equal(dt_a+td_a, Time(dt+td, scale=scale), atol=2*u.us)
@given(jds=reasonable_jd(),
lat1=floats(-90, 90),
lat2=floats(-90, 90),
lon=floats(-180, 180))
@pytest.mark.parametrize("kind", ["apparent", "mean"])
def test_sidereal_lat_independent(iers_b, kind, jds, lat1, lat2, lon):
jd1, jd2 = jds
t1 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat1))
t2 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat2))
try:
assert_almost_equal(t1.sidereal_time(kind),
t2.sidereal_time(kind),
atol=1*u.uas)
except iers.IERSRangeError:
assume(False)
@given(jds=reasonable_jd(),
lat=floats(-90, 90),
lon=floats(-180, 180),
lon_delta=floats(-360, 360))
@pytest.mark.parametrize("kind", ["apparent", "mean"])
def test_sidereal_lon_independent(iers_b, kind, jds, lat, lon, lon_delta):
jd1, jd2 = jds
t1 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon, lat))
t2 = Time(jd1, jd2, scale="ut1", format="jd", location=(lon+lon_delta, lat))
try:
diff = t1.sidereal_time(kind) + lon_delta*u.degree - t2.sidereal_time(kind)
except iers.IERSRangeError:
assume(False)
else:
expected_degrees = (diff.to_value(u.degree) + 180) % 360
assert_almost_equal(expected_degrees, 180, atol=1/(60*60*1000))
|
76bf47115989e59ded51c87331046f7e1fa703f4debae4568916f2a9782effaf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting to/from ITRS, TEME, GCRS, and CIRS.
These are distinct from the ICRS and AltAz functions because they are just
rotations without aberration corrections or offsets.
"""
import numpy as np
import erfa
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference
from astropy.coordinates.matrix_utilities import matrix_transpose
from .icrs import ICRS
from .gcrs import GCRS, PrecessedGeocentric
from .cirs import CIRS
from .itrs import ITRS
from .equatorial import TEME, TETE
from .utils import get_polar_motion, get_jd12, EARTH_CENTER
# first define helper functions
def teme_to_itrs_mat(time):
# Sidereal time, rotates from ITRS to mean equinox
# Use 1982 model for consistency with Vallado et al (2006)
# http://www.celestrak.com/publications/aiaa/2006-6753/AIAA-2006-6753.pdf
gst = erfa.gmst82(*get_jd12(time, 'ut1'))
# Polar Motion
# Do not include TIO locator s' because it is not used in Vallado 2006
xp, yp = get_polar_motion(time)
pmmat = erfa.pom00(xp, yp, 0)
# rotation matrix
# c2tcio expects a GCRS->CIRS matrix as its first argument.
# Here, we just set that to an I-matrix, because we're already
# in TEME and the difference between TEME and CIRS is just the
# rotation by the sidereal time rather than the Earth Rotation Angle
return erfa.c2tcio(np.eye(3), gst, pmmat)
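# Illustrative sketch (added for clarity, not original source): this matrix is
# what the TEME<->ITRS transforms below apply, e.g. to rotate a satellite
# position obtained from SGP4 (which works in TEME) into ITRS:
#
# >>> from astropy.time import Time
# >>> from astropy.coordinates import CartesianRepresentation
# >>> import astropy.units as u
# >>> t = Time('2020-01-01')
# >>> p_teme = CartesianRepresentation([7000, 0, 0] * u.km)
# >>> p_itrs = p_teme.transform(teme_to_itrs_mat(t))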
def gcrs_to_cirs_mat(time):
# celestial-to-intermediate matrix
return erfa.c2i06a(*get_jd12(time, 'tt'))
def cirs_to_itrs_mat(time):
# compute the polar motion p-matrix
xp, yp = get_polar_motion(time)
sp = erfa.sp00(*get_jd12(time, 'tt'))
pmmat = erfa.pom00(xp, yp, sp)
# now determine the Earth Rotation Angle for the input obstime
# era00 accepts UT1, so we convert if need be
era = erfa.era00(*get_jd12(time, 'ut1'))
# c2tcio expects a GCRS->CIRS matrix, but we just set that to an I-matrix
# because we're already in CIRS
return erfa.c2tcio(np.eye(3), era, pmmat)
def tete_to_itrs_mat(time, rbpn=None):
"""Compute the polar motion p-matrix at the given time.
If the nutation-precession matrix is already known, it should be passed in,
as this is by far the most expensive calculation.
"""
xp, yp = get_polar_motion(time)
sp = erfa.sp00(*get_jd12(time, 'tt'))
pmmat = erfa.pom00(xp, yp, sp)
# now determine the greenwich apparent sidereal time for the input obstime
# we use the 2006A model for consistency with RBPN matrix use in GCRS <-> TETE
ujd1, ujd2 = get_jd12(time, 'ut1')
jd1, jd2 = get_jd12(time, 'tt')
if rbpn is None:
# erfa.gst06a calls pnm06a to calculate rbpn and then gst06. Use it in
# favour of getting rbpn with erfa.pnm06a to avoid a possibly large array.
gast = erfa.gst06a(ujd1, ujd2, jd1, jd2)
else:
gast = erfa.gst06(ujd1, ujd2, jd1, jd2, rbpn)
# c2tcio expects a GCRS->CIRS matrix, but we just set that to an I-matrix
# because we're already in a CIRS-equivalent frame
return erfa.c2tcio(np.eye(3), gast, pmmat)
def gcrs_precession_mat(equinox):
gamb, phib, psib, epsa = erfa.pfw06(*get_jd12(equinox, 'tt'))
return erfa.fw2m(gamb, phib, psib, epsa)
def get_location_gcrs(location, obstime, ref_to_itrs, gcrs_to_ref):
"""Create a GCRS frame at the location and obstime.
The reference frame z axis must point to the Celestial Intermediate Pole
(as is the case for CIRS and TETE).
This function is here to avoid location.get_gcrs(obstime), which would
recalculate matrices that are already available below (and return a GCRS
coordinate, rather than a frame with obsgeoloc and obsgeovel). Instead,
it uses the private method that allows passing in the matrices.
"""
obsgeoloc, obsgeovel = location._get_gcrs_posvel(obstime,
ref_to_itrs, gcrs_to_ref)
return GCRS(obstime=obstime, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
# now the actual transforms
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, TETE)
def gcrs_to_tete(gcrs_coo, tete_frame):
# Classical NPB matrix, IAU 2006/2000A
# (same as in builtin_frames.utils.get_cip).
rbpn = erfa.pnm06a(*get_jd12(tete_frame.obstime, 'tt'))
# Get GCRS coordinates for the target observer location and time.
loc_gcrs = get_location_gcrs(tete_frame.location, tete_frame.obstime,
tete_to_itrs_mat(tete_frame.obstime, rbpn=rbpn),
rbpn)
gcrs_coo2 = gcrs_coo.transform_to(loc_gcrs)
# Now we are relative to the correct observer, do the transform to TETE.
# These rotations are defined at the geocenter, but can be applied to
# topocentric positions as well, assuming rigid Earth. See p57 of
# https://www.usno.navy.mil/USNO/astronomical-applications/publications/Circular_179.pdf
crepr = gcrs_coo2.cartesian.transform(rbpn)
return tete_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, TETE, GCRS)
def tete_to_gcrs(tete_coo, gcrs_frame):
# Compute the pn matrix, and then multiply by its transpose.
rbpn = erfa.pnm06a(*get_jd12(tete_coo.obstime, 'tt'))
newrepr = tete_coo.cartesian.transform(matrix_transpose(rbpn))
# We now have a GCRS vector for the input location and obstime.
# Turn it into a GCRS frame instance.
loc_gcrs = get_location_gcrs(tete_coo.location, tete_coo.obstime,
tete_to_itrs_mat(tete_coo.obstime, rbpn=rbpn),
rbpn)
gcrs = loc_gcrs.realize_frame(newrepr)
# Finally, do any needed offsets (no-op if same obstime and location)
return gcrs.transform_to(gcrs_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, TETE, ITRS)
def tete_to_itrs(tete_coo, itrs_frame):
# first get us to TETE at the target obstime, and location (no-op if same)
tete_coo2 = tete_coo.transform_to(TETE(obstime=itrs_frame.obstime,
location=itrs_frame.location))
# now get the pmatrix
pmat = tete_to_itrs_mat(itrs_frame.obstime)
crepr = tete_coo2.cartesian.transform(pmat)
return itrs_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, TETE)
def itrs_to_tete(itrs_coo, tete_frame):
# compute the pmatrix, and then multiply by its transpose
pmat = tete_to_itrs_mat(itrs_coo.obstime)
newrepr = itrs_coo.cartesian.transform(matrix_transpose(pmat))
tete = TETE(newrepr, obstime=itrs_coo.obstime, location=itrs_coo.location)
# now do any needed offsets (no-op if same obstime and location)
return tete.transform_to(tete_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, CIRS)
def gcrs_to_cirs(gcrs_coo, cirs_frame):
# first get the pmatrix
pmat = gcrs_to_cirs_mat(cirs_frame.obstime)
# Get GCRS coordinates for the target observer location and time.
loc_gcrs = get_location_gcrs(cirs_frame.location, cirs_frame.obstime,
cirs_to_itrs_mat(cirs_frame.obstime), pmat)
gcrs_coo2 = gcrs_coo.transform_to(loc_gcrs)
# Now we are relative to the correct observer, do the transform to CIRS.
crepr = gcrs_coo2.cartesian.transform(pmat)
return cirs_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, GCRS)
def cirs_to_gcrs(cirs_coo, gcrs_frame):
# Compute the pmatrix, and then multiply by its transpose,
pmat = gcrs_to_cirs_mat(cirs_coo.obstime)
newrepr = cirs_coo.cartesian.transform(matrix_transpose(pmat))
# We now have a GCRS vector for the input location and obstime.
# Turn it into a GCRS frame instance.
loc_gcrs = get_location_gcrs(cirs_coo.location, cirs_coo.obstime,
cirs_to_itrs_mat(cirs_coo.obstime), pmat)
gcrs = loc_gcrs.realize_frame(newrepr)
# Finally, do any needed offsets (no-op if same obstime and location)
return gcrs.transform_to(gcrs_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ITRS)
def cirs_to_itrs(cirs_coo, itrs_frame):
# first get us to CIRS at the target obstime, and location (no-op if same)
cirs_coo2 = cirs_coo.transform_to(CIRS(obstime=itrs_frame.obstime,
location=itrs_frame.location))
# now get the pmatrix
pmat = cirs_to_itrs_mat(itrs_frame.obstime)
crepr = cirs_coo2.cartesian.transform(pmat)
return itrs_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, CIRS)
def itrs_to_cirs(itrs_coo, cirs_frame):
# compute the pmatrix, and then multiply by its transpose
pmat = cirs_to_itrs_mat(itrs_coo.obstime)
newrepr = itrs_coo.cartesian.transform(matrix_transpose(pmat))
cirs = CIRS(newrepr, obstime=itrs_coo.obstime, location=itrs_coo.location)
# now do any needed offsets (no-op if same obstime and location)
return cirs.transform_to(cirs_frame)
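# Illustrative round-trip sketch using the two transforms above (an assumed
# usage example, not original source): an ITRS position should survive
# ITRS -> CIRS -> ITRS at a fixed obstime.
#
# >>> from astropy.time import Time
# >>> from astropy.coordinates import ITRS, CIRS
# >>> import astropy.units as u
# >>> t = Time('J2010')
# >>> itrs = ITRS(x=6378 * u.km, y=0 * u.km, z=0 * u.km, obstime=t)
# >>> back = itrs.transform_to(CIRS(obstime=t)).transform_to(ITRS(obstime=t))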
# TODO: implement GCRS<->CIRS if there's call for it. The thing that's awkward
# is that they both have obstimes, so an extra set of transformations would be
# necessary. So unless there's a specific need for that, it is better to just
# have it go through the above two steps anyway.
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, PrecessedGeocentric)
def gcrs_to_precessedgeo(from_coo, to_frame):
# first get us to GCRS with the right attributes (might be a no-op)
gcrs_coo = from_coo.transform_to(GCRS(obstime=to_frame.obstime,
obsgeoloc=to_frame.obsgeoloc,
obsgeovel=to_frame.obsgeovel))
# now precess to the requested equinox
pmat = gcrs_precession_mat(to_frame.equinox)
crepr = gcrs_coo.cartesian.transform(pmat)
return to_frame.realize_frame(crepr)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, PrecessedGeocentric, GCRS)
def precessedgeo_to_gcrs(from_coo, to_frame):
# first un-precess
pmat = gcrs_precession_mat(from_coo.equinox)
crepr = from_coo.cartesian.transform(matrix_transpose(pmat))
gcrs_coo = GCRS(crepr,
obstime=from_coo.obstime,
obsgeoloc=from_coo.obsgeoloc,
obsgeovel=from_coo.obsgeovel)
# then move to the GCRS that's actually desired
return gcrs_coo.transform_to(to_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, TEME, ITRS)
def teme_to_itrs(teme_coo, itrs_frame):
# use the pmatrix to transform to ITRS in the source obstime
pmat = teme_to_itrs_mat(teme_coo.obstime)
crepr = teme_coo.cartesian.transform(pmat)
itrs = ITRS(crepr, obstime=teme_coo.obstime)
# transform the ITRS coordinate to the target obstime
return itrs.transform_to(itrs_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ITRS, TEME)
def itrs_to_teme(itrs_coo, teme_frame):
# transform the ITRS coordinate to the target obstime
itrs_coo2 = itrs_coo.transform_to(ITRS(obstime=teme_frame.obstime))
# compute the pmatrix, and then multiply by its transpose
pmat = teme_to_itrs_mat(teme_frame.obstime)
newrepr = itrs_coo2.cartesian.transform(matrix_transpose(pmat))
return teme_frame.realize_frame(newrepr)
# Create loopback transformations
frame_transform_graph._add_merged_transform(ITRS, CIRS, ITRS)
frame_transform_graph._add_merged_transform(PrecessedGeocentric, GCRS, PrecessedGeocentric)
frame_transform_graph._add_merged_transform(TEME, ITRS, TEME)
frame_transform_graph._add_merged_transform(TETE, ICRS, TETE)
|
92df141d882084b5ff657d5f580f3036f76e6b86624271a567952867dec9511f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains the coordinate frames implemented by astropy.
Users shouldn't use this module directly, but rather import from the
`astropy.coordinates` module. While it is likely to exist for the long-term,
the existence of this package and details of its organization should be
considered an implementation detail, and is not guaranteed to hold for future
versions of astropy.
Notes
-----
The builtin frame classes are all imported automatically into this package's
namespace, so there's no need to access the sub-modules directly.
To implement a new frame in Astropy, a developer should add the frame as a new
module in this package. Any "self" transformations (i.e., those that transform
from one frame to another frame of the same class) should be included in that
module. Transformation functions connecting the new frame to other frames
should be in a separate module, which should be imported in this package's
``__init__.py`` to ensure the transformations are hooked up when this package is
imported. Placing the transformation functions in separate modules avoids
circular dependencies, because they need references to the frame classes.
"""
from .baseradec import BaseRADecFrame
from .icrs import ICRS
from .fk5 import FK5
from .fk4 import FK4, FK4NoETerms
from .galactic import Galactic
from .galactocentric import Galactocentric, galactocentric_frame_defaults
from .supergalactic import Supergalactic
from .altaz import AltAz
from .hadec import HADec
from .gcrs import GCRS, PrecessedGeocentric
from .cirs import CIRS
from .itrs import ITRS
from .hcrs import HCRS
from .equatorial import TEME, TETE
from .ecliptic import * # there are a lot of these so we don't list them all explicitly
from .skyoffset import SkyOffsetFrame
# need to import transformations so that they get registered in the graph
from . import icrs_fk5_transforms
from . import fk4_fk5_transforms
from . import galactic_transforms
from . import supergalactic_transforms
from . import icrs_cirs_transforms
from . import cirs_observed_transforms
from . import icrs_observed_transforms
from . import itrs_observed_transforms
from . import intermediate_rotation_transforms
from . import ecliptic_transforms
# Import this after importing other frames, since this requires various
# transformations to set up the LSR frames
from .lsr import LSR, GalacticLSR, LSRK, LSRD
from astropy.coordinates.baseframe import frame_transform_graph
# we define an __all__ because otherwise the transformation modules
# get included
__all__ = ['ICRS', 'FK5', 'FK4', 'FK4NoETerms', 'Galactic', 'Galactocentric',
'galactocentric_frame_defaults',
'Supergalactic', 'AltAz', 'HADec', 'GCRS', 'CIRS', 'ITRS', 'HCRS',
'TEME', 'TETE', 'PrecessedGeocentric', 'GeocentricMeanEcliptic',
'BarycentricMeanEcliptic', 'HeliocentricMeanEcliptic',
'GeocentricTrueEcliptic', 'BarycentricTrueEcliptic',
'HeliocentricTrueEcliptic',
'SkyOffsetFrame', 'GalacticLSR', 'LSR', 'LSRK', 'LSRD',
'BaseEclipticFrame', 'BaseRADecFrame', 'make_transform_graph_docs',
'HeliocentricEclipticIAU76', 'CustomBarycentricEcliptic']
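# A minimal usage sketch (added for illustration; as noted above, users should
# import from ``astropy.coordinates`` rather than from this package directly):
#
# >>> import astropy.units as u
# >>> from astropy.coordinates import SkyCoord
# >>> c = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg, frame='icrs')
# >>> c.galactic  # uses a transformation registered by the imports above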
def make_transform_graph_docs(transform_graph):
"""
Generates a string that can be used in other docstrings to include a
transformation graph, showing the available transforms and
coordinate systems.
Parameters
----------
transform_graph : `~.coordinates.TransformGraph`
Returns
-------
docstring : str
A string that can be added to the end of a docstring to show the
transform graph.
"""
from textwrap import dedent
coosys = [transform_graph.lookup_name(item) for
item in transform_graph.get_names()]
# currently, all of the priorities are set to 1, so we don't need to show
# them in the transform graph.
graphstr = transform_graph.to_dot_graph(addnodes=coosys,
priorities=False)
docstr = """
The diagram below shows all of the built in coordinate systems,
their aliases (useful for converting other coordinates to them using
attribute-style access) and the pre-defined transformations between
them. The user is free to override any of these transformations by
defining new transformations between these systems, but the
pre-defined transformations should be sufficient for typical usage.
The color of an edge in the graph (i.e. the transformations between two
frames) is set by the type of transformation; the legend box defines the
mapping from transform class name to color.
.. Wrap the graph in a div with a custom class to allow theming.
.. container:: frametransformgraph
.. graphviz::
"""
docstr = dedent(docstr) + ' ' + graphstr.replace('\n', '\n ')
# colors are in dictionary at the bottom of transformations.py
from astropy.coordinates.transformations import trans_to_color
html_list_items = []
for cls, color in trans_to_color.items():
block = f"""
<li style='list-style: none;'>
<p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;">
<b>{cls.__name__}:</b>
<span style="font-size: 24px; color: {color};"><b>➝</b></span>
</p>
</li>
"""
html_list_items.append(block)
nl = '\n'
graph_legend = f"""
.. raw:: html
<ul>
{nl.join(html_list_items)}
</ul>
"""
docstr = docstr + dedent(graph_legend)
return docstr
_transform_graph_docs = make_transform_graph_docs(frame_transform_graph)
# Here, we override the module docstring so that sphinx renders the transform
# graph without the developer documentation in the main docstring above.
__doc__ = _transform_graph_docs
|