# Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
from astropy.io import ascii
from .common import (assert_equal, assert_almost_equal)
def assert_equal_splitlines(arg1, arg2):
assert_equal(arg1.splitlines(), arg2.splitlines())
def test_read_normal():
"""Normal SimpleRST Table"""
table = """
# comment (with blank line above)
======= =========
Col1 Col2
======= =========
1.2 "hello"
2.4 's worlds
======= =========
"""
reader = ascii.get_reader(Reader=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames, ['Col1', 'Col2'])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds")
def test_read_normal_names():
"""Normal SimpleRST Table with provided column names"""
table = """
# comment (with blank line above)
======= =========
Col1 Col2
======= =========
1.2 "hello"
2.4 's worlds
======= =========
"""
reader = ascii.get_reader(Reader=ascii.RST,
names=('name1', 'name2'))
dat = reader.read(table)
assert_equal(dat.colnames, ['name1', 'name2'])
assert_almost_equal(dat[1][0], 2.4)
def test_read_normal_names_include():
"""Normal SimpleRST Table with provided column names"""
table = """
# comment (with blank line above)
======= ========== ======
Col1 Col2 Col3
======= ========== ======
1.2 "hello" 3
2.4 's worlds 7
======= ========== ======
"""
reader = ascii.get_reader(Reader=ascii.RST,
names=('name1', 'name2', 'name3'),
include_names=('name1', 'name3'))
dat = reader.read(table)
assert_equal(dat.colnames, ['name1', 'name3'])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], 3)
def test_read_normal_exclude():
"""Nice, typical SimpleRST table with col name excluded"""
table = """
======= ==========
Col1 Col2
======= ==========
1.2 "hello"
2.4 's worlds
======= ==========
"""
reader = ascii.get_reader(Reader=ascii.RST,
exclude_names=('Col1',))
dat = reader.read(table)
assert_equal(dat.colnames, ['Col2'])
assert_equal(dat[1][0], "'s worlds")
def test_read_unbounded_right_column():
"""The right hand column should be allowed to overflow"""
table = """
# comment (with blank line above)
===== ===== ====
Col1 Col2 Col3
===== ===== ====
1.2 2 Hello
2.4 4 Worlds
===== ===== ====
"""
reader = ascii.get_reader(Reader=ascii.RST)
dat = reader.read(table)
assert_equal(dat[0][2], "Hello")
assert_equal(dat[1][2], "Worlds")
def test_read_unbounded_right_column_header():
"""The right hand column should be allowed to overflow"""
table = """
# comment (with blank line above)
===== ===== ====
Col1 Col2 Col3Long
===== ===== ====
1.2 2 Hello
2.4 4 Worlds
===== ===== ====
"""
reader = ascii.get_reader(Reader=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames[-1], "Col3Long")
def test_read_right_indented_table():
"""We should be able to read right indented tables correctly"""
table = """
# comment (with blank line above)
==== ==== ====
Col1 Col2 Col3
==== ==== ====
3 3.4 foo
1 4.5 bar
==== ==== ====
"""
reader = ascii.get_reader(Reader=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames, ["Col1", "Col2", "Col3"])
assert_equal(dat[0][2], "foo")
assert_equal(dat[1][0], 1)
def test_trailing_spaces_in_row_definition():
""" Trailing spaces in the row definition column shouldn't matter"""
table = (
"\n"
"# comment (with blank line above)\n"
" ==== ==== ==== \n"
" Col1 Col2 Col3\n"
" ==== ==== ==== \n"
" 3 3.4 foo\n"
" 1 4.5 bar\n"
" ==== ==== ==== \n"
)
# make sure no one accidentally deletes the trailing whitespaces in the
# table.
assert len(table) == 151
reader = ascii.get_reader(Reader=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames, ["Col1", "Col2", "Col3"])
assert_equal(dat[0][2], "foo")
assert_equal(dat[1][0], 1)
table = """\
====== =========== ============ ===========
Col1 Col2 Col3 Col4
====== =========== ============ ===========
1.2 "hello" 1 a
2.4 's worlds 2 2
====== =========== ============ ===========
"""
dat = ascii.read(table, Reader=ascii.RST)
def test_write_normal():
"""Write a table as a normal SimpleRST Table"""
out = StringIO()
ascii.write(dat, out, Writer=ascii.RST)
assert_equal_splitlines(out.getvalue(), """\
==== ========= ==== ====
Col1 Col2 Col3 Col4
==== ========= ==== ====
1.2 "hello" 1 a
2.4 's worlds 2 2
==== ========= ==== ====
""")
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
import numpy as np
from astropy.io import ascii
from .common import assert_equal
def test_types_from_dat():
converters = {'a': [ascii.convert_numpy(float)],
'e': [ascii.convert_numpy(str)]}
dat = ascii.read(['a b c d e', '1 1 cat 2.1 4.2'],
Reader=ascii.Basic,
converters=converters)
assert dat['a'].dtype.kind == 'f'
assert dat['b'].dtype.kind == 'i'
assert dat['c'].dtype.kind in ('S', 'U')
assert dat['d'].dtype.kind == 'f'
assert dat['e'].dtype.kind in ('S', 'U')
def test_rdb_write_types():
dat = ascii.read(['a b c d', '1 1.0 cat 2.1'],
Reader=ascii.Basic)
out = StringIO()
ascii.write(dat, out, Writer=ascii.Rdb)
outs = out.getvalue().splitlines()
assert_equal(outs[1], 'N\tN\tS\tN')
def test_ipac_read_types():
table = r"""\
| ra | dec | sai |-----v2---| sptype |
| real | float | l | real | char |
| unit | unit | unit | unit | ergs |
| null | null | null | null | -999 |
2.09708 2956 73765 2.06000 B8IVpMnHg
"""
reader = ascii.get_reader(Reader=ascii.Ipac)
reader.read(table)
types = [ascii.FloatType,
ascii.FloatType,
ascii.IntType,
ascii.FloatType,
ascii.StrType]
for (col, expected_type) in zip(reader.cols, types):
assert_equal(col.type, expected_type)
def test_col_dtype_in_custom_class():
"""Test code in BaseOutputter._convert_vals to handle Column.dtype
attribute. See discussion in #11895."""
dtypes = [np.float32, np.int8, np.int16]
class TestDtypeHeader(ascii.BasicHeader):
def get_cols(self, lines):
super().get_cols(lines)
for col, dtype in zip(self.cols, dtypes):
col.dtype = dtype
class TestDtype(ascii.Basic):
"""
Basic table Data Reader with data type alternating float32, int8
"""
header_class = TestDtypeHeader
txt = """
a b c
1 2 3
"""
reader = ascii.get_reader(TestDtype)
t = reader.read(txt)
for col, dtype in zip(t.itercols(), dtypes):
assert col.dtype.type is dtype
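# A short sketch of the converters mechanism tested above: convert_numpy(t)
# yields a converter that forces a column to numpy type t on read (assumes
# only astropy and numpy, as imported above).
if __name__ == '__main__':
    dat = ascii.read(['a b', '1 2'],
                     converters={'a': [ascii.convert_numpy(np.float64)]})
    assert dat['a'].dtype.kind == 'f'   # forced to float
    assert dat['b'].dtype.kind == 'i'   # inferred as int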
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
# This file connects ASDF to the astropy.table.Table class
import functools
from astropy.io import registry as io_registry
from astropy.table import Table
def read_table(filename, data_key=None, find_table=None, **kwargs):
"""
Read a `~astropy.table.Table` object from an ASDF file
This requires `asdf <https://pypi.org/project/asdf/>`_ to be installed.
By default, this function will look for a Table object with the key of
``data`` in the top-level ASDF tree. The parameters ``data_key`` and
    ``find_table`` can be used to override the default behavior.
This function is registered as the Table reader for ASDF files with the
unified I/O interface.
Parameters
----------
    filename : str or :class:`py.path:local`
Name of the file to be read
data_key : str
Optional top-level key to use for finding the Table in the tree. If not
provided, uses ``data`` by default. Use of this parameter is not
compatible with ``find_table``.
find_table : function
Optional function to be used for locating the Table in the tree. The
function takes a single parameter, which is a dictionary representing
the top of the ASDF tree. The function must return a
`~astropy.table.Table` instance.
Returns
-------
table : `~astropy.table.Table`
`~astropy.table.Table` instance
"""
    try:
        import asdf
    except ImportError:
        raise ImportError(
            "The asdf module is required to read and write ASDF files")
if data_key and find_table:
raise ValueError("Options 'data_key' and 'find_table' are not compatible")
with asdf.open(filename, **kwargs) as af:
if find_table:
return find_table(af.tree)
else:
return af[data_key or 'data']
def write_table(table, filename, data_key=None, make_tree=None, **kwargs):
"""
Write a `~astropy.table.Table` object to an ASDF file.
This requires `asdf <https://pypi.org/project/asdf/>`_ to be installed.
By default, this function will write a Table object in the top-level ASDF
tree using the key of ``data``. The parameters ``data_key`` and
``make_tree`` can be used to override the default behavior.
This function is registered as the Table writer for ASDF files with the
unified I/O interface.
Parameters
----------
table : `~astropy.table.Table`
`~astropy.table.Table` instance to be written
filename : str or :class:`py.path:local`
Name of the new ASDF file to be created
data_key : str
Optional top-level key in the ASDF tree to use when writing the Table.
If not provided, uses ``data`` by default. Use of this parameter is not
compatible with ``make_tree``.
make_tree : function
Optional function to be used for creating the ASDF tree. The function
takes a single parameter, which is the `~astropy.table.Table` instance
to be written. The function must return a `dict` representing the ASDF
tree to be created.
"""
    try:
        import asdf
    except ImportError:
        raise ImportError(
            "The asdf module is required to read and write ASDF files")
if data_key and make_tree:
raise ValueError("Options 'data_key' and 'make_tree' are not compatible")
if make_tree:
tree = make_tree(table)
else:
        tree = {data_key or 'data': table}
with asdf.AsdfFile(tree) as af:
af.write_to(filename, **kwargs)
def asdf_identify(origin, filepath, fileobj, *args, **kwargs):
try:
import asdf
except ImportError:
return False
return filepath is not None and filepath.endswith('.asdf')
io_registry.register_reader('asdf', Table, read_table)
io_registry.register_writer('asdf', Table, write_table)
io_registry.register_identifier('asdf', Table, asdf_identify)
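# Usage sketch for the connectors registered above (requires asdf; the file
# names are illustrative):
if __name__ == '__main__':
    t = Table({'a': [1, 2, 3]})
    t.write('example.asdf')                        # stored under the default 'data' key
    t.write('example2.asdf', data_key='mytable')   # custom top-level key
    t2 = Table.read('example2.asdf', data_key='mytable')
    assert all(t2['a'] == [1, 2, 3])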
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf.types import CustomType, ExtensionTypeMeta
__all__ = ['AstropyType', 'AstropyAsdfType']
# Names of AstropyType or AstropyAsdfType subclasses that are base classes
# and aren't used directly for serialization.
_TYPE_BASE_CLASS_NAMES = {'PolynomialTypeBase'}
_astropy_types = set()
_astropy_asdf_types = set()
class AstropyTypeMeta(ExtensionTypeMeta):
"""
Keeps track of `AstropyType` subclasses that are created so that they can
be stored automatically by astropy extensions for ASDF.
"""
def __new__(mcls, name, bases, attrs):
cls = super().__new__(mcls, name, bases, attrs)
# Classes using this metaclass are automatically added to the list of
# astropy extensions
if cls.__name__ not in _TYPE_BASE_CLASS_NAMES:
if cls.organization == 'astropy.org' and cls.standard == 'astropy':
_astropy_types.add(cls)
elif cls.organization == 'stsci.edu' and cls.standard == 'asdf':
_astropy_asdf_types.add(cls)
return cls
class AstropyType(CustomType, metaclass=AstropyTypeMeta):
"""
This class represents types that have schemas and tags that are defined by
Astropy.
IMPORTANT: This parent class should **not** be used for types that have
schemas that are defined by the ASDF standard.
"""
organization = 'astropy.org'
standard = 'astropy'
class AstropyAsdfType(CustomType, metaclass=AstropyTypeMeta):
"""
This class represents types that have schemas that are defined in the ASDF
standard, but have tags that are implemented within astropy.
IMPORTANT: This parent class should **not** be used for types that also
have schemas that are defined by astropy.
"""
organization = 'stsci.edu'
standard = 'asdf'
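# A minimal sketch of the bookkeeping above: any concrete subclass of
# AstropyType is collected at class-creation time by AstropyTypeMeta. The
# class below is purely illustrative (hypothetical tag name, no real schema).
if __name__ == '__main__':
    class ExampleType(AstropyType):
        name = 'example/example-thing'   # hypothetical tag name
        version = '1.0.0'

    assert ExampleType in _astropy_types   # registered by the metaclass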
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import os
from asdf.extension import AsdfExtension, BuiltinExtension
from asdf.util import filepath_to_url
# Make sure that all tag implementations are imported by the time we create
# the extension class so that _astropy_asdf_types is populated correctly. We
# could do this using __init__ files, except it causes pytest import errors in
# the case that asdf is not installed.
from .tags.coordinates.angle import * # noqa
from .tags.coordinates.frames import * # noqa
from .tags.coordinates.earthlocation import * # noqa
from .tags.coordinates.skycoord import * # noqa
from .tags.coordinates.representation import * # noqa
from .tags.coordinates.spectralcoord import * # noqa
from .tags.fits.fits import * # noqa
from .tags.table.table import * # noqa
from .tags.time.time import * # noqa
from .tags.time.timedelta import * # noqa
from .tags.transform.basic import * # noqa
from .tags.transform.compound import * # noqa
from .tags.transform.functional_models import * # noqa
from .tags.transform.physical_models import * # noqa
from .tags.transform.math import * # noqa
from .tags.transform.polynomial import * # noqa
from .tags.transform.powerlaws import * # noqa
from .tags.transform.projections import * # noqa
from .tags.transform.spline import * # noqa
from .tags.transform.tabular import * # noqa
from .tags.unit.quantity import * # noqa
from .tags.unit.unit import * # noqa
from .tags.unit.equivalency import * # noqa
from .types import _astropy_types, _astropy_asdf_types
__all__ = ['AstropyExtension', 'AstropyAsdfExtension']
ASTROPY_SCHEMA_URI_BASE = 'http://astropy.org/schemas/'
SCHEMA_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'data', 'schemas'))
ASTROPY_URL_MAPPING = [
(ASTROPY_SCHEMA_URI_BASE,
filepath_to_url(
os.path.join(SCHEMA_PATH, 'astropy.org')) +
'/{url_suffix}.yaml')]
# This extension is used to register custom types that have both tags and
# schemas defined by Astropy.
class AstropyExtension(AsdfExtension):
@property
def types(self):
return _astropy_types
@property
def tag_mapping(self):
return [('tag:astropy.org:astropy',
ASTROPY_SCHEMA_URI_BASE + 'astropy{tag_suffix}')]
@property
def url_mapping(self):
return ASTROPY_URL_MAPPING
# This extension is used to register custom tag types that have schemas defined
# by ASDF, but have tag implementations defined in astropy.
class AstropyAsdfExtension(BuiltinExtension):
@property
def types(self):
return _astropy_asdf_types
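# A sketch of how the mappings above are used: tag_mapping rewrites a YAML
# tag into a schema id, and url_mapping then points that id at a local
# schema file. The tag below is the astropy Table tag; the string arithmetic
# mirrors what asdf does internally.
if __name__ == '__main__':
    ext = AstropyExtension()
    prefix, template = ext.tag_mapping[0]
    tag = 'tag:astropy.org:astropy/table/table-1.0.0'
    schema_id = template.format(tag_suffix=tag[len(prefix):])
    print(schema_id)   # http://astropy.org/schemas/astropy/table/table-1.0.0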
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file connects the readers/writers to the astropy.table.Table class
import functools
import os.path
from astropy.utils.misc import NOT_OVERWRITING_MSG
from astropy.table import Table
import astropy.io.registry as io_registry
__all__ = ['PANDAS_FMTS']
# Astropy users normally expect to not have an index, so default to turn
# off writing the index. This structure allows for astropy-specific
# customization of all options.
PANDAS_FMTS = {'csv': {'read': {},
'write': {'index': False}},
'fwf': {'read': {}}, # No writer
'html': {'read': {},
'write': {'index': False}},
'json': {'read': {},
'write': {}}}
PANDAS_PREFIX = 'pandas.'
# Imports for reading HTML
_IMPORTS = False
_HAS_BS4 = False
_HAS_LXML = False
_HAS_HTML5LIB = False
def import_html_libs():
"""Try importing dependencies for reading HTML.
This is copied from pandas.io.html
"""
# import things we need
# but make this done on a first use basis
global _IMPORTS
if _IMPORTS:
return
global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
from astropy.utils.compat.optional_deps import (
HAS_BS4 as _HAS_BS4,
HAS_LXML as _HAS_LXML,
HAS_HTML5LIB as _HAS_HTML5LIB
)
_IMPORTS = True
def _pandas_read(fmt, filespec, **kwargs):
"""Provide io Table connector to read table using pandas.
"""
try:
import pandas
except ImportError:
raise ImportError('pandas must be installed to use pandas table reader')
pandas_fmt = fmt[len(PANDAS_PREFIX):] # chop the 'pandas.' in front
read_func = getattr(pandas, 'read_' + pandas_fmt)
# Get defaults and then override with user-supplied values
read_kwargs = PANDAS_FMTS[pandas_fmt]['read'].copy()
read_kwargs.update(kwargs)
# Special case: pandas defaults to HTML lxml for reading, but does not attempt
# to fall back to bs4 + html5lib. So do that now for convenience if user has
# not specifically selected a flavor. If things go wrong the pandas exception
# with instruction to install a library will come up.
if pandas_fmt == 'html' and 'flavor' not in kwargs:
import_html_libs()
if (not _HAS_LXML and _HAS_HTML5LIB and _HAS_BS4):
read_kwargs['flavor'] = 'bs4'
df = read_func(filespec, **read_kwargs)
# Special case for HTML
if pandas_fmt == 'html':
df = df[0]
return Table.from_pandas(df)
def _pandas_write(fmt, tbl, filespec, overwrite=False, **kwargs):
"""Provide io Table connector to write table using pandas.
"""
pandas_fmt = fmt[len(PANDAS_PREFIX):] # chop the 'pandas.' in front
# Get defaults and then override with user-supplied values
write_kwargs = PANDAS_FMTS[pandas_fmt]['write'].copy()
write_kwargs.update(kwargs)
df = tbl.to_pandas()
write_method = getattr(df, 'to_' + pandas_fmt)
if not overwrite:
try: # filespec is not always a path-like
exists = os.path.exists(filespec)
except TypeError: # skip invalid arguments
pass
else:
if exists: # only error if file already exists
raise OSError(NOT_OVERWRITING_MSG.format(filespec))
return write_method(filespec, **write_kwargs)
for pandas_fmt, defaults in PANDAS_FMTS.items():
fmt = PANDAS_PREFIX + pandas_fmt # Full format specifier
if 'read' in defaults:
func = functools.partial(_pandas_read, fmt)
io_registry.register_reader(fmt, Table, func)
if 'write' in defaults:
func = functools.partial(_pandas_write, fmt)
io_registry.register_writer(fmt, Table, func)
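# Usage sketch for the connectors registered above (requires pandas):
if __name__ == '__main__':
    from io import StringIO

    t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
    buf = StringIO()
    t.write(buf, format='pandas.csv')    # index dropped per the defaults above
    buf.seek(0)
    t2 = Table.read(buf, format='pandas.csv')
    assert t.colnames == t2.colnames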
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
import pytest
import numpy as np
from astropy.io import ascii
from astropy.table import Table, QTable
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io.misc.pandas import connect
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
# Check dependencies
pandas = pytest.importorskip("pandas")
connect.import_html_libs()
HAS_HTML_DEPS = connect._HAS_LXML or (connect._HAS_BS4 and connect._HAS_HTML5LIB)
WRITE_FMTS = [fmt for fmt in connect.PANDAS_FMTS
if 'write' in connect.PANDAS_FMTS[fmt]]
@pytest.mark.parametrize('fmt', WRITE_FMTS)
def test_read_write_format(fmt):
"""
Test round-trip through pandas write/read for supported formats.
:param fmt: format name, e.g. csv, html, json
:return:
"""
    # Skip the HTML round-trip test when the reading dependencies are missing
if fmt == 'html' and not HAS_HTML_DEPS:
pytest.skip('Missing lxml or bs4 + html5lib for HTML read/write test')
pandas_fmt = 'pandas.' + fmt
# Explicitly provide dtype to avoid casting 'a' to int32.
# See https://github.com/astropy/astropy/issues/8682
t = Table([[1, 2, 3], [1.0, 2.5, 5.0], ['a', 'b', 'c']],
dtype=(np.int64, np.float64, str))
buf = StringIO()
t.write(buf, format=pandas_fmt)
buf.seek(0)
t2 = Table.read(buf, format=pandas_fmt)
assert t.colnames == t2.colnames
assert np.all(t == t2)
@pytest.mark.parametrize('fmt', WRITE_FMTS)
def test_write_overwrite(tmpdir, fmt):
"""Test overwriting."""
tmpfile = tmpdir.join('test.' + fmt).strpath
pandas_fmt = 'pandas.' + fmt
# Explicitly provide dtype to avoid casting 'a' to int32.
# See https://github.com/astropy/astropy/issues/8682
t = Table([[1, 2, 3], [1.0, 2.5, 5.0], ['a', 'b', 'c']],
dtype=(np.int64, np.float64, str))
    # works when the file does not exist
t.write(tmpfile, format=pandas_fmt)
# fails when cannot overwrite
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(tmpfile, format=pandas_fmt, overwrite=False)
# passes when it can
t.write(tmpfile, format=pandas_fmt, overwrite=True)
def test_read_fixed_width_format():
"""Test reading with pandas read_fwf()
"""
tbl = """\
a b c
1 2.0 a
2 3.0 b"""
buf = StringIO()
buf.write(tbl)
# Explicitly provide converters to avoid casting 'a' to int32.
# See https://github.com/astropy/astropy/issues/8682
t = Table.read(tbl, format='ascii', guess=False,
converters={'a': [ascii.convert_numpy(np.int64)]})
buf.seek(0)
t2 = Table.read(buf, format='pandas.fwf')
assert t.colnames == t2.colnames
assert np.all(t == t2)
def test_write_with_mixins():
"""Writing a table with mixins just drops them via to_pandas()
This also tests passing a kwarg to pandas read and write.
"""
sc = SkyCoord([1, 2], [3, 4], unit='deg')
q = [5, 6] * u.m
qt = QTable([[1, 2], q, sc], names=['i', 'q', 'sc'])
buf = StringIO()
qt.write(buf, format='pandas.csv', sep=' ')
exp = ['i q sc.ra sc.dec',
'1 5.0 1.0 3.0',
'2 6.0 2.0 4.0']
assert buf.getvalue().splitlines() == exp
# Read it back
buf.seek(0)
qt2 = Table.read(buf, format='pandas.csv', sep=' ')
# Explicitly provide converters to avoid casting 'i' to int32.
# See https://github.com/astropy/astropy/issues/8682
exp_t = ascii.read(exp, converters={'i': [ascii.convert_numpy(np.int64)]})
assert qt2.colnames == exp_t.colnames
assert np.all(qt2 == exp_t)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.table import Table, QTable, NdarrayMixin, Column
from astropy.table.table_helpers import simple_table
from astropy import units as u
from astropy.coordinates import (SkyCoord, Latitude, Longitude, Angle, EarthLocation,
SphericalRepresentation, CartesianRepresentation,
SphericalCosLatDifferential)
from astropy.io.misc.parquet import parquet_identify, get_pyarrow
from astropy.time import Time, TimeDelta
from astropy.units import allclose as quantity_allclose
from astropy.units.quantity import QuantityInfo
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy.utils.compat.optional_deps import HAS_PANDAS # noqa 401
# Skip all tests in this file if we cannot import pyarrow
pyarrow = pytest.importorskip("pyarrow")
ALL_DTYPES = [np.uint8, np.uint16, np.uint32, np.uint64, np.int8,
np.int16, np.int32, np.int64, np.float32, np.float64,
np.bool_, '|S3', 'U3']
def _default_values(dtype):
if dtype == np.bool_:
return [0, 1, 1]
elif dtype == '|S3':
return [b'abc', b'def', b'ghi']
elif dtype == 'U3':
return ['abc', 'def', 'ghi']
else:
return [1, 2, 3]
def test_read_write_simple(tmpdir):
"""Test writing/reading a simple parquet file."""
test_file = tmpdir.join('test.parquet')
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2['a'] == [1, 2, 3])
def test_read_write_existing(tmpdir):
"""Test writing an existing file without overwriting."""
test_file = tmpdir.join('test.parquet')
with open(test_file, 'w') as f: # create empty file
pass
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t1.write(test_file)
def test_read_write_existing_overwrite(tmpdir):
"""Test overwriting an existing file."""
test_file = tmpdir.join('test.parquet')
with open(test_file, 'w') as f: # create empty file
pass
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert np.all(t2['a'] == [1, 2, 3])
def test_read_fileobj(tmpdir):
"""Test reading a file object."""
test_file = tmpdir.join('test.parquet')
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file)
import io
with io.FileIO(test_file, mode='r') as input_file:
t2 = Table.read(input_file)
assert np.all(t2['a'] == [1, 2, 3])
def test_read_pathlikeobj(tmpdir):
"""Test reading a path-like object."""
test_file = tmpdir.join('test.parquet')
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file)
import pathlib
p = pathlib.Path(test_file)
t2 = Table.read(p)
assert np.all(t2['a'] == [1, 2, 3])
def test_read_wrong_fileobj():
"""Test reading an incorrect fileobject type."""
class FakeFile:
def not_read(self):
pass
f = FakeFile()
with pytest.raises(TypeError,
match="pyarrow can only open path-like or file-like objects."):
Table.read(f, format='parquet')
def test_identify_wrong_fileobj():
"""Test identifying an incorrect fileobj."""
class FakeFile:
def not_read(self):
pass
f = FakeFile()
assert not parquet_identify('test', 'test', f)
def test_identify_file_wrong_extension():
"""Test identifying an incorrect extension."""
assert not parquet_identify('test', 'test.notparquet', None)
def test_identify_file_correct_extension():
"""Test identifying an incorrect extension."""
assert parquet_identify('test', 'test.parquet', None)
assert parquet_identify('test', 'test.parq', None)
def test_identify_file_noobject_nopath():
"""Test running identify with no object or path."""
assert not parquet_identify('test', None, None)
def test_write_wrong_type():
"""Test writing to a filename of the wrong type."""
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(TypeError, match='should be a string'):
t1.write(1212, format='parquet')
@pytest.mark.parametrize(('dtype'), ALL_DTYPES)
def test_preserve_single_dtypes(tmpdir, dtype):
"""Test that round-tripping a single column preserves datatypes."""
test_file = tmpdir.join('test.parquet')
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name='a', data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2['a'] == values)
assert t2['a'].dtype == dtype
def test_preserve_all_dtypes(tmpdir):
"""Test that round-tripping preserves a table with all the datatypes."""
test_file = tmpdir.join('test.parquet')
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
def test_preserve_meta(tmpdir):
"""Test that writing/reading preserves metadata."""
test_file = tmpdir.join('test.parquet')
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['a'] = 1
t1.meta['b'] = 'hello'
t1.meta['c'] = 3.14159
t1.meta['d'] = True
t1.meta['e'] = np.array([1, 2, 3])
t1.write(test_file)
t2 = Table.read(test_file)
for key in t1.meta:
assert np.all(t1.meta[key] == t2.meta[key])
def test_preserve_serialized(tmpdir):
"""Test that writing/reading preserves unit/format/description."""
test_file = tmpdir.join('test.parquet')
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
def test_metadata_very_large(tmpdir):
"""Test that very large datasets work"""
test_file = tmpdir.join('test.parquet')
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.meta["meta_big"] = "0" * (2 ** 16 + 1)
t1.meta["meta_biggerstill"] = "0" * (2 ** 18)
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
def test_fail_meta_serialize(tmpdir):
"""Test that we cannot preserve objects in metadata."""
test_file = tmpdir.join('test.parquet')
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['f'] = str
with pytest.raises(Exception) as err:
t1.write(test_file)
assert "cannot represent an object" in str(err.value)
assert "<class 'str'>" in str(err.value)
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
"""Convenient routine to check objects and attributes match."""
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
        # Mixin info.meta can be None instead of an empty OrderedDict();
        # #6720 would fix this.
if attr == 'info.meta':
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-15)
else:
assert np.all(a1 == a2)
# Testing Parquet table read/write with mixins. This is mostly
# copied from HDF5/FITS mixin testing, and it might be good to unify it.
# Analogous tests also exist for ECSV.
el = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km)
el2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sr = SphericalRepresentation(
[0, 1]*u.deg, [2, 3]*u.deg, 1*u.kpc)
cr = CartesianRepresentation(
[0, 1]*u.pc, [4, 5]*u.pc, [8, 6]*u.pc)
sd = SphericalCosLatDifferential(
[0, 1]*u.mas/u.yr, [0, 1]*u.mas/u.yr, 10*u.km/u.s)
srd = SphericalRepresentation(sr, differentials=sd)
sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4',
obstime='J1990.5')
scd = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
obstime=['J1990.5', 'J1991.5'])
scdc = scd.copy()
scdc.representation_type = 'cartesian'
scpm = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
pm_ra_cosdec=[7, 8]*u.mas/u.yr, pm_dec=[9, 10]*u.mas/u.yr)
scpmrv = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
pm_ra_cosdec=[7, 8]*u.mas/u.yr, pm_dec=[9, 10]*u.mas/u.yr,
radial_velocity=[11, 12]*u.km/u.s)
scrv = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,pc',
radial_velocity=[11, 12]*u.km/u.s)
tm = Time([2450814.5, 2450815.5], format='jd', scale='tai', location=el)
# NOTE: in the test below the name of the column "x" for the Quantity is
# important since it tests the fix for #10215 (namespace clash, where "x"
# clashes with "el2.x").
mixin_cols = {
'tm': tm,
'dt': TimeDelta([1, 2] * u.day),
'sc': sc,
'scd': scd,
'scdc': scdc,
'scpm': scpm,
'scpmrv': scpmrv,
'scrv': scrv,
'x': [1, 2] * u.m,
'qdb': [10, 20] * u.dB(u.mW),
'qdex': [4.5, 5.5] * u.dex(u.cm/u.s**2),
'qmag': [21, 22] * u.ABmag,
'lat': Latitude([1, 2] * u.deg),
'lon': Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg),
'ang': Angle([1, 2] * u.deg),
'el2': el2,
'sr': sr,
'cr': cr,
'sd': sd,
'srd': srd,
}
time_attrs = ['value', 'shape', 'format', 'scale', 'location']
compare_attrs = {
'c1': ['data'],
'c2': ['data'],
'tm': time_attrs,
'dt': ['shape', 'value', 'format', 'scale'],
'sc': ['ra', 'dec', 'representation_type', 'frame.name'],
'scd': ['ra', 'dec', 'distance', 'representation_type', 'frame.name'],
'scdc': ['x', 'y', 'z', 'representation_type', 'frame.name'],
'scpm': ['ra', 'dec', 'distance', 'pm_ra_cosdec', 'pm_dec',
'representation_type', 'frame.name'],
'scpmrv': ['ra', 'dec', 'distance', 'pm_ra_cosdec', 'pm_dec',
'radial_velocity', 'representation_type', 'frame.name'],
'scrv': ['ra', 'dec', 'distance', 'radial_velocity', 'representation_type',
'frame.name'],
'x': ['value', 'unit'],
'qdb': ['value', 'unit'],
'qdex': ['value', 'unit'],
'qmag': ['value', 'unit'],
'lon': ['value', 'unit', 'wrap_angle'],
'lat': ['value', 'unit'],
'ang': ['value', 'unit'],
'el2': ['x', 'y', 'z', 'ellipsoid'],
'nd': ['x', 'y', 'z'],
'sr': ['lon', 'lat', 'distance'],
'cr': ['x', 'y', 'z'],
'sd': ['d_lon_coslat', 'd_lat', 'd_distance'],
'srd': ['lon', 'lat', 'distance', 'differentials.s.d_lon_coslat',
'differentials.s.d_lat', 'differentials.s.d_distance'],
}
def test_parquet_mixins_qtable_to_table(tmpdir):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = tmpdir.join('test_simple.parquet')
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format='parquet')
t2 = Table.read(filename, format='parquet')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_parquet_mixins_as_one(table_cls, tmpdir):
"""Test write/read all cols at once and validate intermediate column names"""
filename = tmpdir.join('test_simple.parquet')
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format='parquet')
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['history'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_parquet_mixins_per_column(table_cls, name_col, tmpdir):
"""Test write/read one col at a time and do detailed validation"""
filename = tmpdir.join('test_simple.parquet')
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'my description'
t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
if isinstance(t[name], NdarrayMixin):
pytest.xfail('NdarrayMixin not supported')
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format='parquet')
assert t.colnames == t2.colnames
for colname in t.colnames:
assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
def test_round_trip_masked_table_default(tmpdir):
"""Test round-trip of MaskedColumn through Parquet using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = tmpdir.join('test.parquet')
t = simple_table(masked=True) # int, float, and str cols with one masked element
t['c'] = [b'c', b'd', b'e']
t['c'].mask[1] = True
t.write(filename, format='parquet')
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_parquet_mixins_read_one_name(table_cls, tmpdir):
"""Test write all cols at once, and read one at a time."""
filename = tmpdir.join('test_simple.parquet')
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="parquet")
for name in names:
t2 = table_cls.read(filename, format='parquet', include_names=[name])
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['history'] == ['first', 'second', 'third']
assert t2.colnames == [name]
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_parquet_mixins_read_exclude_names(table_cls, tmpdir):
"""Test write all cols at once, and read all but one at a time."""
filename = tmpdir.join('test_simple.parquet')
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format='parquet', exclude_names=names[0: 5])
assert t.colnames[5:] == t2.colnames
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_parquet_mixins_read_no_columns(table_cls, tmpdir):
"""Test write all cols at once, and try to read no valid columns."""
filename = tmpdir.join('test_simple.parquet')
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="parquet")
with pytest.raises(ValueError, match='No include_names specified'):
t2 = table_cls.read(filename, format='parquet',
include_names=['not_a_column', 'also_not_a_column'])
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_parquet_mixins_read_schema(table_cls, tmpdir):
"""Test write all cols at once, and read the schema."""
filename = tmpdir.join('test_simple.parquet')
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet", schema_only=True)
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['history'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
assert len(t2) == 0
def test_parquet_filter(tmpdir):
"""Test reading a parquet file with a filter."""
filename = tmpdir.join('test_simple.parquet')
t1 = Table()
t1['a'] = Column(data=np.arange(100), dtype=np.int32)
t1['b'] = Column(data=np.arange(100, 0, -1), dtype=np.float64)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, filters=[('a', '<', 50)])
assert t2['a'].max() < 50
t2 = Table.read(filename, filters=[('b', '<', 50)])
assert t2['b'].max() < 50
def test_parquet_read_generic(tmpdir):
"""Test reading a generic parquet file."""
filename = tmpdir.join('test_generic.parq')
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
# Write the table generically via pyarrow.parquet
names = t1.dtype.names
type_list = [(name, pyarrow.from_numpy_dtype(t1[name].dtype.type))
for name in names]
schema = pyarrow.schema(type_list)
_, parquet, writer_version = get_pyarrow()
    # Use the writer version returned by get_pyarrow() (version '2.0' or
    # later) for full support of datatypes including uint32.
with parquet.ParquetWriter(filename, schema, version=writer_version) as writer:
arrays = [pyarrow.array(t1[name].data)
for name in names]
writer.write_table(pyarrow.Table.from_arrays(arrays, schema=schema))
with pytest.warns(AstropyUserWarning, match='No table::len'):
t2 = Table.read(filename)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
@pytest.mark.skipif('not HAS_PANDAS')
def test_parquet_read_pandas(tmpdir):
"""Test reading a pandas parquet file."""
filename = tmpdir.join('test_pandas.parq')
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
df = t1.to_pandas()
    # Use the writer version returned by get_pyarrow() (version '2.0' or
    # later) for full support of datatypes including uint32.
_, _, writer_version = get_pyarrow()
df.to_parquet(filename, version=writer_version)
with pytest.warns(AstropyUserWarning, match='No table::len'):
t2 = Table.read(filename)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
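# A minimal usage sketch of the reader/writer exercised above (requires
# pyarrow; the file name is illustrative):
if __name__ == '__main__':
    t = Table({'a': np.arange(5)})
    t.write('example.parquet', overwrite=True)
    t2 = Table.read('example.parquet', include_names=['a'])
    assert np.all(t2['a'] == np.arange(5))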
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to YAML serialization.
"""
from io import StringIO
import pytest
import numpy as np
from astropy.coordinates import (SkyCoord, EarthLocation, Angle, Longitude, Latitude,
SphericalRepresentation, UnitSphericalRepresentation,
CartesianRepresentation, SphericalCosLatDifferential,
SphericalDifferential, CartesianDifferential)
from astropy import units as u
from astropy.time import Time
from astropy.table import QTable, SerializedColumn
from astropy.coordinates.tests.test_representation import representation_equal
from astropy.io.misc.yaml import load, load_all, dump # noqa
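# The core API exercised below: dump(obj) serializes a supported astropy or
# numpy object to a YAML string and load(s) reconstructs it. A minimal
# illustrative round-trip (sketch):
#
#     s = dump(3. * u.m)   # YAML text using an astropy-specific tag
#     q = load(s)          # back to a Quantity equivalent to 3 m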
@pytest.mark.parametrize('c', [True, np.uint8(8), np.int16(4),
np.int32(1), np.int64(3), np.int64(2**63 - 1),
2.0, np.float64(),
3+4j, np.complex_(3 + 4j),
np.complex64(3 + 4j),
np.complex128(1. - 2**-52 + 1j * (1. - 2**-52))])
def test_numpy_types(c):
cy = load(dump(c))
assert c == cy
@pytest.mark.parametrize('c', [u.m, u.m / u.s, u.hPa, u.dimensionless_unscaled,
u.Unit('m, (cm, um)')])
def test_unit(c):
cy = load(dump(c))
if isinstance(c, (u.CompositeUnit, u.StructuredUnit)):
assert c == cy
else:
assert c is cy
@pytest.mark.parametrize('c', [u.Unit('bakers_dozen', 13*u.one),
u.def_unit('magic')])
def test_custom_unit(c):
s = dump(c)
with pytest.warns(u.UnitsWarning, match=f"'{c!s}' did not parse") as w:
cy = load(s)
assert len(w) == 1
assert isinstance(cy, u.UnrecognizedUnit)
assert str(cy) == str(c)
with u.add_enabled_units(c):
cy2 = load(s)
assert cy2 is c
@pytest.mark.parametrize('c', [
Angle('1 2 3', unit='deg'),
Longitude('1 2 3', unit='deg'),
Latitude('1 2 3', unit='deg'),
[[1], [3]] * u.m,
np.array([[1, 2], [3, 4]], order='F'),
np.array([[1, 2], [3, 4]], order='C'),
np.array([1, 2, 3, 4])[::2],
np.array([(1., 2), (3., 4)], dtype='f8,i4'), # array with structured dtype.
np.array((1., 2), dtype='f8,i4'), # array scalar with structured dtype.
np.array((1., 2), dtype='f8,i4')[()], # numpy void.
np.array((1., 2.), dtype='f8,f8') * u.s, # Quantity structured scalar.
[((1., 2., 3.), (4., 5., 6.)), # Quantity with structured unit.
((11., 12., 13.), (14., 15., 16.))] * u.Unit('m, m/s'),
np.array([((1., 2., 3.), (4., 5., 6.)),
((11., 12., 13.), (14., 15., 16.))],
dtype=[('p', '3f8'), ('v', '3f8')]) * u.Unit('m, m/s')
])
def test_ndarray_subclasses(c):
cy = load(dump(c))
assert np.all(c == cy)
assert c.shape == cy.shape
assert c.dtype == cy.dtype
assert type(c) is type(cy)
cc = 'C_CONTIGUOUS'
fc = 'F_CONTIGUOUS'
if c.flags[cc] or c.flags[fc]:
assert c.flags[cc] == cy.flags[cc]
assert c.flags[fc] == cy.flags[fc]
else:
# Original was not contiguous but round-trip version
# should be c-contig.
assert cy.flags[cc]
if hasattr(c, 'unit'):
assert c.unit == cy.unit
def compare_coord(c, cy):
assert c.shape == cy.shape
assert c.frame.name == cy.frame.name
assert list(c.get_frame_attr_names()) == list(cy.get_frame_attr_names())
for attr in c.get_frame_attr_names():
assert getattr(c, attr) == getattr(cy, attr)
assert (list(c.representation_component_names) ==
list(cy.representation_component_names))
for name in c.representation_component_names:
        assert np.all(getattr(c, name) == getattr(cy, name))
@pytest.mark.parametrize('frame', ['fk4', 'altaz'])
def test_skycoord(frame):
c = SkyCoord([[1, 2], [3, 4]], [[5, 6], [7, 8]],
unit='deg', frame=frame,
obstime=Time('2016-01-02'),
location=EarthLocation(1000, 2000, 3000, unit=u.km))
cy = load(dump(c))
compare_coord(c, cy)
@pytest.mark.parametrize('rep', [
CartesianRepresentation(1*u.m, 2.*u.m, 3.*u.m),
SphericalRepresentation([[1, 2], [3, 4]]*u.deg,
[[5, 6], [7, 8]]*u.deg,
10*u.pc),
UnitSphericalRepresentation(0*u.deg, 10*u.deg),
SphericalCosLatDifferential([[1.], [2.]]*u.mas/u.yr,
[4., 5.]*u.mas/u.yr,
[[[10]], [[20]]]*u.km/u.s),
CartesianDifferential([10, 20, 30]*u.km/u.s),
CartesianRepresentation(
[1, 2, 3]*u.m,
differentials=CartesianDifferential([10, 20, 30]*u.km/u.s)),
SphericalRepresentation(
[[1, 2], [3, 4]]*u.deg, [[5, 6], [7, 8]]*u.deg, 10*u.pc,
differentials={
's': SphericalDifferential([[0., 1.], [2., 3.]]*u.mas/u.yr,
[[4., 5.], [6., 7.]]*u.mas/u.yr,
10*u.km/u.s)})])
def test_representations(rep):
rrep = load(dump(rep))
assert np.all(representation_equal(rrep, rep))
def _get_time():
t = Time([[1], [2]], format='cxcsec',
location=EarthLocation(1000, 2000, 3000, unit=u.km))
t.format = 'iso'
t.precision = 5
t.delta_ut1_utc = np.array([[3.0], [4.0]])
t.delta_tdb_tt = np.array([[5.0], [6.0]])
t.out_subfmt = 'date_hm'
return t
def compare_time(t, ty):
assert type(t) is type(ty)
assert np.all(t == ty)
for attr in ('shape', 'jd1', 'jd2', 'format', 'scale', 'precision', 'in_subfmt',
'out_subfmt', 'location', 'delta_ut1_utc', 'delta_tdb_tt'):
assert np.all(getattr(t, attr) == getattr(ty, attr))
def test_time():
t = _get_time()
ty = load(dump(t))
compare_time(t, ty)
def test_timedelta():
t = _get_time()
dt = t - t + 0.1234556 * u.s
dty = load(dump(dt))
assert type(dt) is type(dty)
for attr in ('shape', 'jd1', 'jd2', 'format', 'scale'):
assert np.all(getattr(dt, attr) == getattr(dty, attr))
def test_serialized_column():
sc = SerializedColumn({'name': 'hello', 'other': 1, 'other2': 2.0})
scy = load(dump(sc))
assert sc == scy
def test_load_all():
t = _get_time()
unit = u.m / u.s
c = SkyCoord([[1, 2], [3, 4]], [[5, 6], [7, 8]],
unit='deg', frame='fk4',
obstime=Time('2016-01-02'),
location=EarthLocation(1000, 2000, 3000, unit=u.km))
# Make a multi-document stream
out = ('---\n' + dump(t)
+ '---\n' + dump(unit)
+ '---\n' + dump(c))
ty, unity, cy = list(load_all(out))
compare_time(t, ty)
compare_coord(c, cy)
assert unity == unit
def test_ecsv_astropy_objects_in_meta():
"""
Test that astropy core objects in ``meta`` are serialized.
"""
t = QTable([[1, 2] * u.m, [4, 5]], names=['a', 'b'])
tm = _get_time()
c = SkyCoord([[1, 2], [3, 4]], [[5, 6], [7, 8]],
unit='deg', frame='fk4',
obstime=Time('2016-01-02'),
location=EarthLocation(1000, 2000, 3000, unit=u.km))
unit = u.m / u.s
t.meta = {'tm': tm, 'c': c, 'unit': unit}
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = QTable.read(out.getvalue(), format='ascii.ecsv')
compare_time(tm, t2.meta['tm'])
compare_coord(c, t2.meta['c'])
assert t2.meta['unit'] == unit
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy.table import Table, QTable, Column
from astropy.table.table_helpers import simple_table
from astropy.units import allclose as quantity_allclose
from astropy.units.quantity import QuantityInfo
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
from astropy.io.misc.hdf5 import meta_path
from astropy.utils.compat.optional_deps import HAS_H5PY # noqa
if HAS_H5PY:
import h5py
from astropy.io.tests.mixin_columns import mixin_cols, compare_attrs, serialized_names
# HDF5 does not support object dtype (since it stores binary representations).
unsupported_cols = {name: col for name, col in mixin_cols.items()
if (isinstance(col, np.ndarray) and col.dtype.kind == 'O')}
mixin_cols = {name: col for name, col in mixin_cols.items()
if name not in unsupported_cols}
ALL_DTYPES = [np.uint8, np.uint16, np.uint32, np.uint64, np.int8,
np.int16, np.int32, np.int64, np.float32, np.float64,
np.bool_, '|S3']
def _default_values(dtype):
if dtype == np.bool_:
return [0, 1, 1]
elif dtype == '|S3':
return [b'abc', b'def', b'ghi']
else:
return [1, 2, 3]
@pytest.mark.skipif('not HAS_H5PY')
def test_write_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.warns(UserWarning, match="table path was not set via the path= argument"):
t1.write(test_file)
t1 = Table.read(test_file, path='__astropy_table__')
@pytest.mark.skipif('not HAS_H5PY')
def test_write_nopath_nonempty(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='bubu')
with pytest.raises(ValueError) as exc:
t1.write(test_file, append=True)
assert 'table path should always be set via the path=' in exc.value.args[0]
@pytest.mark.skipif('not HAS_H5PY')
def test_read_notable_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
with pytest.raises(ValueError, match='no table found in HDF5 group /'):
Table.read(test_file, path='/', format='hdf5')
@pytest.mark.skipif('not HAS_H5PY')
def test_read_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path="the_table")
t2 = Table.read(test_file)
assert np.all(t1['a'] == t2['a'])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_nopath_multi_tables(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path="the_table")
t1.write(test_file, path="the_table_but_different", append=True,
overwrite=True)
with pytest.warns(AstropyUserWarning,
match=r"path= was not specified but multiple tables"):
t2 = Table.read(test_file)
assert np.all(t1['a'] == t2['a'])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_invalid_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(ValueError) as exc:
t1.write(test_file, path='test/')
assert exc.value.args[0] == "table path should end with table name, not /"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_invalid_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/')
assert exc.value.args[0] == "Path test/ does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_table(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
f.create_group('test').create_group('path')
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
with pytest.raises(OSError) as exc:
Table.read(f, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_simple(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_table(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
with pytest.raises(OSError) as exc:
t1.write(test_file, path='the_table', append=True)
assert exc.value.args[0] == "Table the_table already exists"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_memory(tmpdir):
with h5py.File('test', 'w', driver='core', backing_store=False) as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
t2 = Table.read(output_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t1.write(test_file, path='the_table')
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_overwrite(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table', overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table_1', append=True)
t1.write(test_file, path='the_table_2', append=True)
t2 = Table.read(test_file, path='the_table_1')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(test_file, path='the_table_2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_groups(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
f.create_group('test_1')
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='test_1/the_table_1', append=True)
t1.write(test_file, path='test_2/the_table_2', append=True)
t2 = Table.read(test_file, path='test_1/the_table_1')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(test_file, path='test_2/the_table_2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_overwrite(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='table1')
t1.write(test_file, path='table2', append=True)
t1v2 = Table()
t1v2.add_column(Column(name='a', data=[4, 5, 6]))
with pytest.raises(OSError) as exc:
t1v2.write(test_file, path='table1', append=True)
assert exc.value.args[0] == 'Table table1 already exists'
t1v2.write(test_file, path='table1', append=True, overwrite=True)
t2 = Table.read(test_file, path='table1')
assert np.all(t2['a'] == [4, 5, 6])
t3 = Table.read(test_file, path='table2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_filobj_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='path/to/data/the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file, path='path/to/data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_filobj_group_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='path/to/data/the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file['path/to'], path='data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_wrong_fileobj():
class FakeFile:
def read(self):
pass
f = FakeFile()
with pytest.raises(TypeError, match='h5py can only open regular files'):
Table.read(f, format='hdf5')
@pytest.mark.skipif('not HAS_H5PY')
def test_write_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_create_dataset_kwargs(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
the_path = 'the_table'
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path=the_path,
maxshape=(None, ))
# A roundabout way of checking this, but the table created above should be
# resizable if the kwarg was passed through successfully
t2 = Table()
t2.add_column(Column(name='a', data=[4, 5]))
with h5py.File(test_file, 'a') as output_file:
output_file[the_path].resize((len(t1) + len(t2), ))
output_file[the_path][len(t1):] = t2.as_array()
t3 = Table.read(test_file, path='the_table')
assert np.all(t3['a'] == [1, 2, 3, 4, 5])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_fileobj_group(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='path/to/data/the_table')
t2 = Table.read(test_file, path='path/to/data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_wrong_type():
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(TypeError) as exc:
t1.write(1212, path='path/to/data/the_table', format='hdf5')
assert exc.value.args[0] == ('output should be a string '
'or an h5py File or Group object')
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize(('dtype'), ALL_DTYPES)
def test_preserve_single_dtypes(tmpdir, dtype):
test_file = str(tmpdir.join('test.hdf5'))
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name='a', data=np.array(values, dtype=dtype)))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == values)
assert t2['a'].dtype == dtype
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_all_dtypes(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_meta(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['a'] = 1
t1.meta['b'] = 'hello'
t1.meta['c'] = 3.14159
t1.meta['d'] = True
t1.meta['e'] = np.array([1, 2, 3])
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
for key in t1.meta:
assert np.all(t1.meta[key] == t2.meta[key])
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
# Check that the meta table is fixed-width bytes (see #11299)
    with h5py.File(test_file, 'r') as h5:
        meta_lines = h5[meta_path('the_table')]
        assert meta_lines.dtype.kind == 'S'
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized_old_meta_format(tmpdir):
"""Test the old meta format
Only for some files created prior to v4.0, in compatibility mode.
"""
test_file = get_pkg_data_filename('data/old_meta_example.hdf5')
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_serialized_in_complicated_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.write(test_file, path='the_table/complicated/path', serialize_meta=True,
overwrite=True)
t2 = Table.read(test_file, path='the_table/complicated/path')
assert t1['a'].format == t2['a'].format
assert t1['a'].unit == t2['a'].unit
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_metadata_very_large(tmpdir):
"""Test that very large datasets work, now!"""
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.meta["meta_big"] = "0" * (2 ** 16 + 1)
t1.meta["meta_biggerstill"] = "0" * (2 ** 18)
t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY')
def test_skip_meta(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['a'] = 1
t1.meta['b'] = 'hello'
t1.meta['c'] = 3.14159
t1.meta['d'] = True
t1.meta['e'] = np.array([1, 2, 3])
t1.meta['f'] = str
wtext = f"Attribute `f` of type {type(t1.meta['f'])} cannot be written to HDF5 files - skipping"
with pytest.warns(AstropyUserWarning, match=wtext) as w:
t1.write(test_file, path='the_table')
assert len(w) == 1
@pytest.mark.skipif('not HAS_H5PY')
def test_fail_meta_serialize(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['f'] = str
with pytest.raises(Exception) as err:
t1.write(test_file, path='the_table', serialize_meta=True)
assert "cannot represent an object" in str(err.value)
assert "<class 'str'>" in str(err.value)
@pytest.mark.skipif('not HAS_H5PY')
def test_read_h5py_objects(tmpdir):
# Regression test - ensure that Datasets are recognized automatically
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
f = h5py.File(test_file, mode='r')
t2 = Table.read(f, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(f['/'], path='the_table')
assert np.all(t3['a'] == [1, 2, 3])
t4 = Table.read(f['the_table'])
assert np.all(t4['a'] == [1, 2, 3])
f.close() # don't raise an error in 'test --open-files'
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_unicode_to_hdf5(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t = Table()
t['p'] = ['a', 'b', 'c']
t['q'] = [1, 2, 3]
t['r'] = [b'a', b'b', b'c']
t['s'] = ["\u2119", "\u01b4", "\u2602"]
t.write(test_file, path='the_table', overwrite=True)
t1 = Table.read(test_file, path='the_table', character_as_bytes=False)
for col, col1 in zip(t.itercols(), t1.itercols()):
assert np.all(col == col1)
assert np.all(t1['p'].info.dtype.kind == "U")
assert np.all(t1['q'].info.dtype.kind == "i")
assert np.all(t1['r'].info.dtype.kind == "U")
assert np.all(t1['s'].info.dtype.kind == "U")
# Test default (character_as_bytes=True)
t2 = Table.read(test_file, path='the_table')
for col, col1 in zip(t.itercols(), t2.itercols()):
assert np.all(col == col1)
assert np.all(t2['p'].info.dtype.kind == "S")
assert np.all(t2['q'].info.dtype.kind == "i")
assert np.all(t2['r'].info.dtype.kind == "S")
assert np.all(t2['s'].info.dtype.kind == "S")
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta']
for attr in attrs + info_attrs:
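        # Walk dotted attribute paths like 'info.meta', falling back to item
        # access for mapping-like attributes.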
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
        # Mixin info.meta can be None instead of an empty OrderedDict();
        # #6720 would fix this.
if attr == 'info.meta':
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-15)
else:
assert np.all(a1 == a2)
@pytest.mark.skipif('not HAS_H5PY')
def test_hdf5_mixins_qtable_to_table(tmpdir):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = str(tmpdir.join('test_simple.hdf5'))
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(filename, format='hdf5', path='root')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_as_one(table_cls, tmpdir):
"""Test write/read all cols at once and validate intermediate column names"""
filename = str(tmpdir.join('test_simple.hdf5'))
names = sorted(mixin_cols)
all_serialized_names = []
for name in names:
all_serialized_names.extend(serialized_names[name])
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="hdf5", path='root', serialize_meta=True)
t2 = table_cls.read(filename, format='hdf5', path='root')
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['history'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
# Read directly via hdf5 and confirm column names
h5 = h5py.File(filename, 'r')
h5_names = list(h5['root'].dtype.names)
assert h5_names == all_serialized_names
h5.close()
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_per_column(table_cls, name_col, tmpdir):
"""Test write/read one col at a time and do detailed validation"""
filename = str(tmpdir.join('test_simple.hdf5'))
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'my description'
t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
t.write(filename, format="hdf5", path='root', serialize_meta=True)
t2 = table_cls.read(filename, format='hdf5', path='root')
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ['data'] if colname in ('c1', 'c2') else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize('name_col', unsupported_cols.items())
@pytest.mark.xfail(reason='column type unsupported')
def test_hdf5_unsupported_mixin(name_col, tmpdir):
    # Check that we actually fail in writing unsupported columns defined
    # on top.
    filename = str(tmpdir.join('test_simple.hdf5'))
    name, col = name_col
    Table([col], names=[name]).write(filename, format='hdf5', path='root',
                                     serialize_meta=True)
@pytest.mark.skipif('not HAS_H5PY')
def test_round_trip_masked_table_default(tmpdir):
"""Test round-trip of MaskedColumn through HDF5 using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = str(tmpdir.join('test.h5'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t['c'] = [b'c', b'd', b'e']
t['c'].mask[1] = True
t.write(filename, format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
@pytest.mark.skipif('not HAS_H5PY')
def test_overwrite_serialized_meta():
    # This used to cause an error because the serialized metadata table
    # was not removed from the existing file.
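    # driver='core' with backing_store=False keeps the HDF5 file entirely
    # in memory, so nothing is written to disk.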
with h5py.File('test_data.h5', 'w', driver='core', backing_store=False) as out:
t1 = Table()
t1.add_column(Column(data=[4, 8, 15], unit='cm'))
t1.write(out, path='data', serialize_meta=True)
t2 = Table.read(out, path='data')
assert all(t1 == t2)
assert t1.info(out=None) == t2.info(out=None)
t3 = Table()
t3.add_column(Column(data=[16, 23, 42], unit='g'))
t3.write(out, path='data', serialize_meta=True, append=True, overwrite=True)
t2 = Table.read(out, path='data')
assert all(t3 == t2)
assert t3.info(out=None) == t2.info(out=None)
|
0d84803f2dbb2b6c3bace68694370709fdfc360c150faad1d4801916943f224e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.io.misc import fnpickle, fnunpickle
def test_fnpickling_simple(tmpdir):
"""
Tests the `fnpickle` and `fnupickle` functions' basic operation by
pickling and unpickling a string, using both a filename and a
file.
"""
fn = str(tmpdir.join('test1.pickle'))
obj1 = 'astring'
fnpickle(obj1, fn)
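    # number=0 (the default) unpickles and returns a single object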
res = fnunpickle(fn, 0)
assert obj1 == res
    # now try with a file-like object instead of a filename
with open(fn, 'wb') as f:
fnpickle(obj1, f)
with open(fn, 'rb') as f:
res = fnunpickle(f)
assert obj1 == res
class ToBePickled:
def __init__(self, item):
self.item = item
def __eq__(self, other):
if isinstance(other, ToBePickled):
return self.item == other.item
else:
return False
def test_fnpickling_class(tmpdir):
"""
Tests the `fnpickle` and `fnupickle` functions' ability to pickle
and unpickle custom classes.
"""
fn = str(tmpdir.join('test2.pickle'))
obj1 = 'astring'
obj2 = ToBePickled(obj1)
fnpickle(obj2, fn)
res = fnunpickle(fn)
assert res == obj2
def test_fnpickling_protocol(tmpdir):
"""
Tests the `fnpickle` and `fnupickle` functions' ability to pickle
and unpickle pickle files from all protcols.
"""
import pickle
obj1 = 'astring'
obj2 = ToBePickled(obj1)
for p in range(pickle.HIGHEST_PROTOCOL + 1):
fn = str(tmpdir.join(f'testp{p}.pickle'))
fnpickle(obj2, fn, protocol=p)
res = fnunpickle(fn)
assert res == obj2
def test_fnpickling_many(tmpdir):
"""
Tests the `fnpickle` and `fnupickle` functions' ability to pickle
and unpickle multiple objects from a single file.
"""
fn = str(tmpdir.join('test3.pickle'))
# now try multiples
obj3 = 328.3432
obj4 = 'blahblahfoo'
fnpickle(obj3, fn)
fnpickle(obj4, fn, append=True)
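    # number=-1 unpickles every remaining object and returns them as a list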
res = fnunpickle(fn, number=-1)
assert len(res) == 2
assert res[0] == obj3
assert res[1] == obj4
fnpickle(obj4, fn, append=True)
res = fnunpickle(fn, number=2)
assert len(res) == 2
with pytest.raises(EOFError):
fnunpickle(fn, number=5)
|
70415ca134186e2c2f3b686c218fe021163cdde3b48c9cbf5a3423d4ea964c81 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
# Define a constant to know if the entry points are installed, since this impacts
# whether we can run the tests.
from importlib.metadata import entry_points
import pytest
# TODO: Exclusively use select when Python minversion is 3.10
eps = entry_points()
if hasattr(eps, 'select'):
ep = [entry.name for entry in eps.select(group='asdf_extensions')]
else:
ep = [entry.name for entry in eps.get('asdf_extensions', [])]
ASDF_ENTRY_INSTALLED = 'astropy' in ep and 'astropy-asdf' in ep
del entry_points, eps, ep
if not ASDF_ENTRY_INSTALLED:
pytest.skip('The astropy asdf entry points are not installed',
allow_module_level=True)
|
4fa877e5922863384f0a02101f6636fa8cd5c67566f2e236d0e8f993a5f31b5d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
from astropy.table import Table
def make_table():
a = [1, 4, 5]
b = [2.0, 5.0, 8.2]
c = ['x', 'y', 'z']
return Table([a, b, c], names=('a', 'b', 'c'), meta={'name': 'first table'})
def test_table_io(tmpdir):
tmpfile = str(tmpdir.join('table.asdf'))
table = make_table()
table.write(tmpfile)
# Simple sanity check using ASDF directly
with asdf.open(tmpfile) as af:
assert 'data' in af.keys()
assert isinstance(af['data'], Table)
assert all(af['data'] == table)
# Now test using the table reader
new_t = Table.read(tmpfile)
assert all(new_t == table)
def test_table_io_custom_key(tmpdir):
tmpfile = str(tmpdir.join('table.asdf'))
table = make_table()
table.write(tmpfile, data_key='something')
# Simple sanity check using ASDF directly
with asdf.open(tmpfile) as af:
assert 'something' in af.keys()
assert 'data' not in af.keys()
assert isinstance(af['something'], Table)
assert all(af['something'] == table)
# Now test using the table reader
with pytest.raises(KeyError):
new_t = Table.read(tmpfile)
new_t = Table.read(tmpfile, data_key='something')
assert all(new_t == table)
def test_table_io_custom_tree(tmpdir):
tmpfile = str(tmpdir.join('table.asdf'))
table = make_table()
def make_custom_tree(tab):
return dict(foo=dict(bar=tab))
table.write(tmpfile, make_tree=make_custom_tree)
# Simple sanity check using ASDF directly
with asdf.open(tmpfile) as af:
assert 'foo' in af.keys()
assert 'bar' in af['foo']
assert 'data' not in af.keys()
assert all(af['foo']['bar'] == table)
# Now test using table reader
with pytest.raises(KeyError):
new_t = Table.read(tmpfile)
def find_table(asdffile):
return asdffile['foo']['bar']
new_t = Table.read(tmpfile, find_table=find_table)
assert all(new_t == table)
|
589a1e9683fa86f1ed44e78bf086bcabd21a12199adf335a25d26c6c3322c561 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
__all__ = []
def skycoord_equal(sc1, sc2):
"""SkyCoord equality useful for testing and ASDF serialization
"""
if not sc1.is_equivalent_frame(sc2):
return False
if sc1.representation_type is not sc2.representation_type:
return False
if sc1.shape != sc2.shape:
return False # Maybe raise ValueError corresponding to future numpy behavior
eq = np.ones(shape=sc1.shape, dtype=bool)
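    # Compare the underlying representation component by component
    # (e.g. lon/lat or x/y/z).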
for comp in sc1.data.components:
eq &= getattr(sc1.data, comp) == getattr(sc2.data, comp)
return np.all(eq)
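# Example (illustrative):
#     >>> from astropy.coordinates import SkyCoord
#     >>> skycoord_equal(SkyCoord(1, 2, unit='deg'), SkyCoord(1, 2, unit='deg'))
#     True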
|
1f79c2c24bf3ed92b41bafaa8d99b988ceb27cc6deb5b062f3c3be76383c0b4c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
|
861b32a5a1f1c70db8a4cc8eec28f1485288773bda03570d0672bcb415df6acb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from asdf.tags.core.ndarray import NDArrayType
from astropy import table
from astropy.io.misc.asdf.types import AstropyType, AstropyAsdfType
class TableType:
"""
This class defines to_tree and from_tree methods that are used by both the
AstropyTableType and the AsdfTableType defined below. The behavior is
differentiated by the ``_compat`` class attribute. When ``_compat==True``,
the behavior will conform to the table schema defined by the ASDF Standard.
Otherwise, the behavior will conform to the custom table schema defined by
Astropy.
"""
_compat = False
@classmethod
def from_tree(cls, node, ctx):
# This is getting meta, guys
meta = node.get('meta', {})
# This enables us to support files that use the table definition from
# the ASDF Standard, rather than the custom one that Astropy defines.
if cls._compat:
return table.Table(node['columns'], meta=meta)
        if node.get('qtable', False):
            t = table.QTable(meta=meta)
        else:
            t = table.Table(meta=meta)
for name, col in zip(node['colnames'], node['columns']):
t[name] = col
return t
@classmethod
def to_tree(cls, data, ctx):
columns = [data[name] for name in data.colnames]
node = dict(columns=columns)
# Files that use the table definition from the ASDF Standard (instead
# of the one defined by Astropy) will not contain these fields
if not cls._compat:
node['colnames'] = data.colnames
node['qtable'] = isinstance(data, table.QTable)
if data.meta:
node['meta'] = data.meta
return node
@classmethod
def assert_equal(cls, old, new):
assert old.meta == new.meta
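        # Prefer a whole-table comparison via structured arrays; fall back to
        # per-column (and then per-value) comparison for mixin columns that
        # numpy cannot convert.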
try:
NDArrayType.assert_equal(np.array(old), np.array(new))
except (AttributeError, TypeError, ValueError):
for col0, col1 in zip(old, new):
try:
NDArrayType.assert_equal(np.array(col0), np.array(col1))
except (AttributeError, TypeError, ValueError):
assert col0 == col1
class AstropyTableType(TableType, AstropyType):
"""
This tag class reads and writes tables that conform to the custom schema
that is defined by Astropy (in contrast to the one that is defined by the
ASDF Standard). The primary reason for differentiating is to enable the
support of Astropy mixin columns, which are not supported by the ASDF
Standard.
"""
name = 'table/table'
types = ['astropy.table.Table']
requires = ['astropy']
class AsdfTableType(TableType, AstropyAsdfType):
"""
This tag class allows Astropy to read (and write) ASDF files that use the
table definition that is provided by the ASDF Standard (instead of the
custom one defined by Astropy). This is important to maintain for
cross-compatibility.
"""
name = 'core/table'
types = ['astropy.table.Table']
requires = ['astropy']
_compat = True
class ColumnType(AstropyAsdfType):
name = 'core/column'
types = ['astropy.table.Column', 'astropy.table.MaskedColumn']
requires = ['astropy']
handle_dynamic_subclasses = True
@classmethod
def from_tree(cls, node, ctx):
data = node['data']
name = node['name']
description = node.get('description')
unit = node.get('unit')
meta = node.get('meta', None)
return table.Column(
data=data._make_array(), name=name, description=description,
unit=unit, meta=meta)
@classmethod
def to_tree(cls, data, ctx):
node = {
'data': data.data,
'name': data.name
}
if data.description:
node['description'] = data.description
if data.unit:
node['unit'] = data.unit
if data.meta:
node['meta'] = data.meta
return node
@classmethod
def assert_equal(cls, old, new):
assert old.meta == new.meta
assert old.description == new.description
assert old.unit == new.unit
NDArrayType.assert_equal(np.array(old), np.array(new))
|
3230b7ab6c6f15fc8d0923271e3fcba4bf70b8a25b02292e0f2cfa3a324bf28e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from astropy import table
from astropy.io import fits
from astropy.io.misc.asdf.types import AstropyType, AstropyAsdfType
class FitsType:
name = 'fits/fits'
types = ['astropy.io.fits.HDUList']
requires = ['astropy']
@classmethod
def from_tree(cls, data, ctx):
hdus = []
first = True
for hdu_entry in data:
header = fits.Header([fits.Card(*x) for x in hdu_entry['header']])
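            # The first entry always becomes the primary HDU; later entries
            # become binary-table or image HDUs depending on whether their
            # data has named (record) fields.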
            hdu_data = hdu_entry.get('data')
            if hdu_data is not None:
                try:
                    hdu_data = hdu_data.__array__()
                except ValueError:
                    hdu_data = None
            if first:
                hdu = fits.PrimaryHDU(data=hdu_data, header=header)
                first = False
            elif hdu_data is not None and hdu_data.dtype.names is not None:
                hdu = fits.BinTableHDU(data=hdu_data, header=header)
            else:
                hdu = fits.ImageHDU(data=hdu_data, header=header)
hdus.append(hdu)
hdulist = fits.HDUList(hdus)
return hdulist
@classmethod
def to_tree(cls, hdulist, ctx):
units = []
for hdu in hdulist:
header_list = []
for card in hdu.header.cards:
                if card.comment:
                    new_card = [card.keyword, card.value, card.comment]
                elif card.value:
                    new_card = [card.keyword, card.value]
                elif card.keyword:
                    new_card = [card.keyword]
                else:
                    new_card = []
header_list.append(new_card)
hdu_dict = {}
hdu_dict['header'] = header_list
if hdu.data is not None:
if hdu.data.dtype.names is not None:
data = table.Table(hdu.data)
else:
data = hdu.data
hdu_dict['data'] = data
units.append(hdu_dict)
return units
@classmethod
def reserve_blocks(cls, data, ctx):
for hdu in data:
if hdu.data is not None:
yield ctx.blocks.find_or_create_block_for_array(hdu.data, ctx)
@classmethod
def assert_equal(cls, old, new):
for hdua, hdub in zip(old, new):
assert_array_equal(hdua.data, hdub.data)
for carda, cardb in zip(hdua.header.cards, hdub.header.cards):
assert tuple(carda) == tuple(cardb)
class AstropyFitsType(FitsType, AstropyType):
"""
This class implements ASDF serialization/deserialization that corresponds
to the FITS schema defined by Astropy. It will be used by default when
writing new HDUs to ASDF files.
"""
class AsdfFitsType(FitsType, AstropyAsdfType):
"""
This class implements ASDF serialization/deserialization that corresponds
to the FITS schema defined by the ASDF Standard. It will not be used by
default, except when reading files that use the ASDF Standard definition
rather than the one defined in Astropy. It will primarily be used for
backwards compatibility for reading older files. In the unlikely case that
another ASDF implementation uses the FITS schema from the ASDF Standard,
this tag could also be used to read a file it generated.
"""
|
7322d3d0cced654d202f3d5ab1796f2864868affaf25f16ebe96a261ae20a49b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from asdf.versioning import AsdfVersion
from astropy.modeling.bounding_box import ModelBoundingBox, CompoundBoundingBox
from astropy.modeling import mappings
from astropy.modeling import functional_models
from astropy.modeling.core import CompoundModel
from astropy.io.misc.asdf.types import AstropyAsdfType, AstropyType
from . import _parameter_to_value
__all__ = ['TransformType', 'IdentityType', 'ConstantType']
class TransformType(AstropyAsdfType):
version = '1.2.0'
requires = ['astropy']
@classmethod
def _from_tree_base_transform_members(cls, model, node, ctx):
if 'name' in node:
model.name = node['name']
if "inputs" in node:
model.inputs = tuple(node["inputs"])
if "outputs" in node:
model.outputs = tuple(node["outputs"])
if 'bounding_box' in node:
model.bounding_box = node['bounding_box']
elif 'selector_args' in node:
cbbox_keys = [tuple(key) for key in node['cbbox_keys']]
bbox_dict = dict(zip(cbbox_keys, node['cbbox_values']))
selector_args = node['selector_args']
model.bounding_box = CompoundBoundingBox.validate(model, bbox_dict, selector_args)
param_and_model_constraints = {}
for constraint in ['fixed', 'bounds']:
if constraint in node:
param_and_model_constraints[constraint] = node[constraint]
model._initialize_constraints(param_and_model_constraints)
if "input_units_equivalencies" in node:
            # For compound models, equivalencies are stored with each submodel
            # rather than on the compound model itself.
if not isinstance(model, CompoundModel):
model.input_units_equivalencies = node['input_units_equivalencies']
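        # Yielding makes this a two-stage constructor: asdf resumes the
        # generator after the model exists, so an inverse that references
        # other (possibly not-yet-built) objects can be attached afterwards.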
yield model
if 'inverse' in node:
model.inverse = node['inverse']
@classmethod
def from_tree_transform(cls, node, ctx):
raise NotImplementedError(
"Must be implemented in TransformType subclasses")
@classmethod
def from_tree(cls, node, ctx):
model = cls.from_tree_transform(node, ctx)
return cls._from_tree_base_transform_members(model, node, ctx)
@classmethod
def _to_tree_base_transform_members(cls, model, node, ctx):
if getattr(model, '_user_inverse', None) is not None:
node['inverse'] = model._user_inverse
if model.name is not None:
node['name'] = model.name
node['inputs'] = list(model.inputs)
node['outputs'] = list(model.outputs)
try:
bb = model.bounding_box
except NotImplementedError:
bb = None
if isinstance(bb, ModelBoundingBox):
bb = bb.bounding_box(order='C')
if model.n_inputs == 1:
bb = list(bb)
else:
bb = [list(item) for item in bb]
node['bounding_box'] = bb
elif isinstance(bb, CompoundBoundingBox):
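            # Store each selector argument as an (index, ignore) pair so the
            # compound bounding box can be reconstructed on read.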
selector_args = [[sa.index, sa.ignore] for sa in bb.selector_args]
node['selector_args'] = selector_args
node['cbbox_keys'] = list(bb.bounding_boxes.keys())
bounding_boxes = list(bb.bounding_boxes.values())
if len(model.inputs) - len(selector_args) == 1:
node['cbbox_values'] = [list(sbbox.bounding_box()) for sbbox in bounding_boxes]
else:
node['cbbox_values'] = [[list(item) for item in sbbox.bounding_box()
if np.isfinite(item[0])] for sbbox in bounding_boxes]
# model / parameter constraints
if not isinstance(model, CompoundModel):
fixed_nondefaults = {k: f for k, f in model.fixed.items() if f}
if fixed_nondefaults:
node['fixed'] = fixed_nondefaults
bounds_nondefaults = {k: b for k, b in model.bounds.items() if any(b)}
if bounds_nondefaults:
node['bounds'] = bounds_nondefaults
if not isinstance(model, CompoundModel):
if model.input_units_equivalencies:
node['input_units_equivalencies'] = model.input_units_equivalencies
return node
@classmethod
def to_tree_transform(cls, model, ctx):
raise NotImplementedError("Must be implemented in TransformType subclasses")
@classmethod
def to_tree(cls, model, ctx):
node = cls.to_tree_transform(model, ctx)
return cls._to_tree_base_transform_members(model, node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
assert a.name == b.name
# TODO: Assert inverses are the same
# assert the bounding_boxes are the same
assert a.get_bounding_box() == b.get_bounding_box()
assert a.inputs == b.inputs
assert a.outputs == b.outputs
assert a.input_units_equivalencies == b.input_units_equivalencies
class IdentityType(TransformType):
name = "transform/identity"
types = ['astropy.modeling.mappings.Identity']
@classmethod
def from_tree_transform(cls, node, ctx):
return mappings.Identity(node.get('n_dims', 1))
@classmethod
def to_tree_transform(cls, data, ctx):
node = {}
if data.n_inputs != 1:
node['n_dims'] = data.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, mappings.Identity) and
isinstance(b, mappings.Identity) and
a.n_inputs == b.n_inputs)
class ConstantType(TransformType):
name = "transform/constant"
version = '1.4.0'
supported_versions = ['1.0.0', '1.1.0', '1.2.0', '1.3.0', '1.4.0']
types = ['astropy.modeling.functional_models.Const1D',
'astropy.modeling.functional_models.Const2D']
@classmethod
def from_tree_transform(cls, node, ctx):
if cls.version < AsdfVersion('1.4.0'):
# The 'dimensions' property was added in 1.4.0,
# previously all values were 1D.
return functional_models.Const1D(node['value'])
elif node['dimensions'] == 1:
return functional_models.Const1D(node['value'])
elif node['dimensions'] == 2:
return functional_models.Const2D(node['value'])
else:
raise TypeError('Only 1D and 2D constant models are supported.')
@classmethod
def to_tree_transform(cls, data, ctx):
if cls.version < AsdfVersion('1.4.0'):
if not isinstance(data, functional_models.Const1D):
raise ValueError(
f'constant-{cls.version} does not support models with > 1 dimension')
return {
'value': _parameter_to_value(data.amplitude)
}
        else:
            if isinstance(data, functional_models.Const1D):
                dimension = 1
            elif isinstance(data, functional_models.Const2D):
                dimension = 2
            else:
                raise TypeError('Only 1D and 2D constant models are supported.')
            return {
                'value': _parameter_to_value(data.amplitude),
                'dimensions': dimension
            }
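# GenericModel is a fallback Mapping used for models that have no dedicated
# tag: it records only the number of inputs and outputs and passes its
# inputs straight through.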
class GenericModel(mappings.Mapping):
def __init__(self, n_inputs, n_outputs):
mapping = tuple(range(n_inputs))
super().__init__(mapping)
self._n_outputs = n_outputs
self._outputs = tuple('x' + str(idx) for idx in range(n_outputs))
@property
def inverse(self):
raise NotImplementedError()
class GenericType(TransformType):
name = "transform/generic"
types = [GenericModel]
@classmethod
def from_tree_transform(cls, node, ctx):
return GenericModel(
node['n_inputs'], node['n_outputs'])
@classmethod
def to_tree_transform(cls, data, ctx):
return {
'n_inputs': data.n_inputs,
'n_outputs': data.n_outputs
}
class UnitsMappingType(AstropyType):
name = "transform/units_mapping"
version = "1.0.0"
types = [mappings.UnitsMapping]
@classmethod
def to_tree(cls, node, ctx):
tree = {}
if node.name is not None:
tree["name"] = node.name
inputs = []
outputs = []
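        # node.mapping holds (input_unit, output_unit) pairs aligned with the
        # model's inputs and outputs.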
for i, o, m in zip(node.inputs, node.outputs, node.mapping):
input = {
"name": i,
"allow_dimensionless": node.input_units_allow_dimensionless[i],
}
if m[0] is not None:
input["unit"] = m[0]
if node.input_units_equivalencies is not None and i in node.input_units_equivalencies:
input["equivalencies"] = node.input_units_equivalencies[i]
inputs.append(input)
output = {
"name": o,
}
if m[-1] is not None:
output["unit"] = m[-1]
outputs.append(output)
tree["unit_inputs"] = inputs
tree["unit_outputs"] = outputs
return tree
@classmethod
def from_tree(cls, tree, ctx):
mapping = tuple((i.get("unit"), o.get("unit"))
for i, o in zip(tree["unit_inputs"], tree["unit_outputs"]))
equivalencies = None
for i in tree["unit_inputs"]:
if "equivalencies" in i:
if equivalencies is None:
equivalencies = {}
equivalencies[i["name"]] = i["equivalencies"]
kwargs = {
"input_units_equivalencies": equivalencies,
"input_units_allow_dimensionless": {
i["name"]: i.get("allow_dimensionless", False) for i in tree["unit_inputs"]},
}
if "name" in tree:
kwargs["name"] = tree["name"]
return mappings.UnitsMapping(mapping, **kwargs)
|
bab67b23bd092a8f2b02b4035a49e13aa7bcbc4cb97aecac9c1511bf6fa00235 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import astropy.units as u
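# Convert a model Parameter to a serializable value: a Quantity when the
# parameter has a unit, otherwise its plain numerical value.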
def _parameter_to_value(param):
if param.unit is not None:
return u.Quantity(param)
else:
return param.value
|
bac24b9f303bac8187fca0f60a6dffe91b81fc8706e7a0e99fb5effa72028a2c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from .basic import TransformType
from astropy.modeling.models import Spline1D
__all__ = ['SplineType']
class SplineType(TransformType):
name = 'transform/spline1d'
version = '1.0.0'
types = ['astropy.modeling.spline.Spline1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return Spline1D(knots=node['knots'],
coeffs=node['coefficients'],
degree=node['degree'])
@classmethod
def to_tree_transform(cls, model, ctx):
return {
"knots": model.t,
"coefficients": model.c,
"degree": model.degree
}
|
ce7bc5588d4a2229b32608cba36919375bcf093627a4a39e64296bd6b251d17c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.modeling.math_functions import __all__ as math_classes
from astropy.modeling import math_functions
from .basic import TransformType
__all__ = ['NpUfuncType']
class NpUfuncType(TransformType):
name = "transform/math_functions"
version = '1.0.0'
    types = ['astropy.modeling.math_functions.' + kl for kl in math_classes]
@classmethod
def from_tree_transform(cls, node, ctx):
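        # Recover the concrete model class from the stored numpy ufunc name
        # (e.g. 'add') via the module's _make_class_name helper.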
klass_name = math_functions._make_class_name(node['func_name'])
klass = getattr(math_functions, klass_name)
return klass()
@classmethod
def to_tree_transform(cls, model, ctx):
return {'func_name': model.func.__name__}
|
5b4475074faf4e6b57192112d73f4489270fa19548e4f3424dd9a1815f12231b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from astropy.modeling import functional_models
from .basic import TransformType
from . import _parameter_to_value
__all__ = ['AiryDisk2DType', 'Box1DType', 'Box2DType',
'Disk2DType', 'Ellipse2DType', 'Exponential1DType',
'Gaussian1DType', 'Gaussian2DType', 'KingProjectedAnalytic1DType',
'Logarithmic1DType', 'Lorentz1DType', 'Moffat1DType',
'Moffat2DType', 'Planar2D', 'RedshiftScaleFactorType',
'RickerWavelet1DType', 'RickerWavelet2DType', 'Ring2DType',
'Sersic1DType', 'Sersic2DType',
'Sine1DType', 'Cosine1DType', 'Tangent1DType',
'ArcSine1DType', 'ArcCosine1DType', 'ArcTangent1DType',
'Trapezoid1DType', 'TrapezoidDisk2DType', 'Voigt1DType']
class AiryDisk2DType(TransformType):
name = 'transform/airy_disk2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.AiryDisk2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.AiryDisk2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
radius=node['radius'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'radius': _parameter_to_value(model.radius)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.AiryDisk2D) and
isinstance(b, functional_models.AiryDisk2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.radius, b.radius)
class Box1DType(TransformType):
name = 'transform/box1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Box1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Box1D(amplitude=node['amplitude'],
x_0=node['x_0'],
width=node['width'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'width': _parameter_to_value(model.width)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Box1D) and
isinstance(b, functional_models.Box1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.width, b.width)
class Box2DType(TransformType):
name = 'transform/box2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Box2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Box2D(amplitude=node['amplitude'],
x_0=node['x_0'],
x_width=node['x_width'],
y_0=node['y_0'],
y_width=node['y_width'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'x_width': _parameter_to_value(model.x_width),
'y_0': _parameter_to_value(model.y_0),
'y_width': _parameter_to_value(model.y_width)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Box2D) and
isinstance(b, functional_models.Box2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.x_width, b.x_width)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.y_width, b.y_width)
class Disk2DType(TransformType):
name = 'transform/disk2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Disk2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Disk2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
R_0=node['R_0'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'R_0': _parameter_to_value(model.R_0)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Disk2D) and
isinstance(b, functional_models.Disk2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.R_0, b.R_0)
class Ellipse2DType(TransformType):
name = 'transform/ellipse2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Ellipse2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Ellipse2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
a=node['a'],
b=node['b'],
theta=node['theta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'a': _parameter_to_value(model.a),
'b': _parameter_to_value(model.b),
'theta': _parameter_to_value(model.theta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Ellipse2D) and
isinstance(b, functional_models.Ellipse2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.a, b.a)
assert_array_equal(a.b, b.b)
assert_array_equal(a.theta, b.theta)
class Exponential1DType(TransformType):
name = 'transform/exponential1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Exponential1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Exponential1D(amplitude=node['amplitude'],
tau=node['tau'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'tau': _parameter_to_value(model.tau)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Exponential1D) and
isinstance(b, functional_models.Exponential1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.tau, b.tau)
class Gaussian1DType(TransformType):
name = 'transform/gaussian1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Gaussian1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Gaussian1D(amplitude=node['amplitude'],
mean=node['mean'],
stddev=node['stddev'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'mean': _parameter_to_value(model.mean),
'stddev': _parameter_to_value(model.stddev)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Gaussian1D) and
isinstance(b, functional_models.Gaussian1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.mean, b.mean)
assert_array_equal(a.stddev, b.stddev)
class Gaussian2DType(TransformType):
name = 'transform/gaussian2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Gaussian2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Gaussian2D(amplitude=node['amplitude'],
x_mean=node['x_mean'],
y_mean=node['y_mean'],
x_stddev=node['x_stddev'],
y_stddev=node['y_stddev'],
theta=node['theta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_mean': _parameter_to_value(model.x_mean),
'y_mean': _parameter_to_value(model.y_mean),
'x_stddev': _parameter_to_value(model.x_stddev),
'y_stddev': _parameter_to_value(model.y_stddev),
'theta': _parameter_to_value(model.theta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Gaussian2D) and
isinstance(b, functional_models.Gaussian2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_mean, b.x_mean)
assert_array_equal(a.y_mean, b.y_mean)
assert_array_equal(a.x_stddev, b.x_stddev)
assert_array_equal(a.y_stddev, b.y_stddev)
assert_array_equal(a.theta, b.theta)
class KingProjectedAnalytic1DType(TransformType):
name = 'transform/king_projected_analytic1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.KingProjectedAnalytic1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.KingProjectedAnalytic1D(
amplitude=node['amplitude'],
r_core=node['r_core'],
r_tide=node['r_tide'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'r_core': _parameter_to_value(model.r_core),
'r_tide': _parameter_to_value(model.r_tide)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.KingProjectedAnalytic1D) and
isinstance(b, functional_models.KingProjectedAnalytic1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.r_core, b.r_core)
assert_array_equal(a.r_tide, b.r_tide)
class Logarithmic1DType(TransformType):
name = 'transform/logarithmic1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Logarithmic1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Logarithmic1D(amplitude=node['amplitude'],
tau=node['tau'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'tau': _parameter_to_value(model.tau)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Logarithmic1D) and
isinstance(b, functional_models.Logarithmic1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.tau, b.tau)
class Lorentz1DType(TransformType):
name = 'transform/lorentz1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Lorentz1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Lorentz1D(amplitude=node['amplitude'],
x_0=node['x_0'],
fwhm=node['fwhm'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'fwhm': _parameter_to_value(model.fwhm)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Lorentz1D) and
isinstance(b, functional_models.Lorentz1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.fwhm, b.fwhm)
class Moffat1DType(TransformType):
name = 'transform/moffat1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Moffat1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Moffat1D(amplitude=node['amplitude'],
x_0=node['x_0'],
gamma=node['gamma'],
alpha=node['alpha'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'gamma': _parameter_to_value(model.gamma),
'alpha': _parameter_to_value(model.alpha)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Moffat1D) and
isinstance(b, functional_models.Moffat1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.gamma, b.gamma)
assert_array_equal(a.alpha, b.alpha)
class Moffat2DType(TransformType):
name = 'transform/moffat2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Moffat2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Moffat2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
gamma=node['gamma'],
alpha=node['alpha'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'gamma': _parameter_to_value(model.gamma),
'alpha': _parameter_to_value(model.alpha)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Moffat2D) and
isinstance(b, functional_models.Moffat2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.gamma, b.gamma)
assert_array_equal(a.alpha, b.alpha)
class Planar2D(TransformType):
name = 'transform/planar2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Planar2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Planar2D(slope_x=node['slope_x'],
slope_y=node['slope_y'],
intercept=node['intercept'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'slope_x': _parameter_to_value(model.slope_x),
'slope_y': _parameter_to_value(model.slope_y),
'intercept': _parameter_to_value(model.intercept)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Planar2D) and
isinstance(b, functional_models.Planar2D))
assert_array_equal(a.slope_x, b.slope_x)
assert_array_equal(a.slope_y, b.slope_y)
assert_array_equal(a.intercept, b.intercept)
class RedshiftScaleFactorType(TransformType):
name = 'transform/redshift_scale_factor'
version = '1.0.0'
types = ['astropy.modeling.functional_models.RedshiftScaleFactor']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.RedshiftScaleFactor(z=node['z'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'z': _parameter_to_value(model.z)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.RedshiftScaleFactor) and
isinstance(b, functional_models.RedshiftScaleFactor))
assert_array_equal(a.z, b.z)
class RickerWavelet1DType(TransformType):
name = 'transform/ricker_wavelet1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.RickerWavelet1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.RickerWavelet1D(amplitude=node['amplitude'],
x_0=node['x_0'],
sigma=node['sigma'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'sigma': _parameter_to_value(model.sigma)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.RickerWavelet1D) and
isinstance(b, functional_models.RickerWavelet1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.sigma, b.sigma)
class RickerWavelet2DType(TransformType):
name = 'transform/ricker_wavelet2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.RickerWavelet2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.RickerWavelet2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
sigma=node['sigma'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'sigma': _parameter_to_value(model.sigma)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.RickerWavelet2D) and
isinstance(b, functional_models.RickerWavelet2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.sigma, b.sigma)
class Ring2DType(TransformType):
name = 'transform/ring2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Ring2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Ring2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
r_in=node['r_in'],
width=node['width'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'r_in': _parameter_to_value(model.r_in),
'width': _parameter_to_value(model.width)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Ring2D) and
isinstance(b, functional_models.Ring2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.r_in, b.r_in)
assert_array_equal(a.width, b.width)
class Sersic1DType(TransformType):
name = 'transform/sersic1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Sersic1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Sersic1D(amplitude=node['amplitude'],
r_eff=node['r_eff'],
n=node['n'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'r_eff': _parameter_to_value(model.r_eff),
'n': _parameter_to_value(model.n)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Sersic1D) and
isinstance(b, functional_models.Sersic1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.r_eff, b.r_eff)
assert_array_equal(a.n, b.n)
class Sersic2DType(TransformType):
name = 'transform/sersic2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Sersic2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Sersic2D(amplitude=node['amplitude'],
r_eff=node['r_eff'],
n=node['n'],
x_0=node['x_0'],
y_0=node['y_0'],
ellip=node['ellip'],
theta=node['theta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'r_eff': _parameter_to_value(model.r_eff),
'n': _parameter_to_value(model.n),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'ellip': _parameter_to_value(model.ellip),
'theta': _parameter_to_value(model.theta)
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Sersic2D) and
isinstance(b, functional_models.Sersic2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.r_eff, b.r_eff)
assert_array_equal(a.n, b.n)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.ellip, b.ellip)
assert_array_equal(a.theta, b.theta)
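# Base class holding the shared (de)serialization logic for the trigonometric
# models; each concrete subclass below sets ``_model`` to the astropy model
# class it handles.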
class Trigonometric1DType(TransformType):
_model = None
@classmethod
def from_tree_transform(cls, node, ctx):
return cls._model(amplitude=node['amplitude'],
frequency=node['frequency'],
phase=node['phase'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'frequency': _parameter_to_value(model.frequency),
'phase': _parameter_to_value(model.phase)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, cls._model) and
isinstance(b, cls._model))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.frequency, b.frequency)
assert_array_equal(a.phase, b.phase)
class Sine1DType(Trigonometric1DType):
name = 'transform/sine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Sine1D']
_model = functional_models.Sine1D
class Cosine1DType(Trigonometric1DType):
name = 'transform/cosine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Cosine1D']
_model = functional_models.Cosine1D
class Tangent1DType(Trigonometric1DType):
name = 'transform/tangent1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Tangent1D']
_model = functional_models.Tangent1D
class ArcSine1DType(Trigonometric1DType):
name = 'transform/arcsine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.ArcSine1D']
_model = functional_models.ArcSine1D
class ArcCosine1DType(Trigonometric1DType):
name = 'transform/arccosine1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.ArcCosine1D']
_model = functional_models.ArcCosine1D
class ArcTangent1DType(Trigonometric1DType):
name = 'transform/arctangent1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.ArcTangent1D']
_model = functional_models.ArcTangent1D
class Trapezoid1DType(TransformType):
name = 'transform/trapezoid1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Trapezoid1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Trapezoid1D(amplitude=node['amplitude'],
x_0=node['x_0'],
width=node['width'],
slope=node['slope'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'width': _parameter_to_value(model.width),
'slope': _parameter_to_value(model.slope)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Trapezoid1D) and
isinstance(b, functional_models.Trapezoid1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.width, b.width)
assert_array_equal(a.slope, b.slope)
class TrapezoidDisk2DType(TransformType):
name = 'transform/trapezoid_disk2d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.TrapezoidDisk2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.TrapezoidDisk2D(amplitude=node['amplitude'],
x_0=node['x_0'],
y_0=node['y_0'],
R_0=node['R_0'],
slope=node['slope'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'y_0': _parameter_to_value(model.y_0),
'R_0': _parameter_to_value(model.R_0),
'slope': _parameter_to_value(model.slope)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.TrapezoidDisk2D) and
isinstance(b, functional_models.TrapezoidDisk2D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.y_0, b.y_0)
assert_array_equal(a.R_0, b.R_0)
assert_array_equal(a.slope, b.slope)
class Voigt1DType(TransformType):
name = 'transform/voigt1d'
version = '1.0.0'
types = ['astropy.modeling.functional_models.Voigt1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return functional_models.Voigt1D(x_0=node['x_0'],
amplitude_L=node['amplitude_L'],
fwhm_L=node['fwhm_L'],
fwhm_G=node['fwhm_G'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'x_0': _parameter_to_value(model.x_0),
'amplitude_L': _parameter_to_value(model.amplitude_L),
'fwhm_L': _parameter_to_value(model.fwhm_L),
'fwhm_G': _parameter_to_value(model.fwhm_G)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, functional_models.Voigt1D) and
isinstance(b, functional_models.Voigt1D))
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.amplitude_L, b.amplitude_L)
assert_array_equal(a.fwhm_L, b.fwhm_L)
assert_array_equal(a.fwhm_G, b.fwhm_G)
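# Usage sketch (illustrative; not part of the original module). Writing one of
# the models above to an ASDF file exercises the to/from_tree_transform hooks;
# the file name is a made-up example.
#
#     import asdf
#     from astropy.modeling import functional_models
#
#     model = functional_models.Voigt1D(x_0=0.5, amplitude_L=2.0,
#                                       fwhm_L=0.1, fwhm_G=0.3)
#     asdf.AsdfFile({'model': model}).write_to('voigt.asdf')
#     with asdf.open('voigt.asdf') as af:
#         Voigt1DType.assert_equal(model, af['model'])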
|
46af07201300c745f60f9291f6012e5fedd7e50471aa885c302ecd903234f46d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from astropy import modeling
from .basic import TransformType
from . import _parameter_to_value
__all__ = ['AffineType', 'Rotate2DType', 'Rotate3DType',
'RotationSequenceType']
class AffineType(TransformType):
name = "transform/affine"
version = '1.3.0'
types = ['astropy.modeling.projections.AffineTransformation2D']
@classmethod
def from_tree_transform(cls, node, ctx):
matrix = node['matrix']
translation = node['translation']
if matrix.shape != (2, 2):
raise NotImplementedError(
"asdf currently only supports 2x2 (2D) rotation transformation "
"matrices")
if translation.shape != (2,):
raise NotImplementedError(
"asdf currently only supports 2D translation transformations.")
return modeling.projections.AffineTransformation2D(
matrix=matrix, translation=translation)
@classmethod
def to_tree_transform(cls, model, ctx):
return {'matrix': _parameter_to_value(model.matrix),
'translation': _parameter_to_value(model.translation)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
        assert a.__class__ == b.__class__
assert_array_equal(a.matrix, b.matrix)
assert_array_equal(a.translation, b.translation)
class Rotate2DType(TransformType):
name = "transform/rotate2d"
version = '1.3.0'
types = ['astropy.modeling.rotations.Rotation2D']
@classmethod
def from_tree_transform(cls, node, ctx):
return modeling.rotations.Rotation2D(node['angle'])
@classmethod
def to_tree_transform(cls, model, ctx):
return {'angle': _parameter_to_value(model.angle)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.rotations.Rotation2D) and
isinstance(b, modeling.rotations.Rotation2D))
assert_array_equal(a.angle, b.angle)
class Rotate3DType(TransformType):
name = "transform/rotate3d"
version = '1.3.0'
types = ['astropy.modeling.rotations.RotateNative2Celestial',
'astropy.modeling.rotations.RotateCelestial2Native',
'astropy.modeling.rotations.EulerAngleRotation']
@classmethod
def from_tree_transform(cls, node, ctx):
if node['direction'] == 'native2celestial':
return modeling.rotations.RotateNative2Celestial(node["phi"],
node["theta"],
node["psi"])
elif node['direction'] == 'celestial2native':
return modeling.rotations.RotateCelestial2Native(node["phi"],
node["theta"],
node["psi"])
else:
return modeling.rotations.EulerAngleRotation(node["phi"],
node["theta"],
node["psi"],
axes_order=node["direction"])
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, modeling.rotations.RotateNative2Celestial):
try:
node = {"phi": _parameter_to_value(model.lon),
"theta": _parameter_to_value(model.lat),
"psi": _parameter_to_value(model.lon_pole),
"direction": "native2celestial"
}
except AttributeError:
node = {"phi": model.lon,
"theta": model.lat,
"psi": model.lon_pole,
"direction": "native2celestial"
}
elif isinstance(model, modeling.rotations.RotateCelestial2Native):
try:
node = {"phi": _parameter_to_value(model.lon),
"theta": _parameter_to_value(model.lat),
"psi": _parameter_to_value(model.lon_pole),
"direction": "celestial2native"
}
except AttributeError:
node = {"phi": model.lon,
"theta": model.lat,
"psi": model.lon_pole,
"direction": "celestial2native"
}
else:
node = {"phi": _parameter_to_value(model.phi),
"theta": _parameter_to_value(model.theta),
"psi": _parameter_to_value(model.psi),
"direction": model.axes_order
}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert a.__class__ == b.__class__
if a.__class__.__name__ == "EulerAngleRotation":
assert_array_equal(a.phi, b.phi)
assert_array_equal(a.psi, b.psi)
assert_array_equal(a.theta, b.theta)
else:
assert_array_equal(a.lon, b.lon)
assert_array_equal(a.lat, b.lat)
assert_array_equal(a.lon_pole, b.lon_pole)
class RotationSequenceType(TransformType):
name = "transform/rotate_sequence_3d"
types = ['astropy.modeling.rotations.RotationSequence3D',
'astropy.modeling.rotations.SphericalRotationSequence']
version = "1.0.0"
@classmethod
def from_tree_transform(cls, node, ctx):
angles = node['angles']
axes_order = node['axes_order']
rotation_type = node['rotation_type']
if rotation_type == 'cartesian':
return modeling.rotations.RotationSequence3D(angles, axes_order=axes_order)
elif rotation_type == 'spherical':
return modeling.rotations.SphericalRotationSequence(angles, axes_order=axes_order)
else:
raise ValueError(f"Unrecognized rotation_type: {rotation_type}")
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'angles': list(model.angles.value)}
node['axes_order'] = model.axes_order
if isinstance(model, modeling.rotations.SphericalRotationSequence):
node['rotation_type'] = "spherical"
elif isinstance(model, modeling.rotations.RotationSequence3D):
node['rotation_type'] = "cartesian"
else:
raise ValueError(f"Cannot serialize model of type {type(model)}")
return node
@classmethod
def assert_equal(cls, a, b):
TransformType.assert_equal(a, b)
assert a.__class__.__name__ == b.__class__.__name__
assert_array_equal(a.angles, b.angles)
assert a.axes_order == b.axes_order
class GenericProjectionType(TransformType):
@classmethod
def from_tree_transform(cls, node, ctx):
args = []
for param_name, default in cls.params:
args.append(node.get(param_name, default))
if node['direction'] == 'pix2sky':
return cls.types[0](*args)
else:
return cls.types[1](*args)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
if isinstance(model, cls.types[0]):
node['direction'] = 'pix2sky'
else:
node['direction'] = 'sky2pix'
for param_name, default in cls.params:
val = getattr(model, param_name).value
if val != default:
node[param_name] = val
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert a.__class__ == b.__class__
_generic_projections = {
'zenithal_perspective': ('ZenithalPerspective', (('mu', 0.0), ('gamma', 0.0)), '1.3.0'),
'gnomonic': ('Gnomonic', (), None),
'stereographic': ('Stereographic', (), None),
'slant_orthographic': ('SlantOrthographic', (('xi', 0.0), ('eta', 0.0)), None),
'zenithal_equidistant': ('ZenithalEquidistant', (), None),
'zenithal_equal_area': ('ZenithalEqualArea', (), None),
'airy': ('Airy', (('theta_b', 90.0),), '1.2.0'),
'cylindrical_perspective': ('CylindricalPerspective', (('mu', 0.0), ('lam', 0.0)), '1.3.0'),
'cylindrical_equal_area': ('CylindricalEqualArea', (('lam', 0.0),), '1.3.0'),
'plate_carree': ('PlateCarree', (), None),
'mercator': ('Mercator', (), None),
'sanson_flamsteed': ('SansonFlamsteed', (), None),
'parabolic': ('Parabolic', (), None),
'molleweide': ('Molleweide', (), None),
'hammer_aitoff': ('HammerAitoff', (), None),
'conic_perspective': ('ConicPerspective', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'conic_equal_area': ('ConicEqualArea', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'conic_equidistant': ('ConicEquidistant', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'conic_orthomorphic': ('ConicOrthomorphic', (('sigma', 0.0), ('delta', 0.0)), '1.3.0'),
'bonne_equal_area': ('BonneEqualArea', (('theta1', 0.0),), '1.3.0'),
'polyconic': ('Polyconic', (), None),
'tangential_spherical_cube': ('TangentialSphericalCube', (), None),
'cobe_quad_spherical_cube': ('COBEQuadSphericalCube', (), None),
'quad_spherical_cube': ('QuadSphericalCube', (), None),
'healpix': ('HEALPix', (('H', 4.0), ('X', 3.0)), None),
'healpix_polar': ('HEALPixPolar', (), None)
}
def make_projection_types():
for tag_name, (name, params, version) in _generic_projections.items():
class_name = f'{name}Type'
types = [f'astropy.modeling.projections.Pix2Sky_{name}',
f'astropy.modeling.projections.Sky2Pix_{name}']
members = {'name': f'transform/{tag_name}',
'types': types,
'params': params}
if version:
members['version'] = version
globals()[class_name] = type(
str(class_name),
(GenericProjectionType,),
members)
__all__.append(class_name)
make_projection_types()
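# Usage sketch (illustrative; not part of the original module).
# make_projection_types() synthesizes one TransformType subclass per entry in
# _generic_projections (e.g. GnomonicType, tag 'transform/gnomonic', handling
# both Pix2Sky_Gnomonic and Sky2Pix_Gnomonic). A round trip might look like:
#
#     import asdf
#     from astropy.modeling import projections
#
#     model = projections.Pix2Sky_Gnomonic()
#     asdf.AsdfFile({'model': model}).write_to('tan.asdf')   # made-up file name
#     with asdf.open('tan.asdf') as af:
#         assert isinstance(af['model'], projections.Pix2Sky_Gnomonic)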
|
82f01c726285a7c83d9ed505eb265cee3ebf390a26115e380c00a7c3f7e91247 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf.versioning import AsdfVersion
import astropy.units as u
from astropy import modeling
from .basic import TransformType
from . import _parameter_to_value
__all__ = ['ShiftType', 'ScaleType', 'Linear1DType']
class ShiftType(TransformType):
name = "transform/shift"
version = '1.2.0'
types = ['astropy.modeling.models.Shift']
@classmethod
def from_tree_transform(cls, node, ctx):
offset = node['offset']
if not isinstance(offset, u.Quantity) and not np.isscalar(offset):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Shift transform.")
return modeling.models.Shift(offset)
@classmethod
def to_tree_transform(cls, model, ctx):
offset = model.offset
return {'offset': _parameter_to_value(offset)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Shift) and
isinstance(b, modeling.models.Shift))
assert_array_equal(a.offset.value, b.offset.value)
class ScaleType(TransformType):
name = "transform/scale"
version = '1.2.0'
types = ['astropy.modeling.models.Scale']
@classmethod
def from_tree_transform(cls, node, ctx):
factor = node['factor']
if not isinstance(factor, u.Quantity) and not np.isscalar(factor):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Scale transform.")
return modeling.models.Scale(factor)
@classmethod
def to_tree_transform(cls, model, ctx):
factor = model.factor
return {'factor': _parameter_to_value(factor)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Scale) and
isinstance(b, modeling.models.Scale))
assert_array_equal(a.factor, b.factor)
class MultiplyType(TransformType):
name = "transform/multiplyscale"
version = '1.0.0'
types = ['astropy.modeling.models.Multiply']
@classmethod
def from_tree_transform(cls, node, ctx):
factor = node['factor']
return modeling.models.Multiply(factor)
@classmethod
def to_tree_transform(cls, model, ctx):
factor = model.factor
return {'factor': _parameter_to_value(factor)}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Multiply) and
isinstance(b, modeling.models.Multiply))
assert_array_equal(a.factor, b.factor)
class PolynomialTypeBase(TransformType):
DOMAIN_WINDOW_MIN_VERSION = AsdfVersion("1.2.0")
name = "transform/polynomial"
types = ['astropy.modeling.models.Polynomial1D',
'astropy.modeling.models.Polynomial2D']
@classmethod
def from_tree_transform(cls, node, ctx):
coefficients = np.asarray(node['coefficients'])
n_dim = coefficients.ndim
if n_dim == 1:
domain = node.get('domain', None)
window = node.get('window', None)
model = modeling.models.Polynomial1D(coefficients.size - 1,
domain=domain, window=window)
model.parameters = coefficients
elif n_dim == 2:
x_domain, y_domain = tuple(node.get('domain', (None, None)))
x_window, y_window = tuple(node.get('window', (None, None)))
shape = coefficients.shape
degree = shape[0] - 1
if shape[0] != shape[1]:
raise TypeError("Coefficients must be an (n+1, n+1) matrix")
coeffs = {}
for i in range(shape[0]):
for j in range(shape[0]):
if i + j < degree + 1:
name = 'c' + str(i) + '_' + str(j)
coeffs[name] = coefficients[i, j]
model = modeling.models.Polynomial2D(degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
**coeffs)
else:
raise NotImplementedError(
"Asdf currently only supports 1D or 2D polynomial transform.")
return model
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, modeling.models.Polynomial1D):
coefficients = np.array(model.parameters)
elif isinstance(model, modeling.models.Polynomial2D):
degree = model.degree
coefficients = np.zeros((degree + 1, degree + 1))
for i in range(degree + 1):
for j in range(degree + 1):
if i + j < degree + 1:
name = 'c' + str(i) + '_' + str(j)
coefficients[i, j] = getattr(model, name).value
node = {'coefficients': coefficients}
typeindex = cls.types.index(model.__class__)
ndim = (typeindex % 2) + 1
if cls.version >= PolynomialTypeBase.DOMAIN_WINDOW_MIN_VERSION:
# Schema versions prior to 1.2 included an unrelated "domain"
# property. We can't serialize the new domain values with those
# versions because they don't validate.
if ndim == 1:
if model.domain is not None:
node['domain'] = model.domain
if model.window is not None:
node['window'] = model.window
else:
                if model.x_domain is not None or model.y_domain is not None:
                    node['domain'] = (model.x_domain, model.y_domain)
                if model.x_window is not None or model.y_window is not None:
                    node['window'] = (model.x_window, model.y_window)
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)) and
isinstance(b, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)))
assert_array_equal(a.parameters, b.parameters)
        if cls.version >= PolynomialTypeBase.DOMAIN_WINDOW_MIN_VERSION:
# Schema versions prior to 1.2 are known not to serialize
# domain or window.
if isinstance(a, modeling.models.Polynomial1D):
assert a.domain == b.domain
assert a.window == b.window
else:
assert a.x_domain == b.x_domain
assert a.x_window == b.x_window
assert a.y_domain == b.y_domain
assert a.y_window == b.y_window
class PolynomialType1_0(PolynomialTypeBase):
version = "1.0.0"
class PolynomialType1_1(PolynomialTypeBase):
version = "1.1.0"
class PolynomialType1_2(PolynomialTypeBase):
version = "1.2.0"
class OrthoPolynomialType(TransformType):
name = "transform/ortho_polynomial"
types = ['astropy.modeling.models.Legendre1D',
'astropy.modeling.models.Legendre2D',
'astropy.modeling.models.Chebyshev1D',
'astropy.modeling.models.Chebyshev2D',
'astropy.modeling.models.Hermite1D',
'astropy.modeling.models.Hermite2D']
typemap = {
'legendre': 0,
'chebyshev': 2,
'hermite': 4,
}
    invtypemap = {v: k for k, v in typemap.items()}
version = "1.0.0"
@classmethod
def from_tree_transform(cls, node, ctx):
coefficients = np.asarray(node['coefficients'])
n_dim = coefficients.ndim
poly_type = node['polynomial_type']
if n_dim == 1:
domain = node.get('domain', None)
window = node.get('window', None)
model = cls.types[cls.typemap[poly_type]](coefficients.size - 1,
domain=domain, window=window)
model.parameters = coefficients
elif n_dim == 2:
x_domain, y_domain = tuple(node.get('domain', (None, None)))
x_window, y_window = tuple(node.get('window', (None, None)))
coeffs = {}
shape = coefficients.shape
x_degree = shape[0] - 1
y_degree = shape[1] - 1
for i in range(x_degree + 1):
for j in range(y_degree + 1):
name = f'c{i}_{j}'
coeffs[name] = coefficients[i, j]
model = cls.types[cls.typemap[poly_type]+1](x_degree, y_degree,
x_domain=x_domain,
y_domain=y_domain,
x_window=x_window,
y_window=y_window,
**coeffs)
else:
raise NotImplementedError(
"Asdf currently only supports 1D or 2D polynomial transforms.")
return model
@classmethod
def to_tree_transform(cls, model, ctx):
typeindex = cls.types.index(model.__class__)
        poly_type = cls.invtypemap[(typeindex // 2) * 2]
ndim = (typeindex % 2) + 1
if ndim == 1:
coefficients = np.array(model.parameters)
else:
coefficients = np.zeros((model.x_degree + 1, model.y_degree + 1))
for i in range(model.x_degree + 1):
for j in range(model.y_degree + 1):
name = f'c{i}_{j}'
coefficients[i, j] = getattr(model, name).value
node = {'polynomial_type': poly_type, 'coefficients': coefficients}
if ndim == 1:
if model.domain is not None:
node['domain'] = model.domain
if model.window is not None:
node['window'] = model.window
else:
            if model.x_domain is not None or model.y_domain is not None:
                node['domain'] = (model.x_domain, model.y_domain)
            if model.x_window is not None or model.y_window is not None:
                node['window'] = (model.x_window, model.y_window)
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
# There should be a more elegant way of doing this
TransformType.assert_equal(a, b)
assert ((isinstance(a, (modeling.models.Legendre1D, modeling.models.Legendre2D)) and
isinstance(b, (modeling.models.Legendre1D, modeling.models.Legendre2D))) or
(isinstance(a, (modeling.models.Chebyshev1D, modeling.models.Chebyshev2D)) and
isinstance(b, (modeling.models.Chebyshev1D, modeling.models.Chebyshev2D))) or
(isinstance(a, (modeling.models.Hermite1D, modeling.models.Hermite2D)) and
isinstance(b, (modeling.models.Hermite1D, modeling.models.Hermite2D))))
assert_array_equal(a.parameters, b.parameters)
class Linear1DType(TransformType):
name = "transform/linear1d"
version = '1.0.0'
types = ['astropy.modeling.models.Linear1D']
@classmethod
def from_tree_transform(cls, node, ctx):
slope = node.get('slope', None)
intercept = node.get('intercept', None)
return modeling.models.Linear1D(slope=slope, intercept=intercept)
@classmethod
def to_tree_transform(cls, model, ctx):
return {
'slope': _parameter_to_value(model.slope),
'intercept': _parameter_to_value(model.intercept),
}
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Linear1D) and
isinstance(b, modeling.models.Linear1D))
assert_array_equal(a.slope, b.slope)
assert_array_equal(a.intercept, b.intercept)
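# Usage sketch (illustrative; not part of the original module). A Polynomial2D
# of degree n is stored as an (n+1, n+1) coefficient matrix with c{i}_{j} at
# [i, j]; entries with i + j > n stay zero. The file name is a made-up example.
#
#     import asdf
#     from astropy import modeling
#
#     model = modeling.models.Polynomial2D(2, c0_0=1.0, c1_1=2.0, c0_2=3.0)
#     asdf.AsdfFile({'model': model}).write_to('poly.asdf')
#     with asdf.open('poly.asdf') as af:
#         PolynomialType1_2.assert_equal(model, af['model'])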
|
232b5a607e9c5226352e0ccaf85be4079897a01036e6e698205d4ff102139965 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from astropy import modeling
from astropy import units as u
from .basic import TransformType
from astropy.modeling.bounding_box import ModelBoundingBox
__all__ = ['TabularType']
class TabularType(TransformType):
name = "transform/tabular"
version = '1.2.0'
types = [
modeling.models.Tabular2D, modeling.models.Tabular1D
]
@classmethod
def from_tree_transform(cls, node, ctx):
lookup_table = node.pop("lookup_table")
dim = lookup_table.ndim
fill_value = node.pop("fill_value", None)
if dim == 1:
# The copy is necessary because the array is memory mapped.
points = (node['points'][0][:],)
model = modeling.models.Tabular1D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
elif dim == 2:
points = tuple([p[:] for p in node['points']])
model = modeling.models.Tabular2D(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
else:
            tabular_class = modeling.models.tabular_model(dim)
points = tuple([p[:] for p in node['points']])
model = tabular_class(points=points, lookup_table=lookup_table,
method=node['method'], bounds_error=node['bounds_error'],
fill_value=fill_value)
return model
@classmethod
def to_tree_transform(cls, model, ctx):
node = {}
if model.fill_value is not None:
node["fill_value"] = model.fill_value
node["lookup_table"] = model.lookup_table
node["points"] = [p for p in model.points]
node["method"] = str(model.method)
node["bounds_error"] = model.bounds_error
return node
@classmethod
def assert_equal(cls, a, b):
if isinstance(a.lookup_table, u.Quantity):
assert u.allclose(a.lookup_table, b.lookup_table)
assert u.allclose(a.points, b.points)
a_box = a.bounding_box
if isinstance(a_box, ModelBoundingBox):
a_box = a_box.bounding_box()
b_box = b.bounding_box
if isinstance(b_box, ModelBoundingBox):
b_box = b_box.bounding_box()
for i in range(len(a_box)):
assert u.allclose(a_box[i], b_box[i])
else:
assert_array_equal(a.lookup_table, b.lookup_table)
assert_array_equal(a.points, b.points)
a_box = a.bounding_box
if isinstance(a_box, ModelBoundingBox):
a_box = a_box.bounding_box()
b_box = b.bounding_box
if isinstance(b_box, ModelBoundingBox):
b_box = b_box.bounding_box()
assert_array_equal(a_box, b_box)
        assert a.method == b.method
        if a.fill_value is None:
            assert b.fill_value is None
        elif np.isnan(a.fill_value):
            assert np.isnan(b.fill_value)
        else:
            assert a.fill_value == b.fill_value
        assert a.bounds_error == b.bounds_error
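# Usage sketch (illustrative; not part of the original module). The points and
# lookup table are serialized directly; the file name is a made-up example.
#
#     import asdf
#     import numpy as np
#     from astropy import modeling
#
#     model = modeling.models.Tabular1D(points=(np.arange(5.0),),
#                                       lookup_table=np.arange(5.0) ** 2,
#                                       bounds_error=False, fill_value=np.nan)
#     asdf.AsdfFile({'model': model}).write_to('tabular.asdf')
#     with asdf.open('tabular.asdf') as af:
#         TabularType.assert_equal(model, af['model'])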
|
507b34a4452d0a2d22a2dd2822232967b486b82da90b1cdcdd1c925938cac87b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from astropy.modeling import functional_models, physical_models
from .basic import TransformType
from . import _parameter_to_value
__all__ = ['BlackBody', 'Drude1DType', 'Plummer1DType']
class BlackBody(TransformType):
name = 'transform/blackbody'
version = '1.0.0'
types = ['astropy.modeling.physical_models.BlackBody']
@classmethod
def from_tree_transform(cls, node, ctx):
return physical_models.BlackBody(scale=node['scale'],
temperature=node['temperature'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'scale': _parameter_to_value(model.scale),
'temperature': _parameter_to_value(model.temperature)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, physical_models.BlackBody) and
isinstance(b, physical_models.BlackBody))
assert_array_equal(a.scale, b.scale)
assert_array_equal(a.temperature, b.temperature)
class Drude1DType(TransformType):
name = 'transform/drude1d'
version = '1.0.0'
types = ['astropy.modeling.physical_models.Drude1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return physical_models.Drude1D(amplitude=node['amplitude'],
x_0=node['x_0'],
fwhm=node['fwhm'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'fwhm': _parameter_to_value(model.fwhm)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, physical_models.Drude1D) and
isinstance(b, physical_models.Drude1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.fwhm, b.fwhm)
class Plummer1DType(TransformType):
name = 'transform/plummer1d'
version = '1.0.0'
types = ['astropy.modeling.physical_models.Plummer1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return physical_models.Plummer1D(mass=node['mass'],
r_plum=node['r_plum'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'mass': _parameter_to_value(model.mass),
'r_plum': _parameter_to_value(model.r_plum)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, physical_models.Plummer1D) and
isinstance(b, physical_models.Plummer1D))
assert_array_equal(a.mass, b.mass)
assert_array_equal(a.r_plum, b.r_plum)
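# Usage sketch (illustrative; not part of the original module). Quantity-valued
# parameters survive the round trip because _parameter_to_value keeps their
# units. The file name is a made-up example.
#
#     import asdf
#     import astropy.units as u
#     from astropy.modeling import physical_models
#
#     model = physical_models.BlackBody(temperature=5000 * u.K, scale=1.0)
#     asdf.AsdfFile({'model': model}).write_to('blackbody.asdf')
#     with asdf.open('blackbody.asdf') as af:
#         BlackBody.assert_equal(model, af['model'])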
|
6cb699916b7362ea58d88f7f94cb410525e6bd1f841be1c4a868622e48828e97 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf import tagged
from asdf.tests.helpers import assert_tree_match
from .basic import TransformType
from astropy.modeling.core import Model, CompoundModel
from astropy.modeling.models import Identity, Mapping, Const1D
__all__ = ['CompoundType', 'RemapAxesType']
_operator_to_tag_mapping = {
'+': 'add',
'-': 'subtract',
'*': 'multiply',
'/': 'divide',
'**': 'power',
'|': 'compose',
'&': 'concatenate',
'fix_inputs': 'fix_inputs'
}
_tag_to_method_mapping = {
'add': '__add__',
'subtract': '__sub__',
'multiply': '__mul__',
'divide': '__truediv__',
'power': '__pow__',
'compose': '__or__',
'concatenate': '__and__',
'fix_inputs': 'fix_inputs'
}
class CompoundType(TransformType):
name = ['transform/' + x for x in _tag_to_method_mapping.keys()]
types = [CompoundModel]
version = '1.2.0'
handle_dynamic_subclasses = True
@classmethod
def from_tree_tagged(cls, node, ctx):
tag = node._tag[node._tag.rfind('/')+1:]
tag = tag[:tag.rfind('-')]
oper = _tag_to_method_mapping[tag]
left = node['forward'][0]
if not isinstance(left, Model):
raise TypeError(f"Unknown model type '{node['forward'][0]._tag}'")
right = node['forward'][1]
if (not isinstance(right, Model) and
not (oper == 'fix_inputs' and isinstance(right, dict))):
raise TypeError(f"Unknown model type '{node['forward'][1]._tag}'")
if oper == 'fix_inputs':
right = dict(zip(right['keys'], right['values']))
model = CompoundModel('fix_inputs', left, right)
else:
model = getattr(left, oper)(right)
return cls._from_tree_base_transform_members(model, node, ctx)
@classmethod
def to_tree_tagged(cls, model, ctx):
left = model.left
if isinstance(model.right, dict):
right = {
'keys': list(model.right.keys()),
'values': list(model.right.values())
}
else:
right = model.right
node = {
'forward': [left, right]
}
try:
tag_name = 'transform/' + _operator_to_tag_mapping[model.op]
except KeyError:
raise ValueError(f"Unknown operator '{model.op}'")
node = tagged.tag_object(cls.make_yaml_tag(tag_name), node, ctx=ctx)
return cls._to_tree_base_transform_members(model, node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert_tree_match(a.left, b.left)
assert_tree_match(a.right, b.right)
class RemapAxesType(TransformType):
name = 'transform/remap_axes'
types = [Mapping]
version = '1.3.0'
@classmethod
def from_tree_transform(cls, node, ctx):
mapping = node['mapping']
n_inputs = node.get('n_inputs')
if all([isinstance(x, int) for x in mapping]):
return Mapping(tuple(mapping), n_inputs)
if n_inputs is None:
n_inputs = max([x for x in mapping
if isinstance(x, int)]) + 1
transform = Identity(n_inputs)
new_mapping = []
i = n_inputs
for entry in mapping:
if isinstance(entry, int):
new_mapping.append(entry)
else:
new_mapping.append(i)
transform = transform & Const1D(entry.value)
i += 1
return transform | Mapping(new_mapping)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'mapping': list(model.mapping)}
if model.n_inputs > max(model.mapping) + 1:
node['n_inputs'] = model.n_inputs
return node
@classmethod
def assert_equal(cls, a, b):
TransformType.assert_equal(a, b)
assert a.mapping == b.mapping
        assert a.n_inputs == b.n_inputs
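# Usage sketch (illustrative; not part of the original module). Compound models
# serialize recursively: each operator becomes one of the transform/* tags in
# _operator_to_tag_mapping, with its operands in the 'forward' list. The file
# name is a made-up example.
#
#     import asdf
#     from astropy.modeling.models import Shift, Scale
#
#     model = Shift(1.5) | Scale(2.0)   # written with the transform/compose tag
#     asdf.AsdfFile({'model': model}).write_to('pipeline.asdf')
#     with asdf.open('pipeline.asdf') as af:
#         assert af['model'](3.0) == model(3.0)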
|
62eca0e16a1aacbb0d1fbe39110d970553de9c21cb5b31db270922df2e5decfc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from numpy.testing import assert_array_equal
from astropy.modeling import powerlaws
from .basic import TransformType
from . import _parameter_to_value
__all__ = ['PowerLaw1DType', 'BrokenPowerLaw1DType',
'SmoothlyBrokenPowerLaw1DType', 'ExponentialCutoffPowerLaw1DType',
'LogParabola1DType']
class PowerLaw1DType(TransformType):
name = 'transform/power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.PowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.PowerLaw1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.PowerLaw1D) and
isinstance(b, powerlaws.PowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
class BrokenPowerLaw1DType(TransformType):
name = 'transform/broken_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.BrokenPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.BrokenPowerLaw1D(amplitude=node['amplitude'],
x_break=node['x_break'],
alpha_1=node['alpha_1'],
alpha_2=node['alpha_2'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_break': _parameter_to_value(model.x_break),
'alpha_1': _parameter_to_value(model.alpha_1),
'alpha_2': _parameter_to_value(model.alpha_2)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.BrokenPowerLaw1D) and
isinstance(b, powerlaws.BrokenPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_break, b.x_break)
assert_array_equal(a.alpha_1, b.alpha_1)
assert_array_equal(a.alpha_2, b.alpha_2)
class SmoothlyBrokenPowerLaw1DType(TransformType):
name = 'transform/smoothly_broken_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.SmoothlyBrokenPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.SmoothlyBrokenPowerLaw1D(amplitude=node['amplitude'],
x_break=node['x_break'],
alpha_1=node['alpha_1'],
alpha_2=node['alpha_2'],
delta=node['delta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_break': _parameter_to_value(model.x_break),
'alpha_1': _parameter_to_value(model.alpha_1),
'alpha_2': _parameter_to_value(model.alpha_2),
'delta': _parameter_to_value(model.delta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.SmoothlyBrokenPowerLaw1D) and
isinstance(b, powerlaws.SmoothlyBrokenPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_break, b.x_break)
assert_array_equal(a.alpha_1, b.alpha_1)
assert_array_equal(a.alpha_2, b.alpha_2)
assert_array_equal(a.delta, b.delta)
class ExponentialCutoffPowerLaw1DType(TransformType):
name = 'transform/exponential_cutoff_power_law1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.ExponentialCutoffPowerLaw1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.ExponentialCutoffPowerLaw1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'],
x_cutoff=node['x_cutoff'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha),
'x_cutoff': _parameter_to_value(model.x_cutoff)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.ExponentialCutoffPowerLaw1D) and
isinstance(b, powerlaws.ExponentialCutoffPowerLaw1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
assert_array_equal(a.x_cutoff, b.x_cutoff)
class LogParabola1DType(TransformType):
name = 'transform/log_parabola1d'
version = '1.0.0'
types = ['astropy.modeling.powerlaws.LogParabola1D']
@classmethod
def from_tree_transform(cls, node, ctx):
return powerlaws.LogParabola1D(amplitude=node['amplitude'],
x_0=node['x_0'],
alpha=node['alpha'],
beta=node['beta'])
@classmethod
def to_tree_transform(cls, model, ctx):
node = {'amplitude': _parameter_to_value(model.amplitude),
'x_0': _parameter_to_value(model.x_0),
'alpha': _parameter_to_value(model.alpha),
'beta': _parameter_to_value(model.beta)}
return node
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, powerlaws.LogParabola1D) and
isinstance(b, powerlaws.LogParabola1D))
assert_array_equal(a.amplitude, b.amplitude)
assert_array_equal(a.x_0, b.x_0)
assert_array_equal(a.alpha, b.alpha)
assert_array_equal(a.beta, b.beta)
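# Usage sketch (illustrative; not part of the original module); the file name
# is a made-up example.
#
#     import asdf
#     from astropy.modeling import powerlaws
#
#     model = powerlaws.PowerLaw1D(amplitude=10.0, x_0=1.0, alpha=2.0)
#     asdf.AsdfFile({'model': model}).write_to('powerlaw.asdf')
#     with asdf.open('powerlaw.asdf') as af:
#         PowerLaw1DType.assert_equal(model, af['model'])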
|
1a29212b685daf80046312252974a95d7b50f0bed97694690d1b792c220fb794 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import functools
import numpy as np
from astropy.time import TimeDelta
from ...types import AstropyType
__all__ = ['TimeDeltaType']
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
class TimeDeltaType(AstropyType):
name = 'time/timedelta'
types = [TimeDelta]
version = '1.0.0'
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, node, ctx):
return TimeDelta.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
assert allclose_jd(old.jd, new.jd)
assert allclose_jd2(old.jd2, new.jd2)
assert allclose_sec(old.sec, new.sec)
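# Usage sketch (illustrative; not part of the original module). The tolerances
# above absorb double-precision roundoff (about 20 ps) in the comparison; the
# file name is a made-up example.
#
#     import asdf
#     import astropy.units as u
#     from astropy.time import TimeDelta
#
#     dt = TimeDelta([1, 2] * u.day)
#     asdf.AsdfFile({'dt': dt}).write_to('timedelta.asdf')
#     with asdf.open('timedelta.asdf') as af:
#         TimeDeltaType.assert_equal(dt, af['dt'])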
|
804ad0fc6cc2c844cc03414a4c842d4e52b8e025994bf276ca6e08c69b7da3b1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf.versioning import AsdfSpec
from astropy import time
from astropy import units as u
from astropy.units import Quantity
from astropy.coordinates import EarthLocation
from astropy.io.misc.asdf.types import AstropyAsdfType
__all__ = ['TimeType']
_guessable_formats = {'iso', 'byear', 'jyear', 'yday'}
_astropy_format_to_asdf_format = {
'isot': 'iso',
'byear_str': 'byear',
'jyear_str': 'jyear'
}
def _assert_earthlocation_equal(a, b):
assert_array_equal(a.x, b.x)
assert_array_equal(a.y, b.y)
assert_array_equal(a.z, b.z)
assert_array_equal(a.lat, b.lat)
assert_array_equal(a.lon, b.lon)
class TimeType(AstropyAsdfType):
name = 'time/time'
version = '1.1.0'
supported_versions = ['1.0.0', AsdfSpec('>=1.1.0')]
types = ['astropy.time.core.Time']
requires = ['astropy']
@classmethod
def to_tree(cls, node, ctx):
fmt = node.format
if fmt == 'byear':
node = time.Time(node, format='byear_str')
elif fmt == 'jyear':
node = time.Time(node, format='jyear_str')
elif fmt in ('fits', 'datetime', 'plot_date'):
node = time.Time(node, format='isot')
fmt = node.format
fmt = _astropy_format_to_asdf_format.get(fmt, fmt)
guessable_format = fmt in _guessable_formats
if node.scale == 'utc' and guessable_format and node.isscalar:
return node.value
d = {'value': node.value}
if not guessable_format:
d['format'] = fmt
if node.scale != 'utc':
d['scale'] = node.scale
if node.location is not None:
x, y, z = node.location.x, node.location.y, node.location.z
# Preserve backwards compatibility for writing the old schema
# This allows WCS to test backwards compatibility with old frames
# This code does get tested in CI, but we don't run a coverage test
if cls.version == '1.0.0': # pragma: no cover
unit = node.location.unit
d['location'] = {
'x': x.value,
'y': y.value,
'z': z.value,
'unit': unit
}
else:
d['location'] = {
# It seems like EarthLocations can be represented either in
# terms of Cartesian coordinates or latitude and longitude, so
# we rather arbitrarily choose the former for our representation
'x': x,
'y': y,
'z': z
}
return d
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, (str, list, np.ndarray)):
t = time.Time(node)
fmt = _astropy_format_to_asdf_format.get(t.format, t.format)
if fmt not in _guessable_formats:
raise ValueError(f"Invalid time '{node}'")
return t
value = node['value']
fmt = node.get('format')
scale = node.get('scale')
location = node.get('location')
if location is not None:
unit = location.get('unit', u.m)
# This ensures that we can read the v.1.0.0 schema and convert it
# to the new EarthLocation object, which expects Quantity components
for comp in ['x', 'y', 'z']:
if not isinstance(location[comp], Quantity):
location[comp] = Quantity(location[comp], unit=unit)
location = EarthLocation.from_geocentric(
location['x'], location['y'], location['z'])
return time.Time(value, format=fmt, scale=scale, location=location)
@classmethod
def assert_equal(cls, old, new):
assert old.format == new.format
assert old.scale == new.scale
if isinstance(old.location, EarthLocation):
assert isinstance(new.location, EarthLocation)
_assert_earthlocation_equal(old.location, new.location)
else:
assert old.location == new.location
assert_array_equal(old, new)
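# Usage sketch (illustrative; not part of the original module). A scalar UTC
# time in a guessable format is written as a bare string; anything else becomes
# a mapping with explicit 'format' and 'scale'. The file name is a made-up
# example.
#
#     import asdf
#     from astropy.time import Time
#
#     t = Time(2451545.0, format='jd', scale='tai')
#     asdf.AsdfFile({'t': t}).write_to('time.asdf')
#     with asdf.open('time.asdf') as af:
#         TimeType.assert_equal(t, af['t'])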
|
11e3af4c2f141630d663757df02b834500ff321b4331256b0ea387052b1398e0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from asdf.tags.core import NDArrayType
from astropy.coordinates.spectral_coordinate import SpectralCoord
from astropy.io.misc.asdf.types import AstropyType
from astropy.io.misc.asdf.tags.unit.unit import UnitType
__all__ = ['SpectralCoordType']
class SpectralCoordType(AstropyType):
"""
    ASDF tag implementation used to serialize/deserialize SpectralCoord objects
"""
name = 'coordinates/spectralcoord'
types = [SpectralCoord]
version = '1.0.0'
@classmethod
def to_tree(cls, spec_coord, ctx):
node = {}
if isinstance(spec_coord, SpectralCoord):
node['value'] = spec_coord.value
node['unit'] = spec_coord.unit
if spec_coord.observer is not None:
node['observer'] = spec_coord.observer
if spec_coord.target is not None:
node['target'] = spec_coord.target
return node
raise TypeError(f"'{spec_coord}' is not a valid SpectralCoord")
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, SpectralCoord):
return node
unit = UnitType.from_tree(node['unit'], ctx)
value = node['value']
        observer = node.get('observer')
        target = node.get('target')
if isinstance(value, NDArrayType):
value = value._make_array()
return SpectralCoord(value, unit=unit, observer=observer, target=target)
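# Usage sketch (illustrative; not part of the original module); the file name
# is a made-up example.
#
#     import asdf
#     import astropy.units as u
#     from astropy.coordinates import SpectralCoord
#
#     sc = SpectralCoord(500.0, unit=u.nm)
#     asdf.AsdfFile({'sc': sc}).write_to('spectral.asdf')
#     with asdf.open('spectral.asdf') as af:
#         assert af['sc'].unit == u.nm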
|
cb8fb12c9563cd3b8907c6110ed8c9295c2be62e21b521ae884eabca8f685f1a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import os
import glob
from asdf import tagged
import astropy.units as u
import astropy.coordinates
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.units import Quantity
from astropy.coordinates import ICRS, Longitude, Latitude, Angle
from astropy.io.misc.asdf.types import AstropyType
__all__ = ['CoordType']
SCHEMA_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'schemas', 'astropy.org', 'astropy'))
def _get_frames():
"""
By reading the schema files, get the list of all the frames we can
save/load.
"""
search = os.path.join(SCHEMA_PATH, 'coordinates', 'frames', '*.yaml')
files = glob.glob(search)
names = []
for fpath in files:
path, fname = os.path.split(fpath)
frame, _ = fname.split('-')
# Skip baseframe because we cannot directly save / load it.
        # Skip icrs because it has an explicit tag of its own (there are two
        # schema versions).
if frame not in ['baseframe', 'icrs']:
names.append(frame)
return names
class BaseCoordType:
"""
This defines the base methods for coordinates, without defining anything
related to asdf types. This allows subclasses with different types and
schemas to use this without confusing the metaclass machinery.
"""
@staticmethod
def _tag_to_frame(tag):
"""
Extract the frame name from the tag.
"""
tag = tag[tag.rfind('/')+1:]
tag = tag[:tag.rfind('-')]
return frame_transform_graph.lookup_name(tag)
@classmethod
def _frame_name_to_tag(cls, frame_name):
return cls.make_yaml_tag(cls._tag_prefix + frame_name)
@classmethod
def from_tree_tagged(cls, node, ctx):
frame = cls._tag_to_frame(node._tag)
data = node.get('data', None)
if data is not None:
return frame(node['data'], **node['frame_attributes'])
return frame(**node['frame_attributes'])
@classmethod
def to_tree_tagged(cls, frame, ctx):
if type(frame) not in frame_transform_graph.frame_set:
raise ValueError("Can only save frames that are registered with the "
"transformation graph.")
node = {}
if frame.has_data:
node['data'] = frame.data
frame_attributes = {}
for attr in frame.frame_attributes.keys():
value = getattr(frame, attr, None)
if value is not None:
frame_attributes[attr] = value
node['frame_attributes'] = frame_attributes
return tagged.tag_object(cls._frame_name_to_tag(frame.name), node, ctx=ctx)
@classmethod
def assert_equal(cls, old, new):
assert isinstance(new, type(old))
if new.has_data:
assert u.allclose(new.data.lon, old.data.lon)
assert u.allclose(new.data.lat, old.data.lat)
class CoordType(BaseCoordType, AstropyType):
_tag_prefix = "coordinates/frames/"
name = ["coordinates/frames/" + f for f in _get_frames()]
types = [astropy.coordinates.BaseCoordinateFrame]
handle_dynamic_subclasses = True
requires = ['astropy']
version = "1.0.0"
class ICRSType(CoordType):
"""
Define a special tag for ICRS so we can make it version 1.1.0.
"""
name = "coordinates/frames/icrs"
types = ['astropy.coordinates.ICRS']
version = "1.1.0"
class ICRSType10(AstropyType):
name = "coordinates/frames/icrs"
types = [astropy.coordinates.ICRS]
requires = ['astropy']
version = "1.0.0"
@classmethod
def from_tree(cls, node, ctx):
wrap_angle = Angle(node['ra']['wrap_angle'])
ra = Longitude(
node['ra']['value'],
unit=node['ra']['unit'],
wrap_angle=wrap_angle)
dec = Latitude(node['dec']['value'], unit=node['dec']['unit'])
return ICRS(ra=ra, dec=dec)
@classmethod
def to_tree(cls, frame, ctx):
node = {}
wrap_angle = Quantity(frame.ra.wrap_angle)
node['ra'] = {
'value': frame.ra.value,
'unit': frame.ra.unit.to_string(),
'wrap_angle': wrap_angle
}
node['dec'] = {
'value': frame.dec.value,
'unit': frame.dec.unit.to_string()
}
return node
@classmethod
def assert_equal(cls, old, new):
assert isinstance(old, ICRS)
assert isinstance(new, ICRS)
assert u.allclose(new.ra, old.ra)
assert u.allclose(new.dec, old.dec)
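# Usage sketch (illustrative; not part of the original module). Any frame
# registered with the transform graph serializes through CoordType; ICRS has
# its own 1.1.0 tag plus the legacy 1.0.0 reader above. The file name is a
# made-up example.
#
#     import asdf
#     import astropy.units as u
#     from astropy.coordinates import ICRS
#
#     frame = ICRS(ra=10.68458 * u.deg, dec=41.26917 * u.deg)
#     asdf.AsdfFile({'frame': frame}).write_to('icrs.asdf')
#     with asdf.open('icrs.asdf') as af:
#         CoordType.assert_equal(frame, af['frame'])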
|
83e12765cda410bb14e25a9cc25565e3ea7324f30a68c21ca8e7a0257641b43b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.coordinates import SkyCoord
from astropy.io.misc.asdf.tags.helpers import skycoord_equal
from ...types import AstropyType
class SkyCoordType(AstropyType):
name = 'coordinates/skycoord'
types = [SkyCoord]
version = "1.0.0"
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, tree, ctx):
return SkyCoord.info._construct_from_dict(tree)
@classmethod
def assert_equal(cls, old, new):
assert skycoord_equal(old, new)
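# Usage sketch (illustrative; not part of the original module); the file name
# is a made-up example.
#
#     import asdf
#     import astropy.units as u
#     from astropy.coordinates import SkyCoord
#
#     coord = SkyCoord(10.0 * u.deg, -30.0 * u.deg, frame='icrs')
#     asdf.AsdfFile({'coord': coord}).write_to('skycoord.asdf')
#     with asdf.open('skycoord.asdf') as af:
#         assert skycoord_equal(coord, af['coord'])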
|
8ff80cf3960674f038aeacc196466f0d4b5a52130ce8c62865a372a5134b979f | import astropy.units as u
import astropy.coordinates.representation
from astropy.coordinates.representation import BaseRepresentationOrDifferential
from astropy.io.misc.asdf.types import AstropyType
class RepresentationType(AstropyType):
name = "coordinates/representation"
types = [BaseRepresentationOrDifferential]
version = "1.0.0"
_representation_module = astropy.coordinates.representation
@classmethod
def to_tree(cls, representation, ctx):
comps = representation.components
components = {}
for c in comps:
value = getattr(representation, '_' + c, None)
if value is not None:
components[c] = value
t = type(representation)
node = {}
node['type'] = t.__name__
node['components'] = components
return node
@classmethod
def from_tree(cls, node, ctx):
rep_type = getattr(cls._representation_module, node['type'])
return rep_type(**node['components'])
@classmethod
def assert_equal(cls, old, new):
assert isinstance(new, type(old))
assert new.components == old.components
for comp in new.components:
nc = getattr(new, comp)
oc = getattr(old, comp)
assert u.allclose(nc, oc)
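# Usage sketch (illustrative; not part of the original module); the file name
# is a made-up example.
#
#     import asdf
#     import astropy.units as u
#     from astropy.coordinates import CartesianRepresentation
#
#     rep = CartesianRepresentation(1 * u.kpc, 2 * u.kpc, 3 * u.kpc)
#     asdf.AsdfFile({'rep': rep}).write_to('representation.asdf')
#     with asdf.open('representation.asdf') as af:
#         RepresentationType.assert_equal(rep, af['rep'])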
|
73b7c86bb958864247a6628be97199c07178703470c217961dd606cbdff51bc0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.coordinates import Angle, Latitude, Longitude
from astropy.io.misc.asdf.tags.unit.quantity import QuantityType
__all__ = ['AngleType', 'LatitudeType', 'LongitudeType']
class AngleType(QuantityType):
name = "coordinates/angle"
types = [Angle]
requires = ['astropy']
version = "1.0.0"
organization = 'astropy.org'
standard = 'astropy'
@classmethod
def from_tree(cls, node, ctx):
return Angle(super().from_tree(node, ctx))
class LatitudeType(AngleType):
name = "coordinates/latitude"
types = [Latitude]
@classmethod
def from_tree(cls, node, ctx):
return Latitude(super().from_tree(node, ctx))
class LongitudeType(AngleType):
name = "coordinates/longitude"
types = [Longitude]
@classmethod
def from_tree(cls, node, ctx):
wrap_angle = node['wrap_angle']
return Longitude(super().from_tree(node, ctx), wrap_angle=wrap_angle)
@classmethod
def to_tree(cls, longitude, ctx):
tree = super().to_tree(longitude, ctx)
tree['wrap_angle'] = longitude.wrap_angle
return tree
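# Usage sketch (illustrative; not part of the original module). LongitudeType
# adds the wrap angle to the quantity tree, so wrapping behaviour survives the
# round trip; the file name is a made-up example.
#
#     import asdf
#     import astropy.units as u
#     from astropy.coordinates import Longitude
#
#     lon = Longitude(370 * u.deg, wrap_angle=180 * u.deg)   # stored as 10 deg
#     asdf.AsdfFile({'lon': lon}).write_to('longitude.asdf')
#     with asdf.open('longitude.asdf') as af:
#         assert af['lon'].wrap_angle == 180 * u.deg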
|
9f822eedc9f99434ae60245657dcf8a6bcaa36b70f25f17b681f7a7198a5ce4a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.coordinates import EarthLocation
from ...types import AstropyType
class EarthLocationType(AstropyType):
name = 'coordinates/earthlocation'
types = [EarthLocation]
version = '1.0.0'
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, node, ctx):
return EarthLocation.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
        assert (old == new).all()
|
976500e2a1efb1d3cd6d56caf913a1dc6b95fced9ea62af4a4d417516abdfa93 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
def run_schema_example_test(organization, standard, name, version, check_func=None):
import asdf
from asdf.tests import helpers
from asdf.types import format_tag
from asdf.schema import load_schema
tag = format_tag(organization, standard, version, name)
uri = asdf.extension.default_extensions.extension_list.tag_mapping(tag)
r = asdf.extension.get_default_resolver()
examples = []
schema = load_schema(uri, resolver=r)
for node in asdf.treeutil.iter_tree(schema):
if (isinstance(node, dict) and
'examples' in node and
isinstance(node['examples'], list)):
for desc, example in node['examples']:
examples.append(example)
for example in examples:
buff = helpers.yaml_to_asdf('example: ' + example.strip())
ff = asdf.AsdfFile(uri=uri)
# Add some dummy blocks so that the ndarray examples work
for i in range(3):
b = asdf.block.Block(np.zeros((1024*1024*8), dtype=np.uint8))
b._used = True
ff.blocks.add(b)
ff._open_impl(ff, buff, mode='r')
if check_func:
check_func(ff)
|
9abc2d91bdebc6131bf7c6ea241a13fb65ab9129b0a1a439f498eeb25711a7e6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.units.equivalencies import Equivalency
from astropy.units import equivalencies
from astropy.units.quantity import Quantity
from astropy.io.misc.asdf.types import AstropyType
class EquivalencyType(AstropyType):
name = "units/equivalency"
types = [Equivalency]
version = '1.0.0'
@classmethod
def to_tree(cls, equiv, ctx):
node = {}
if not isinstance(equiv, Equivalency):
raise TypeError(f"'{equiv}' is not a valid Equivalency")
eqs = []
for e, kwargs in zip(equiv.name, equiv.kwargs):
kwarg_names = list(kwargs.keys())
kwarg_values = list(kwargs.values())
eq = {'name': e, 'kwargs_names': kwarg_names, 'kwargs_values': kwarg_values}
eqs.append(eq)
return eqs
@classmethod
def from_tree(cls, node, ctx):
eqs = []
for eq in node:
equiv = getattr(equivalencies, eq['name'])
kwargs = dict(zip(eq['kwargs_names'], eq['kwargs_values']))
eqs.append(equiv(**kwargs))
return sum(eqs[1:], eqs[0])
@classmethod
def assert_equal(cls, a, b):
assert a == b
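# Usage sketch (illustrative; not part of the original module). Composite
# equivalencies survive because from_tree rebuilds each named equivalency with
# its keyword arguments and sums the pieces back together; the file name is a
# made-up example.
#
#     import asdf
#     import astropy.units as u
#
#     equiv = u.spectral() + u.parallax()
#     asdf.AsdfFile({'equiv': equiv}).write_to('equiv.asdf')
#     with asdf.open('equiv.asdf') as af:
#         assert af['equiv'] == equiv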
|
1d403453d10e76250f8aaa792a4fa9e4810235dc7374747e59a98f195ff01768 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.units import Unit, UnitBase
from astropy.io.misc.asdf.types import AstropyAsdfType
class UnitType(AstropyAsdfType):
name = 'unit/unit'
types = ['astropy.units.UnitBase']
requires = ['astropy']
@classmethod
def to_tree(cls, node, ctx):
if isinstance(node, str):
node = Unit(node, format='vounit', parse_strict='warn')
if isinstance(node, UnitBase):
return node.to_string(format='vounit')
raise TypeError(f"'{node}' is not a valid unit")
@classmethod
def from_tree(cls, node, ctx):
return Unit(node, format='vounit', parse_strict='silent')
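# Usage sketch (illustrative; not part of the original module). Units are
# written as VOUnit strings, so anything expressible in that format
# round-trips; the file name is a made-up example.
#
#     import asdf
#     import astropy.units as u
#
#     asdf.AsdfFile({'unit': u.km / u.s}).write_to('unit.asdf')
#     with asdf.open('unit.asdf') as af:
#         assert af['unit'] == u.km / u.s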
|
72a5c00b75e9695bb36ce2e271f2c4828286875282cfded51778a5b1558790dd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.units import Quantity
from asdf.tags.core import NDArrayType
from astropy.io.misc.asdf.types import AstropyAsdfType
class QuantityType(AstropyAsdfType):
name = 'unit/quantity'
types = ['astropy.units.Quantity']
requires = ['astropy']
version = '1.1.0'
@classmethod
def to_tree(cls, quantity, ctx):
node = {}
if isinstance(quantity, Quantity):
node['value'] = quantity.value
node['unit'] = quantity.unit
return node
raise TypeError(f"'{quantity}' is not a valid Quantity")
@classmethod
def from_tree(cls, node, ctx):
if isinstance(node, Quantity):
return node
unit = node['unit']
value = node['value']
if isinstance(value, NDArrayType):
value = value._make_array()
return Quantity(value, unit=unit)
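# Usage sketch (illustrative; not part of the original module); the file name
# is a made-up example.
#
#     import asdf
#     import astropy.units as u
#
#     q = u.Quantity([1.0, 2.0, 3.0], u.km / u.s)
#     asdf.AsdfFile({'q': q}).write_to('quantity.asdf')
#     with asdf.open('quantity.asdf') as af:
#         assert (af['q'] == q).all()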
|
b836203df79a5bf48b2f19c05c6f433b673cad5e32ba74d44470b82758d7ba7e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import numpy as np
from packaging.version import Version
import astropy.units as u
from astropy import table
from astropy.time import Time, TimeDelta
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.io.misc.asdf.tags.helpers import skycoord_equal
from asdf.tests import helpers
from asdf.tags.core.ndarray import NDArrayType
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
def test_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 3
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_array_columns(tmpdir):
a = np.array([([[1, 2], [3, 4]], 2.0, 'x'),
([[5, 6], [7, 8]], 5.0, 'y'),
([[9, 10], [11, 12]], 8.2, 'z')],
dtype=[('a', '<i4', (2, 2)),
('b', '<f8'),
('c', '|S1')])
t = table.Table(a, copy=False)
assert t.columns['a'].shape == (3, 2, 2)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_structured_array_columns(tmpdir):
a = np.array([((1, 'a'), 2.0, 'x'),
((4, 'b'), 5.0, 'y'),
((5, 'c'), 8.2, 'z')],
dtype=[('a', [('a0', '<i4'), ('a1', '|S1')]),
('b', '<f8'), ('c', '|S1')])
t = table.Table(a, copy=False)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_row_order(tmpdir):
a = np.array([(1, 2.0, 'x'), (4, 5.0, 'y'), (5, 8.2, 'z')],
dtype=[('a', '<i4'), ('b', '<f8'), ('c', '|S1')])
t = table.Table(a, copy=False)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_inline(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(list(ff.blocks.internal_blocks)) == 0
if Version(asdf.__version__) >= Version('2.8.0'):
# The auto_inline argument is deprecated as of asdf 2.8.0.
with asdf.config_context() as config:
config.array_inline_threshold = 64
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
else:
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check,
write_options={'auto_inline': 64})
def test_mismatched_columns():
yaml = """
table: !<tag:astropy.org:astropy/table/table-1.0.0>
columns:
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2]
name: a
- !core/column-1.0.0
data: !core/ndarray-1.0.0
data: [0, 1, 2, 3]
name: b
colnames: [a, b]
"""
buff = helpers.yaml_to_asdf(yaml)
with pytest.raises(ValueError) as err:
with asdf.open(buff) as ff:
pass
assert 'Inconsistent data column lengths' in str(err.value)
def test_masked_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'), masked=True)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['a'].mask = [True, False, True]
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 4
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_quantity_mixin(tmpdir):
t = table.QTable()
t['a'] = [1, 2, 3]
t['b'] = ['x', 'y', 'z']
t['c'] = [2.0, 5.0, 8.2] * u.m
def check(ff):
assert isinstance(ff['table']['c'], u.Quantity)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_time_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
def check(ff):
assert isinstance(ff['table']['c'], Time)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_timedelta_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = TimeDelta([1, 2] * u.day)
def check(ff):
assert isinstance(ff['table']['c'], TimeDelta)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_skycoord_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4', obstime='J1990.5')
def check(ff):
assert isinstance(ff['table']['c'], SkyCoord)
def tree_match(old, new):
NDArrayType.assert_equal(new['a'], old['a'])
NDArrayType.assert_equal(new['b'], old['b'])
assert skycoord_equal(new['c'], old['c'])
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check,
tree_match_func=tree_match)
def test_earthlocation_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
def check(ff):
assert isinstance(ff['table']['c'], EarthLocation)
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_ndarray_mixin(tmpdir):
t = table.Table()
t['a'] = [1, 2]
t['b'] = ['x', 'y']
t['c'] = table.NdarrayMixin([5, 6])
helpers.assert_roundtrip_tree({'table': t}, tmpdir)
def test_backwards_compat():
"""
Make sure that we can continue to read tables that use the schema from
the ASDF Standard.
This test uses the examples in the table schema from the ASDF Standard,
since these make no reference to Astropy's own table definition.
"""
def check(asdffile):
assert isinstance(asdffile['example'], table.Table)
run_schema_example_test('stsci.edu', 'asdf', 'core/table', '1.0.0', check)
|
7ba7274f6cef19e25026cb1b6b2dd8794c89b870260f8aaba84f2d24511b25e3 | import pytest
from astropy.io.misc.asdf.tests import ASDF_ENTRY_INSTALLED
if not ASDF_ENTRY_INSTALLED:
pytest.skip('The astropy asdf entry points are not installed',
allow_module_level=True)
|
4e6e541bd8bc3c50c8d327860c03319d788993998b7d788c1e431be46b5564fa | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import os
import numpy as np
from astropy.io import fits
from asdf.tests import helpers
from astropy.io.misc.asdf.tags.tests.helpers import run_schema_example_test
def test_complex_structure(tmpdir):
with fits.open(os.path.join(
os.path.dirname(__file__), 'data', 'complex.fits'), memmap=False) as hdulist:
tree = {
'fits': hdulist
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fits_table(tmpdir):
a = np.array(
[(0, 1), (2, 3)],
dtype=[('A', int), ('B', int)])
h = fits.HDUList()
h.append(fits.BinTableHDU.from_columns(a))
tree = {'fits': h}
def check_yaml(content):
assert b'!<tag:astropy.org:astropy/table/table-1.0.0>' in content
helpers.assert_roundtrip_tree(tree, tmpdir, raw_yaml_check_func=check_yaml)
def test_backwards_compat():
"""
Make sure that we can continue to read FITS HDUs that use the schema from
the ASDF Standard.
This test uses the examples in the fits schema from the ASDF Standard,
since these make no reference to Astropy's own fits definition.
"""
def check(asdffile):
assert isinstance(asdffile['example'], fits.HDUList)
run_schema_example_test('stsci.edu', 'asdf', 'fits/fits', '1.0.0', check)
|
9f2d2a0364379e65a952a6569d183e6fbb2bd1064e8080257496432bd2ecc38b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
asdf = pytest.importorskip("asdf")
from asdf.tests.helpers import assert_roundtrip_tree
from astropy.modeling.models import UnitsMapping
from astropy import units as u
def assert_model_roundtrip(model, tmpdir):
tree = {"model": model}
assert_roundtrip_tree(tree, tmpdir, tree_match_func=assert_models_equal)
def assert_models_equal(a, b):
assert a.name == b.name
assert a.inputs == b.inputs
assert a.input_units == b.input_units
assert a.outputs == b.outputs
assert a.mapping == b.mapping
assert a.input_units_allow_dimensionless == b.input_units_allow_dimensionless
for i in a.inputs:
if a.input_units_equivalencies is None:
a_equiv = None
else:
a_equiv = a.input_units_equivalencies.get(i)
if b.input_units_equivalencies is None:
b_equiv = None
else:
            b_equiv = b.input_units_equivalencies.get(i)
assert a_equiv == b_equiv
def test_basic(tmpdir):
m = UnitsMapping(((u.m, u.dimensionless_unscaled),))
assert_model_roundtrip(m, tmpdir)
def test_remove_units(tmpdir):
m = UnitsMapping(((u.m, None),))
assert_model_roundtrip(m, tmpdir)
def test_accept_any_units(tmpdir):
m = UnitsMapping(((None, u.m),))
assert_model_roundtrip(m, tmpdir)
def test_with_equivalencies(tmpdir):
m = UnitsMapping(((u.m, u.dimensionless_unscaled),), input_units_equivalencies={"x": u.equivalencies.spectral()})
assert_model_roundtrip(m, tmpdir)
def test_with_allow_dimensionless(tmpdir):
m = UnitsMapping(((u.m, u.dimensionless_unscaled), (u.s, u.Hz)), input_units_allow_dimensionless=True)
assert_model_roundtrip(m, tmpdir)
m = UnitsMapping(((u.m, u.dimensionless_unscaled), (u.s, u.Hz)), input_units_allow_dimensionless={"x0": True, "x1": False})
assert_model_roundtrip(m, tmpdir)
|
12a7824f5539e892b8ee8ec9a3c504bb79b8470b449821016442d8d4dedc3b37 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import warnings
from packaging.version import Version
import numpy as np
from asdf import util
from asdf.tests import helpers
from asdf import AsdfFile
import asdf
import astropy.units as u
from astropy.modeling.core import fix_inputs
from astropy.modeling import models as astmodels
from astropy.utils.compat.optional_deps import HAS_SCIPY
def custom_and_analytical_inverse():
p1 = astmodels.Polynomial1D(1)
p2 = astmodels.Polynomial1D(1)
p3 = astmodels.Polynomial1D(1)
p4 = astmodels.Polynomial1D(1)
m1 = p1 & p2
m2 = p3 & p4
m1.inverse = m2
return m1
def custom_inputs_outputs():
m = astmodels.Gaussian2D()
m.inputs = ('a', 'b')
m.outputs = ('c',)
return m
test_models = [
astmodels.Identity(2), astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Shift(2.),
astmodels.Hermite1D(2, c0=2, c1=3, c2=0.5),
astmodels.Legendre1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5),
astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Legendre2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Hermite2D(1, 1, c0_0=1, c0_1=2, c1_0=3),
astmodels.Scale(3.4), astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.Multiply(3), astmodels.Multiply(10*u.m),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order='xzx'),
astmodels.Mapping((0, 1), n_inputs=3),
astmodels.Shift(2.*u.deg),
astmodels.Scale(3.4*u.deg),
astmodels.RotateNative2Celestial(5.63*u.deg, -72.5*u.deg, 180*u.deg),
astmodels.RotateCelestial2Native(5.63*u.deg, -72.5*u.deg, 180*u.deg),
astmodels.RotationSequence3D([1.2, 2.3, 3.4, .3], 'xyzx'),
astmodels.SphericalRotationSequence([1.2, 2.3, 3.4, .3], 'xyzy'),
astmodels.AiryDisk2D(amplitude=10., x_0=0.5, y_0=1.5),
astmodels.Box1D(amplitude=10., x_0=0.5, width=5.),
astmodels.Box2D(amplitude=10., x_0=0.5, x_width=5., y_0=1.5, y_width=7.),
astmodels.Const1D(amplitude=5.),
astmodels.Const2D(amplitude=5.),
astmodels.Disk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5.),
astmodels.Ellipse2D(amplitude=10., x_0=0.5, y_0=1.5, a=2., b=4., theta=0.1),
astmodels.Exponential1D(amplitude=10., tau=3.5),
astmodels.Gaussian1D(amplitude=10., mean=5., stddev=3.),
astmodels.Gaussian2D(amplitude=10., x_mean=5., y_mean=5., x_stddev=3., y_stddev=3.),
astmodels.KingProjectedAnalytic1D(amplitude=10., r_core=5., r_tide=2.),
astmodels.Logarithmic1D(amplitude=10., tau=3.5),
astmodels.Lorentz1D(amplitude=10., x_0=0.5, fwhm=2.5),
astmodels.Moffat1D(amplitude=10., x_0=0.5, gamma=1.2, alpha=2.5),
astmodels.Moffat2D(amplitude=10., x_0=0.5, y_0=1.5, gamma=1.2, alpha=2.5),
astmodels.Planar2D(slope_x=0.5, slope_y=1.2, intercept=2.5),
astmodels.RedshiftScaleFactor(z=2.5),
astmodels.RickerWavelet1D(amplitude=10., x_0=0.5, sigma=1.2),
astmodels.RickerWavelet2D(amplitude=10., x_0=0.5, y_0=1.5, sigma=1.2),
astmodels.Ring2D(amplitude=10., x_0=0.5, y_0=1.5, r_in=5., width=10.),
astmodels.Sersic1D(amplitude=10., r_eff=1., n=4.),
astmodels.Sersic2D(amplitude=10., r_eff=1., n=4., x_0=0.5, y_0=1.5, ellip=0.0, theta=0.0),
astmodels.Sine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.Cosine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.Tangent1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.ArcSine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.ArcCosine1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.ArcTangent1D(amplitude=10., frequency=0.5, phase=1.),
astmodels.Trapezoid1D(amplitude=10., x_0=0.5, width=5., slope=1.),
astmodels.TrapezoidDisk2D(amplitude=10., x_0=0.5, y_0=1.5, R_0=5., slope=1.),
astmodels.Voigt1D(x_0=0.55, amplitude_L=10., fwhm_L=0.5, fwhm_G=0.9),
astmodels.BlackBody(scale=10.0, temperature=6000.*u.K),
astmodels.Drude1D(amplitude=10.0, x_0=0.5, fwhm=2.5),
astmodels.Plummer1D(mass=10.0, r_plum=5.0),
astmodels.BrokenPowerLaw1D(amplitude=10, x_break=0.5, alpha_1=2.0, alpha_2=3.5),
astmodels.ExponentialCutoffPowerLaw1D(10, 0.5, 2.0, 7.),
    astmodels.LogParabola1D(amplitude=10, x_0=0.5, alpha=2., beta=3.),
astmodels.PowerLaw1D(amplitude=10., x_0=0.5, alpha=2.0),
astmodels.SmoothlyBrokenPowerLaw1D(amplitude=10., x_break=5.0, alpha_1=2.0, alpha_2=3.0, delta=0.5),
custom_and_analytical_inverse(),
custom_inputs_outputs(),
]
if HAS_SCIPY:
test_models.append(astmodels.Spline1D(np.array([-3., -3., -3., -3., -1., 0., 1., 3., 3., 3., 3.]),
np.array([0.10412331, 0.07013616, -0.18799552, 1.35953147, -0.15282581, 0.03923, -0.04297299, 0., 0., 0., 0.]),
3))
math_models = []
for kl in astmodels.math.__all__:
klass = getattr(astmodels.math, kl)
math_models.append(klass())
test_models.extend(math_models)
test_models_with_constraints = [astmodels.Legendre2D(x_degree=1, y_degree=1,
c0_0=1, c0_1=2, c1_0=3,
fixed={'c1_0': True, 'c0_1': True},
bounds={'c0_0': (-10, 10)})]
test_models.extend(test_models_with_constraints)
def test_transforms_compound(tmpdir):
tree = {
'compound':
astmodels.Shift(1) & astmodels.Shift(2) |
astmodels.Sky2Pix_TAN() |
astmodels.Rotation2D() |
astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32]) +
astmodels.Rotation2D(32)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
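# Note on the compound expression above: astropy model operators follow
# standard Python operator precedence, so '+' binds tighter than '&', which
# binds tighter than '|'. The tree is therefore equivalent to
# (Shift & Shift) | Sky2Pix_TAN | Rotation2D | (AffineTransformation2D + Rotation2D).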
def test_inverse_transforms(tmpdir):
rotation = astmodels.Rotation2D(32)
rotation.inverse = astmodels.Rotation2D(45)
real_rotation = astmodels.Rotation2D(32)
tree = {
'rotation': rotation,
'real_rotation': real_rotation
}
def check(ff):
assert ff.tree['rotation'].inverse.angle == 45
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
@pytest.mark.parametrize(('model'), test_models)
def test_single_model(tmpdir, model):
with warnings.catch_warnings():
# Some schema files are missing from asdf<=2.6.0 which causes warnings
if Version(asdf.__version__) <= Version('2.6.0'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
tree = {'single_model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
def check(ff):
assert ff.tree['rot'].name == 'foo'
tree = {'rot': astmodels.Rotation2D(23, name='foo')}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_zenithal_with_arguments(tmpdir):
tree = {
'azp': astmodels.Sky2Pix_AZP(0.5, 0.3)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_naming_of_compound_model(tmpdir):
"""Issue #87"""
def asdf_check(ff):
assert ff.tree['model'].name == 'compound_model'
offx = astmodels.Shift(1)
scl = astmodels.Scale(2)
model = (offx | scl).rename('compound_model')
tree = {
'model': model
}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=asdf_check)
def test_generic_projections(tmpdir):
from astropy.io.misc.asdf.tags.transform import projections
for tag_name, (name, params, version) in projections._generic_projections.items():
tree = {
'forward': util.resolve_name(
f'astropy.modeling.projections.Sky2Pix_{name}')(),
'backward': util.resolve_name(
f'astropy.modeling.projections.Pix2Sky_{name}')()
}
with warnings.catch_warnings():
            # Some schema files are missing from older asdf versions, which causes warnings
if Version(asdf.__version__) <= Version('2.5.1'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3., 0., 0.],
[0., 2., 0.],
[0., 0., 0.]])
points = ([1, 2, 3], [1, 2, 3])
model2 = astmodels.Tabular2D(points, lookup_table=table, bounds_error=False,
fill_value=None, method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_bounding_box(tmpdir):
model = astmodels.Shift(1) & astmodels.Shift(2)
model.bounding_box = ((1, 3), (2, 4))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
def test_const1d(tmpdir, standard_version):
helpers.assert_roundtrip_tree(
{"model": astmodels.Const1D(amplitude=5.)},
tmpdir,
init_options={"version": standard_version}
)
@pytest.mark.parametrize("standard_version", asdf.versioning.supported_versions)
@pytest.mark.parametrize("model", [
astmodels.Polynomial1D(1, c0=5, c1=17),
astmodels.Polynomial1D(1, c0=5, c1=17, domain=[-5, 4], window=[-2, 3]),
astmodels.Polynomial2D(2, c0_0=3, c1_0=5, c0_1=7),
astmodels.Polynomial2D(
2, c0_0=3, c1_0=5, c0_1=7, x_domain=[-2, 2], y_domain=[-4, 4],
x_window=[-6, 6], y_window=[-8, 8]
),
])
def test_polynomial(tmpdir, standard_version, model):
helpers.assert_roundtrip_tree({"model": model}, tmpdir, init_options={"version": standard_version})
def test_domain_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5, domain=[-2, 2])
model2d = astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3,
x_domain=[-2, 2], y_domain=[-2, 2])
fa = AsdfFile()
fa.tree['model1d'] = model1d
fa.tree['model2d'] = model2d
file_path = str(tmpdir.join('orthopoly_domain.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model1d'](1.8) == model1d(1.8)
assert f.tree['model2d'](1.8, -1.5) == model2d(1.8, -1.5)
def test_window_orthopoly(tmpdir):
model1d = astmodels.Chebyshev1D(2, c0=2, c1=3, c2=0.5,
domain=[-2, 2], window=[-0.5, 0.5])
model2d = astmodels.Chebyshev2D(1, 1, c0_0=1, c0_1=2, c1_0=3,
x_domain=[-2, 2], y_domain=[-2, 2],
x_window=[-0.5, 0.5], y_window=[-0.1, 0.5])
fa = AsdfFile()
fa.tree['model1d'] = model1d
fa.tree['model2d'] = model2d
file_path = str(tmpdir.join('orthopoly_window.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model1d'](1.8) == model1d(1.8)
assert f.tree['model2d'](1.8, -1.5) == model2d(1.8, -1.5)
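# Background for the two tests above (hedged): astropy's orthogonal
# polynomial models follow the numpy.polynomial convention of linearly
# remapping inputs from ``domain`` onto ``window`` before evaluating the
# basis, so both attributes must round-trip for evaluations to agree.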
def test_linear1d(tmpdir):
model = astmodels.Linear1D()
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_linear1d_quantity(tmpdir):
model = astmodels.Linear1D(1*u.nm, 1*(u.nm/u.pixel))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model_units(tmpdir):
points = np.arange(0, 5) * u.pix
values = [1., 10, 2, 45, -3] * u.nm
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[3., 0., 0.],
[0., 2., 0.],
[0., 0., 0.]]) * u.nm
points = ([1, 2, 3], [1, 2, 3]) * u.pix
model2 = astmodels.Tabular2D(points, lookup_table=table,
bounds_error=False, fill_value=None,
method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs(tmpdir):
with warnings.catch_warnings():
        # Some schema files are missing from older asdf versions, which causes warnings
if Version(asdf.__version__) <= Version('2.5.1'):
warnings.filterwarnings('ignore', 'Unable to locate schema file')
model0 = astmodels.Pix2Sky_TAN()
model0.input_units_equivalencies = {'x': u.dimensionless_angles(),
'y': u.dimensionless_angles()}
model1 = astmodels.Rotation2D()
model = model0 | model1
tree = {
'compound': fix_inputs(model, {'x': 45}),
'compound1': fix_inputs(model, {0: 45})
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fix_inputs_type(tmpdir):
with pytest.raises(TypeError):
tree = {
'compound': fix_inputs(3, {'x': 45})
}
helpers.assert_roundtrip_tree(tree, tmpdir)
with pytest.raises(AttributeError):
tree = {
'compound': astmodels.Pix2Sky_TAN() & {'x': 45}
}
helpers.assert_roundtrip_tree(tree, tmpdir)
comp_model = custom_and_analytical_inverse()
@pytest.mark.parametrize(('model'), [astmodels.Shift(1) & astmodels.Shift(2) | comp_model,
comp_model | astmodels.Shift(1) & astmodels.Shift(2),
astmodels.Shift(1) & comp_model,
comp_model & astmodels.Shift(1)
])
def test_custom_and_analytical(model, tmpdir):
fa = AsdfFile()
fa.tree['model'] = model
file_path = str(tmpdir.join('custom_and_analytical_inverse.asdf'))
fa.write_to(file_path)
with asdf.open(file_path) as f:
assert f.tree['model'].inverse is not None
def test_deserialize_compound_user_inverse(tmpdir):
"""
Confirm that we are able to correctly reconstruct a
compound model with a user inverse set on one of its
component models.
Due to code in TransformType that facilitates circular
inverses, the user inverse of the component model is
not available at the time that the CompoundModel is
constructed.
"""
yaml = """
model: !transform/concatenate-1.2.0
forward:
- !transform/shift-1.2.0
inverse: !transform/shift-1.2.0 {offset: 5.0}
offset: -10.0
- !transform/shift-1.2.0 {offset: -20.0}
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as af:
model = af["model"]
assert model.has_inverse()
assert model.inverse(-5, -20) == (0, 0)
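# For reference, a hedged sketch of the model encoded by the YAML above,
# built directly with astropy.modeling: a Shift(-10) carrying a user-set
# inverse of Shift(5), concatenated with a plain Shift(-20).
def _compound_user_inverse_sketch():
    shift_a = astmodels.Shift(-10)
    shift_a.inverse = astmodels.Shift(5)  # user inverse, as in the YAML
    model = shift_a & astmodels.Shift(-20)
    # matches the assertion in the test above
    assert model.inverse(-5, -20) == (0, 0)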
# test some models and compound models with some input unit equivalencies
def models_with_input_eq():
# 1D model
m1 = astmodels.Shift(1*u.kg)
m1.input_units_equivalencies = {'x': u.mass_energy()}
# 2D model
m2 = astmodels.Const2D(10*u.Hz)
m2.input_units_equivalencies = {'x': u.dimensionless_angles(),
'y': u.dimensionless_angles()}
# 2D model with only one input equivalencies
m3 = astmodels.Const2D(10*u.Hz)
m3.input_units_equivalencies = {'x': u.dimensionless_angles()}
# model using equivalency that has args using units
m4 = astmodels.PowerLaw1D(amplitude=1*u.m, x_0=10*u.pix, alpha=7)
m4.input_units_equivalencies = {'x': u.equivalencies.pixel_scale(0.5*u.arcsec/u.pix)}
    return [m1, m2, m3, m4]
def compound_models_with_input_eq():
m1 = astmodels.Gaussian1D(10*u.K, 11*u.arcsec, 12*u.arcsec)
m1.input_units_equivalencies = {'x': u.parallax()}
m2 = astmodels.Gaussian1D(5*u.s, 2*u.K, 3*u.K)
m2.input_units_equivalencies = {'x': u.temperature()}
    return [m1 | m2, m1 & m2, m1 + m2]
test_models.extend(models_with_input_eq())
test_models.extend(compound_models_with_input_eq())
|
8494f148026b98c5f78247662dfe541fe799fe74bdc3a7be2d0707936033aaf8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.time import Time, TimeDelta
@pytest.mark.parametrize('fmt', TimeDelta.FORMATS.keys())
def test_timedelta(fmt, tmpdir):
t1 = Time(Time.now())
t2 = Time(Time.now())
td = TimeDelta(t2 - t1, format=fmt)
tree = dict(timedelta=td)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize('scale', list(TimeDelta.SCALES) + [None])
def test_timedelta_scales(scale, tmpdir):
tree = dict(timedelta=TimeDelta(0.125, scale=scale, format="jd"))
assert_roundtrip_tree(tree, tmpdir)
def test_timedelta_vector(tmpdir):
tree = dict(timedelta=TimeDelta([1, 2] * u.day))
assert_roundtrip_tree(tree, tmpdir)
|
cc98ccf5931a75030d5901c61ed34c223c94d3d9b66cc97fe349318ff1fa6942 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import datetime
import numpy as np
from astropy import time
from asdf import AsdfFile, yamlutil, tagged
from asdf.tests import helpers
import asdf.schema as asdf_schema
def _flatten_combiners(schema):
newschema = dict()
def add_entry(path, schema, combiner):
# TODO: Simplify?
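        # Walk ``path`` (a mix of dict keys and list indices produced while
        # traversing the schema) down through ``newschema``, creating the
        # intermediate 'properties'/'items' containers as needed, then merge
        # this subschema at the final cursor position.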
cursor = newschema
for i in range(len(path)):
part = path[i]
if isinstance(part, int):
cursor = cursor.setdefault('items', [])
while len(cursor) <= part:
cursor.append({})
cursor = cursor[part]
elif part == 'items':
cursor = cursor.setdefault('items', dict())
else:
cursor = cursor.setdefault('properties', dict())
if i < len(path) - 1 and isinstance(path[i+1], int):
cursor = cursor.setdefault(part, [])
else:
cursor = cursor.setdefault(part, dict())
cursor.update(schema)
def test_time(tmpdir):
time_array = time.Time(
np.arange(100), format="unix")
tree = {
'large_time_array': time_array
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location(tmpdir):
# See https://github.com/spacetelescope/asdf/issues/341
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
    location = EarthLocation(x=[1, 2]*u.m, y=[3, 4]*u.m, z=[5, 6]*u.m)
    t = time.Time([1, 2], location=location, format='cxcsec')
tree = {'time': t}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location_1_0_0(tmpdir):
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
location = EarthLocation(x=6378100*u.m, y=0*u.m, z=0*u.m)
t = time.Time('J2000.000', location=location, format='jyear_str')
tree = {'time': t}
# The version refers to ASDF Standard 1.0.0, which includes time-1.0.0
helpers.assert_roundtrip_tree(tree, tmpdir, init_options={"version": "1.0.0"})
def test_isot(tmpdir):
tree = {
'time': time.Time('2000-01-01T00:00:00.000')
}
helpers.assert_roundtrip_tree(tree, tmpdir)
ff = asdf.AsdfFile(tree)
tree = yamlutil.custom_tree_to_tagged_tree(ff.tree, ff)
assert isinstance(tree['time'], str)
def test_isot_array(tmpdir):
tree = {
'time': time.Time(['2001-01-02T12:34:56', '2001-02-03T00:01:02'])
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_tag():
schema = asdf_schema.load_schema(
'http://stsci.edu/schemas/asdf/time/time-1.1.0',
resolve_references=True)
schema = _flatten_combiners(schema)
date = time.Time(datetime.datetime.now())
tree = {'date': date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], asdf)
asdf_schema.validate(instance, schema=schema)
tag = 'tag:stsci.edu:asdf/time/time-1.1.0'
date = tagged.tag_object(tag, date)
tree = {'date': date}
asdf = AsdfFile(tree=tree)
instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], asdf)
asdf_schema.validate(instance, schema=schema)
|
8007ff110cb6185ee16dde548d99d9c0e9f38d478a8902daf51bf91e770bedb2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units
from astropy.coordinates import ICRS, FK5, Longitude, Latitude, Angle
from astropy.io.misc.asdf.extension import AstropyExtension
def test_icrs_nowrap(tmpdir):
ra = Longitude(25, unit=units.deg)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_basic(tmpdir):
wrap_angle = Angle(1.5, unit=units.rad)
ra = Longitude(25, unit=units.deg, wrap_angle=wrap_angle)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_nodata(tmpdir):
tree = {'coord': ICRS()}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_compound(tmpdir):
icrs = ICRS(ra=[0, 1, 2]*units.deg, dec=[3, 4, 5]*units.deg)
tree = {'coord': icrs}
assert_roundtrip_tree(tree, tmpdir)
def test_fk5_time(tmpdir):
tree = {'coord': FK5(equinox="2011-01-01T00:00:00")}
assert_roundtrip_tree(tree, tmpdir)
|
8672d71d7c624c64341aeb30595d2c27edf69afadd90efe8c62738ab022574a5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.coordinates.angles import Longitude, Latitude
from astropy.coordinates.earth import EarthLocation, ELLIPSOIDS
@pytest.fixture
def position():
lon = Longitude([0., 45., 90., 135., 180., -180, -90, -45], u.deg,
wrap_angle=180*u.deg)
lat = Latitude([+0., 30., 60., +90., -90., -60., -30., 0.], u.deg)
h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11., -.1], u.m)
return lon, lat, h
def test_earthlocation_quantity(tmpdir):
location = EarthLocation(lat=34.4900*u.deg, lon=-104.221800*u.deg,
height=40*u.km)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation(position, tmpdir):
x, y, z = EarthLocation.from_geodetic(*position).to_geocentric()
geocentric = EarthLocation(x, y, z)
tree = dict(location=geocentric)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize('ellipsoid', ELLIPSOIDS)
def test_earthlocation_geodetic(position, ellipsoid, tmpdir):
location = EarthLocation.from_geodetic(*position, ellipsoid=ellipsoid)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation_site(tmpdir):
orig_sites = getattr(EarthLocation, '_site_registry', None)
try:
EarthLocation._get_site_registry(force_builtin=True)
rog = EarthLocation.of_site('greenwich')
tree = dict(location=rog)
assert_roundtrip_tree(tree, tmpdir)
finally:
EarthLocation._site_registry = orig_sites
|
ddb54d68a991b3a2d7399a63a9094c57479a36012e31f775245f3af2fbc2ab91 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord, ICRS, Galactic, FK4, FK5, Longitude
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
# These tests are cribbed directly from the Examples section of
# https://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html
def test_scalar_skycoord(tmpdir):
c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_vector_skycoord(tmpdir):
c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_fk4(tmpdir):
coords = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
c = SkyCoord(coords, frame=FK4, unit=(u.deg, u.hourangle), obstime="J1992.21")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize('coord', [
SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic), # Units from string
SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
])
def test_skycoord_galactic(coord, tmpdir):
tree = dict(coord=coord)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_ra_dec(tmpdir):
ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
c = SkyCoord(ra, dec, frame='icrs')
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_override_defaults(tmpdir):
c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_cartesian(tmpdir):
c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
representation_type='cartesian')
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_vector_frames(tmpdir):
c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason='Velocities are not properly serialized yet')
def test_skycoord_radial_velocity(tmpdir):
c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason='Velocities are not properly serialized yet')
def test_skycoord_proper_motion(tmpdir):
c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr,
pm_dec=1*u.mas/u.yr)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.skip(reason='Apparent loss of precision during serialization')
def test_skycoord_extra_attribute(tmpdir):
sc = SkyCoord(10*u.deg, 20*u.deg, equinox="2011-01-01T00:00", frame="fk4")
tree = dict(coord=sc.transform_to("icrs"))
def check_asdf(asdffile):
assert hasattr(asdffile['coord'], 'equinox')
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check_asdf)
def test_skycoord_2d_obstime(tmpdir):
sc = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
                  obstime=['J1990.5', 'J1991.5'])
tree = dict(coord=sc)
assert_roundtrip_tree(tree, tmpdir)
|
0ae8273e8cf2dea2faf6bd68fc7084ffa9f00ef3bc116aaac9e68a69086f79df | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import astropy.units as u
from asdf.tests.helpers import assert_roundtrip_tree
from astropy.coordinates import Longitude, Latitude, Angle
from astropy.io.misc.asdf.extension import AstropyExtension
def test_angle(tmpdir):
tree = {'angle': Angle(100, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_latitude(tmpdir):
tree = {'angle': Latitude(10, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_longitude(tmpdir):
tree = {'angle': Longitude(-100, u.deg, wrap_angle=180*u.deg)}
assert_roundtrip_tree(tree, tmpdir)
|
91a2c87919020a418602084e90762044465c89af6bc080ba86cea029b6f6d3be | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
from numpy.random import random, randint
import astropy.units as u
from astropy.coordinates import Angle
import astropy.coordinates.representation as r
from asdf.tests.helpers import assert_roundtrip_tree
@pytest.fixture(params=filter(lambda x: "Base" not in x, r.__all__))
def representation(request):
rep = getattr(r, request.param)
angle_unit = u.deg
other_unit = u.km
kwargs = {}
arr_len = randint(1, 100)
for aname, atype in rep.attr_classes.items():
if issubclass(atype, Angle):
value = ([random()] * arr_len) * angle_unit
else:
value = ([random()] * arr_len) * other_unit
kwargs[aname] = value
return rep(**kwargs)
def test_representations(tmpdir, representation):
tree = {'representation': representation}
assert_roundtrip_tree(tree, tmpdir)
|
f6edd790ec51bb099e847ea63b95e86d05e4380f5b6d050e5d42c96afa198e69 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
from astropy import units as u
from astropy.coordinates import SpectralCoord, ICRS, Galactic
from astropy.tests.helper import assert_quantity_allclose
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree # noqa
def test_scalar_spectralcoord(tmpdir):
sc = SpectralCoord(565 * u.nm)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile['spectralcoord'], SpectralCoord)
assert_quantity_allclose(asdffile['spectralcoord'].quantity, 565 * u.nm)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_vector_spectralcoord(tmpdir):
sc = SpectralCoord([100, 200, 300] * u.GHz)
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile['spectralcoord'], SpectralCoord)
assert_quantity_allclose(asdffile['spectralcoord'].quantity, [100, 200, 300] * u.GHz)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check, tree_match_func=assert_quantity_allclose)
@pytest.mark.filterwarnings("ignore:No velocity")
def test_spectralcoord_with_obstarget(tmpdir):
sc = SpectralCoord(10 * u.GHz,
observer=ICRS(1 * u.km, 2 * u.km, 3 * u.km, representation_type='cartesian'),
target=Galactic(10 * u.deg, 20 * u.deg, distance=30 * u.pc))
tree = dict(spectralcoord=sc)
def check(asdffile):
assert isinstance(asdffile['spectralcoord'], SpectralCoord)
assert_quantity_allclose(asdffile['spectralcoord'].quantity, 10 * u.GHz)
assert isinstance(asdffile['spectralcoord'].observer, ICRS)
assert isinstance(asdffile['spectralcoord'].target, Galactic)
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
|
235d9cd47d2eca472132d322293d652c45681560f8ae34e4119c25bdc15a67fd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import io
from astropy import units as u
from asdf.tests import helpers
# TODO: Implement defunit
def test_unit():
yaml = """
unit: !unit/unit-1.0.0 "2.1798721 10-18kg m2 s-2"
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
|
a6745b9dd530770861d556f961324186c8818f7792995d703401cd5a831984ef | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
import io
from astropy import units
from asdf.tests import helpers
def roundtrip_quantity(yaml, quantity):
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert (ff.tree['quantity'] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert (ff.tree['quantity'] == quantity).all()
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = f"""
quantity: !unit/quantity-1.1.0
value: {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x*2.3081 for x in range(10)]
testunit = units.ampere
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {testval}
unit: {testunit}
"""
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
    testval = [[1, 2, 3], [4, 5, 6]]
testunit = units.km
yaml = f"""
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{testval}
unit: {testunit}
"""
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
|
1d421d387bec3a1abccb2b6de89848cb4c666a1a78c1657c9ea195d74fe047fd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
from astropy import units as u
from astropy.units import equivalencies as eq
from astropy.cosmology import Planck15
asdf = pytest.importorskip('asdf', minversion='2.3.0.dev0')
from asdf.tests import helpers
def get_equivalencies():
"""
Return a list of example equivalencies for testing serialization.
"""
return [eq.plate_scale(.3 * u.deg/u.mm), eq.pixel_scale(.5 * u.deg/u.pix),
eq.pixel_scale(100. * u.pix/u.cm),
eq.spectral_density(350 * u.nm, factor=2),
eq.spectral_density(350 * u.nm), eq.spectral(),
eq.brightness_temperature(500 * u.GHz),
eq.brightness_temperature(500 * u.GHz, beam_area=23 * u.sr),
eq.temperature_energy(), eq.temperature(),
eq.thermodynamic_temperature(300 * u.Hz),
eq.thermodynamic_temperature(140 * u.GHz, Planck15.Tcmb0),
eq.beam_angular_area(3 * u.sr), eq.mass_energy(),
eq.molar_mass_amu(), eq.doppler_relativistic(2 * u.m),
eq.doppler_optical(2 * u.nm), eq.doppler_radio(2 * u.Hz),
eq.parallax(), eq.logarithmic(), eq.dimensionless_angles(),
eq.spectral() + eq.temperature(),
(eq.spectral_density(35 * u.nm) +
eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr)),
(eq.spectral() + eq.spectral_density(35 * u.nm) +
eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr))
]
@pytest.mark.parametrize('equiv', get_equivalencies())
def test_equivalencies(tmpdir, equiv):
tree = {'equiv': equiv}
helpers.assert_roundtrip_tree(tree, tmpdir)
|
9b9ea54edd341a82a81050085fad86aafab9619dad77b03db8ae3814bea1efec | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
from astropy.nddata import CCDData
from astropy.table import Table
def test_table_read_help_fits():
"""
Test dynamically created documentation help via the I/O registry for 'fits'.
"""
out = StringIO()
Table.read.help('fits', out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.read general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.read(format='fits') documentation" in doc
assert "hdu : int or str, optional" in doc
def test_table_read_help_ascii():
"""
Test dynamically created documentation help via the I/O registry for 'ascii'.
"""
out = StringIO()
Table.read.help('ascii', out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.read general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.read(format='ascii') documentation" in doc
assert "delimiter : str" in doc
assert "ASCII reader 'ascii' details" in doc
assert "Character-delimited table with a single header line" in doc
def test_table_write_help_hdf5():
"""
Test dynamically created documentation help via the I/O registry for 'hdf5'.
"""
out = StringIO()
Table.write.help('hdf5', out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.write(format='hdf5') documentation" in doc
assert "Write a Table object to an HDF5 file" in doc
assert "compression : bool or str or int" in doc
def test_list_formats():
"""
Test getting list of available formats
"""
out = StringIO()
CCDData.write.list_formats(out)
output = out.getvalue()
assert output == """\
Format Read Write Auto-identify
------ ---- ----- -------------
  fits  Yes   Yes           Yes"""
def test_table_write_help_fits():
"""
Test dynamically created documentation help via the I/O registry for 'fits'.
"""
out = StringIO()
Table.write.help('fits', out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.write(format='fits') documentation" in doc
assert "Write a Table object to a FITS file" in doc
def test_table_write_help_no_format():
"""
Test dynamically created documentation help via the I/O registry for no
format provided.
"""
out = StringIO()
Table.write.help(out=out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" in doc
assert "The available built-in formats" in doc
def test_table_read_help_no_format():
"""
    Test dynamically created documentation help via the I/O registry for no
format provided.
"""
out = StringIO()
Table.read.help(out=out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.read general documentation" in doc
assert "The available built-in formats" in doc
def test_ccddata_write_help_fits():
"""
Test dynamically created documentation help via the I/O registry for 'fits'.
"""
out = StringIO()
CCDData.write.help('fits', out)
doc = out.getvalue()
# Check a smattering of expected content
assert "CCDData.write(format='fits') documentation" in doc
assert "Write CCDData object to FITS file" in doc
assert "key_uncertainty_type : str, optional" in doc
def test_ccddata_read_help_fits():
"""Test dynamically created documentation help via the I/O registry for
CCDData 'fits'.
"""
out = StringIO()
CCDData.read.help('fits', out)
doc = out.getvalue()
# Check a smattering of expected content
assert "CCDData.read(format='fits') documentation" in doc
assert "Generate a CCDData object from a FITS file" in doc
assert "hdu_uncertainty : str or None, optional" in doc
def test_table_write_help_jsviewer():
"""
Test dynamically created documentation help via the I/O registry for
'jsviewer'.
"""
out = StringIO()
Table.write.help('jsviewer', out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.write(format='jsviewer') documentation" in doc
|
dafc3ca1b30ff2af220cf46e8192642829119e8abde0959d393156529daab7e4 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test :mod:`astropy.io.registry`.
.. todo::
Don't rely on Table for tests
"""
import contextlib
import os
from collections import Counter
from copy import copy, deepcopy
from io import StringIO
import pytest
import numpy as np
import astropy.units as u
from astropy.io import registry as io_registry
from astropy.io.registry import (IORegistryError, UnifiedInputRegistry,
UnifiedIORegistry, UnifiedOutputRegistry, compat)
from astropy.io.registry.base import _UnifiedIORegistryBase
from astropy.io.registry.compat import default_registry
from astropy.table import Table
###############################################################################
# pytest setup and fixtures
class UnifiedIORegistryBaseSubClass(_UnifiedIORegistryBase):
"""Non-abstract subclass of UnifiedIORegistryBase for testing."""
def get_formats(self, data_class=None):
return None
class EmptyData:
"""
Thing that can read and write.
    Note that ``read``/``write`` are the compatibility methods, which accept
    the ``registry`` kwarg. This lets us avoid subclassing ``EmptyData`` for
    each registry type (read-only, ...) and use this class everywhere.
"""
read = classmethod(io_registry.read)
write = io_registry.write
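# A hedged usage sketch of the pattern described in the docstring above:
# because ``EmptyData.read`` is the compat method, a per-call ``registry``
# kwarg selects the registry instead of requiring a subclass per registry
# type. The format name "demo" is illustrative only.
def _compat_registry_sketch():
    reg = UnifiedIORegistry()
    reg.register_reader("demo", EmptyData, lambda *args, **kwargs: EmptyData())
    return EmptyData.read(format="demo", registry=reg)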
class OtherEmptyData:
"""A different class with different I/O"""
read = classmethod(io_registry.read)
write = io_registry.write
def empty_reader(*args, **kwargs):
return EmptyData()
def empty_writer(table, *args, **kwargs):
return "status: success"
def empty_identifier(*args, **kwargs):
return True
@pytest.fixture
def fmtcls1():
return ("test1", EmptyData)
@pytest.fixture
def fmtcls2():
return ("test2", EmptyData)
@pytest.fixture(params=["test1", "test2"])
def fmtcls(request):
yield (request.param, EmptyData)
@pytest.fixture
def original():
ORIGINAL = {}
ORIGINAL["readers"] = deepcopy(default_registry._readers)
ORIGINAL["writers"] = deepcopy(default_registry._writers)
ORIGINAL["identifiers"] = deepcopy(default_registry._identifiers)
return ORIGINAL
###############################################################################
def test_fmcls1_fmtcls2(fmtcls1, fmtcls2):
"""Just check a fact that we rely on in other tests."""
assert fmtcls1[1] is fmtcls2[1]
def test_IORegistryError():
with pytest.raises(IORegistryError, match="just checking"):
raise IORegistryError("just checking")
class TestUnifiedIORegistryBase:
"""Test :class:`astropy.io.registry.UnifiedIORegistryBase`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedIORegistryBaseSubClass
@pytest.fixture
def registry(self):
"""I/O registry. Cleaned before and after each function."""
registry = self._cls()
HAS_READERS = hasattr(registry, "_readers")
HAS_WRITERS = hasattr(registry, "_writers")
# copy and clear original registry
ORIGINAL = {}
ORIGINAL["identifiers"] = deepcopy(registry._identifiers)
registry._identifiers.clear()
if HAS_READERS:
ORIGINAL["readers"] = deepcopy(registry._readers)
registry._readers.clear()
if HAS_WRITERS:
ORIGINAL["writers"] = deepcopy(registry._writers)
registry._writers.clear()
yield registry
registry._identifiers.clear()
registry._identifiers.update(ORIGINAL["identifiers"])
if HAS_READERS:
registry._readers.clear()
registry._readers.update(ORIGINAL["readers"])
if HAS_WRITERS:
registry._writers.clear()
registry._writers.update(ORIGINAL["writers"])
# ===========================================
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
# defaults
assert registry.get_formats() is None
# (kw)args don't matter
assert registry.get_formats(data_class=24) is None
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
# TODO! figure out what can be tested
with registry.delay_doc_updates(EmptyData):
registry.register_identifier(*fmtcls1, empty_identifier)
def test_register_identifier(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_identifier()``."""
# initial check it's not registered
assert fmtcls1 not in registry._identifiers
assert fmtcls2 not in registry._identifiers
# register
registry.register_identifier(*fmtcls1, empty_identifier)
registry.register_identifier(*fmtcls2, empty_identifier)
assert fmtcls1 in registry._identifiers
assert fmtcls2 in registry._identifiers
def test_register_identifier_invalid(self, registry, fmtcls):
"""Test calling ``registry.register_identifier()`` twice."""
fmt, cls = fmtcls
registry.register_identifier(fmt, cls, empty_identifier)
with pytest.raises(IORegistryError) as exc:
registry.register_identifier(fmt, cls, empty_identifier)
assert (
str(exc.value) == f"Identifier for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_identifier_force(self, registry, fmtcls1):
registry.register_identifier(*fmtcls1, empty_identifier)
registry.register_identifier(*fmtcls1, empty_identifier, force=True)
assert fmtcls1 in registry._identifiers
# -----------------------
def test_unregister_identifier(self, registry, fmtcls1):
"""Test ``registry.unregister_identifier()``."""
registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in registry._identifiers
registry.unregister_identifier(*fmtcls1)
assert fmtcls1 not in registry._identifiers
def test_unregister_identifier_invalid(self, registry, fmtcls):
"""Test ``registry.unregister_identifier()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.unregister_identifier(fmt, cls)
assert (
str(exc.value) == f"No identifier defined for format '{fmt}' "
f"and class '{cls.__name__}'"
)
def test_identify_format(self, registry, fmtcls1):
"""Test ``registry.identify_format()``."""
fmt, cls = fmtcls1
args = (None, cls, None, None, (None,), {})
# test no formats to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
registry.register_identifier(fmt, cls, empty_identifier)
formats = registry.identify_format(*args)
assert fmt in formats
# ===========================================
# Compat tests
def test_compat_register_identifier(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._identifiers
compat.register_identifier(*fmtcls1, empty_identifier, registry=registry)
assert fmtcls1 in registry._identifiers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._identifiers
try:
compat.register_identifier(*fmtcls1, empty_identifier)
except Exception:
pass
else:
assert fmtcls1 in default_registry._identifiers
finally:
default_registry._identifiers.pop(fmtcls1)
def test_compat_unregister_identifier(self, registry, fmtcls1):
# with registry specified
registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in registry._identifiers
compat.unregister_identifier(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._identifiers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._identifiers
default_registry.register_identifier(*fmtcls1, empty_identifier)
assert fmtcls1 in default_registry._identifiers
compat.unregister_identifier(*fmtcls1)
assert fmtcls1 not in registry._identifiers
def test_compat_identify_format(self, registry, fmtcls1):
fmt, cls = fmtcls1
args = (None, cls, None, None, (None,), dict())
# with registry specified
registry.register_identifier(*fmtcls1, empty_identifier)
formats = compat.identify_format(*args, registry=registry)
assert fmt in formats
# without registry specified it becomes default_registry
if registry is not default_registry:
try:
default_registry.register_identifier(*fmtcls1, empty_identifier)
except Exception:
pass
else:
formats = compat.identify_format(*args)
assert fmt in formats
finally:
default_registry.unregister_identifier(*fmtcls1)
@pytest.mark.skip("TODO!")
def test_compat_get_formats(self, registry, fmtcls1):
assert False
@pytest.mark.skip("TODO!")
def test_compat_delay_doc_updates(self, registry, fmtcls1):
assert False
class TestUnifiedInputRegistry(TestUnifiedIORegistryBase):
"""Test :class:`astropy.io.registry.UnifiedInputRegistry`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedInputRegistry
# ===========================================
def test_inherited_read_registration(self, registry):
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(EmptyData):
pass
class Child2(Child1):
pass
def _read():
return EmptyData()
def _read1():
return Child1()
# check that reader gets inherited
registry.register_reader("test", EmptyData, _read)
assert registry.get_reader("test", Child2) is _read
# check that nearest ancestor is identified
# (i.e. that the reader for Child2 is the registered method
# for Child1, and not Table)
registry.register_reader("test", Child1, _read1)
assert registry.get_reader("test", Child2) is _read1
# ===========================================
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
with registry.delay_doc_updates(EmptyData):
registry.register_reader("test", EmptyData, empty_reader)
# test that the doc has not yet been updated.
            # if the format was registered in a different way, then
# test that this method is not present.
if "Format" in EmptyData.read.__doc__:
docs = EmptyData.read.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs)
if ("Format" in s)][0]
ifmt = docs[ihd].index("Format") + 1
iread = docs[ihd].index("Read") + 1
# there might not actually be anything here, which is also good
if docs[-2] != docs[-1]:
assert docs[-1][ifmt : ifmt + 5] == "test"
assert docs[-1][iread : iread + 3] != "Yes"
# now test it's updated
docs = EmptyData.read.__doc__.split("\n")
ifmt = docs[ihd].index("Format") + 2
iread = docs[ihd].index("Read") + 1
assert docs[-2][ifmt : ifmt + 4] == "test"
assert docs[-2][iread : iread + 3] == "Yes"
def test_identify_read_format(self, registry):
"""Test ``registry.identify_format()``."""
args = ("read", EmptyData, None, None, (None,), dict())
# test there is no format to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
# doesn't actually matter if register a reader, it returns True for all
registry.register_identifier("test", EmptyData, empty_identifier)
formats = registry.identify_format(*args)
assert "test" in formats
# -----------------------
def test_register_reader(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_reader()``."""
# initial check it's not registered
assert fmtcls1 not in registry._readers
assert fmtcls2 not in registry._readers
# register
registry.register_reader(*fmtcls1, empty_reader)
registry.register_reader(*fmtcls2, empty_reader)
assert fmtcls1 in registry._readers
assert fmtcls2 in registry._readers
assert registry._readers[fmtcls1] == (empty_reader, 0) # (f, priority)
assert registry._readers[fmtcls2] == (empty_reader, 0) # (f, priority)
def test_register_reader_invalid(self, registry, fmtcls1):
fmt, cls = fmtcls1
registry.register_reader(fmt, cls, empty_reader)
with pytest.raises(IORegistryError) as exc:
registry.register_reader(fmt, cls, empty_reader)
assert (
str(exc.value) == f"Reader for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_reader_force(self, registry, fmtcls1):
registry.register_reader(*fmtcls1, empty_reader)
registry.register_reader(*fmtcls1, empty_reader, force=True)
assert fmtcls1 in registry._readers
def test_register_readers_with_same_name_on_different_classes(self, registry):
# No errors should be generated if the same name is registered for
# different objects...but this failed under python3
registry.register_reader("test", EmptyData, lambda: EmptyData())
registry.register_reader("test", OtherEmptyData, lambda: OtherEmptyData())
t = EmptyData.read(format="test", registry=registry)
assert isinstance(t, EmptyData)
tbl = OtherEmptyData.read(format="test", registry=registry)
assert isinstance(tbl, OtherEmptyData)
# -----------------------
def test_unregister_reader(self, registry, fmtcls1):
"""Test ``registry.unregister_reader()``."""
registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in registry._readers
registry.unregister_reader(*fmtcls1)
assert fmtcls1 not in registry._readers
def test_unregister_reader_invalid(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
registry.unregister_reader(*fmtcls1)
assert (
str(exc.value) == f"No reader defined for format '{fmt}' and "
f"class '{cls.__name__}'"
)
# -----------------------
def test_get_reader(self, registry, fmtcls):
"""Test ``registry.get_reader()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError):
registry.get_reader(fmt, cls)
registry.register_reader(fmt, cls, empty_reader)
reader = registry.get_reader(fmt, cls)
assert reader is empty_reader
def test_get_reader_invalid(self, registry, fmtcls):
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.get_reader(fmt, cls)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_read_noformat(self, registry, fmtcls1):
"""Test ``registry.read()`` when there isn't a reader."""
with pytest.raises(IORegistryError) as exc:
fmtcls1[1].read(registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_noformat_arbitrary(self, registry, original, fmtcls1):
"""Test that all identifier functions can accept arbitrary input"""
registry._identifiers.update(original["identifiers"])
with pytest.raises(IORegistryError) as exc:
fmtcls1[1].read(object(), registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_noformat_arbitrary_file(self, tmpdir, registry, original):
"""Tests that all identifier functions can accept arbitrary files"""
registry._readers.update(original["readers"])
testfile = str(tmpdir.join("foo.example"))
with open(testfile, "w") as f:
f.write("Hello world")
with pytest.raises(IORegistryError) as exc:
Table.read(testfile)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_read_toomanyformats(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls, lambda o, *x, **y: True)
with pytest.raises(IORegistryError) as exc:
cls.read(registry=registry)
assert str(exc.value) == (f"Format is ambiguous - options are: {fmt1}, {fmt2}")
def test_read_uses_priority(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
counter = Counter()
def counting_reader1(*args, **kwargs):
counter[fmt1] += 1
return cls()
def counting_reader2(*args, **kwargs):
counter[fmt2] += 1
return cls()
registry.register_reader(fmt1, cls, counting_reader1, priority=1)
registry.register_reader(fmt2, cls, counting_reader2, priority=2)
registry.register_identifier(fmt1, cls, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls, lambda o, *x, **y: True)
cls.read(registry=registry)
assert counter[fmt2] == 1
assert counter[fmt1] == 0
def test_read_format_noreader(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
cls.read(format=fmt, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_read_identifier(self, tmpdir, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(
fmt1, cls, lambda o, path, fileobj, *x, **y: path.endswith("a")
)
registry.register_identifier(
fmt2, cls, lambda o, path, fileobj, *x, **y: path.endswith("b")
)
# Now check that we got past the identifier and are trying to get
# the reader. The registry.get_reader will fail but the error message
# will tell us if the identifier worked.
filename = tmpdir.join("testfile.a").strpath
open(filename, "w").close()
with pytest.raises(IORegistryError) as exc:
cls.read(filename, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt1}' and class '{cls.__name__}'"
)
filename = tmpdir.join("testfile.b").strpath
open(filename, "w").close()
with pytest.raises(IORegistryError) as exc:
cls.read(filename, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt2}' and class '{cls.__name__}'"
)
def test_read_valid_return(self, registry, fmtcls):
fmt, cls = fmtcls
registry.register_reader(fmt, cls, empty_reader)
t = cls.read(format=fmt, registry=registry)
assert isinstance(t, cls)
def test_read_non_existing_unknown_ext(self, fmtcls1):
"""Raise the correct error when attempting to read a non-existing
file with an unknown extension."""
with pytest.raises(OSError):
            fmtcls1[1].read("non-existing-file-with-unknown.ext")
def test_read_directory(self, tmpdir, registry, fmtcls1):
"""
Regression test for a bug that caused the I/O registry infrastructure to
not work correctly for datasets that are represented by folders as
opposed to files, when using the descriptors to add read/write methods.
"""
_, cls = fmtcls1
registry.register_identifier(
"test_folder_format", cls, lambda o, *x, **y: o == "read"
)
registry.register_reader("test_folder_format", cls, empty_reader)
filename = tmpdir.mkdir("folder_dataset").strpath
# With the format explicitly specified
dataset = cls.read(filename, format="test_folder_format", registry=registry)
assert isinstance(dataset, cls)
# With the auto-format identification
dataset = cls.read(filename, registry=registry)
assert isinstance(dataset, cls)
# ===========================================
# Compat tests
def test_compat_register_reader(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._readers
compat.register_reader(*fmtcls1, empty_reader, registry=registry)
assert fmtcls1 in registry._readers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._readers
try:
                compat.register_reader(*fmtcls1, empty_reader)
except Exception:
pass
else:
assert fmtcls1 in default_registry._readers
finally:
default_registry._readers.pop(fmtcls1)
def test_compat_unregister_reader(self, registry, fmtcls1):
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in registry._readers
compat.unregister_reader(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._readers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._readers
default_registry.register_reader(*fmtcls1, empty_reader)
assert fmtcls1 in default_registry._readers
compat.unregister_reader(*fmtcls1)
            assert fmtcls1 not in default_registry._readers
def test_compat_get_reader(self, registry, fmtcls1):
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
reader = compat.get_reader(*fmtcls1, registry=registry)
assert reader is empty_reader
registry.unregister_reader(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
default_registry.register_reader(*fmtcls1, empty_reader)
reader = compat.get_reader(*fmtcls1)
assert reader is empty_reader
default_registry.unregister_reader(*fmtcls1)
def test_compat_read(self, registry, fmtcls1):
fmt, cls = fmtcls1
# with registry specified
registry.register_reader(*fmtcls1, empty_reader)
t = compat.read(cls, format=fmt, registry=registry)
assert isinstance(t, cls)
registry.unregister_reader(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
default_registry.register_reader(*fmtcls1, empty_reader)
t = compat.read(cls, format=fmt)
assert isinstance(t, cls)
default_registry.unregister_reader(*fmtcls1)
class TestUnifiedOutputRegistry(TestUnifiedIORegistryBase):
"""Test :class:`astropy.io.registry.UnifiedOutputRegistry`."""
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedOutputRegistry
# ===========================================
def test_inherited_write_registration(self, registry):
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(EmptyData):
pass
class Child2(Child1):
pass
def _write():
return EmptyData()
def _write1():
return Child1()
# check that writer gets inherited
registry.register_writer("test", EmptyData, _write)
assert registry.get_writer("test", Child2) is _write
# check that nearest ancestor is identified
# (i.e. that the writer for Child2 is the registered method
# for Child1, and not Table)
registry.register_writer("test", Child1, _write1)
assert registry.get_writer("test", Child2) is _write1
# ===========================================
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
fmt, cls = fmtcls1
with registry.delay_doc_updates(EmptyData):
registry.register_writer(*fmtcls1, empty_writer)
# test that the doc has not yet been updated.
            # if the format was registered in a different way, then
# test that this method is not present.
if "Format" in EmptyData.read.__doc__:
docs = EmptyData.write.__doc__.split("\n")
ihd = [i for i, s in enumerate(docs)
if ("Format" in s)][0]
ifmt = docs[ihd].index("Format")
iwrite = docs[ihd].index("Write") + 1
# there might not actually be anything here, which is also good
if docs[-2] != docs[-1]:
assert fmt in docs[-1][ifmt : ifmt + len(fmt) + 1]
assert docs[-1][iwrite : iwrite + 3] != "Yes"
# now test it's updated
docs = EmptyData.write.__doc__.split("\n")
ifmt = docs[ihd].index("Format") + 1
iwrite = docs[ihd].index("Write") + 2
assert fmt in docs[-2][ifmt : ifmt + len(fmt) + 1]
assert docs[-2][iwrite : iwrite + 3] == "Yes"
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_identify_write_format(self, registry, fmtcls1):
"""Test ``registry.identify_format()``."""
fmt, cls = fmtcls1
args = ("write", cls, None, None, (None,), {})
# test there is no format to identify
formats = registry.identify_format(*args)
assert formats == []
# test there is a format to identify
        # it doesn't matter whether a writer is registered; the identifier returns True for all
registry.register_identifier(fmt, cls, empty_identifier)
formats = registry.identify_format(*args)
assert fmt in formats
# -----------------------
def test_register_writer(self, registry, fmtcls1, fmtcls2):
"""Test ``registry.register_writer()``."""
# initial check it's not registered
assert fmtcls1 not in registry._writers
assert fmtcls2 not in registry._writers
# register
registry.register_writer(*fmtcls1, empty_writer)
registry.register_writer(*fmtcls2, empty_writer)
assert fmtcls1 in registry._writers
assert fmtcls2 in registry._writers
def test_register_writer_invalid(self, registry, fmtcls):
"""Test calling ``registry.register_writer()`` twice."""
fmt, cls = fmtcls
registry.register_writer(fmt, cls, empty_writer)
with pytest.raises(IORegistryError) as exc:
registry.register_writer(fmt, cls, empty_writer)
assert (
str(exc.value) == f"Writer for format '{fmt}' and class "
f"'{cls.__name__}' is already defined"
)
def test_register_writer_force(self, registry, fmtcls1):
registry.register_writer(*fmtcls1, empty_writer)
registry.register_writer(*fmtcls1, empty_writer, force=True)
assert fmtcls1 in registry._writers
# -----------------------
def test_unregister_writer(self, registry, fmtcls1):
"""Test ``registry.unregister_writer()``."""
registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in registry._writers
registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in registry._writers
def test_unregister_writer_invalid(self, registry, fmtcls):
"""Test ``registry.unregister_writer()``."""
fmt, cls = fmtcls
with pytest.raises(IORegistryError) as exc:
registry.unregister_writer(fmt, cls)
assert (
str(exc.value) == f"No writer defined for format '{fmt}' "
f"and class '{cls.__name__}'"
)
# -----------------------
def test_get_writer(self, registry, fmtcls1):
"""Test ``registry.get_writer()``."""
with pytest.raises(IORegistryError):
registry.get_writer(*fmtcls1)
registry.register_writer(*fmtcls1, empty_writer)
writer = registry.get_writer(*fmtcls1)
assert writer is empty_writer
def test_get_writer_invalid(self, registry, fmtcls1):
"""Test invalid ``registry.get_writer()``."""
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
registry.get_writer(fmt, cls)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
# -----------------------
def test_write_noformat(self, registry, fmtcls1):
"""Test ``registry.write()`` when there isn't a writer."""
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_noformat_arbitrary(self, registry, original, fmtcls1):
"""Test that all identifier functions can accept arbitrary input"""
registry._identifiers.update(original["identifiers"])
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(object(), registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_noformat_arbitrary_file(self, tmpdir, registry, original):
"""Tests that all identifier functions can accept arbitrary files"""
registry._writers.update(original["writers"])
testfile = str(tmpdir.join("foo.example"))
with pytest.raises(IORegistryError) as exc:
Table().write(testfile, registry=registry)
assert str(exc.value).startswith(
"Format could not be identified based"
" on the file name or contents, "
"please provide a 'format' argument."
)
def test_write_toomanyformats(self, registry, fmtcls1, fmtcls2):
registry.register_identifier(*fmtcls1, lambda o, *x, **y: True)
registry.register_identifier(*fmtcls2, lambda o, *x, **y: True)
with pytest.raises(IORegistryError) as exc:
fmtcls1[1]().write(registry=registry)
assert str(exc.value) == (
f"Format is ambiguous - options are: {fmtcls1[0]}, {fmtcls2[0]}"
)
def test_write_uses_priority(self, registry, fmtcls1, fmtcls2):
fmt1, cls1 = fmtcls1
fmt2, cls2 = fmtcls2
counter = Counter()
def counting_writer1(*args, **kwargs):
counter[fmt1] += 1
def counting_writer2(*args, **kwargs):
counter[fmt2] += 1
registry.register_writer(fmt1, cls1, counting_writer1, priority=1)
registry.register_writer(fmt2, cls2, counting_writer2, priority=2)
registry.register_identifier(fmt1, cls1, lambda o, *x, **y: True)
registry.register_identifier(fmt2, cls2, lambda o, *x, **y: True)
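        # Mirror of the reader test: both identifiers claim the input, so the
        # higher-priority writer must be selected.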
cls1().write(registry=registry)
assert counter[fmt2] == 1
assert counter[fmt1] == 0
def test_write_format_nowriter(self, registry, fmtcls1):
fmt, cls = fmtcls1
with pytest.raises(IORegistryError) as exc:
cls().write(format=fmt, registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt}' and class '{cls.__name__}'"
)
def test_write_identifier(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: x[0].startswith("a"))
registry.register_identifier(fmt2, cls, lambda o, *x, **y: x[0].startswith("b"))
# Now check that we got past the identifier and are trying to get
        # the writer. The registry.get_writer will fail but the error message
# will tell us if the identifier worked.
with pytest.raises(IORegistryError) as exc:
cls().write("abc", registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt1}' and class '{cls.__name__}'"
)
with pytest.raises(IORegistryError) as exc:
cls().write("bac", registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt2}' and class '{cls.__name__}'"
)
def test_write_return(self, registry, fmtcls1):
"""Most writers will return None, but other values are not forbidden."""
fmt, cls = fmtcls1
registry.register_writer(fmt, cls, empty_writer)
res = cls.write(cls(), format=fmt, registry=registry)
assert res == "status: success"
# ===========================================
# Compat tests
def test_compat_register_writer(self, registry, fmtcls1):
# with registry specified
assert fmtcls1 not in registry._writers
compat.register_writer(*fmtcls1, empty_writer, registry=registry)
assert fmtcls1 in registry._writers
registry.unregister_writer(*fmtcls1)
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
try:
compat.register_writer(*fmtcls1, empty_writer)
except Exception:
pass
else:
assert fmtcls1 in default_registry._writers
finally:
default_registry._writers.pop(fmtcls1)
def test_compat_unregister_writer(self, registry, fmtcls1):
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in registry._writers
compat.unregister_writer(*fmtcls1, registry=registry)
assert fmtcls1 not in registry._writers
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
compat.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
def test_compat_get_writer(self, registry, fmtcls1):
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
writer = compat.get_writer(*fmtcls1, registry=registry)
assert writer is empty_writer
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
writer = compat.get_writer(*fmtcls1)
assert writer is empty_writer
default_registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
def test_compat_write(self, registry, fmtcls1):
fmt, cls = fmtcls1
# with registry specified
registry.register_writer(*fmtcls1, empty_writer)
res = compat.write(cls(), format=fmt, registry=registry)
assert res == "status: success"
# without registry specified it becomes default_registry
if registry is not default_registry:
assert fmtcls1 not in default_registry._writers
default_registry.register_writer(*fmtcls1, empty_writer)
assert fmtcls1 in default_registry._writers
res = compat.write(cls(), format=fmt)
assert res == "status: success"
default_registry.unregister_writer(*fmtcls1)
assert fmtcls1 not in default_registry._writers
class TestUnifiedIORegistry(TestUnifiedInputRegistry, TestUnifiedOutputRegistry):
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = UnifiedIORegistry
# ===========================================
@pytest.mark.skip("TODO!")
def test_get_formats(self, registry):
"""Test ``registry.get_formats()``."""
assert False
def test_delay_doc_updates(self, registry, fmtcls1):
"""Test ``registry.delay_doc_updates()``."""
super().test_delay_doc_updates(registry, fmtcls1)
# -----------------------
def test_identifier_origin(self, registry, fmtcls1, fmtcls2):
fmt1, cls = fmtcls1
fmt2, _ = fmtcls2
registry.register_identifier(fmt1, cls, lambda o, *x, **y: o == "read")
registry.register_identifier(fmt2, cls, lambda o, *x, **y: o == "write")
registry.register_reader(fmt1, cls, empty_reader)
registry.register_writer(fmt2, cls, empty_writer)
        # Neither call should raise an "ambiguous format" error, since each
        # identifier only matches its own origin.
cls.read(registry=registry)
cls().write(registry=registry)
with pytest.raises(IORegistryError) as exc:
cls.read(format=fmt2, registry=registry)
assert str(exc.value).startswith(
f"No reader defined for format '{fmt2}' and class '{cls.__name__}'"
)
with pytest.raises(IORegistryError) as exc:
cls().write(format=fmt1, registry=registry)
assert str(exc.value).startswith(
f"No writer defined for format '{fmt1}' and class '{cls.__name__}'"
)
class TestDefaultRegistry(TestUnifiedIORegistry):
def setup_class(self):
"""Setup class. This is called 1st by pytest."""
self._cls = lambda *args: default_registry
# =============================================================================
# Test compat
# much of this is already tested above since EmptyData uses io_registry.X(),
# which are the compat methods.
def test_dir():
"""Test all the compat methods are in the directory"""
dc = dir(compat)
for n in compat.__all__:
assert n in dc
def test_getattr():
for n in compat.__all__:
assert hasattr(compat, n)
with pytest.raises(AttributeError, match="module 'astropy.io.registry.compat'"):
compat.this_is_definitely_not_in_this_module
# =============================================================================
# Table tests
def test_read_basic_table():
registry = Table.read._registry
data = np.array(
list(zip([1, 2, 3], ["a", "b", "c"])), dtype=[("A", int), ("B", "|U1")]
)
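    # This registers on the shared Table registry, so registration may fail
    # if 'test' is already taken; the finally block always cleans it up.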
try:
registry.register_reader("test", Table, lambda x: Table(x))
except Exception:
pass
else:
t = Table.read(data, format="test")
assert t.keys() == ["A", "B"]
for i in range(3):
assert t["A"][i] == data["A"][i]
assert t["B"][i] == data["B"][i]
finally:
registry._readers.pop("test", None)
class TestSubclass:
"""
Test using registry with a Table sub-class
"""
@pytest.fixture(autouse=True)
def registry(self):
"""I/O registry. Not cleaned."""
yield
def test_read_table_subclass(self):
class MyTable(Table):
pass
data = ["a b", "1 2"]
mt = MyTable.read(data, format="ascii")
t = Table.read(data, format="ascii")
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(mt) is MyTable
def test_write_table_subclass(self):
buffer = StringIO()
class MyTable(Table):
pass
mt = MyTable([[1], [2]], names=["a", "b"])
mt.write(buffer, format="ascii")
assert buffer.getvalue() == os.linesep.join(["a b", "1 2", ""])
def test_read_table_subclass_with_columns_attributes(self, tmpdir):
"""Regression test for https://github.com/astropy/astropy/issues/7181"""
class MTable(Table):
pass
mt = MTable([[1, 2.5]], names=["a"])
mt["a"].unit = u.m
mt["a"].format = ".4f"
mt["a"].description = "hello"
testfile = str(tmpdir.join("junk.fits"))
mt.write(testfile, overwrite=True)
t = MTable.read(testfile)
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(t) is MTable
assert t["a"].unit == u.m
assert t["a"].format == "{:13.4f}"
assert t["a"].description == "hello"
|
dbd2d6fd1ad4f4d05161f42aa1cbcc0c47e733bd9b49355605a7bfbf4757f015 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
# LOCAL
from astropy.io.votable import converters, exceptions, tree
def test_reraise():
def fail():
raise RuntimeError("This failed")
try:
try:
fail()
except RuntimeError as e:
exceptions.vo_reraise(e, additional="From here")
except RuntimeError as e:
assert "From here" in str(e)
else:
assert False
def test_parse_vowarning():
config = {'verify': 'exception',
'filename': 'foo.xml'}
pos = (42, 64)
with pytest.warns(exceptions.W47) as w:
field = tree.Field(
None, name='c', datatype='char',
config=config, pos=pos)
converters.get_converter(field, config=config, pos=pos)
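    # The rendered warning text should parse back into its structured parts.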
parts = exceptions.parse_vowarning(str(w[0].message))
match = {
'number': 47,
'is_exception': False,
'nchar': 64,
'warning': 'W47',
'is_something': True,
'message': 'Missing arraysize indicates length 1',
'doc_url': 'io/votable/api_exceptions.html#w47',
'nline': 42,
'is_warning': True
}
assert parts == match
def test_suppress_warnings():
cfg = {}
warn = exceptions.W01('foo')
with exceptions.conf.set_temp('max_warnings', 2):
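        # With max_warnings=2, the first warning is emitted as-is, the second
        # carries the 'suppressing' notice, and later ones are only counted.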
with pytest.warns(exceptions.W01) as record:
exceptions._suppressed_warning(warn, cfg)
assert len(record) == 1
assert 'suppressing' not in str(record[0].message)
with pytest.warns(exceptions.W01, match='suppressing'):
exceptions._suppressed_warning(warn, cfg)
exceptions._suppressed_warning(warn, cfg)
assert cfg['_warning_counts'][exceptions.W01] == 3
assert exceptions.conf.max_warnings == 10
|
64cbcc0db6659d7659fdf8870c2c00053458780505985b52b64daf33e16e6aca | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A set of tests for the util.py module
"""
import pytest
from astropy.io.votable import util
def test_range_list():
assert util.coerce_range_list_param((5,)) == ("5.0", 1)
def test_range_list2():
assert util.coerce_range_list_param((5e-7, 8e-7)) == ("5e-07,8e-07", 2)
def test_range_list3():
assert util.coerce_range_list_param((5e-7, 8e-7, "FOO")) == (
"5e-07,8e-07;FOO", 3)
def test_range_list4a():
with pytest.raises(ValueError):
util.coerce_range_list_param(
(5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO"))
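# With numeric=False the same argument is accepted: words such as "J" and
# "FOO" are only rejected when a purely numeric range-list is required.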
def test_range_list4():
assert (util.coerce_range_list_param(
(5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO"), numeric=False) ==
("5e-07,/8e-07,4/,4/5,J;FOO", 6))
def test_range_list5():
with pytest.raises(ValueError):
util.coerce_range_list_param(('FOO', ))
def test_range_list6():
with pytest.raises(ValueError):
print(util.coerce_range_list_param((5, 'FOO'), util.stc_reference_frames))
def test_range_list7():
assert util.coerce_range_list_param(("J",), numeric=False) == ("J", 1)
def test_range_list8():
for s in ["5.0",
"5e-07,8e-07",
"5e-07,8e-07;FOO",
"5e-07,/8e-07,4.0/,4.0/5.0;FOO",
"J"]:
assert util.coerce_range_list_param(s, numeric=False)[0] == s
def test_range_list9a():
with pytest.raises(ValueError):
util.coerce_range_list_param("52,-27.8;FOO", util.stc_reference_frames)
def test_range_list9():
assert util.coerce_range_list_param(
"52,-27.8;GALACTIC", util.stc_reference_frames)
|
67107f67631a6e3ec82d177e1d0425399293c4d71abb0d57369ff375920814cc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from astropy.io.votable import parse
from astropy.utils.data import get_pkg_data_filename
def test_resource_groups():
# Read the VOTABLE
votable = parse(get_pkg_data_filename('data/resource_groups.xml'))
resource = votable.resources[0]
groups = resource.groups
params = resource.params
    # Test that params inside groups do not also appear at the resource level
assert len(groups[0].entries) == 1
assert groups[0].entries[0].name == "ID"
assert len(params) == 2
assert params[0].name == "standardID"
assert params[1].name == "accessURL"
|
f171841265060dc910253b4536655783b18836ae7bd4b80de0e2e96b57bf2cec | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of regression tests for vo.
"""
# STDLIB
import difflib
import io
import pathlib
import sys
import gzip
from unittest import mock
# THIRD-PARTY
import pytest
import numpy as np
from numpy.testing import assert_array_equal
# LOCAL
from astropy.io.votable.table import parse, parse_single_table, validate
from astropy.io.votable import tree
from astropy.io.votable.exceptions import VOTableSpecError, VOWarning, W39
from astropy.io.votable.xmlutil import validate_schema
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_filenames
# Determine the kind of float formatting in this build of Python
if hasattr(sys, 'float_repr_style'):
legacy_float_repr = (sys.float_repr_style == 'legacy')
else:
legacy_float_repr = sys.platform.startswith('win')
def assert_validate_schema(filename, version):
if sys.platform.startswith('win'):
return
try:
rc, stdout, stderr = validate_schema(filename, version)
except OSError:
# If xmllint is not installed, we want the test to pass anyway
return
assert rc == 0, 'File did not validate against VOTable schema'
def test_parse_single_table():
table = parse_single_table(get_pkg_data_filename('data/regression.xml'))
assert isinstance(table, tree.Table)
assert len(table.array) == 5
def test_parse_single_table2():
table2 = parse_single_table(get_pkg_data_filename('data/regression.xml'),
table_number=1)
assert isinstance(table2, tree.Table)
assert len(table2.array) == 1
assert len(table2.array.dtype.names) == 28
def test_parse_single_table3():
with pytest.raises(IndexError):
parse_single_table(get_pkg_data_filename('data/regression.xml'),
table_number=3)
def _test_regression(tmpdir, _python_based=False, binary_mode=1):
# Read the VOTABLE
votable = parse(get_pkg_data_filename('data/regression.xml'),
_debug_python_based_parser=_python_based)
table = votable.get_first_table()
dtypes = [
(('string test', 'string_test'), '|O8'),
(('fixed string test', 'string_test_2'), '<U10'),
('unicode_test', '|O8'),
(('unicode test', 'fixed_unicode_test'), '<U10'),
(('string array test', 'string_array_test'), '<U4'),
('unsignedByte', '|u1'),
('short', '<i2'),
('int', '<i4'),
('long', '<i8'),
('double', '<f8'),
('float', '<f4'),
('array', '|O8'),
('bit', '|b1'),
('bitarray', '|b1', (3, 2)),
('bitvararray', '|O8'),
('bitvararray2', '|O8'),
('floatComplex', '<c8'),
('doubleComplex', '<c16'),
('doubleComplexArray', '|O8'),
('doubleComplexArrayFixed', '<c16', (2,)),
('boolean', '|b1'),
('booleanArray', '|b1', (4,)),
('nulls', '<i4'),
('nulls_array', '<i4', (2, 2)),
('precision1', '<f8'),
('precision2', '<f8'),
('doublearray', '|O8'),
('bitarray2', '|b1', (16,))
]
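    # On big-endian machines, flip the expected dtypes from little- to
    # big-endian before comparing.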
if sys.byteorder == 'big':
new_dtypes = []
for dtype in dtypes:
dtype = list(dtype)
dtype[1] = dtype[1].replace('<', '>')
new_dtypes.append(tuple(dtype))
dtypes = new_dtypes
assert table.array.dtype == dtypes
votable.to_xml(str(tmpdir.join("regression.tabledata.xml")),
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.tabledata.xml")),
votable.version)
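    # Re-serialize through the requested binary format: 'binary' is paired
    # with VOTable 1.1 here, while 'binary2' requires version 1.3 or later.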
if binary_mode == 1:
votable.get_first_table().format = 'binary'
votable.version = '1.1'
elif binary_mode == 2:
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
votable.version = '1.3'
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "wb") as fd:
votable.to_xml(fd, _debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.binary.xml")),
votable.version)
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "rb") as fd:
votable2 = parse(fd, _debug_python_based_parser=_python_based)
votable2.get_first_table().format = 'tabledata'
votable2.to_xml(str(tmpdir.join("regression.bin.tabledata.xml")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.bin.tabledata.xml")),
votable.version)
with open(
get_pkg_data_filename(
f'data/regression.bin.tabledata.truth.{votable.version}.xml'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
with open(str(tmpdir.join("regression.bin.tabledata.xml")),
'rt', encoding='utf-8') as fd:
output = fd.readlines()
# If the lines happen to be different, print a diff
# This is convenient for debugging
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
# Test implicit gzip saving
votable2.to_xml(
str(tmpdir.join("regression.bin.tabledata.xml.gz")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
with gzip.GzipFile(
str(tmpdir.join("regression.bin.tabledata.xml.gz")), 'rb') as gzfd:
output = gzfd.readlines()
output = [x.decode('utf-8').rstrip() for x in output]
truth = [x.rstrip() for x in truth]
assert truth == output
@pytest.mark.xfail('legacy_float_repr')
def test_regression(tmpdir):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmpdir, False)
@pytest.mark.xfail('legacy_float_repr')
def test_regression_python_based_parser(tmpdir):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmpdir, True)
@pytest.mark.xfail('legacy_float_repr')
def test_regression_binary2(tmpdir):
# W39: Bit values can not be masked
with pytest.warns(W39):
_test_regression(tmpdir, False, 2)
class TestFixups:
def setup_class(self):
self.table = parse(
get_pkg_data_filename('data/regression.xml')).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_implicit_id(self):
assert_array_equal(self.array['string_test_2'],
self.array['fixed string test'])
class TestReferences:
def setup_class(self):
self.votable = parse(get_pkg_data_filename('data/regression.xml'))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == 'boolean'
assert fieldref.get_ref().datatype == 'boolean'
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == 'INPUT'
assert paramref.get_ref().datatype == 'float'
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
def test_select_columns_by_index():
columns = [0, 5, 13]
table = parse(
get_pkg_data_filename('data/regression.xml'), columns=columns).get_first_table() # noqa
array = table.array
mask = table.array.mask
assert array['string_test'][0] == "String & test"
columns = ['string_test', 'unsignedByte', 'bitarray']
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
def test_select_columns_by_name():
columns = ['string_test', 'unsignedByte', 'bitarray']
table = parse(
get_pkg_data_filename('data/regression.xml'), columns=columns).get_first_table() # noqa
array = table.array
mask = table.array.mask
assert array['string_test'][0] == "String & test"
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
class TestParse:
def setup_class(self):
self.votable = parse(get_pkg_data_filename('data/regression.xml'))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array['string_test'].dtype.type,
np.object_)
assert_array_equal(
self.array['string_test'],
['String & test', 'String & test', 'XXXX', '', ''])
def test_fixed_string_test(self):
assert issubclass(self.array['string_test_2'].dtype.type,
np.unicode_)
assert_array_equal(
self.array['string_test_2'],
['Fixed stri', '0123456789', 'XXXX', '', ''])
def test_unicode_test(self):
assert issubclass(self.array['unicode_test'].dtype.type,
np.object_)
assert_array_equal(self.array['unicode_test'],
["Ceçi n'est pas un pipe",
'வணக்கம்',
'XXXX', '', ''])
def test_fixed_unicode_test(self):
assert issubclass(self.array['fixed_unicode_test'].dtype.type,
np.unicode_)
assert_array_equal(self.array['fixed_unicode_test'],
["Ceçi n'est",
'வணக்கம்',
'0123456789', '', ''])
def test_unsignedByte(self):
assert issubclass(self.array['unsignedByte'].dtype.type,
np.uint8)
assert_array_equal(self.array['unsignedByte'],
[128, 255, 0, 255, 255])
assert not np.any(self.mask['unsignedByte'])
def test_short(self):
assert issubclass(self.array['short'].dtype.type,
np.int16)
assert_array_equal(self.array['short'],
[4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask['short'])
def test_int(self):
assert issubclass(self.array['int'].dtype.type,
np.int32)
assert_array_equal(
self.array['int'],
[268435456, 2147483647, -268435456, 268435455, 123456789])
assert_array_equal(self.mask['int'],
[False, False, False, False, True])
def test_long(self):
assert issubclass(self.array['long'].dtype.type,
np.int64)
assert_array_equal(
self.array['long'],
[922337203685477, 123456789, -1152921504606846976,
1152921504606846975, 123456789])
assert_array_equal(self.mask['long'],
[False, True, False, False, True])
def test_double(self):
assert issubclass(self.array['double'].dtype.type,
np.float64)
assert_array_equal(self.array['double'],
[8.9990234375, 0.0, np.inf, np.nan, -np.inf])
assert_array_equal(self.mask['double'],
[False, False, False, True, False])
def test_float(self):
assert issubclass(self.array['float'].dtype.type,
np.float32)
assert_array_equal(self.array['float'],
[1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask['float'],
[False, False, False, False, True])
def test_array(self):
assert issubclass(self.array['array'].dtype.type,
np.object_)
match = [[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]]]
for a, b in zip(self.array['array'], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
assert self.array.data['array'][3].mask[0][0]
assert self.array.data['array'][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array['bit'].dtype.type,
np.bool_)
assert_array_equal(self.array['bit'],
[True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array['bitarray'].dtype.type,
np.bool_)
assert self.array['bitarray'].shape == (5, 3, 2)
assert_array_equal(self.array['bitarray'],
[[[True, False],
[True, True],
[False, True]],
[[False, True],
[False, False],
[True, True]],
[[True, True],
[True, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]]])
def test_bitarray_mask(self):
assert_array_equal(self.mask['bitarray'],
[[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[True, True],
[True, True],
[True, True]],
[[True, True],
[True, True],
[True, True]]])
def test_bitvararray(self):
assert issubclass(self.array['bitvararray'].dtype.type,
np.object_)
match = [[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[], []]
for a, b in zip(self.array['bitvararray'], match):
assert_array_equal(a, b)
match_mask = [[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False, False]
for a, b in zip(self.array['bitvararray'], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array['bitvararray2'].dtype.type,
np.object_)
match = [[],
[[[False, True],
[False, False],
[True, False]],
[[True, False],
[True, False],
[True, False]]],
[[[True, True],
[True, True],
[True, True]]],
[],
[]]
for a, b in zip(self.array['bitvararray2'], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array['floatComplex'].dtype.type,
np.complex64)
assert_array_equal(self.array['floatComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+0j, np.nan+0j])
assert_array_equal(self.mask['floatComplex'],
[True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array['doubleComplex'].dtype.type,
np.complex128)
assert_array_equal(
self.array['doubleComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+(np.inf*1j), np.nan+0j])
assert_array_equal(self.mask['doubleComplex'],
[True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array['doubleComplexArray'].dtype.type,
np.object_)
assert ([len(x) for x in self.array['doubleComplexArray']] ==
[0, 2, 2, 0, 0])
def test_boolean(self):
assert issubclass(self.array['boolean'].dtype.type,
np.bool_)
assert_array_equal(self.array['boolean'],
[True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask['boolean'],
[False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array['booleanArray'].dtype.type,
np.bool_)
assert_array_equal(self.array['booleanArray'],
[[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False]])
def test_boolean_array_mask(self):
assert_array_equal(self.mask['booleanArray'],
[[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True]])
def test_nulls(self):
assert_array_equal(self.array['nulls'],
[0, -9, 2, -9, -9])
assert_array_equal(self.mask['nulls'],
[False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(self.array['nulls_array'],
[[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]]])
assert_array_equal(self.mask['nulls_array'],
[[[True, True],
[True, True]],
[[False, False],
[False, False]],
[[True, False],
[True, False]],
[[False, True],
[False, True]],
[[True, True],
[True, True]]])
def test_double_array(self):
assert issubclass(self.array['doublearray'].dtype.type,
np.object_)
assert len(self.array['doublearray'][0]) == 0
assert_array_equal(self.array['doublearray'][1],
[0, 1, np.inf, -np.inf, np.nan, 0, -1])
assert_array_equal(self.array.data['doublearray'][1].mask,
[False, False, False, False, False, False, True])
def test_bit_array2(self):
assert_array_equal(self.array['bitarray2'][0],
[True, True, True, True,
False, False, False, False,
True, True, True, True,
False, False, False, False])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'][0])
assert np.all(self.mask['bitarray2'][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id('J2000')
assert coosys.system == 'eq_FK5'
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id('QUERY_STATUS')
assert info.value == 'OK'
if self.votable.version != '1.1':
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..." # noqa
def test_repr(self):
assert '3 tables' in repr(self.votable)
assert repr(list(self.votable.iter_fields_and_params())[0]) == \
'<PARAM ID="awesome" arraysize="*" datatype="float" name="INPUT" unit="deg" value="[0.0 0.0]"/>' # noqa
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == '[</>]'
class TestThroughTableData(TestParse):
def setup_class(self):
votable = parse(get_pkg_data_filename('data/regression.xml'))
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
def test_schema(self, tmpdir):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = str(tmpdir.join("test_through_tabledata.xml"))
with open(fn, 'wb') as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, '1.1')
class TestThroughBinary(TestParse):
def setup_class(self):
votable = parse(get_pkg_data_filename('data/regression.xml'))
votable.get_first_table().format = 'binary'
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
# Masked values in bit fields don't roundtrip through the binary
# representation -- that's not a bug, just a limitation, so
# override the mask array checks here.
def test_bit_mask(self):
assert not np.any(self.mask['bit'])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
class TestThroughBinary2(TestParse):
def setup_class(self):
votable = parse(get_pkg_data_filename('data/regression.xml'))
votable.version = '1.3'
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
self.xmlout = bio = io.BytesIO()
# W39: Bit values can not be masked
with pytest.warns(W39):
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_get_coosys_by_id(self):
# No COOSYS in VOTable 1.2 or later
pass
def table_from_scratch():
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, ID="filename", datatype="char"),
Field(votable, ID="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
out = io.StringIO()
votable.to_xml(out)
def test_open_files():
for filename in get_pkg_data_filenames('data', pattern='*.xml'):
if (filename.endswith('custom_datatype.xml') or
filename.endswith('timesys_errors.xml')):
continue
parse(filename)
def test_too_many_columns():
with pytest.raises(VOTableSpecError):
parse(get_pkg_data_filename('data/too_many_columns.xml.gz'))
def test_build_from_scratch(tmpdir):
# Create a new VOTable file...
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
tree.Field(votable, ID="filename", name='filename', datatype="char",
arraysize='1'),
tree.Field(votable, ID="matrix", name='matrix', datatype="double",
arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
votable.to_xml(str(tmpdir.join("new_votable.xml")))
votable = parse(str(tmpdir.join("new_votable.xml")))
table = votable.get_first_table()
assert_array_equal(
table.array.mask, np.array([(False, [[False, False], [False, False]]),
(False, [[False, False], [False, False]])],
dtype=[('filename', '?'),
('matrix', '?', (2, 2))]))
def test_validate(test_path_object=False):
"""
    ``test_path_object`` is used by ``test_validate_path_object`` below so
    that the file can be passed as a pathlib.Path object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename('data/regression.xml')
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(fpath, output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/validation.txt'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
@mock.patch('subprocess.Popen')
def test_validate_xmllint_true(mock_subproc_popen):
process_mock = mock.Mock()
attrs = {'communicate.return_value': ('ok', 'ko'),
'returncode': 0}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
assert validate(get_pkg_data_filename('data/empty_table.xml'),
xmllint=True)
def test_validate_path_object():
"""
    Validate when the source is passed as a path object (#4412).
"""
test_validate(test_path_object=True)
def test_gzip_filehandles(tmpdir):
votable = parse(get_pkg_data_filename('data/regression.xml'))
# W39: Bit values can not be masked
with pytest.warns(W39):
with open(str(tmpdir.join("regression.compressed.xml")), 'wb') as fd:
votable.to_xml(fd, compressed=True, _astropy_version="testing")
with open(str(tmpdir.join("regression.compressed.xml")), 'rb') as fd:
votable = parse(fd)
def test_from_scratch_example():
_run_test_from_scratch_example()
def _run_test_from_scratch_example():
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, name="filename", datatype="char", arraysize="*"),
Field(votable, name="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
assert table.array[0][0] == 'test1.xml'
def test_fileobj():
# Assert that what we get back is a raw C file pointer
# so it will be super fast in the C extension.
from astropy.utils.xml import iterparser
filename = get_pkg_data_filename('data/regression.xml')
with iterparser._convert_to_fd_or_read_function(filename) as fd:
if sys.platform == 'win32':
fd()
else:
assert isinstance(fd, io.FileIO)
def test_nonstandard_units():
from astropy import units as u
votable = parse(get_pkg_data_filename('data/nonstandard_units.xml'))
assert isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
votable = parse(get_pkg_data_filename('data/nonstandard_units.xml'),
unit_format='generic')
assert not isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
def test_resource_structure():
# Based on issue #1223, as reported by @astro-friedel and @RayPlante
from astropy.io.votable import tree as vot
vtf = vot.VOTableFile()
r1 = vot.Resource()
vtf.resources.append(r1)
t1 = vot.Table(vtf)
t1.name = "t1"
t2 = vot.Table(vtf)
t2.name = 't2'
r1.tables.append(t1)
r1.tables.append(t2)
r2 = vot.Resource()
vtf.resources.append(r2)
t3 = vot.Table(vtf)
t3.name = "t3"
t4 = vot.Table(vtf)
t4.name = "t4"
r2.tables.append(t3)
r2.tables.append(t4)
r3 = vot.Resource()
vtf.resources.append(r3)
t5 = vot.Table(vtf)
t5.name = "t5"
t6 = vot.Table(vtf)
t6.name = "t6"
r3.tables.append(t5)
r3.tables.append(t6)
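    # Round-trip through XML and check that the resource/table layout
    # (three resources, two tables each) is preserved.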
buff = io.BytesIO()
vtf.to_xml(buff)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 3
for r in range(len(vtf2.resources)):
res = vtf2.resources[r]
assert len(res.tables) == 2
assert len(res.resources) == 0
def test_no_resource_check():
output = io.StringIO()
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(get_pkg_data_filename('data/no_resource.xml'),
output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('no_resource.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/no_resource.txt'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
def test_instantiate_vowarning():
# This used to raise a deprecation exception.
# See https://github.com/astropy/astroquery/pull/276
VOWarning(())
def test_custom_datatype():
votable = parse(get_pkg_data_filename('data/custom_datatype.xml'),
datatype_mapping={'bar': 'int'})
table = votable.get_first_table()
assert table.array.dtype['foo'] == np.int32
def _timesys_tests(votable):
assert len(list(votable.iter_timesys())) == 4
timesys = votable.get_timesys_by_id('time_frame')
assert timesys.timeorigin == 2455197.5
assert timesys.timescale == 'TCB'
assert timesys.refposition == 'BARYCENTER'
timesys = votable.get_timesys_by_id('mjd_origin')
assert timesys.timeorigin == 'MJD-origin'
assert timesys.timescale == 'TDB'
assert timesys.refposition == 'EMBARYCENTER'
timesys = votable.get_timesys_by_id('jd_origin')
assert timesys.timeorigin == 'JD-origin'
assert timesys.timescale == 'TT'
assert timesys.refposition == 'HELIOCENTER'
timesys = votable.get_timesys_by_id('no_origin')
assert timesys.timeorigin is None
assert timesys.timescale == 'UTC'
assert timesys.refposition == 'TOPOCENTER'
def test_timesys():
votable = parse(get_pkg_data_filename('data/timesys.xml'))
_timesys_tests(votable)
def test_timesys_roundtrip():
orig_votable = parse(get_pkg_data_filename('data/timesys.xml'))
bio = io.BytesIO()
orig_votable.to_xml(bio)
bio.seek(0)
votable = parse(bio)
_timesys_tests(votable)
def test_timesys_errors():
output = io.StringIO()
validate(get_pkg_data_filename('data/timesys_errors.xml'), output,
xmllint=False)
outstr = output.getvalue()
assert("E23: Invalid timeorigin attribute 'bad-origin'" in outstr)
assert("E22: ID attribute is required for all TIMESYS elements" in outstr)
assert("W48: Unknown attribute 'refposition_mispelled' on TIMESYS"
in outstr)
|
ec61ac2e05071b9c1fc6007b533b5d0a34b77237cdd935516ca7b73be768e3b0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.io.votable import ucd
def test_none():
assert ucd.check_ucd(None)
examples = {
'phys.temperature':
[('ivoa', 'phys.temperature')],
'pos.eq.ra;meta.main':
[('ivoa', 'pos.eq.ra'), ('ivoa', 'meta.main')],
'meta.id;src':
[('ivoa', 'meta.id'), ('ivoa', 'src')],
'phot.flux;em.radio;arith.ratio':
[('ivoa', 'phot.flux'), ('ivoa', 'em.radio'), ('ivoa', 'arith.ratio')],
'PHot.Flux;EM.Radio;ivoa:arith.Ratio':
[('ivoa', 'phot.flux'), ('ivoa', 'em.radio'), ('ivoa', 'arith.ratio')],
'pos.galactic.lat':
[('ivoa', 'pos.galactic.lat')],
'meta.code;phot.mag':
[('ivoa', 'meta.code'), ('ivoa', 'phot.mag')],
'stat.error;phot.mag':
[('ivoa', 'stat.error'), ('ivoa', 'phot.mag')],
'phys.temperature;instr;stat.max':
[('ivoa', 'phys.temperature'), ('ivoa', 'instr'),
('ivoa', 'stat.max')],
'stat.error;phot.mag;em.opt.V':
[('ivoa', 'stat.error'), ('ivoa', 'phot.mag'), ('ivoa', 'em.opt.V')],
'phot.color;em.opt.B;em.opt.V':
[('ivoa', 'phot.color'), ('ivoa', 'em.opt.B'), ('ivoa', 'em.opt.V')],
'stat.error;phot.color;em.opt.B;em.opt.V':
[('ivoa', 'stat.error'), ('ivoa', 'phot.color'), ('ivoa', 'em.opt.B'),
('ivoa', 'em.opt.V')],
}
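# parse_ucd should split each UCD string into (namespace, word) pairs; the
# default namespace is 'ivoa' and word case is normalized to the controlled
# vocabulary.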
def test_check():
for s, p in examples.items():
assert ucd.parse_ucd(s, True, True) == p
assert ucd.check_ucd(s, True, True)
def test_too_many_colons():
with pytest.raises(ValueError):
ucd.parse_ucd("ivoa:stsci:phot", True, True)
def test_invalid_namespace():
with pytest.raises(ValueError):
ucd.parse_ucd("_ivoa:phot.mag", True, True)
def test_invalid_word():
with pytest.raises(ValueError):
ucd.parse_ucd("-pho")
|
799efe7320930754b8b1e975f9fac9e7669819f8d0db790d08fd75e865d2fb00 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the conversion to/from astropy.table
"""
import io
import os
import pathlib
import pytest
import numpy as np
from astropy.config import set_temp_config, reload_config
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_fileobj
from astropy.io.votable.table import parse, writeto
from astropy.io.votable import tree, conf, validate
from astropy.io.votable.exceptions import VOWarning, W39, E25
from astropy.table import Column, Table
from astropy.table.table_helpers import simple_table
from astropy.units import Unit
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH
def test_table(tmpdir):
# Read the VOTABLE
votable = parse(get_pkg_data_filename('data/regression.xml'))
table = votable.get_first_table()
astropy_table = table.to_table()
for name in table.array.dtype.names:
assert np.all(astropy_table.mask[name] == table.array.mask[name])
votable2 = tree.VOTableFile.from_table(astropy_table)
t = votable2.get_first_table()
field_types = [
('string_test', {'datatype': 'char', 'arraysize': '*'}),
('string_test_2', {'datatype': 'char', 'arraysize': '10'}),
('unicode_test', {'datatype': 'unicodeChar', 'arraysize': '*'}),
('fixed_unicode_test', {'datatype': 'unicodeChar', 'arraysize': '10'}),
('string_array_test', {'datatype': 'char', 'arraysize': '4'}),
('unsignedByte', {'datatype': 'unsignedByte'}),
('short', {'datatype': 'short'}),
('int', {'datatype': 'int'}),
('long', {'datatype': 'long'}),
('double', {'datatype': 'double'}),
('float', {'datatype': 'float'}),
('array', {'datatype': 'long', 'arraysize': '2*'}),
('bit', {'datatype': 'bit'}),
('bitarray', {'datatype': 'bit', 'arraysize': '3x2'}),
('bitvararray', {'datatype': 'bit', 'arraysize': '*'}),
('bitvararray2', {'datatype': 'bit', 'arraysize': '3x2*'}),
('floatComplex', {'datatype': 'floatComplex'}),
('doubleComplex', {'datatype': 'doubleComplex'}),
('doubleComplexArray', {'datatype': 'doubleComplex', 'arraysize': '*'}),
('doubleComplexArrayFixed', {'datatype': 'doubleComplex', 'arraysize': '2'}),
('boolean', {'datatype': 'bit'}),
('booleanArray', {'datatype': 'bit', 'arraysize': '4'}),
('nulls', {'datatype': 'int'}),
('nulls_array', {'datatype': 'int', 'arraysize': '2x2'}),
('precision1', {'datatype': 'double'}),
('precision2', {'datatype': 'double'}),
('doublearray', {'datatype': 'double', 'arraysize': '*'}),
('bitarray2', {'datatype': 'bit', 'arraysize': '16'})]
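    # Each converted column should map back onto the expected VOTable
    # datatype and arraysize.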
    for field, expected in zip(t.fields, field_types):
        name, d = expected
assert field.ID == name
        assert field.datatype == d['datatype'], f'{name} expected {d["datatype"]} but got {field.datatype}'  # noqa
if 'arraysize' in d:
assert field.arraysize == d['arraysize']
# W39: Bit values can not be masked
with pytest.warns(W39):
writeto(votable2, os.path.join(str(tmpdir), "through_table.xml"))
def test_read_through_table_interface(tmpdir):
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='main_table')
assert len(t) == 5
# Issue 8354
assert t['float'].format is None
fn = os.path.join(str(tmpdir), "table_interface.xml")
# W39: Bit values can not be masked
with pytest.warns(W39):
t.write(fn, table_id='FOO', format='votable')
with open(fn, 'rb') as fd:
t2 = Table.read(fd, format='votable', table_id='FOO')
assert len(t2) == 5
def test_read_through_table_interface2():
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='last_table')
assert len(t) == 0
def test_pass_kwargs_through_table_interface():
# Table.read() should pass on keyword arguments meant for parse()
filename = get_pkg_data_filename('data/nonstandard_units.xml')
t = Table.read(filename, format='votable', unit_format='generic')
assert t['Flux1'].unit == Unit("erg / (Angstrom cm2 s)")
def test_names_over_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=True)
assert table.colnames == [
'Name', 'GLON', 'GLAT', 'RAdeg', 'DEdeg', 'Jmag', 'Hmag', 'Kmag',
'G3.6mag', 'G4.5mag', 'G5.8mag', 'G8.0mag', '4.5mag', '8.0mag',
'Emag', '24mag', 'f_Name']
def test_explicit_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=False)
assert table.colnames == [
'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9',
'col10', 'col11', 'col12', 'col13', 'col14', 'col15', 'col16', 'col17']
def test_table_read_with_unnamed_tables():
"""
Issue #927
"""
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable')
assert len(t) == 1
def test_votable_path_object():
"""
    Test that a VOTable can be passed as a pathlib.Path object (#4412).
"""
fpath = pathlib.Path(get_pkg_data_filename('data/names.xml'))
table = parse(fpath).get_first_table().to_table()
assert len(table) == 1
assert int(table[0][3]) == 266
def test_from_table_without_mask():
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable')
def test_write_with_format():
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary")
obuff = output.getvalue()
assert b'VOTABLE version="1.4"' in obuff
assert b'BINARY' in obuff
assert b'TABLEDATA' not in obuff
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary2")
obuff = output.getvalue()
assert b'VOTABLE version="1.4"' in obuff
assert b'BINARY2' in obuff
assert b'TABLEDATA' not in obuff
def test_write_overwrite(tmpdir):
t = simple_table(3, 3)
filename = os.path.join(tmpdir, 'overwrite_test.vot')
t.write(filename, format='votable')
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(filename, format='votable')
t.write(filename, format='votable', overwrite=True)
def test_empty_table():
votable = parse(get_pkg_data_filename('data/empty_table.xml'))
table = votable.get_first_table()
astropy_table = table.to_table() # noqa
def test_no_field_not_empty_table():
votable = parse(get_pkg_data_filename('data/no_field_not_empty_table.xml'))
table = votable.get_first_table()
assert len(table.fields) == 0
assert len(table.infos) == 1
def test_no_field_not_empty_table_exception():
with pytest.raises(E25):
parse(get_pkg_data_filename('data/no_field_not_empty_table.xml'), verify='exception')
def test_binary2_masked_strings():
"""
Issue #8995
"""
# Read a VOTable which sets the null mask bit for each empty string value.
votable = parse(get_pkg_data_filename('data/binary2_masked_strings.xml'))
table = votable.get_first_table()
astropy_table = table.to_table()
# Ensure string columns have no masked values and can be written out
assert not np.any(table.array.mask['epoch_photometry_url'])
output = io.BytesIO()
astropy_table.write(output, format='votable')
def test_validate_output_invalid():
"""
Issue #12603. Test that we get the correct output from votable.validate with an invalid
votable.
"""
# A votable with errors
invalid_votable_filepath = get_pkg_data_filename('data/regression.xml')
# When output is None, check that validate returns validation output as a string
validate_out = validate(invalid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known error string
assert "E02: Incorrect number of elements in array." in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(invalid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is not valid)
assert validate_out is False
def test_validate_output_valid():
"""
Issue #12603. Test that we get the correct output from votable.validate with a valid
votable
"""
# A valid votable. (Example from the votable standard:
# https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html )
valid_votable_filepath = get_pkg_data_filename('data/valid_votable.xml')
# When output is None, check that validate returns validation output as a string
validate_out = validate(valid_votable_filepath, output=None)
assert isinstance(validate_out, str)
# Check for known good output string
assert "astropy.io.votable found no violations" in validate_out
# When output is not set, check that validate returns a bool
validate_out = validate(valid_votable_filepath)
assert isinstance(validate_out, bool)
# Check that validation output is correct (votable is valid)
assert validate_out is True
class TestVerifyOptions:
# Start off by checking the default (ignore)
def test_default(self):
parse(get_pkg_data_filename('data/gemini.xml'))
# Then try the various explicit options
def test_verify_ignore(self):
parse(get_pkg_data_filename('data/gemini.xml'), verify='ignore')
def test_verify_warn(self):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'), verify='warn')
assert len(w) == 24
def test_verify_exception(self):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'), verify='exception')
# Make sure the deprecated pedantic option still works for now
def test_pedantic_false(self):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'), pedantic=False)
assert len(w) == 25
def test_pedantic_true(self):
with pytest.warns(AstropyDeprecationWarning):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'), pedantic=True)
# Make sure that the default behavior can be set via configuration items
def test_conf_verify_ignore(self):
with conf.set_temp('verify', 'ignore'):
parse(get_pkg_data_filename('data/gemini.xml'))
def test_conf_verify_warn(self):
with conf.set_temp('verify', 'warn'):
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'))
assert len(w) == 24
def test_conf_verify_exception(self):
with conf.set_temp('verify', 'exception'):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'))
# And make sure the old configuration item will keep working
def test_conf_pedantic_false(self, tmpdir):
with set_temp_config(tmpdir.strpath):
with open(tmpdir.join('astropy').join('astropy.cfg').strpath, 'w') as f:
f.write('[io.votable]\npedantic = False')
reload_config('astropy.io.votable')
with pytest.warns(VOWarning) as w:
parse(get_pkg_data_filename('data/gemini.xml'))
assert len(w) == 25
def test_conf_pedantic_true(self, tmpdir):
with set_temp_config(tmpdir.strpath):
with open(tmpdir.join('astropy').join('astropy.cfg').strpath, 'w') as f:
f.write('[io.votable]\npedantic = True')
reload_config('astropy.io.votable')
with pytest.warns(AstropyDeprecationWarning):
with pytest.raises(VOWarning):
parse(get_pkg_data_filename('data/gemini.xml'))
|
302141eaf7677181bcbf668f38466da6b956e850cc797016d24b5396642a817d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
from astropy.io.votable.exceptions import W07, W08, W21, W41
from astropy.io.votable import tree
from astropy.io.votable.table import parse
from astropy.io.votable.tree import VOTableFile, Resource
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_check_astroyear_fail():
config = {'verify': 'exception'}
field = tree.Field(None, name='astroyear', arraysize='1')
with pytest.raises(W07):
tree.check_astroyear('X2100', field, config)
def test_string_fail():
config = {'verify': 'exception'}
with pytest.raises(W08):
tree.check_string(42, 'foo', config)
def test_make_Fields():
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
table.fields.extend([tree.Field(
votable, name='Test', datatype="float", unit="mag")])
def test_unit_format():
data = parse(get_pkg_data_filename('data/irsa-nph-error.xml'))
assert data._config['version'] == '1.0'
assert tree._get_default_unit_format(data._config) == 'cds'
data = parse(get_pkg_data_filename('data/names.xml'))
assert data._config['version'] == '1.1'
assert tree._get_default_unit_format(data._config) == 'cds'
data = parse(get_pkg_data_filename('data/gemini.xml'))
assert data._config['version'] == '1.2'
assert tree._get_default_unit_format(data._config) == 'cds'
data = parse(get_pkg_data_filename('data/binary2_masked_strings.xml'))
assert data._config['version'] == '1.3'
assert tree._get_default_unit_format(data._config) == 'cds'
data = parse(get_pkg_data_filename('data/timesys.xml'))
assert data._config['version'] == '1.4'
assert tree._get_default_unit_format(data._config) == 'vounit'
def test_namespace_warning():
"""
A version 1.4 VOTable must use the same namespace as 1.3.
(see https://www.ivoa.net/documents/VOTable/20191021/REC-VOTable-1.4-20191021.html#ToC16)
"""
bad_namespace = b'''<?xml version="1.0" encoding="utf-8"?>
<VOTABLE version="1.4" xmlns="http://www.ivoa.net/xml/VOTable/v1.4"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<RESOURCE/>
</VOTABLE>
'''
with pytest.warns(W41):
parse(io.BytesIO(bad_namespace), verify='exception')
good_namespace_14 = b'''<?xml version="1.0" encoding="utf-8"?>
<VOTABLE version="1.4" xmlns="http://www.ivoa.net/xml/VOTable/v1.3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<RESOURCE/>
</VOTABLE>
'''
parse(io.BytesIO(good_namespace_14), verify='exception')
good_namespace_13 = b'''<?xml version="1.0" encoding="utf-8"?>
<VOTABLE version="1.3" xmlns="http://www.ivoa.net/xml/VOTable/v1.3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<RESOURCE/>
</VOTABLE>
'''
parse(io.BytesIO(good_namespace_13), verify='exception')
def test_version():
"""
VOTableFile.__init__ allows versions of '1.0', '1.1', '1.2', '1.3' and '1.4'.
The '1.0' is curious since other checks in parse() and the version setter do not allow '1.0'.
This test confirms that behavior for now. A future change may remove the '1.0'.
"""
# Exercise the checks in __init__
with pytest.warns(AstropyDeprecationWarning):
VOTableFile(version='1.0')
for version in ('1.1', '1.2', '1.3', '1.4'):
VOTableFile(version=version)
for version in ('0.9', '2.0'):
with pytest.raises(ValueError, match=r"should be in \('1.0', '1.1', '1.2', '1.3', '1.4'\)."):
VOTableFile(version=version)
# Exercise the checks in the setter
vot = VOTableFile()
for version in ('1.1', '1.2', '1.3', '1.4'):
vot.version = version
for version in ('1.0', '2.0'):
with pytest.raises(ValueError, match=r"supports VOTable versions '1.1', '1.2', '1.3', '1.4'$"):
vot.version = version
# Exercise the checks in the parser.
begin = b'<?xml version="1.0" encoding="utf-8"?><VOTABLE version="'
middle = b'" xmlns="http://www.ivoa.net/xml/VOTable/v'
end = b'" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><RESOURCE/></VOTABLE>'
# Valid versions
for bversion in (b'1.1', b'1.2', b'1.3'):
parse(io.BytesIO(begin + bversion + middle + bversion + end), verify='exception')
parse(io.BytesIO(begin + b'1.4' + middle + b'1.3' + end), verify='exception')
# Invalid versions
for bversion in (b'1.0', b'2.0'):
with pytest.warns(W21):
parse(io.BytesIO(begin + bversion + middle + bversion + end), verify='exception')
def votable_xml_string(version):
votable_file = VOTableFile(version=version)
votable_file.resources.append(Resource())
xml_bytes = io.BytesIO()
votable_file.to_xml(xml_bytes)
xml_bytes.seek(0)
bstring = xml_bytes.read()
s = bstring.decode("utf-8")
return s
def test_votable_tag():
xml = votable_xml_string('1.1')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.1"' in xml
assert 'xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.1"' in xml
xml = votable_xml_string('1.2')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.2"' in xml
assert 'xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.2"' in xml
xml = votable_xml_string('1.3')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml
    assert ('xsi:schemaLocation="http://www.ivoa.net/xml/VOTable/v1.3 '
            'http://www.ivoa.net/xml/VOTable/VOTable-1.3.xsd"') in xml
xml = votable_xml_string('1.4')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml
    assert ('xsi:schemaLocation="http://www.ivoa.net/xml/VOTable/v1.3 '
            'http://www.ivoa.net/xml/VOTable/VOTable-1.4.xsd"') in xml
|
e3f3edc1381140ebc6414ff22021532d51d09e8be7f4672cc1e2548d48d508d0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
# THIRD-PARTY
import numpy as np
from numpy.testing import assert_array_equal
import pytest
# LOCAL
from astropy.io.votable import converters
from astropy.io.votable import exceptions
from astropy.io.votable import tree
from astropy.io.votable.table import parse_single_table
from astropy.utils.data import get_pkg_data_filename
def test_invalid_arraysize():
with pytest.raises(exceptions.E13):
field = tree.Field(
None, name='broken', datatype='char', arraysize='foo')
converters.get_converter(field)
def test_oversize_char():
config = {'verify': 'exception'}
with pytest.warns(exceptions.W47) as w:
field = tree.Field(
None, name='c', datatype='char',
config=config)
c = converters.get_converter(field, config=config)
assert len(w) == 1
with pytest.warns(exceptions.W46) as w:
c.parse("XXX")
assert len(w) == 1
def test_char_mask():
config = {'verify': 'exception'}
field = tree.Field(None, name='c', arraysize='1', datatype='char',
config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ''
def test_oversize_unicode():
config = {'verify': 'exception'}
with pytest.warns(exceptions.W46) as w:
field = tree.Field(
None, name='c2', datatype='unicodeChar',
arraysize='1', config=config)
c = converters.get_converter(field, config=config)
c.parse("XXX")
assert len(w) == 1
def test_unicode_mask():
config = {'verify': 'exception'}
field = tree.Field(None, name='c', arraysize='1', datatype='unicodeChar',
config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ''
def test_unicode_as_char():
config = {'verify': 'exception'}
field = tree.Field(
None, name='unicode_in_char', datatype='char',
arraysize='*', config=config)
c = converters.get_converter(field, config=config)
# Test parsing.
c.parse('XYZ') # ASCII succeeds
with pytest.warns(
exceptions.W55,
match=r'FIELD \(unicode_in_char\) has datatype="char" but contains non-ASCII value'):
c.parse("zła") # non-ASCII
# Test output.
c.output('XYZ', False) # ASCII str succeeds
c.output(b'XYZ', False) # ASCII bytes succeeds
value = 'zła'
value_bytes = value.encode('utf-8')
with pytest.warns(
exceptions.E24,
match=r'E24: Attempt to write non-ASCII value'):
        c.output(value, False)  # non-ASCII str warns E24
with pytest.warns(
exceptions.E24,
match=r'E24: Attempt to write non-ASCII value'):
        c.output(value_bytes, False)  # non-ASCII bytes warns E24
def test_unicode_as_char_binary():
config = {'verify': 'exception'}
field = tree.Field(
None, name='unicode_in_char', datatype='char',
arraysize='*', config=config)
c = converters.get_converter(field, config=config)
c._binoutput_var('abc', False) # ASCII succeeds
with pytest.raises(exceptions.E24, match=r"E24: Attempt to write non-ASCII value"):
c._binoutput_var('zła', False)
field = tree.Field(
None, name='unicode_in_char', datatype='char',
arraysize='3', config=config)
c = converters.get_converter(field, config=config)
c._binoutput_fixed('xyz', False)
with pytest.raises(exceptions.E24, match=r"E24: Attempt to write non-ASCII value"):
c._binoutput_fixed('zła', False)
def test_wrong_number_of_elements():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='int', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.E02):
c.parse("2 3 4 5 6")
def test_float_mask():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='float',
config=config)
c = converters.get_converter(field, config=config)
assert c.parse('') == (c.null, True)
with pytest.raises(ValueError):
c.parse('null')
def test_float_mask_permissive():
config = {'verify': 'ignore'}
field = tree.Field(
None, name='c', datatype='float',
config=config)
# config needs to be also passed into parse() to work.
# https://github.com/astropy/astropy/issues/8775
c = converters.get_converter(field, config=config)
assert c.parse('null', config=config) == (c.null, True)
def test_double_array():
config = {'verify': 'exception', 'version_1_3_or_later': True}
field = tree.Field(None, name='c', datatype='double', arraysize='3',
config=config)
data = (1.0, 2.0, 3.0)
c = converters.get_converter(field, config=config)
assert c.output(1.0, False) == '1'
assert c.output(1.0, [False, False]) == '1'
assert c.output(data, False) == '1 2 3'
assert c.output(data, [False, False, False]) == '1 2 3'
assert c.output(data, [False, False, True]) == '1 2 NaN'
assert c.output(data, [False, False]) == '1 2'
a = c.parse("1 2 3", config=config)
assert_array_equal(a[0], data)
assert_array_equal(a[1], False)
with pytest.raises(exceptions.E02):
c.parse("1", config=config)
with pytest.raises(AttributeError), pytest.warns(exceptions.E02):
c.parse("1")
with pytest.raises(exceptions.E02):
c.parse("2 3 4 5 6", config=config)
with pytest.warns(exceptions.E02):
a = c.parse("2 3 4 5 6")
assert_array_equal(a[0], [2, 3, 4])
assert_array_equal(a[1], False)
def test_complex_array_vararray():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='floatComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.E02):
c.parse("2 3 4 5 6")
def test_complex_array_vararray2():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='floatComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("")
assert len(x[0]) == 0
def test_complex_array_vararray3():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='doubleComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4 5 6 7 8 9 10 11 12")
assert len(x) == 2
assert np.all(x[0][0][0] == complex(1, 2))
def test_complex_vararray():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='doubleComplex', arraysize='*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4")
assert len(x) == 2
assert x[0][0] == complex(1, 2)
def test_complex():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='doubleComplex',
config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.E03):
c.parse("1 2 3")
def test_bit():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='bit',
config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.E04):
c.parse("T")
def test_bit_mask():
config = {'verify': 'exception'}
with pytest.warns(exceptions.W39) as w:
field = tree.Field(
None, name='c', datatype='bit',
config=config)
c = converters.get_converter(field, config=config)
c.output(True, True)
assert len(w) == 1
def test_boolean():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='boolean',
config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.E05):
c.parse('YES')
def test_boolean_array():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='boolean', arraysize='*',
config=config)
c = converters.get_converter(field, config=config)
r, mask = c.parse('TRUE FALSE T F 0 1')
assert_array_equal(r, [True, False, True, False, False, True])
def test_invalid_type():
config = {'verify': 'exception'}
with pytest.raises(exceptions.E06):
field = tree.Field(
None, name='c', datatype='foobar',
config=config)
converters.get_converter(field, config=config)
def test_precision():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='float', precision="E4",
config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == '266.2'
field = tree.Field(
None, name='c', datatype='float', precision="F4",
config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == '266.2480'
def test_integer_overflow():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='int', config=config)
c = converters.get_converter(field, config=config)
with pytest.raises(exceptions.W51):
c.parse('-2208988800', config=config)
def test_float_default_precision():
config = {'verify': 'exception'}
field = tree.Field(
None, name='c', datatype='float', arraysize="4",
config=config)
c = converters.get_converter(field, config=config)
assert (c.output([1, 2, 3, 8.9990234375], [False, False, False, False]) ==
'1 2 3 8.9990234375')
def test_vararray():
votable = tree.VOTableFile()
resource = tree.Resource()
votable.resources.append(resource)
table = tree.Table(votable)
resource.tables.append(table)
tabarr = []
heads = ['headA', 'headB', 'headC']
types = ["char", "double", "int"]
vals = [["A", 1.0, 2],
["B", 2.0, 3],
["C", 3.0, 4]]
for i in range(len(heads)):
tabarr.append(tree.Field(
votable, name=heads[i], datatype=types[i], arraysize="*"))
table.fields.extend(tabarr)
table.create_arrays(len(vals))
for i in range(len(vals)):
values = tuple(vals[i])
table.array[i] = values
buff = io.BytesIO()
votable.to_xml(buff)
def test_gemini_v1_2():
'''
see Pull Request 4782 or Issue 4781 for details
'''
table = parse_single_table(get_pkg_data_filename('data/gemini.xml'))
assert table is not None
tt = table.to_table()
assert tt['access_url'][0] == (
'http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/data/pub/GEMINI/'
'S20120515S0064?runid=bx9b1o8cvk1qesrt')
|
136c3a17b28bbffd2801a59c60a601f4115ade17ff7d18ad0cd8cb3024431f55 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class to handle a validation result for a single VOTable
file.
"""
# STDLIB
from xml.parsers.expat import ExpatError
import hashlib
import os
import shutil
import socket
import subprocess
import warnings
import pickle
import urllib.request
import urllib.error
import http.client
# VO
from astropy.io.votable import table
from astropy.io.votable import exceptions
from astropy.io.votable import xmlutil
class Result:
def __init__(self, url, root='results', timeout=10):
self.url = url
m = hashlib.md5()
m.update(url)
self._hash = m.hexdigest()
self._root = root
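        # Shard results on disk by the MD5 of the URL: two two-character
        # directory levels plus the remainder of the hex digest, so no single
        # directory accumulates an unmanageable number of entries.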
self._path = os.path.join(
self._hash[0:2], self._hash[2:4], self._hash[4:])
if not os.path.exists(self.get_dirpath()):
os.makedirs(self.get_dirpath())
self.timeout = timeout
self.load_attributes()
def __enter__(self):
return self
def __exit__(self, *args):
self.save_attributes()
def get_dirpath(self):
return os.path.join(self._root, self._path)
def get_htmlpath(self):
return self._path
def get_attribute_path(self):
return os.path.join(self.get_dirpath(), "values.dat")
def get_vo_xml_path(self):
return os.path.join(self.get_dirpath(), "vo.xml")
# ATTRIBUTES
def load_attributes(self):
path = self.get_attribute_path()
if os.path.exists(path):
try:
with open(path, 'rb') as fd:
self._attributes = pickle.load(fd)
except Exception:
shutil.rmtree(self.get_dirpath())
os.makedirs(self.get_dirpath())
self._attributes = {}
else:
self._attributes = {}
def save_attributes(self):
path = self.get_attribute_path()
with open(path, 'wb') as fd:
pickle.dump(self._attributes, fd)
def __getitem__(self, key):
return self._attributes[key]
def __setitem__(self, key, val):
self._attributes[key] = val
def __contains__(self, key):
return key in self._attributes
# VO XML
def download_xml_content(self):
path = self.get_vo_xml_path()
if 'network_error' not in self._attributes:
self['network_error'] = None
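        # Downloads are cached on disk; if the file (or a recorded failure)
        # is already present, it is not fetched again.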
if os.path.exists(path):
return
def fail(reason):
reason = str(reason)
with open(path, 'wb') as fd:
fd.write(f'FAILED: {reason}\n'.encode('utf-8'))
self['network_error'] = reason
r = None
try:
r = urllib.request.urlopen(
self.url.decode('ascii'), timeout=self.timeout)
except urllib.error.URLError as e:
if hasattr(e, 'reason'):
reason = e.reason
else:
reason = e.code
fail(reason)
return
except http.client.HTTPException as e:
fail(f"HTTPException: {str(e)}")
return
except (socket.timeout, socket.error) as e:
fail("Timeout")
return
if r is None:
fail("Invalid URL")
return
try:
content = r.read()
except socket.timeout as e:
fail("Timeout")
return
else:
r.close()
with open(path, 'wb') as fd:
fd.write(content)
def get_xml_content(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
with open(path, 'rb') as fd:
content = fd.read()
return content
def validate_vo(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
self['version'] = ''
if 'network_error' in self and self['network_error'] is not None:
self['nwarnings'] = 0
self['nexceptions'] = 0
self['warnings'] = []
self['xmllint'] = None
self['warning_types'] = set()
return
nexceptions = 0
nwarnings = 0
t = None
lines = []
with open(path, 'rb') as input:
with warnings.catch_warnings(record=True) as warning_lines:
try:
t = table.parse(input, verify='warn', filename=path)
except (ValueError, TypeError, ExpatError) as e:
lines.append(str(e))
nexceptions += 1
lines = [str(x.message) for x in warning_lines] + lines
if t is not None:
self['version'] = version = t.version
else:
self['version'] = version = "1.0"
if 'xmllint' not in self:
# Now check the VO schema based on the version in
# the file.
try:
success, stdout, stderr = xmlutil.validate_schema(path, version)
            # OSError is raised when the XML file eats all available memory
            # and the system sends a kill signal.
except OSError as e:
self['xmllint'] = None
self['xmllint_content'] = str(e)
else:
self['xmllint'] = (success == 0)
self['xmllint_content'] = stderr
warning_types = set()
for line in lines:
w = exceptions.parse_vowarning(line)
if w['is_warning']:
nwarnings += 1
if w['is_exception']:
nexceptions += 1
warning_types.add(w['warning'])
self['nwarnings'] = nwarnings
self['nexceptions'] = nexceptions
self['warnings'] = lines
self['warning_types'] = warning_types
def has_warning(self, warning_code):
return warning_code in self['warning_types']
def match_expectations(self):
if 'network_error' not in self:
self['network_error'] = None
if self['expected'] == 'good':
return (not self['network_error'] and
self['nwarnings'] == 0 and
self['nexceptions'] == 0)
elif self['expected'] == 'incorrect':
return (not self['network_error'] and
(self['nwarnings'] > 0 or
self['nexceptions'] > 0))
elif self['expected'] == 'broken':
return self['network_error'] is not None
def validate_with_votlint(self, path_to_stilts_jar):
filename = self.get_vo_xml_path()
p = subprocess.Popen(
f"java -jar {path_to_stilts_jar} votlint validate=false {filename}",
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if len(stdout) or p.returncode:
self['votlint'] = False
else:
self['votlint'] = True
self['votlint_content'] = stdout
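# A minimal usage sketch for ``Result`` (the URL and root directory are
# hypothetical). Used as a context manager, it persists its attribute dict
# to ``values.dat`` on exit:
#
#     with Result(b'http://example.com/table.xml', root='results') as r:
#         r['expected'] = 'good'
#         r.download_xml_content()
#         r.validate_vo()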
def get_result_subsets(results, root, s=None):
all_results = []
correct = []
not_expected = []
fail_schema = []
schema_mismatch = []
fail_votlint = []
votlint_mismatch = []
network_failures = []
version_10 = []
version_11 = []
version_12 = []
version_unknown = []
has_warnings = []
warning_set = {}
has_exceptions = []
exception_set = {}
for url in results:
if s:
next(s)
if isinstance(url, Result):
x = url
else:
x = Result(url, root=root)
all_results.append(x)
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['xmllint'] is True):
correct.append(x)
if not x.match_expectations():
not_expected.append(x)
if x['xmllint'] is False:
fail_schema.append(x)
if (x['xmllint'] is False and
x['nwarnings'] == 0 and
x['nexceptions'] == 0):
schema_mismatch.append(x)
if 'votlint' in x and x['votlint'] is False:
fail_votlint.append(x)
if 'network_error' not in x:
x['network_error'] = None
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['network_error'] is None):
votlint_mismatch.append(x)
if 'network_error' in x and x['network_error'] is not None:
network_failures.append(x)
version = x['version']
if version == '1.0':
version_10.append(x)
elif version == '1.1':
version_11.append(x)
elif version == '1.2':
version_12.append(x)
else:
version_unknown.append(x)
if x['nwarnings'] > 0:
has_warnings.append(x)
for warning in x['warning_types']:
if (warning is not None and
len(warning) == 3 and
warning.startswith('W')):
warning_set.setdefault(warning, [])
warning_set[warning].append(x)
if x['nexceptions'] > 0:
has_exceptions.append(x)
for exc in x['warning_types']:
if exc is not None and len(exc) == 3 and exc.startswith('E'):
exception_set.setdefault(exc, [])
exception_set[exc].append(x)
warning_set = list(warning_set.items())
warning_set.sort()
exception_set = list(exception_set.items())
exception_set.sort()
tables = [
('all', 'All tests', all_results),
('correct', 'Correct', correct),
('unexpected', 'Unexpected', not_expected),
('schema', 'Invalid against schema', fail_schema),
('schema_mismatch', 'Invalid against schema/Passed vo.table',
schema_mismatch, ['ul']),
('fail_votlint', 'Failed votlint', fail_votlint),
('votlint_mismatch', 'Failed votlint/Passed vo.table',
votlint_mismatch, ['ul']),
('network_failures', 'Network failures', network_failures),
('version1.0', 'Version 1.0', version_10),
('version1.1', 'Version 1.1', version_11),
('version1.2', 'Version 1.2', version_12),
('version_unknown', 'Version unknown', version_unknown),
('warnings', 'Warnings', has_warnings)]
for warning_code, warning in warning_set:
if s:
next(s)
warning_class = getattr(exceptions, warning_code, None)
if warning_class:
warning_descr = warning_class.get_short_name()
tables.append(
(warning_code,
f'{warning_code}: {warning_descr}',
warning, ['ul', 'li']))
tables.append(
('exceptions', 'Exceptions', has_exceptions))
for exception_code, exc in exception_set:
if s:
next(s)
exception_class = getattr(exceptions, exception_code, None)
if exception_class:
exception_descr = exception_class.get_short_name()
tables.append(
(exception_code,
f'{exception_code}: {exception_descr}',
exc, ['ul', 'li']))
return tables
|
b84763cd1372d76a863bf68f389263bff9eca33159338a24731a1344482e027c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Validates a large collection of web-accessible VOTable files,
and generates a report as a directory tree of HTML files.
"""
# STDLIB
import os
# LOCAL
from astropy.utils.data import get_pkg_data_filename
from . import html
from . import result
__all__ = ['make_validation_report']
def get_srcdir():
return os.path.dirname(__file__)
def get_urls(destdir, s):
import gzip
types = ['good', 'broken', 'incorrect']
seen = set()
urls = []
for type in types:
filename = get_pkg_data_filename(
f'data/urls/cone.{type}.dat.gz')
with gzip.open(filename, 'rb') as fd:
for url in fd.readlines():
next(s)
url = url.strip()
if url not in seen:
with result.Result(url, root=destdir) as r:
r['expected'] = type
urls.append(url)
seen.add(url)
return urls
def download(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
r.download_xml_content()
def validate_vo(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
r.validate_vo()
def votlint_validate(args):
path_to_stilts_jar, url, destdir = args
with result.Result(url, root=destdir) as r:
if r['network_error'] is None:
r.validate_with_votlint(path_to_stilts_jar)
def write_html_result(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
html.write_result(r)
def write_subindex(args):
subset, destdir, total = args
html.write_index_table(destdir, *subset, total=total)
def make_validation_report(
urls=None, destdir='astropy.io.votable.validator.results',
multiprocess=True, stilts=None):
"""
Validates a large collection of web-accessible VOTable files.
Generates a report as a directory tree of HTML files.
Parameters
----------
urls : list of str, optional
If provided, is a list of HTTP urls to download VOTable files
from. If not provided, a built-in set of ~22,000 urls
compiled by HEASARC will be used.
destdir : path-like, optional
        The directory to write the report to. By default, this is a
        directory called ``'astropy.io.votable.validator.results'`` in the
        current directory. If the directory does not exist, it will be
        created.
multiprocess : bool, optional
If `True` (default), perform validations in parallel using all
of the cores on this machine.
stilts : path-like, optional
        To perform validation with ``votlint`` from the Java-based
`STILTS <http://www.star.bris.ac.uk/~mbt/stilts/>`_ VOTable
parser, in addition to `astropy.io.votable`, set this to the
path of the ``'stilts.jar'`` file. ``java`` on the system shell
path will be used to run it.
Notes
-----
Downloads of each given URL will be performed only once and cached
locally in *destdir*. To refresh the cache, remove *destdir*
first.
"""
from astropy.utils.console import (color_print, ProgressBar, Spinner)
if stilts is not None:
if not os.path.exists(stilts):
raise ValueError(
f'{stilts} does not exist.')
destdir = os.path.abspath(destdir)
if urls is None:
with Spinner('Loading URLs', 'green') as s:
urls = get_urls(destdir, s)
else:
color_print('Marking URLs', 'green')
for url in ProgressBar.iterate(urls):
with result.Result(url, root=destdir) as r:
                r['expected'] = 'good'
args = [(url, destdir) for url in urls]
color_print('Downloading VO files', 'green')
ProgressBar.map(
download, args, multiprocess=multiprocess)
color_print('Validating VO files', 'green')
ProgressBar.map(
validate_vo, args, multiprocess=multiprocess)
if stilts is not None:
color_print('Validating with votlint', 'green')
votlint_args = [(stilts, x, destdir) for x in urls]
ProgressBar.map(
votlint_validate, votlint_args, multiprocess=multiprocess)
color_print('Generating HTML files', 'green')
ProgressBar.map(
write_html_result, args, multiprocess=multiprocess)
with Spinner('Grouping results', 'green') as s:
subsets = result.get_result_subsets(urls, destdir, s)
color_print('Generating index', 'green')
html.write_index(subsets, urls, destdir)
color_print('Generating subindices', 'green')
subindex_args = [(subset, destdir, len(urls)) for subset in subsets]
ProgressBar.map(
write_subindex, subindex_args, multiprocess=multiprocess)
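# A minimal usage sketch (the URL and output directory are hypothetical;
# multiprocess=False makes failures easier to debug):
#
#     from astropy.io.votable.validator import make_validation_report
#     make_validation_report(urls=[b'http://example.com/votable.xml'],
#                            destdir='validation_results',
#                            multiprocess=False)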
|
5b296d948953d223920e2793842022832341423d8fa4cc9b0963e2730d64b313 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .main import make_validation_report
from . import main
__doc__ = main.__doc__
del main
|
dacf6530d9c444d1edff4a6d0812ad2cccd9748b5e76b6b37f2b75f44e80f577 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import contextlib
from math import ceil
import os
import re
# ASTROPY
from astropy.utils.xml.writer import XMLWriter, xml_escape
from astropy import online_docs_root
# VO
from astropy.io.votable import exceptions
html_header = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML Basic 1.0//EN"
"http://www.w3.org/TR/xhtml-basic/xhtml-basic10.dtd">
"""
default_style = """
body {
font-family: sans-serif
}
a {
text-decoration: none
}
.highlight {
color: red;
font-weight: bold;
text-decoration: underline;
}
.green { background-color: #ddffdd }
.red { background-color: #ffdddd }
.yellow { background-color: #ffffdd }
tr:hover { background-color: #dddddd }
table {
border-width: 1px;
border-spacing: 0px;
border-style: solid;
border-color: gray;
border-collapse: collapse;
background-color: white;
padding: 5px;
}
table th {
border-width: 1px;
padding: 5px;
border-style: solid;
border-color: gray;
}
table td {
border-width: 1px;
padding: 5px;
border-style: solid;
border-color: gray;
}
"""
@contextlib.contextmanager
def make_html_header(w):
w.write(html_header)
with w.tag('html', xmlns="http://www.w3.org/1999/xhtml", lang="en-US"):
with w.tag('head'):
w.element('title', 'VO Validation results')
w.element('style', default_style)
with w.tag('body'):
yield
def write_source_line(w, line, nchar=0):
part1 = xml_escape(line[:nchar].decode('utf-8'))
char = xml_escape(line[nchar:nchar+1].decode('utf-8'))
part2 = xml_escape(line[nchar+1:].decode('utf-8'))
w.write(' ')
w.write(part1)
w.write(f'<span class="highlight">{char}</span>')
w.write(part2)
w.write('\n\n')
def write_warning(w, line, xml_lines):
warning = exceptions.parse_vowarning(line)
if not warning['is_something']:
w.data(line)
else:
w.write(f"Line {warning['nline']:d}: ")
if warning['warning']:
w.write('<a href="{}/{}">{}</a>: '.format(
online_docs_root, warning['doc_url'], warning['warning']))
msg = warning['message']
if not isinstance(warning['message'], str):
msg = msg.decode('utf-8')
w.write(xml_escape(msg))
w.write('\n')
if 1 <= warning['nline'] < len(xml_lines):
write_source_line(w, xml_lines[warning['nline'] - 1], warning['nchar'])
def write_votlint_warning(w, line, xml_lines):
match = re.search(r"(WARNING|ERROR|INFO) \(l.(?P<line>[0-9]+), c.(?P<column>[0-9]+)\): (?P<rest>.*)", line)
if match:
w.write('Line {:d}: {}\n'.format(
int(match.group('line')), xml_escape(match.group('rest'))))
write_source_line(
w, xml_lines[int(match.group('line')) - 1],
int(match.group('column')) - 1)
else:
w.data(line)
w.data('\n')
def write_result(result):
if 'network_error' in result and result['network_error'] is not None:
return
xml = result.get_xml_content()
xml_lines = xml.splitlines()
path = os.path.join(result.get_dirpath(), 'index.html')
with open(path, 'w', encoding='utf-8') as fd:
w = XMLWriter(fd)
with make_html_header(w):
with w.tag('p'):
with w.tag('a', href='vo.xml'):
w.data(result.url.decode('ascii'))
w.element('hr')
with w.tag('pre'):
w._flush()
for line in result['warnings']:
write_warning(w, line, xml_lines)
if result['xmllint'] is False:
w.element('hr')
w.element('p', 'xmllint results:')
content = result['xmllint_content']
if not isinstance(content, str):
content = content.decode('ascii')
content = content.replace(result.get_dirpath() + '/', '')
with w.tag('pre'):
w.data(content)
if 'votlint' in result:
if result['votlint'] is False:
w.element('hr')
w.element('p', 'votlint results:')
content = result['votlint_content']
if not isinstance(content, str):
content = content.decode('ascii')
with w.tag('pre'):
w._flush()
for line in content.splitlines():
write_votlint_warning(w, line, xml_lines)
def write_result_row(w, result):
with w.tag('tr'):
with w.tag('td'):
if ('network_error' in result and
result['network_error'] is not None):
w.data(result.url.decode('ascii'))
else:
w.element('a', result.url.decode('ascii'),
href=f'{result.get_htmlpath()}/index.html')
if 'network_error' in result and result['network_error'] is not None:
w.element('td', str(result['network_error']),
attrib={'class': 'red'})
w.element('td', '-')
w.element('td', '-')
w.element('td', '-')
w.element('td', '-')
else:
w.element('td', '-', attrib={'class': 'green'})
if result['nexceptions']:
cls = 'red'
msg = 'Fatal'
elif result['nwarnings']:
cls = 'yellow'
msg = str(result['nwarnings'])
else:
cls = 'green'
msg = '-'
w.element('td', msg, attrib={'class': cls})
msg = result['version']
if result['xmllint'] is None:
cls = ''
elif result['xmllint'] is False:
cls = 'red'
else:
cls = 'green'
w.element('td', msg, attrib={'class': cls})
if result['expected'] == 'good':
cls = 'green'
msg = '-'
elif result['expected'] == 'broken':
cls = 'red'
msg = 'net'
elif result['expected'] == 'incorrect':
cls = 'yellow'
msg = 'invalid'
w.element('td', msg, attrib={'class': cls})
if 'votlint' in result:
if result['votlint']:
cls = 'green'
msg = 'Passed'
else:
cls = 'red'
msg = 'Failed'
else:
cls = ''
msg = '?'
w.element('td', msg, attrib={'class': cls})
def write_table(basename, name, results, root="results", chunk_size=500):
def write_page_links(j):
if npages <= 1:
return
with w.tag('center'):
if j > 0:
w.element('a', '<< ', href=f'{basename}_{j - 1:02d}.html')
for i in range(npages):
if i == j:
w.data(str(i+1))
else:
w.element(
'a', str(i+1),
href=f'{basename}_{i:02d}.html')
w.data(' ')
if j < npages - 1:
w.element('a', '>>', href=f'{basename}_{j + 1:02d}.html')
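    # Paginate the results into chunks of ``chunk_size`` rows; each chunk is
    # written to its own numbered HTML page with the links rendered above.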
npages = int(ceil(float(len(results)) / chunk_size))
for i, j in enumerate(range(0, max(len(results), 1), chunk_size)):
subresults = results[j:j+chunk_size]
path = os.path.join(root, f'{basename}_{i:02d}.html')
with open(path, 'w', encoding='utf-8') as fd:
w = XMLWriter(fd)
with make_html_header(w):
write_page_links(i)
w.element('h2', name)
with w.tag('table'):
with w.tag('tr'):
w.element('th', 'URL')
w.element('th', 'Network')
w.element('th', 'Warnings')
w.element('th', 'Schema')
w.element('th', 'Expected')
w.element('th', 'votlint')
for result in subresults:
write_result_row(w, result)
write_page_links(i)
def add_subset(w, basename, name, subresults, inside=['p'], total=None):
with w.tag('tr'):
subresults = list(subresults)
if total is None:
total = len(subresults)
if total == 0: # pragma: no cover
percentage = 0.0
else:
percentage = (float(len(subresults)) / total)
with w.tag('td'):
for element in inside:
w.start(element)
w.element('a', name, href=f'{basename}_00.html')
for element in reversed(inside):
w.end(element)
numbers = f'{len(subresults):d} ({percentage:.2%})'
with w.tag('td'):
w.data(numbers)
def write_index(subsets, results, root='results'):
path = os.path.join(root, 'index.html')
with open(path, 'w', encoding='utf-8') as fd:
w = XMLWriter(fd)
with make_html_header(w):
w.element('h1', 'VO Validation results')
with w.tag('table'):
for subset in subsets:
add_subset(w, *subset, total=len(results))
def write_index_table(root, basename, name, subresults, inside=None,
total=None, chunk_size=500):
if total is None:
total = len(subresults)
percentage = (float(len(subresults)) / total)
numbers = f'{len(subresults):d} ({percentage:.2%})'
write_table(basename, name + ' ' + numbers, subresults, root, chunk_size)
|
80796af0bff9732d7150410b8fec3b0b5cc6c56b40fa16440da556cda069f6c4 | import pytest
# These imports are renamed so that, when they end up in this namespace,
# pytest 3 will not discover them as tests and then complain that they
# have __init__ defined.
from astropy.tests.runner import TestRunner as _TestRunner
from astropy.tests.runner import TestRunnerBase as _TestRunnerBase
from astropy.tests.runner import keyword
def test_disable_kwarg():
class no_remote_data(_TestRunner):
@keyword()
def remote_data(self, remote_data, kwargs):
return NotImplemented
r = no_remote_data('.')
with pytest.raises(TypeError):
r.run_tests(remote_data='bob')
def test_wrong_kwarg():
r = _TestRunner('.')
with pytest.raises(TypeError):
r.run_tests(spam='eggs')
def test_invalid_kwarg():
class bad_return(_TestRunnerBase):
@keyword()
def remote_data(self, remote_data, kwargs):
return 'bob'
r = bad_return('.')
with pytest.raises(TypeError):
r.run_tests(remote_data='bob')
def test_new_kwarg():
class Spam(_TestRunnerBase):
@keyword()
def spam(self, spam, kwargs):
return [spam]
r = Spam('.')
args = r._generate_args(spam='spam')
assert ['spam'] == args
def test_priority():
class Spam(_TestRunnerBase):
@keyword()
def spam(self, spam, kwargs):
return [spam]
@keyword(priority=1)
def eggs(self, eggs, kwargs):
return [eggs]
r = Spam('.')
args = r._generate_args(spam='spam', eggs='eggs')
assert ['eggs', 'spam'] == args
def test_docs():
class Spam(_TestRunnerBase):
@keyword()
def spam(self, spam, kwargs):
"""
Spam Spam Spam
"""
return [spam]
@keyword()
def eggs(self, eggs, kwargs):
"""
eggs asldjasljd
"""
return [eggs]
r = Spam('.')
assert "eggs" in r.run_tests.__doc__
assert "Spam Spam Spam" in r.run_tests.__doc__
|
4fd06d604b4b81f42a9b155b083e5de6bbf1fb04d576197e33c37a61ea48c303 | import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
def test_assert_quantity_allclose():
assert_quantity_allclose([1, 2], [1, 2])
assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm)
assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=2 * u.cm)
with pytest.raises(AssertionError) as exc:
assert_quantity_allclose([1, 2] * u.m, [90, 200] * u.cm)
assert exc.value.args[0].startswith("\nNot equal to tolerance")
with pytest.raises(AssertionError):
assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=0.5 * u.cm)
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2] * u.m, [100, 200])
assert exc.value.args[0] == "Units for 'desired' () and 'actual' (m) are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [100, 200] * u.cm)
assert exc.value.args[0] == "Units for 'desired' (cm) and 'actual' () are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm, atol=0.3)
assert exc.value.args[0] == "Units for 'atol' () and 'actual' (m) are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [1, 2], atol=0.3 * u.m)
assert exc.value.args[0] == "Units for 'atol' (m) and 'actual' () are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [1, 2], rtol=0.3 * u.m)
assert exc.value.args[0] == "'rtol' should be dimensionless"
|
46b847a0f190eda3332c5d240e1fa98078e52a719f8e95632e01cc9430ea9afc | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
# test helper.run_tests function
from astropy import test as run_tests
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with pytest.raises(ValueError):
run_tests(package='fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with pytest.raises(ValueError):
run_tests(pastebin='not_an_option')
def test_unicode_literal_conversion():
assert isinstance('ångström', str)
|
003093ddbe516fab23534af692b00cb1c6b9b68f8ffa745c954a5aba4bd204bb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pkgutil
def test_imports():
"""
This just imports all modules in astropy, making sure they don't have any
dependencies that sneak through
"""
def onerror(name):
# We should raise any legitimate error that occurred, but not
# any warnings which happen to be caught because of our pytest
# settings (e.g., DeprecationWarning).
try:
raise
except Warning:
pass
for imper, nm, ispkg in pkgutil.walk_packages(['astropy'], 'astropy.',
onerror=onerror):
imper.find_spec(nm)
def test_toplevel_namespace():
import astropy
d = dir(astropy)
assert 'os' not in d
assert 'log' in d
assert 'test' in d
assert 'sys' not in d
|
2c1e56531ff092554294f57b32a0ce8b354acbf5a219995ae2a07f53021cdbae | from astropy.timeseries.periodograms.base import * # noqa
from astropy.timeseries.periodograms.lombscargle import * # noqa
from astropy.timeseries.periodograms.bls import * # noqa
|
3982d22bd6fe88980ffeb8fb9ea4acb70ff8faa9bd1abe926f2314ef028841c0 | import abc
import numpy as np
from astropy.timeseries import TimeSeries, BinnedTimeSeries
__all__ = ['BasePeriodogram']
class BasePeriodogram:
@abc.abstractmethod
def __init__(self, t, y, dy=None):
pass
@classmethod
def from_timeseries(cls, timeseries, signal_column_name=None, uncertainty=None, **kwargs):
"""
Initialize a periodogram from a time series object.
If a binned time series is passed, the time at the center of the bins is
used. Also note that this method automatically gets rid of NaN/undefined
values when initializing the periodogram.
Parameters
----------
signal_column_name : str
The name of the column containing the signal values to use.
uncertainty : str or float or `~astropy.units.Quantity`, optional
The name of the column containing the errors on the signal, or the
value to use for the error, if a scalar.
**kwargs
Additional keyword arguments are passed to the initializer for this
periodogram class.
"""
if signal_column_name is None:
raise ValueError('signal_column_name should be set to a valid column name')
y = timeseries[signal_column_name]
keep = ~np.isnan(y)
if isinstance(uncertainty, str):
dy = timeseries[uncertainty]
keep &= ~np.isnan(dy)
dy = dy[keep]
else:
dy = uncertainty
if isinstance(timeseries, TimeSeries):
time = timeseries.time
elif isinstance(timeseries, BinnedTimeSeries):
time = timeseries.time_bin_center
else:
raise TypeError('Input time series should be an instance of '
'TimeSeries or BinnedTimeSeries')
return cls(time[keep], y[keep], dy=dy, **kwargs)
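# A minimal usage sketch with a concrete subclass (``ts`` is an existing
# TimeSeries; the column names are hypothetical):
#
#     from astropy.timeseries import LombScargle
#     ls = LombScargle.from_timeseries(ts, signal_column_name='flux',
#                                      uncertainty='flux_err')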
|
47d264058631cb696b4223cd10b57ae68e3c15312ee2f5f1aef7a340a43e7d5c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from .kepler import *
|
45a497b417aa20f94636a4ad6f62cf6ae12f034183b3bbaaab8ac4fec51a2182 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy.io import registry, fits
from astropy.table import Table, MaskedColumn
from astropy.time import Time, TimeDelta
from astropy.timeseries.sampled import TimeSeries
__all__ = ["kepler_fits_reader"]
def kepler_fits_reader(filename):
"""
This serves as the FITS reader for KEPLER or TESS files within
astropy-timeseries.
This function should generally not be called directly, and instead this
time series reader should be accessed with the
:meth:`~astropy.timeseries.TimeSeries.read` method::
>>> from astropy.timeseries import TimeSeries
>>> ts = TimeSeries.read('kplr33122.fits', format='kepler.fits') # doctest: +SKIP
Parameters
----------
filename : `str` or `pathlib.Path`
File to load.
Returns
-------
ts : `~astropy.timeseries.TimeSeries`
Data converted into a TimeSeries.
"""
hdulist = fits.open(filename)
# Get the lightcurve HDU
telescope = hdulist[0].header['telescop'].lower()
if telescope == 'tess':
hdu = hdulist['LIGHTCURVE']
elif telescope == 'kepler':
hdu = hdulist[1]
else:
raise NotImplementedError("{} is not implemented, only KEPLER or TESS are "
"supported through this reader".format(hdulist[0].header['telescop']))
if hdu.header['EXTVER'] > 1:
raise NotImplementedError("Support for {} v{} files not yet "
"implemented".format(hdu.header['TELESCOP'], hdu.header['EXTVER']))
# Check time scale
if hdu.header['TIMESYS'] != 'TDB':
raise NotImplementedError("Support for {} time scale not yet "
"implemented in {} reader".format(hdu.header['TIMESYS'], hdu.header['TELESCOP']))
tab = Table.read(hdu, format='fits')
# Some KEPLER files have a T column instead of TIME.
if "T" in tab.colnames:
tab.rename_column("T", "TIME")
for colname in tab.colnames:
unit = tab[colname].unit
# Make masks nan for any column which will turn into a Quantity
# later. TODO: remove once we support Masked Quantities properly?
if unit and isinstance(tab[colname], MaskedColumn):
tab[colname] = tab[colname].filled(np.nan)
# Fix units
if unit == 'e-/s':
tab[colname].unit = 'electron/s'
if unit == 'pixels':
tab[colname].unit = 'pixel'
# Rename columns to lowercase
tab.rename_column(colname, colname.lower())
# Filter out NaN rows
nans = np.isnan(tab['time'].data)
if np.any(nans):
warnings.warn(f'Ignoring {np.sum(nans)} rows with NaN times')
tab = tab[~nans]
# Time column is dependent on source and we correct it here
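    # KEPLER/TESS store TIME as an offset in days from a Barycentric Julian
    # Date reference split across the integer (BJDREFI) and fractional
    # (BJDREFF) header keywords.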
reference_date = Time(hdu.header['BJDREFI'], hdu.header['BJDREFF'],
scale=hdu.header['TIMESYS'].lower(), format='jd')
time = reference_date + TimeDelta(tab['time'].data)
time.format = 'isot'
# Remove original time column
tab.remove_column('time')
hdulist.close()
return TimeSeries(time=time, data=tab)
registry.register_reader('kepler.fits', TimeSeries, kepler_fits_reader)
registry.register_reader('tess.fits', TimeSeries, kepler_fits_reader)
|
b5ff2cd9a80bcd4fb17b3d41127b29682d1df87e67dca8fabd56f33401594a84 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_equal
from astropy import units as u
from astropy.table import Table, QTable, vstack, join
from astropy.time import Time
from astropy.timeseries.sampled import TimeSeries
from astropy.timeseries.binned import BinnedTimeSeries
INPUT_TIME = Time(['2016-03-22T12:30:31', '2015-01-21T12:30:32', '2016-03-22T12:30:40'])
PLAIN_TABLE = Table([[1., 2., 11.], [3, 4, 1], ['x', 'y', 'z']], names=['a', 'b', 'c'])
class CommonTimeSeriesTests:
def test_stacking(self):
ts = vstack([self.series, self.series])
assert isinstance(ts, self.series.__class__)
def test_row_slicing(self):
ts = self.series[:2]
assert isinstance(ts, self.series.__class__)
def test_row_indexing(self):
        assert self.series[0][self.time_attr] == Time('2015-01-21T12:30:32')
        assert self.series[self.time_attr][0] == Time('2015-01-21T12:30:32')
def test_column_indexing(self):
assert_equal(self.series['a'], [1, 2, 11])
def test_column_slicing_notime(self):
tab = self.series['a', 'b']
assert not isinstance(tab, self.series.__class__)
assert isinstance(tab, QTable)
def test_add_column(self):
self.series['d'] = [1, 2, 3]
def test_add_row(self):
self.series.add_row(self._row)
def test_set_unit(self):
self.series['d'] = [1, 2, 3]
self.series['d'].unit = 's'
def test_replace_column(self):
self.series.replace_column('c', [1, 3, 4])
def test_required_after_stacking(self):
# When stacking, we have to temporarily relax the checking of the
# columns in the time series, but we need to make sure that the
# checking works again afterwards
ts = vstack([self.series, self.series])
with pytest.raises(ValueError) as exc:
ts.remove_columns(ts.colnames)
assert 'TimeSeries object is invalid' in exc.value.args[0]
def test_join(self):
ts_other = self.series.copy()
ts_other.add_row(self._row)
ts_other['d'] = [11, 22, 33, 44]
ts_other.remove_columns(['a', 'b'])
ts = join(self.series, ts_other)
assert len(ts) == len(self.series)
ts = join(self.series, ts_other, join_type='outer')
assert len(ts) == len(ts_other)
class TestTimeSeries(CommonTimeSeriesTests):
_row = {'time': '2016-03-23T12:30:40', 'a': 1., 'b': 2, 'c': 'a'}
def setup_method(self, method):
self.series = TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE)
self.time_attr = 'time'
def test_column_slicing(self):
ts = self.series['time', 'a']
assert isinstance(ts, TimeSeries)
class TestBinnedTimeSeries(CommonTimeSeriesTests):
_row = {'time_bin_start': '2016-03-23T12:30:40',
'time_bin_size': 2 * u.s, 'a': 1., 'b': 2, 'c': 'a'}
def setup_method(self, method):
self.series = BinnedTimeSeries(time_bin_start=INPUT_TIME,
time_bin_size=3 * u.s,
data=PLAIN_TABLE)
self.time_attr = 'time_bin_start'
def test_column_slicing(self):
ts = self.series['time_bin_start', 'time_bin_size', 'a']
assert isinstance(ts, BinnedTimeSeries)
|
2e0019f19e1d6989201ecbb3a30483766bd4adfffcfad6af05f1343eb9f47f51 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy import units as u
from astropy.time import Time
from astropy.utils.exceptions import AstropyUserWarning
from astropy.timeseries.sampled import TimeSeries
from astropy.timeseries.downsample import aggregate_downsample, reduceat
INPUT_TIME = Time(['2016-03-22T12:30:31', '2016-03-22T12:30:32',
'2016-03-22T12:30:33', '2016-03-22T12:30:34',
'2016-03-22T12:30:35'])
def test_reduceat():
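    # np.add.reduceat(a, idx) reduces a[idx[i]:idx[i+1]] for each i, except
    # that when idx[i] >= idx[i+1] the result is just a[idx[i]], so the
    # interleaved index list below alternates 4-element slices and singletons.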
add_output = np.add.reduceat(np.arange(8),[0, 4, 1, 5, 2, 6, 3, 7])
# Similar to np.add for an array input.
sum_output = reduceat(np.arange(8), [0, 4, 1, 5, 2, 6, 3, 7], np.sum)
assert_equal(sum_output, add_output)
mean_output = reduceat(np.arange(8), np.arange(8)[::2], np.mean)
assert_equal(mean_output, np.array([0.5, 2.5, 4.5, 6.5]))
nanmean_output = reduceat(np.arange(8), [0, 4, 1, 5, 2, 6, 3, 7], np.mean)
assert_equal(nanmean_output, np.array([1.5, 4, 2.5, 5, 3.5, 6, 4.5, 7.]))
assert_equal(reduceat(np.arange(8), np.arange(8)[::2], np.mean),
reduceat(np.arange(8), np.arange(8)[::2], np.nanmean))
def test_timeseries_invalid():
with pytest.raises(TypeError) as exc:
aggregate_downsample(None)
assert exc.value.args[0] == ("time_series should be a TimeSeries")
def test_time_bin_invalid():
# Make sure to raise the right exception when time_bin_* is passed incorrectly.
with pytest.raises(TypeError, match=r"'time_bin_size' should be a Quantity or a TimeDelta"):
aggregate_downsample(TimeSeries(), time_bin_size=1)
def test_binning_arg_invalid():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=['a'])
with pytest.raises(TypeError, match=r"With single 'time_bin_start' either 'n_bins', "
"'time_bin_size' or time_bin_end' must be provided"):
aggregate_downsample(ts)
def test_time_bin_conversion():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=['a'])
# Make sure time_bin_start and time_bin_end are properly converted to Time
down_start = aggregate_downsample(ts, time_bin_start=['2016-03-22T12:30:31'],
time_bin_size=[1]*u.s)
assert_equal(down_start.time_bin_start.isot, ['2016-03-22T12:30:31.000'])
down_end = aggregate_downsample(ts, time_bin_start=['2016-03-22T12:30:31', '2016-03-22T12:30:33'],
time_bin_end='2016-03-22T12:30:34')
assert_equal(down_end.time_bin_end.isot, ['2016-03-22T12:30:33.000', '2016-03-22T12:30:34.000'])
def test_time_bin_end_auto():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=['a'])
# Interpret `time_bin_end` as the end of timeseries when `time_bin_start` is
# an array and `time_bin_size` is not provided
down_auto_end = aggregate_downsample(ts, time_bin_start=['2016-03-22T12:30:31', '2016-03-22T12:30:33'])
assert_equal(down_auto_end.time_bin_end.isot, ['2016-03-22T12:30:33.000', '2016-03-22T12:30:35.000'])
def test_time_bin_start_array():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=['a'])
# When `time_bin_end` is an array and `time_bin_start` is not provided, `time_bin_start` is converted
    # to an array with its first element set to the start of the timeseries and the rest populated
    # using `time_bin_end`. This case is separately tested since `BinnedTimeSeries` allows `time_bin_end` to
# be an array only if `time_bin_start` is an array.
down_start_array = aggregate_downsample(ts, time_bin_end=['2016-03-22T12:30:33', '2016-03-22T12:30:35'])
assert_equal(down_start_array.time_bin_start.isot, ['2016-03-22T12:30:31.000', '2016-03-22T12:30:33.000'])
def test_nbins():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=['a'])
# n_bins should default to the number needed to fit all the original points
down_nbins = aggregate_downsample(ts, n_bins=2)
assert_equal(down_nbins.time_bin_start.isot, ['2016-03-22T12:30:31.000', '2016-03-22T12:30:33.000'])
# Regression test for #12527: ignore `n_bins` if `time_bin_start` is an array
n_times = len(INPUT_TIME)
for n_bins in [0, n_times - 1, n_times, n_times + 1]:
down_nbins = aggregate_downsample(ts, time_bin_start=INPUT_TIME, n_bins=n_bins)
assert len(down_nbins) == n_times
def test_downsample():
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=['a'])
ts_units = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5] * u.count], names=['a'])
    # Avoid precision problems with floating-point comparisons on 32-bit platforms
if sys.maxsize > 2**32:
# 64 bit
time_bin_incr = 1 * u.s
time_bin_start = None
else:
# 32 bit
time_bin_incr = (1 - 1e-6) * u.s
time_bin_start = ts.time[0] - 1 * u.ns
down_1 = aggregate_downsample(ts, time_bin_size=time_bin_incr, time_bin_start=time_bin_start)
    assert np.all(u.isclose(down_1.time_bin_size, [1, 1, 1, 1, 1]*time_bin_incr))
assert_equal(down_1.time_bin_start.isot, Time(['2016-03-22T12:30:31.000', '2016-03-22T12:30:32.000',
'2016-03-22T12:30:33.000', '2016-03-22T12:30:34.000',
'2016-03-22T12:30:35.000']))
assert_equal(down_1["a"].data.data, np.array([1, 2, 3, 4, 5]))
down_2 = aggregate_downsample(ts, time_bin_size=2*time_bin_incr, time_bin_start=time_bin_start)
    assert np.all(u.isclose(down_2.time_bin_size, [2, 2, 2]*time_bin_incr))
assert_equal(down_2.time_bin_start.isot, Time(['2016-03-22T12:30:31.000', '2016-03-22T12:30:33.000',
'2016-03-22T12:30:35.000']))
assert_equal(down_2["a"].data.data, np.array([1, 3, 5]))
down_3 = aggregate_downsample(ts, time_bin_size=3*time_bin_incr, time_bin_start=time_bin_start)
    assert np.all(u.isclose(down_3.time_bin_size, [3, 3]*time_bin_incr))
assert_equal(down_3.time_bin_start.isot, Time(['2016-03-22T12:30:31.000', '2016-03-22T12:30:34.000']))
assert_equal(down_3["a"].data.data, np.array([2, 4]))
down_4 = aggregate_downsample(ts, time_bin_size=4*time_bin_incr, time_bin_start=time_bin_start)
    assert np.all(u.isclose(down_4.time_bin_size, [4, 4]*time_bin_incr))
assert_equal(down_4.time_bin_start.isot, Time(['2016-03-22T12:30:31.000', '2016-03-22T12:30:35.000']))
assert_equal(down_4["a"].data.data, np.array([2, 5]))
down_units = aggregate_downsample(ts_units, time_bin_size=4*time_bin_incr, time_bin_start=time_bin_start)
    assert np.all(u.isclose(down_units.time_bin_size, [4, 4]*time_bin_incr))
assert_equal(down_units.time_bin_start.isot, Time(['2016-03-22T12:30:31.000', '2016-03-22T12:30:35.000']))
assert down_units["a"].unit.name == 'ct'
assert_equal(down_units["a"].data, np.array([2.5, 5.0]))
# Contiguous bins with uneven bin sizes: `time_bin_size` is an array
down_uneven_bins = aggregate_downsample(ts, time_bin_size=[2, 1, 1]*time_bin_incr,
time_bin_start=time_bin_start)
    assert np.all(u.isclose(down_uneven_bins.time_bin_size, [2, 1, 1]*time_bin_incr))
assert_equal(down_uneven_bins.time_bin_start.isot, Time(['2016-03-22T12:30:31.000',
'2016-03-22T12:30:33.000',
'2016-03-22T12:30:34.000']))
assert_equal(down_uneven_bins["a"].data.data, np.array([1, 3, 4]))
    # Non-contiguous bins with even bin sizes: `time_bin_start` and `time_bin_end` are both arrays
down_time_array = aggregate_downsample(ts, time_bin_start=Time(['2016-03-22T12:30:31.000',
'2016-03-22T12:30:34.000']),
time_bin_end=Time(['2016-03-22T12:30:32.000',
'2016-03-22T12:30:35.000']))
    assert np.all(u.isclose(down_time_array.time_bin_size, [1, 1]*u.second))
assert_equal(down_time_array.time_bin_start.isot, Time(['2016-03-22T12:30:31.000',
'2016-03-22T12:30:34.000']))
assert_equal(down_time_array["a"].data.data, np.array([1, 4]))
# Overlapping bins
with pytest.warns(AstropyUserWarning, match="Overlapping bins should be avoided since they "
"can lead to double-counting of data during binning."):
down_overlap_bins = aggregate_downsample(ts, time_bin_start=Time(['2016-03-22T12:30:31.000',
'2016-03-22T12:30:33.000']),
time_bin_end=Time(['2016-03-22T12:30:34',
'2016-03-22T12:30:36.000']))
assert_equal(down_overlap_bins["a"].data, np.array([2, 5]))
@pytest.mark.parametrize("time, time_bin_start, time_bin_end",
[(INPUT_TIME[:2], INPUT_TIME[2:], None),
(INPUT_TIME[3:], INPUT_TIME[:2], INPUT_TIME[1:3]),
(INPUT_TIME[[0]], INPUT_TIME[:2], None),
(INPUT_TIME[[0]], INPUT_TIME[::2], None)])
def test_downsample_edge_cases(time, time_bin_start, time_bin_end):
"""Regression test for #12527: allow downsampling even if all bins fall
before or beyond the time span of the data."""
ts = TimeSeries(time=time, data=[np.ones(len(time))], names=['a'])
down = aggregate_downsample(ts, time_bin_start=time_bin_start, time_bin_end=time_bin_end)
assert len(down) == len(time_bin_start)
assert all(down['time_bin_size'] >= 0) # bin lengths shall never be negative
if ts.time.min() < time_bin_start[0] or time_bin_end is not None:
assert down['a'].mask.all() # all bins placed *beyond* the time span of the data
elif ts.time.min() < time_bin_start[1]:
assert down['a'][0] == ts['a'][0] # single-valued time series falls in *first* bin
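# End-to-end usage sketch (illustrative only; ``np.nanmedian`` is an arbitrary
# choice here, assuming the ``aggregate_func`` keyword accepts any reduction):
#
#     ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4, 5]], names=['a'])
#     down = aggregate_downsample(ts, n_bins=2, aggregate_func=np.nanmedian)
#     # -> a BinnedTimeSeries with two bins and the per-bin median of 'a'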
|
d46a9206fa8d71d2474628446fd6fdb88c0aa17dc94fe102d41c1864cca1968c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import datetime
import pytest
from numpy.testing import assert_equal, assert_allclose
from astropy.table import Table, Column
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.units import Quantity
from astropy.utils.data import get_pkg_data_filename
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.timeseries.sampled import TimeSeries
INPUT_TIME = Time(['2016-03-22T12:30:31',
'2015-01-21T12:30:32',
'2016-03-22T12:30:40'])
PLAIN_TABLE = Table([[1, 2, 11], [3, 4, 1], [1, 1, 1]], names=['a', 'b', 'c'])
CSV_FILE = get_pkg_data_filename('data/sampled.csv')
def test_empty_initialization():
ts = TimeSeries()
ts['time'] = Time([50001, 50002, 50003], format='mjd')
def test_empty_initialization_invalid():
# Make sure things crash when the first column added is not a time column
ts = TimeSeries()
with pytest.raises(ValueError) as exc:
ts['flux'] = [1, 2, 3]
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'flux'")
def test_initialize_only_time():
ts = TimeSeries(time=INPUT_TIME)
assert ts['time'] is ts.time
# NOTE: the object in the table is a copy
assert_equal(ts.time.isot, INPUT_TIME.isot)
def test_initialization_with_data():
ts = TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert_equal(ts.time.isot, INPUT_TIME.isot)
assert_equal(ts['a'], [10, 2, 3])
assert_equal(ts['b'], [4, 5, 6])
def test_initialize_only_data():
with pytest.raises(TypeError) as exc:
TimeSeries(data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "Either 'time' or 'time_start' should be specified"
def test_initialization_with_table():
ts = TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE)
assert ts.colnames == ['time', 'a', 'b', 'c']
def test_initialization_with_time_delta():
ts = TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
time_delta=TimeDelta(3, format='sec'),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert_equal(ts.time.isot, ['2018-07-01T10:10:10.000',
'2018-07-01T10:10:13.000',
'2018-07-01T10:10:16.000'])
def test_initialization_missing_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "'time' is scalar, so 'time_delta' is required"
def test_initialization_invalid_time_and_time_start():
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, time_start=datetime(2018, 7, 1, 10, 10, 10),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "Cannot specify both 'time' and 'time_start'"
def test_initialization_invalid_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
time_delta=[1, 4, 3],
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "'time_delta' should be a Quantity or a TimeDelta"
def test_initialization_with_time_in_data():
data = PLAIN_TABLE.copy()
data['time'] = INPUT_TIME
ts1 = TimeSeries(data=data)
assert set(ts1.colnames) == set(['time', 'a', 'b', 'c'])
assert all(ts1.time == INPUT_TIME)
ts2 = TimeSeries(data=[[10, 2, 3], INPUT_TIME], names=['a', 'time'])
assert set(ts2.colnames) == set(['time', 'a'])
assert all(ts2.time == INPUT_TIME)
with pytest.raises(TypeError) as exc:
# Don't allow ambiguous cases of passing multiple 'time' columns
TimeSeries(data=data, time=INPUT_TIME)
assert exc.value.args[0] == "'time' has been given both in the table and as a keyword argument"
with pytest.raises(TypeError) as exc:
# 'time' is a protected name, don't allow ambiguous cases
TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], INPUT_TIME], names=['a', 'time'])
assert exc.value.args[0] == "'time' has been given both in the table and as a keyword argument"
def test_initialization_n_samples():
# Make sure things crash with incorrect n_samples
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE, n_samples=1000)
assert exc.value.args[0] == ("'n_samples' has been given both and it is not the "
"same length as the input data.")
def test_initialization_length_mismatch():
with pytest.raises(ValueError) as exc:
TimeSeries(time=INPUT_TIME, data=[[10, 2], [4, 5]], names=['a', 'b'])
assert exc.value.args[0] == "Length of 'time' (3) should match data length (2)"
def test_initialization_invalid_both_time_and_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, time_delta=TimeDelta(3, format='sec'))
assert exc.value.args[0] == ("'time_delta' should not be specified since "
"'time' is an array")
def test_fold():
times = Time([1, 2, 3, 8, 9, 12], format='unix')
ts = TimeSeries(time=times)
ts['flux'] = [1, 4, 4, 3, 2, 3]
# Try without epoch time, as it should default to the first time and
# wrapping at half the period.
tsf = ts.fold(period=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0, 1, -1.2, 0.6, -1.6, 1.4], rtol=1e-6)
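    # Worked arithmetic for the assertion above (illustrative): relative to
    # the default epoch (the first time) the times are [0, 1, 2, 7, 8, 11];
    # modulo the 3.2 s period that is [0, 1, 2, 0.6, 1.6, 1.4], and wrapping
    # into [-1.6, 1.6) (half a period) maps 2 -> -1.2 and 1.6 -> -1.6.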
# Try with epoch time
tsf = ts.fold(period=3.2 * u.s, epoch_time=Time(1.6, format='unix'))
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [-0.6, 0.4, 1.4, 0.0, 1.0, 0.8], rtol=1e-6, atol=1e-6)
# Now with wrap_phase set to the full period
tsf = ts.fold(period=3.2 * u.s, wrap_phase=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0, 1, 2, 0.6, 1.6, 1.4], rtol=1e-6)
# Now set epoch_phase to be 1/4 of the way through the phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.8 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0.8, -1.4, -0.4, 1.4, -0.8, -1.0], rtol=1e-6)
# And combining epoch_phase and wrap_phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.8 * u.s, wrap_phase=3.2 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0.8, 1.8, 2.8, 1.4, 2.4, 2.2], rtol=1e-6)
# Now repeat the above tests but with normalization applied
# Try without epoch time, as it should default to the first time and
# wrapping at half the period.
tsf = ts.fold(period=3.2 * u.s, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0, 1/3.2, -1.2/3.2, 0.6/3.2, -1.6/3.2, 1.4/3.2],
rtol=1e-6)
# Try with epoch time
tsf = ts.fold(period=3.2 * u.s, epoch_time=Time(1.6, format='unix'),
normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[-0.6/3.2, 0.4/3.2, 1.4/3.2, 0.0/3.2, 1.0/3.2, 0.8/3.2],
rtol=1e-6, atol=1e-6)
# Now with wrap_phase set to the full period
tsf = ts.fold(period=3.2 * u.s, wrap_phase=1, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0, 1/3.2, 2/3.2, 0.6/3.2, 1.6/3.2, 1.4/3.2],
rtol=1e-6)
# Now set epoch_phase to be 1/4 of the way through the phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.25, normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0.8/3.2, -1.4/3.2, -0.4/3.2, 1.4/3.2, -0.8/3.2, -1.0/3.2],
rtol=1e-6)
# And combining epoch_phase and wrap_phase
tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.25, wrap_phase=1,
normalize_phase=True)
assert isinstance(tsf.time, Quantity)
assert_allclose(tsf.time.to_value(u.one),
[0.8/3.2, 1.8/3.2, 2.8/3.2, 1.4/3.2, 2.4/3.2, 2.2/3.2],
rtol=1e-6)
def test_fold_invalid_options():
times = Time([1, 2, 3, 8, 9, 12], format='unix')
ts = TimeSeries(time=times)
ts['flux'] = [1, 4, 4, 3, 2, 3]
with pytest.raises(u.UnitsError,
match='period should be a Quantity in units of time'):
ts.fold(period=3.2)
with pytest.raises(u.UnitsError,
match='period should be a Quantity in units of time'):
ts.fold(period=3.2 * u.m)
with pytest.raises(u.UnitsError,
match='epoch_phase should be a Quantity in units of '
'time when normalize_phase=False'):
ts.fold(period=3.2 * u.s, epoch_phase=0.2)
with pytest.raises(u.UnitsError,
match='epoch_phase should be a dimensionless Quantity '
'or a float when normalize_phase=True'):
ts.fold(period=3.2 * u.s, epoch_phase=0.2 * u.s, normalize_phase=True)
with pytest.raises(u.UnitsError,
match='wrap_phase should be a Quantity in units of '
'time when normalize_phase=False'):
ts.fold(period=3.2 * u.s, wrap_phase=0.2)
with pytest.raises(u.UnitsError,
match='wrap_phase should be dimensionless when '
'normalize_phase=True'):
ts.fold(period=3.2 * u.s, wrap_phase=0.2 * u.s, normalize_phase=True)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and the period'):
ts.fold(period=3.2 * u.s, wrap_phase=-0.1 * u.s)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and the period'):
ts.fold(period=3.2 * u.s, wrap_phase=-4.2 * u.s)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and 1'):
ts.fold(period=3.2 * u.s, wrap_phase=-0.1, normalize_phase=True)
with pytest.raises(ValueError,
match='wrap_phase should be between 0 and 1'):
ts.fold(period=3.2 * u.s, wrap_phase=2.2, normalize_phase=True)
def test_pandas():
pandas = pytest.importorskip("pandas")
df1 = pandas.DataFrame()
df1['a'] = [1, 2, 3]
df1.set_index(pandas.DatetimeIndex(INPUT_TIME.datetime64), inplace=True)
ts = TimeSeries.from_pandas(df1)
assert_equal(ts.time.isot, INPUT_TIME.isot)
assert ts.colnames == ['time', 'a']
assert len(ts.indices) == 1
assert (ts.indices['time'].columns[0] == INPUT_TIME).all()
ts_tcb = TimeSeries.from_pandas(df1, time_scale='tcb')
assert ts_tcb.time.scale == 'tcb'
df2 = ts.to_pandas()
assert (df2.index.values == pandas.Index(INPUT_TIME.datetime64).values).all()
assert df2.columns == pandas.Index(['a'])
assert (df1['a'] == df2['a']).all()
with pytest.raises(TypeError) as exc:
TimeSeries.from_pandas(None)
assert exc.value.args[0] == 'Input should be a pandas DataFrame'
df4 = pandas.DataFrame()
df4['a'] = [1, 2, 3]
with pytest.raises(TypeError) as exc:
TimeSeries.from_pandas(df4)
assert exc.value.args[0] == 'DataFrame does not have a DatetimeIndex'
def test_read_time_missing():
with pytest.raises(ValueError) as exc:
TimeSeries.read(CSV_FILE, format='csv')
assert exc.value.args[0] == '``time_column`` should be provided since the default Table readers are being used.'
def test_read_time_wrong():
with pytest.raises(ValueError) as exc:
TimeSeries.read(CSV_FILE, time_column='abc', format='csv')
assert exc.value.args[0] == "Time column 'abc' not found in the input data."
def test_read():
timeseries = TimeSeries.read(CSV_FILE, time_column='Date', format='csv')
assert timeseries.colnames == ['time', 'A', 'B', 'C', 'D', 'E', 'F', 'G']
assert len(timeseries) == 11
assert timeseries['time'].format == 'iso'
assert timeseries['A'].sum() == 266.5
@pytest.mark.remote_data(source='astropy')
def test_kepler_astropy():
from astropy.units import UnitsWarning
filename = get_pkg_data_filename('timeseries/kplr010666592-2009131110544_slc.fits')
with pytest.warns(UnitsWarning):
timeseries = TimeSeries.read(filename, format='kepler.fits')
assert timeseries["time"].format == 'isot'
assert timeseries["time"].scale == 'tdb'
assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
assert len(timeseries) == 14280
assert len(timeseries.columns) == 20
@pytest.mark.remote_data(source='astropy')
def test_tess_astropy():
filename = get_pkg_data_filename('timeseries/hlsp_tess-data-alerts_tess_phot_00025155310-s01_tess_v1_lc.fits')
with pytest.warns(UserWarning, match='Ignoring 815 rows with NaN times'):
timeseries = TimeSeries.read(filename, format='tess.fits')
assert timeseries["time"].format == 'isot'
assert timeseries["time"].scale == 'tdb'
assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
assert len(timeseries) == 19261
assert len(timeseries.columns) == 20
def test_required_columns():
# Test the machinery that makes sure that the required columns are present
ts = TimeSeries(time=INPUT_TIME,
data=[[10, 2, 3], [4, 5, 6]],
names=['a', 'b'])
# In the examples below, the operation (e.g. remove_column) is actually
# carried out before the checks are made, so we need to use copy() so that
# we don't change the main version of the time series.
# Make sure copy works fine
ts.copy()
with pytest.raises(ValueError) as exc:
ts.copy().add_column(Column([3, 4, 5], name='c'), index=0)
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'c'")
with pytest.raises(ValueError) as exc:
ts.copy().add_columns([Column([3, 4, 5], name='d'),
Column([3, 4, 5], name='e')], indexes=[0, 1])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'd'")
with pytest.raises(ValueError) as exc:
ts.copy().keep_columns(['a', 'b'])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'a'")
with pytest.raises(ValueError) as exc:
ts.copy().remove_column('time')
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'a'")
with pytest.raises(ValueError) as exc:
ts.copy().remove_columns(['time', 'a'])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'b'")
with pytest.raises(ValueError) as exc:
ts.copy().rename_column('time', 'banana')
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'banana'")
@pytest.mark.parametrize('cls', [BoxLeastSquares, LombScargle])
def test_periodogram(cls):
# Note that we don't need to check the actual results from the periodogram
# classes here since these are tested extensively in
# astropy.timeseries.periodograms.
ts = TimeSeries(time=INPUT_TIME,
data=[[10, 2, 3], [4, 5, 6]],
names=['a', 'b'])
p1 = cls.from_timeseries(ts, 'a')
assert isinstance(p1, cls)
assert_allclose(p1.t.jd, ts.time.jd)
assert_equal(p1.y, ts['a'])
assert p1.dy is None
p2 = cls.from_timeseries(ts, 'a', uncertainty='b')
assert_quantity_allclose(p2.dy, ts['b'])
p3 = cls.from_timeseries(ts, 'a', uncertainty=0.1)
assert_allclose(p3.dy, 0.1)
|
104e1b83a82d254f6d9397d8601341a5aa655fa4dd2b7d413194c1bc4c2340a2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_equal, assert_allclose
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.utils.data import get_pkg_data_filename
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.timeseries.binned import BinnedTimeSeries
from astropy.tests.helper import assert_quantity_allclose
CSV_FILE = get_pkg_data_filename('data/binned.csv')
def test_empty_initialization():
ts = BinnedTimeSeries()
ts['time_bin_start'] = Time([1, 2, 3], format='mjd')
def test_empty_initialization_invalid():
# Make sure things crash when the first column added is not a time column
ts = BinnedTimeSeries()
with pytest.raises(ValueError) as exc:
ts['flux'] = [1, 2, 3]
assert exc.value.args[0] == ("BinnedTimeSeries object is invalid - expected "
"'time_bin_start' as the first column but found 'flux'")
def test_initialization_time_bin_invalid():
# Make sure things crash when time_bin_* is passed incorrectly.
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data=[[1, 4, 3]])
assert exc.value.args[0] == ("'time_bin_start' has not been specified")
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31', data=[[1, 4, 3]])
assert exc.value.args[0] == ("Either 'time_bin_size' or 'time_bin_end' should be specified")
def test_initialization_time_bin_both():
# Make sure things crash when time_bin_* is passed twice.
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time_bin_start": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31")
assert exc.value.args[0] == ("'time_bin_start' has been given both in the table "
"and as a keyword argument")
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time_bin_size": ["2016-03-22T12:30:31"]},
time_bin_size=[1]*u.s)
assert exc.value.args[0] == ("'time_bin_size' has been given both in the table "
"and as a keyword argument")
def test_initialization_time_bin_size():
# Make sure things crash when time_bin_size has no units
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31",
time_bin_size=1)
assert exc.value.args[0] == ("'time_bin_size' should be a Quantity or a TimeDelta")
# TimeDelta for time_bin_size
ts = BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31",
time_bin_size=TimeDelta(1, format="jd"))
assert isinstance(ts.time_bin_size, u.quantity.Quantity)
def test_initialization_time_bin_start_scalar():
# Make sure things crash when time_bin_start is a scalar with no time_bin_size
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=Time(1, format='mjd'),
time_bin_end=Time(1, format='mjd'))
assert exc.value.args[0] == ("'time_bin_start' is scalar, so 'time_bin_size' is required")
def test_initialization_n_bins_invalid_arguments():
# Make sure an exception is raised when n_bins is passed as an argument while
# any of the parameters 'time_bin_start' or 'time_bin_end' is not scalar.
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(time_bin_start=Time([1, 2, 3], format='cxcsec'),
time_bin_size=1*u.s,
n_bins=10)
assert exc.value.args[0] == ("'n_bins' cannot be specified if 'time_bin_start' or "
"'time_bin_size' are not scalar'")
def test_initialization_n_bins():
# Make sure things crash with incorrect n_bins
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=Time(1, format='mjd'),
time_bin_size=1*u.s,
time_bin_end=Time(1, format='mjd'),
n_bins=10)
assert exc.value.args[0] == ("'n_bins' has been given and it is not the "
"same length as the input data.")
def test_initialization_non_scalar_time():
# Make sure things crash with incorrect size of time_bin_start
with pytest.raises(ValueError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=["2016-03-22T12:30:31", "2016-03-22T12:30:32"],
time_bin_size=1*u.s,
time_bin_end=Time(1, format='mjd'))
assert exc.value.args[0] == ("Length of 'time_bin_start' (2) should match table length (1)")
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=["2016-03-22T12:30:31"],
time_bin_size=None,
time_bin_end=None)
assert exc.value.args[0] == ("Either 'time_bin_size' or 'time_bin_end' should be specified")
def test_even_contiguous():
# Initialize a ``BinnedTimeSeries`` with even contiguous bins by specifying
# the bin width:
ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
time_bin_size=3 * u.s, data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:34.000',
'2016-03-22T12:30:37.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:32.500',
'2016-03-22T12:30:35.500',
'2016-03-22T12:30:38.500'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:34.000',
'2016-03-22T12:30:37.000',
'2016-03-22T12:30:40.000'])
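# Worked arithmetic (illustrative): with a scalar start and a 3 s bin size,
# the n-th bin starts at start + n * 3 s (31, 34, 37) and its center is the
# start plus half the size (32.5, 35.5, 38.5).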
def test_uneven_contiguous():
# Initialize a ``BinnedTimeSeries`` with uneven contiguous bins by giving an
# end time:
ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
'2016-03-22T12:30:32',
'2016-03-22T12:30:40'],
time_bin_end='2016-03-22T12:30:55',
data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:32.000',
'2016-03-22T12:30:40.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:31.500',
'2016-03-22T12:30:36.000',
'2016-03-22T12:30:47.500'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:32.000',
'2016-03-22T12:30:40.000',
'2016-03-22T12:30:55.000'])
def test_uneven_non_contiguous():
# Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins with
# lists of start times, bin sizes and data:
ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
'2016-03-22T12:30:38',
'2016-03-22T12:34:40'],
time_bin_size=[5, 100, 2]*u.s,
data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:38.000',
'2016-03-22T12:34:40.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:33.500',
'2016-03-22T12:31:28.000',
'2016-03-22T12:34:41.000'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:36.000',
'2016-03-22T12:32:18.000',
'2016-03-22T12:34:42.000'])
def test_uneven_non_contiguous_full():
# Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins by
# specifying the start and end times for the bins:
ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
'2016-03-22T12:30:33',
'2016-03-22T12:30:40'],
time_bin_end=['2016-03-22T12:30:32',
'2016-03-22T12:30:35',
'2016-03-22T12:30:41'],
data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:33.000',
'2016-03-22T12:30:40.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:31.500',
'2016-03-22T12:30:34.000',
'2016-03-22T12:30:40.500'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:32.000',
'2016-03-22T12:30:35.000',
'2016-03-22T12:30:41.000'])
def test_read_empty():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, format='csv')
assert exc.value.args[0] == '``time_bin_start_column`` should be provided since the default Table readers are being used.'
def test_read_no_size_end():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', format='csv')
assert exc.value.args[0] == 'Either `time_bin_end_column` or `time_bin_size_column` should be provided.'
def test_read_both_extra_bins():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_end_column='END', time_bin_size_column='bin_size', format='csv')
assert exc.value.args[0] == "Cannot specify both `time_bin_end_column` and `time_bin_size_column`."
def test_read_size_no_unit():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column='bin_size', format='csv')
assert exc.value.args[0] == "The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``."
def test_read_start_time_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='abc', time_bin_size_column='bin_size', time_bin_size_unit=u.second, format='csv')
assert exc.value.args[0] == "Bin start time column 'abc' not found in the input data."
def test_read_end_time_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_end_column="missing", format='csv')
assert exc.value.args[0] == "Bin end time column 'missing' not found in the input data."
def test_read_size_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column="missing", time_bin_size_unit=u.second, format='csv')
assert exc.value.args[0] == "Bin size column 'missing' not found in the input data."
def test_read_time_unit_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column="bin_size", format='csv')
assert exc.value.args[0] == "The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``."
def test_read():
timeseries = BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
time_bin_end_column='time_end', format='csv')
assert timeseries.colnames == ['time_bin_start', 'time_bin_size', 'bin_size', 'A', 'B', 'C', 'D', 'E', 'F']
assert len(timeseries) == 10
assert timeseries['B'].sum() == 1151.54
timeseries = BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
time_bin_size_column='bin_size',
time_bin_size_unit=u.second, format='csv')
assert timeseries.colnames == ['time_bin_start', 'time_bin_size', 'time_end', 'A', 'B', 'C', 'D', 'E', 'F']
assert len(timeseries) == 10
assert timeseries['B'].sum() == 1151.54
@pytest.mark.parametrize('cls', [BoxLeastSquares, LombScargle])
def test_periodogram(cls):
# Note that we don't need to check the actual results from the periodogram
# classes here since these are tested extensively in
# astropy.timeseries.periodograms.
ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
time_bin_size=3 * u.s, data=[[1, 4, 3], [3, 4, 3]], names=['a', 'b'])
p1 = cls.from_timeseries(ts, 'a')
assert isinstance(p1, cls)
assert_allclose(p1.t.jd, ts.time_bin_center.jd)
assert_equal(p1.y, ts['a'])
assert p1.dy is None
p2 = cls.from_timeseries(ts, 'a', uncertainty='b')
assert_quantity_allclose(p2.dy, ts['b'])
p3 = cls.from_timeseries(ts, 'a', uncertainty=0.1)
assert_allclose(p3.dy, 0.1)
|
75fe47a1c89b41fac43966c638bc6c6b3eba4ec0ade7f165b180857a0ac4f744 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["bls_fast", "bls_slow"]
import numpy as np
from functools import partial
from ._impl import bls_impl
def bls_slow(t, y, ivar, period, duration, oversample, use_likelihood):
"""Compute the periodogram using a brute force reference method
t : array-like
Sequence of observation times.
y : array-like
Sequence of observations associated with times t.
ivar : array-like
The inverse variance of ``y``.
period : array-like
The trial periods where the periodogram should be computed.
duration : array-like
The durations that should be tested.
    oversample : int
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
If true, maximize the log likelihood over phase, duration, and depth.
Returns
-------
power : array-like
The periodogram evaluated at the periods in ``period``.
depth : array-like
The estimated depth of the maximum power model at each period.
depth_err : array-like
The 1-sigma uncertainty on ``depth``.
duration : array-like
The maximum power duration at each period.
transit_time : array-like
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
The log likelihood of the maximum power model.
"""
f = partial(_bls_slow_one, t, y, ivar, duration,
oversample, use_likelihood)
return _apply(f, period)
def bls_fast(t, y, ivar, period, duration, oversample, use_likelihood):
"""Compute the periodogram using an optimized Cython implementation
t : array-like
Sequence of observation times.
y : array-like
Sequence of observations associated with times t.
ivar : array-like
The inverse variance of ``y``.
period : array-like
The trial periods where the periodogram should be computed.
duration : array-like
The durations that should be tested.
    oversample : int
        The resolution of the phase grid in units of durations.
    use_likelihood : bool
If true, maximize the log likelihood over phase, duration, and depth.
Returns
-------
power : array-like
The periodogram evaluated at the periods in ``period``.
depth : array-like
The estimated depth of the maximum power model at each period.
depth_err : array-like
The 1-sigma uncertainty on ``depth``.
duration : array-like
The maximum power duration at each period.
transit_time : array-like
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like
The log likelihood of the maximum power model.
"""
return bls_impl(
t, y, ivar, period, duration, oversample, use_likelihood
)
def _bls_slow_one(t, y, ivar, duration, oversample, use_likelihood, period):
"""A private function to compute the brute force periodogram result"""
best = (-np.inf, None)
hp = 0.5*period
min_t = np.min(t)
for dur in duration:
# Compute the phase grid (this is set by the duration and oversample).
d_phase = dur / oversample
phase = np.arange(0, period+d_phase, d_phase)
for t0 in phase:
# Figure out which data points are in and out of transit.
m_in = np.abs((t-min_t-t0+hp) % period - hp) < 0.5*dur
m_out = ~m_in
# Compute the estimates of the in and out-of-transit flux.
ivar_in = np.sum(ivar[m_in])
ivar_out = np.sum(ivar[m_out])
y_in = np.sum(y[m_in] * ivar[m_in]) / ivar_in
y_out = np.sum(y[m_out] * ivar[m_out]) / ivar_out
# Use this to compute the best fit depth and uncertainty.
depth = y_out - y_in
depth_err = np.sqrt(1.0 / ivar_in + 1.0 / ivar_out)
snr = depth / depth_err
# Compute the log likelihood of this model.
loglike = -0.5*np.sum((y_in - y[m_in])**2 * ivar[m_in])
loglike += 0.5*np.sum((y_out - y[m_in])**2 * ivar[m_in])
# Choose which objective should be used for the optimization.
if use_likelihood:
objective = loglike
else:
objective = snr
# If this model is better than any before, keep it.
if depth > 0 and objective > best[0]:
best = (
objective,
(objective, depth, depth_err, dur, (t0+min_t) % period,
snr, loglike)
)
return best[1]
def _apply(f, period):
return tuple(map(np.array, zip(*map(f, period))))
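# Conceptual sketch of ``_apply`` (what the one-liner above does):
#
#     rows = [f(p) for p in period]            # one result tuple per period
#     cols = tuple(map(np.array, zip(*rows)))  # one array per output quantity
#     # cols == (power, depth, depth_err, duration, transit_time,
#     #          depth_snr, log_likelihood)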
|
130492dd480faf45f8d1a34f5d127d8ce10cc89c3e4af4aacdc86f8a71ac9259 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"]
import numpy as np
from astropy import units
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms.lombscargle.core import has_units, strip_units
from astropy import units as u
from . import methods
from astropy.timeseries.periodograms.base import BasePeriodogram
def validate_unit_consistency(reference_object, input_object):
if has_units(reference_object):
input_object = units.Quantity(input_object, unit=reference_object.unit)
else:
if has_units(input_object):
input_object = units.Quantity(input_object, unit=units.one)
input_object = input_object.value
return input_object
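# Behaviour sketch for ``validate_unit_consistency`` (illustrative):
#
#     validate_unit_consistency(1.0 * u.day, 3)        # -> Quantity(3., 'd')
#     validate_unit_consistency(1.0, 3 * u.one)        # -> 3 (bare value)
#     validate_unit_consistency(1.0 * u.day, 3 * u.m)  # -> raises UnitsError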
class BoxLeastSquares(BasePeriodogram):
"""Compute the box least squares periodogram
This method is a commonly used tool for discovering transiting exoplanets
or eclipsing binaries in photometric time series datasets. This
implementation is based on the "box least squares (BLS)" method described
in [1]_ and [2]_.
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times ``t``.
dy : float, array-like, or `~astropy.units.Quantity`, optional
Error or sequence of observational errors associated with times ``t``.
Examples
--------
Generate noisy data with a transit:
>>> rand = np.random.default_rng(42)
>>> t = rand.uniform(0, 10, 500)
>>> y = np.ones_like(t)
>>> y[np.abs((t + 1.0)%2.0-1)<0.08] = 1.0 - 0.1
>>> y += 0.01 * rand.standard_normal(len(t))
Compute the transit periodogram on a heuristically determined period grid
and find the period with maximum power:
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16)
>>> results.period[np.argmax(results.power)] # doctest: +FLOAT_CMP
2.000412388152837
Compute the periodogram on a user-specified period grid:
>>> periods = np.linspace(1.9, 2.1, 5)
>>> results = model.power(periods, 0.16)
>>> results.power # doctest: +FLOAT_CMP
array([0.01723948, 0.0643028 , 0.1338783 , 0.09428816, 0.03577543])
If the inputs are AstroPy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.day
>>> y = y * u.dimensionless_unscaled
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16 * u.day)
>>> results.period.unit
Unit("d")
>>> results.power.unit
Unit(dimensionless)
References
----------
.. [1] Kovacs, Zucker, & Mazeh (2002), A&A, 391, 369
(arXiv:astro-ph/0206099)
.. [2] Hartman & Bakos (2016), Astronomy & Computing, 17, 1
(arXiv:1605.06811)
"""
def __init__(self, t, y, dy=None):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
# so can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to('day')
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, (Time, TimeDelta)):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy)
def autoperiod(self, duration,
minimum_period=None, maximum_period=None,
minimum_n_transit=3, frequency_factor=1.0):
"""Determine a suitable grid of periods
This method uses a set of heuristics to select a conservative period
grid that is uniform in frequency. This grid might be too fine for
        some users' needs depending on the precision requirements or the
sampling of the data. The grid can be made coarser by increasing
``frequency_factor``.
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
minimum_period, maximum_period : float or `~astropy.units.Quantity` ['time'], optional
The minimum/maximum periods to search. If not provided, these will
be computed as described in the notes below.
        minimum_n_transit : int, optional
            If ``maximum_period`` is not provided, this is used to compute the
            maximum period to search by asserting that any systems with at
            least ``minimum_n_transit`` transits will be within the range of
            searched periods. Note that this is not the same as requiring that
            ``minimum_n_transit`` transits be detected. The default value is
            ``3``.
frequency_factor : float, optional
A factor to control the frequency spacing as described in the
notes below. The default value is ``1.0``.
Returns
-------
period : array-like or `~astropy.units.Quantity` ['time']
The set of periods computed using these heuristics with the same
units as ``t``.
Notes
-----
The default minimum period is chosen to be twice the maximum duration
because there won't be much sensitivity to periods shorter than that.
        The default maximum period is computed as

        .. code-block:: python

            maximum_period = (max(t) - min(t)) / (minimum_n_transit - 1)

        ensuring that any systems with at least ``minimum_n_transit`` transits
        are within the range of searched periods.
The frequency spacing is given by
.. code-block:: python
df = frequency_factor * min(duration) / (max(t) - min(t))**2
so the grid can be made finer by decreasing ``frequency_factor`` or
coarser by increasing ``frequency_factor``.
"""
duration = self._validate_duration(duration)
baseline = strip_units(self._trel.max() - self._trel.min())
min_duration = strip_units(np.min(duration))
# Estimate the required frequency spacing
# Because of the sparsity of a transit, this must be much finer than
# the frequency resolution for a sinusoidal fit. For a sinusoidal fit,
# df would be 1/baseline (see LombScargle), but here this should be
# scaled proportionally to the duration in units of baseline.
df = frequency_factor * min_duration / baseline**2
# If a minimum period is not provided, choose one that is twice the
# maximum duration because we won't be sensitive to any periods
# shorter than that.
if minimum_period is None:
minimum_period = 2.0 * strip_units(np.max(duration))
else:
minimum_period = validate_unit_consistency(self._trel, minimum_period)
minimum_period = strip_units(minimum_period)
# If no maximum period is provided, choose one by requiring that
# all signals with at least minimum_n_transit should be detectable.
if maximum_period is None:
if minimum_n_transit <= 1:
raise ValueError("minimum_n_transit must be greater than 1")
maximum_period = baseline / (minimum_n_transit-1)
else:
maximum_period = validate_unit_consistency(self._trel, maximum_period)
maximum_period = strip_units(maximum_period)
if maximum_period < minimum_period:
minimum_period, maximum_period = maximum_period, minimum_period
if minimum_period <= 0.0:
raise ValueError("minimum_period must be positive")
# Convert bounds to frequency
minimum_frequency = 1.0/strip_units(maximum_period)
maximum_frequency = 1.0/strip_units(minimum_period)
# Compute the number of frequencies and the frequency grid
nf = 1 + int(np.round((maximum_frequency - minimum_frequency)/df))
return 1.0/(maximum_frequency-df*np.arange(nf)) * self._t_unit()
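    # Worked example of the heuristics above (illustrative): for a 10 day
    # baseline, a single 0.2 day duration and frequency_factor=1,
    # df = 0.2 / 10**2 = 0.002 / day; the defaults give a period range of
    # [0.4, 5] day (frequencies 0.2 to 2.5 / day with minimum_n_transit=3),
    # i.e. nf = 1 + round((2.5 - 0.2) / 0.002) = 1151 trial periods.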
def autopower(self, duration, objective=None, method=None, oversample=10,
minimum_n_transit=3, minimum_period=None,
maximum_period=None, frequency_factor=1.0):
"""Compute the periodogram at set of heuristically determined periods
This method calls :func:`BoxLeastSquares.autoperiod` to determine
the period grid and then :func:`BoxLeastSquares.power` to compute
the periodogram. See those methods for documentation of the arguments.
"""
period = self.autoperiod(duration,
minimum_n_transit=minimum_n_transit,
minimum_period=minimum_period,
maximum_period=maximum_period,
frequency_factor=frequency_factor)
return self.power(period, duration, objective=objective, method=method,
oversample=oversample)
def power(self, period, duration, objective=None, method=None,
oversample=10):
"""Compute the periodogram for a set of periods
Parameters
----------
period : array-like or `~astropy.units.Quantity` ['time']
The periods where the power should be computed
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations to test
objective : {'likelihood', 'snr'}, optional
The scalar that should be optimized to find the best fit phase,
duration, and depth. This can be either ``'likelihood'`` (default)
to optimize the log-likelihood of the model, or ``'snr'`` to
optimize the signal-to-noise with which the transit depth is
measured.
method : {'fast', 'slow'}, optional
The computational method used to compute the periodogram. This is
mainly included for the purposes of testing and most users will
want to use the optimized ``'fast'`` method (default) that is
implemented in Cython. ``'slow'`` is a brute-force method that is
used to test the results of the ``'fast'`` method.
oversample : int, optional
The number of bins per duration that should be used. This sets the
time resolution of the phase fit with larger values of
``oversample`` yielding a finer grid and higher computational cost.
Returns
-------
results : BoxLeastSquaresResults
The periodogram results as a :class:`BoxLeastSquaresResults`
object.
Raises
------
ValueError
If ``oversample`` is not an integer greater than 0 or if
``objective`` or ``method`` are not valid.
"""
period, duration = self._validate_period_and_duration(period, duration)
# Check for absurdities in the ``oversample`` choice
try:
oversample = int(oversample)
except TypeError:
raise ValueError(f"oversample must be an int, got {oversample}")
if oversample < 1:
raise ValueError("oversample must be greater than or equal to 1")
# Select the periodogram objective
if objective is None:
objective = "likelihood"
allowed_objectives = ["snr", "likelihood"]
if objective not in allowed_objectives:
raise ValueError(("Unrecognized method '{0}'\n"
"allowed methods are: {1}")
.format(objective, allowed_objectives))
use_likelihood = (objective == "likelihood")
# Select the computational method
if method is None:
method = "fast"
allowed_methods = ["fast", "slow"]
if method not in allowed_methods:
raise ValueError(("Unrecognized method '{0}'\n"
"allowed methods are: {1}")
.format(method, allowed_methods))
# Format and check the input arrays
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
t_ref = np.min(t)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# Make sure that the period and duration arrays are C-order
period_fmt = np.ascontiguousarray(strip_units(period),
dtype=np.float64)
duration = np.ascontiguousarray(strip_units(duration),
dtype=np.float64)
# Select the correct implementation for the chosen method
if method == "fast":
bls = methods.bls_fast
else:
bls = methods.bls_slow
# Run the implementation
results = bls(
t - t_ref, y - np.median(y), ivar, period_fmt, duration,
oversample, use_likelihood)
return self._format_results(t_ref, objective, period, results)
def _as_relative_time(self, name, times):
"""
Convert the provided times (if absolute) to relative times using the
current _tstart value. If the times provided are relative, they are
returned without conversion (though we still do some checks).
"""
if isinstance(times, TimeDelta):
times = times.to('day')
if self._tstart is None:
if isinstance(times, Time):
raise TypeError('{} was provided as an absolute time but '
'the BoxLeastSquares class was initialized '
'with relative times.'.format(name))
else:
if isinstance(times, Time):
times = (times - self._tstart).to(u.day)
else:
raise TypeError('{} was provided as a relative time but '
'the BoxLeastSquares class was initialized '
'with absolute times.'.format(name))
times = validate_unit_consistency(self._trel, times)
return times
def _as_absolute_time_if_needed(self, name, times):
"""
Convert the provided times to absolute times using the current _tstart
value, if needed.
"""
if self._tstart is not None:
# Some time formats/scales can't represent dates/times too far
# off from the present, so we need to mask values offset by
# more than 100,000 yr (the periodogram algorithm can return
            # transit times of e.g. 1e300 for some periods).
reset = np.abs(times.to_value(u.year)) > 100000
times[reset] = 0
times = self._tstart + times
times[reset] = np.nan
return times
def model(self, t_model, period, duration, transit_time):
"""Compute the transit model at the given period, duration, and phase
Parameters
----------
t_model : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
Times at which to compute the model.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
y_model : array-like or `~astropy.units.Quantity`
The model evaluated at the times ``t_model`` with units of ``y``.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
t_model = strip_units(self._as_relative_time('t_model', t_model))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# Compute the depth
hp = 0.5*period
m_in = np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
m_out = ~m_in
y_in = np.sum(y[m_in] * ivar[m_in]) / np.sum(ivar[m_in])
y_out = np.sum(y[m_out] * ivar[m_out]) / np.sum(ivar[m_out])
# Evaluate the model
y_model = y_out + np.zeros_like(t_model)
m_model = np.abs((t_model-transit_time+hp) % period-hp) < 0.5*duration
y_model[m_model] = y_in
return y_model * self._y_unit()
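    # Typical vetting use (sketch; ``t_fine`` is a hypothetical fine time
    # grid, not defined here):
    #
    #     i = np.argmax(results.power)
    #     y_fit = model.model(t_fine, results.period[i],
    #                         results.duration[i], results.transit_time[i])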
def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# This a helper function that will compute the depth for several
# different hypothesized transit models with different parameters
def _compute_depth(m, y_out=None, var_out=None):
if np.any(m) and (var_out is None or np.isfinite(var_out)):
var_m = 1.0 / np.sum(ivar[m])
y_m = np.sum(y[m] * ivar[m]) * var_m
if y_out is None:
return y_m, var_m
return y_out - y_m, np.sqrt(var_m + var_out)
return 0.0, np.inf
# Compute the depth of the fiducial model and the two models at twice
# the period
hp = 0.5*period
m_in = np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
m_out = ~m_in
m_odd = np.abs((t-transit_time) % (2*period) - period) \
< 0.5*duration
m_even = np.abs((t-transit_time+period) % (2*period) - period) \
< 0.5*duration
y_out, var_out = _compute_depth(m_out)
depth = _compute_depth(m_in, y_out, var_out)
depth_odd = _compute_depth(m_odd, y_out, var_out)
depth_even = _compute_depth(m_even, y_out, var_out)
y_in = y_out - depth[0]
# Compute the depth of the model at a phase of 0.5*period
m_phase = np.abs((t-transit_time) % period - hp) < 0.5*duration
depth_phase = _compute_depth(m_phase,
*_compute_depth((~m_phase) & m_out))
# Compute the depth of a model with a period of 0.5*period
m_half = np.abs((t-transit_time+0.25*period) % (0.5*period)
- 0.25*period) < 0.5*duration
depth_half = _compute_depth(m_half, *_compute_depth(~m_half))
# Compute the number of points in each transit
transit_id = np.round((t[m_in]-transit_time) / period).astype(int)
transit_times = period * np.arange(transit_id.min(),
transit_id.max()+1) + transit_time
unique_ids, unique_counts = np.unique(transit_id,
return_counts=True)
unique_ids -= np.min(transit_id)
transit_id -= np.min(transit_id)
counts = np.zeros(np.max(transit_id) + 1, dtype=int)
counts[unique_ids] = unique_counts
# Compute the per-transit log likelihood
ll = -0.5 * ivar[m_in] * ((y[m_in] - y_in)**2 - (y[m_in] - y_out)**2)
lls = np.zeros(len(counts))
for i in unique_ids:
lls[i] = np.sum(ll[transit_id == i])
full_ll = -0.5*np.sum(ivar[m_in] * (y[m_in] - y_in)**2)
full_ll -= 0.5*np.sum(ivar[m_out] * (y[m_out] - y_out)**2)
# Compute the log likelihood of a sine model
A = np.vstack((
np.sin(2*np.pi*t/period), np.cos(2*np.pi*t/period),
np.ones_like(t)
)).T
w = np.linalg.solve(np.dot(A.T, A * ivar[:, None]),
np.dot(A.T, y * ivar))
mod = np.dot(A, w)
sin_ll = -0.5*np.sum((y-mod)**2*ivar)
# Format the results
y_unit = self._y_unit()
ll_unit = 1
if self.dy is None:
ll_unit = y_unit * y_unit
return dict(
transit_times=self._as_absolute_time_if_needed('transit_times', transit_times * self._t_unit()),
per_transit_count=counts,
per_transit_log_likelihood=lls * ll_unit,
depth=(depth[0] * y_unit, depth[1] * y_unit),
depth_phased=(depth_phase[0] * y_unit, depth_phase[1] * y_unit),
depth_half=(depth_half[0] * y_unit, depth_half[1] * y_unit),
depth_odd=(depth_odd[0] * y_unit, depth_odd[1] * y_unit),
depth_even=(depth_even[0] * y_unit, depth_even[1] * y_unit),
harmonic_amplitude=np.sqrt(np.sum(w[:2]**2)) * y_unit,
harmonic_delta_log_likelihood=(sin_ll - full_ll) * ll_unit,
)
def transit_mask(self, t, period, duration, transit_time):
"""Compute which data points are in transit for a given parameter set
Parameters
----------
        t : array-like or `~astropy.units.Quantity` ['time']
Times where the mask should be evaluated.
period : float or `~astropy.units.Quantity` ['time']
The period of the transits.
duration : float or `~astropy.units.Quantity` ['time']
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
transit_mask : array-like
            A boolean array where ``True`` indicates an in-transit point and
            ``False`` indicates an out-of-transit point.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
t = strip_units(self._as_relative_time('t', t))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
hp = 0.5*period
return np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
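    # Worked example (illustrative): with period=2, duration=0.5 and
    # transit_time=1 (all relative times in the same unit), hp = 1 and the
    # mask reduces to np.abs(t % 2 - 1) < 0.25, i.e. points within a quarter
    # unit of t = 1, 3, 5, ... are flagged as in transit.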
def _validate_inputs(self, t, y, dy):
"""Private method used to check the consistency of the inputs
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times t.
dy : float, array-like, or `~astropy.units.Quantity`
Error or sequence of observational errors associated with times t.
Returns
-------
t, y, dy : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The inputs with consistent shapes and units.
Raises
------
ValueError
If the dimensions are incompatible or if the units of dy cannot be
converted to the units of y.
"""
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if dy is not None:
dy = validate_unit_consistency(y, dy)
return t, y, dy
def _validate_duration(self, duration):
"""Private method used to check a set of test durations
Parameters
----------
duration : float, array-like, or `~astropy.units.Quantity`
The set of durations that will be considered.
Returns
-------
duration : array-like or `~astropy.units.Quantity`
The input reformatted with the correct shape and units.
Raises
------
ValueError
If the units of duration cannot be converted to the units of t.
"""
duration = np.atleast_1d(np.abs(duration))
if duration.ndim != 1 or duration.size == 0:
raise ValueError("duration must be 1-dimensional")
return validate_unit_consistency(self._trel, duration)
def _validate_period_and_duration(self, period, duration):
"""Private method used to check a set of periods and durations
Parameters
----------
period : float, array-like, or `~astropy.units.Quantity` ['time']
The set of test periods.
duration : float, array-like, or `~astropy.units.Quantity` ['time']
The set of durations that will be considered.
Returns
-------
period, duration : array-like or `~astropy.units.Quantity` ['time']
The inputs reformatted with the correct shapes and units.
Raises
------
ValueError
If the units of period or duration cannot be converted to the
units of t.
"""
duration = self._validate_duration(duration)
period = np.atleast_1d(np.abs(period))
if period.ndim != 1 or period.size == 0:
raise ValueError("period must be 1-dimensional")
period = validate_unit_consistency(self._trel, period)
if not np.min(period) > np.max(duration):
raise ValueError("The maximum transit duration must be shorter "
"than the minimum period")
return period, duration
def _format_results(self, t_ref, objective, period, results):
"""A private method used to wrap and add units to the periodogram
Parameters
----------
t_ref : float
The minimum time in the time series (a reference time).
objective : str
The name of the objective used in the optimization.
period : array-like or `~astropy.units.Quantity` ['time']
The set of trial periods.
results : tuple
The output of one of the periodogram implementations.
"""
(power, depth, depth_err, duration, transit_time, depth_snr,
log_likelihood) = results
transit_time += t_ref
if has_units(self._trel):
transit_time = units.Quantity(transit_time, unit=self._trel.unit)
transit_time = self._as_absolute_time_if_needed('transit_time', transit_time)
duration = units.Quantity(duration, unit=self._trel.unit)
if has_units(self.y):
depth = units.Quantity(depth, unit=self.y.unit)
depth_err = units.Quantity(depth_err, unit=self.y.unit)
depth_snr = units.Quantity(depth_snr, unit=units.one)
if self.dy is None:
if objective == "likelihood":
power = units.Quantity(power, unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood,
unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood, unit=units.one)
return BoxLeastSquaresResults(
objective, period, power, depth, depth_err, duration, transit_time,
depth_snr, log_likelihood)
def _t_unit(self):
if has_units(self._trel):
return self._trel.unit
else:
return 1
def _y_unit(self):
if has_units(self.y):
return self.y.unit
else:
return 1
class BoxLeastSquaresResults(dict):
"""The results of a BoxLeastSquares search
Attributes
----------
objective : str
        The scalar quantity that was optimized to find the best-fit phase,
        duration, and depth. See :func:`BoxLeastSquares.power` for more information.
period : array-like or `~astropy.units.Quantity` ['time']
The set of test periods.
power : array-like or `~astropy.units.Quantity`
The periodogram evaluated at the periods in ``period``. If
``objective`` is:
* ``'likelihood'``: the values of ``power`` are the
log likelihood maximized over phase, depth, and duration, or
* ``'snr'``: the values of ``power`` are the signal-to-noise with
which the depth is measured maximized over phase, depth, and
duration.
depth : array-like or `~astropy.units.Quantity`
The estimated depth of the maximum power model at each period.
depth_err : array-like or `~astropy.units.Quantity`
The 1-sigma uncertainty on ``depth``.
duration : array-like or `~astropy.units.Quantity` ['time']
The maximum power duration at each period.
transit_time : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like or `~astropy.units.Quantity`
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like or `~astropy.units.Quantity`
The log likelihood of the maximum power model.
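    Examples
    --------
    Results behave both as a dict and as an object with attributes; a
    minimal sketch with placeholder field values:
    >>> res = BoxLeastSquaresResults(
    ...     "likelihood", [2.0], [1.5], [0.1], [0.01], [0.16], [0.5],
    ...     [10.0], [1.2])
    >>> res.objective
    'likelihood'
    >>> res["objective"]
    'likelihood'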
"""
def __init__(self, *args):
super().__init__(zip(
("objective", "period", "power", "depth", "depth_err",
"duration", "transit_time", "depth_snr", "log_likelihood"),
args
))
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
|
a67a128f1564d09d806fa69b838e384a80851b77a336cb3cfb967407d1f2e106 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Box Least Squares
=================
AstroPy-compatible reference implementation of the transit periodogram used
to discover transiting exoplanets.
"""
__all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"]
from .core import BoxLeastSquares, BoxLeastSquaresResults
|
7e30b42a726b3e407c644a302da76a11ea2a7c019b03920079f85e3f44cc1ef8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from os.path import join
from setuptools import Extension
import numpy
BLS_ROOT = os.path.relpath(os.path.dirname(__file__))
def get_extensions():
ext = Extension(
"astropy.timeseries.periodograms.bls._impl",
sources=[
join(BLS_ROOT, "bls.c"),
join(BLS_ROOT, "_impl.pyx"),
],
include_dirs=[numpy.get_include()],
)
return [ext]
|
6ee490c99a4a4d05282c7c36b4e5d92ac2f488a614b248f23ea8e41330dc5af4 | """
Utilities for computing periodogram statistics.
This is an internal module; users should access this functionality via the
``false_alarm_probability`` and ``false_alarm_level`` methods of the
``astropy.timeseries.LombScargle`` API.
"""
from functools import wraps
import numpy as np
from astropy import units as u
def _weighted_sum(val, dy):
if dy is not None:
return (val / dy ** 2).sum()
else:
return val.sum()
def _weighted_mean(val, dy):
if dy is None:
return val.mean()
else:
return _weighted_sum(val, dy) / _weighted_sum(np.ones(val.shape), dy)
def _weighted_var(val, dy):
return _weighted_mean(val ** 2, dy) - _weighted_mean(val, dy) ** 2
def _gamma(N):
from scipy.special import gammaln
# Note: this is closely approximated by (1 - 0.75 / N) for large N
return np.sqrt(2 / N) * np.exp(gammaln(N / 2) - gammaln((N - 1) / 2))
def vectorize_first_argument(func):
@wraps(func)
def new_func(x, *args, **kwargs):
x = np.asarray(x)
return np.array([func(xi, *args, **kwargs)
for xi in x.flat]).reshape(x.shape)
return new_func
def pdf_single(z, N, normalization, dH=1, dK=3):
"""Probability density function for Lomb-Scargle periodogram
Compute the expected probability density function of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
pdf : np.ndarray
The expected probability density function.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
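    Examples
    --------
    For the 'psd' normalization the null distribution is a unit
    exponential (a quick illustrative check):
    >>> float(pdf_single(1.0, N=30, normalization='psd'))  # doctest: +FLOAT_CMP
    0.36787944117144233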
"""
z = np.asarray(z)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == 'psd':
return np.exp(-z)
elif normalization == 'standard':
return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1)
elif normalization == 'model':
return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1)
elif normalization == 'log':
return 0.5 * Nk * np.exp(-0.5 * Nk * z)
else:
raise ValueError(f"normalization='{normalization}' is not recognized")
def fap_single(z, N, normalization, dH=1, dK=3):
"""Single-frequency false alarm probability for the Lomb-Scargle periodogram
This is equal to 1 - cdf, where cdf is the cumulative distribution.
The single-frequency false alarm probability should not be confused with
the false alarm probability for the largest peak.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
false_alarm_probability : np.ndarray
The single-frequency false alarm probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
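    Examples
    --------
    A round-trip sketch against ``inv_fap_single`` (the numbers here are
    arbitrary):
    >>> z = inv_fap_single(0.01, N=30, normalization='standard')
    >>> float(fap_single(z, N=30, normalization='standard'))  # doctest: +FLOAT_CMP
    0.01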
"""
z = np.asarray(z)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == 'psd':
return np.exp(-z)
elif normalization == 'standard':
return (1 - z) ** (0.5 * Nk)
elif normalization == 'model':
return (1 + z) ** (-0.5 * Nk)
elif normalization == 'log':
return np.exp(-0.5 * Nk * z)
else:
raise ValueError(f"normalization='{normalization}' is not recognized")
def inv_fap_single(fap, N, normalization, dH=1, dK=3):
"""Single-frequency inverse false alarm probability
This function computes the periodogram value associated with the specified
single-frequency false alarm probability. This should not be confused with
the false alarm level of the largest peak.
Parameters
----------
fap : array-like
The false alarm probability.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
z : np.ndarray
The periodogram power corresponding to the single-peak false alarm
probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
fap = np.asarray(fap)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
# No warnings for fap = 0; rather, just let it give the right infinity.
with np.errstate(divide='ignore'):
if normalization == 'psd':
return -np.log(fap)
elif normalization == 'standard':
return 1 - fap ** (2 / Nk)
elif normalization == 'model':
return -1 + fap ** (-2 / Nk)
elif normalization == 'log':
return -2 / Nk * np.log(fap)
else:
raise ValueError(f"normalization='{normalization}' is not recognized")
def cdf_single(z, N, normalization, dH=1, dK=3):
"""Cumulative distribution for the Lomb-Scargle periodogram
Compute the expected cumulative distribution of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : int, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
cdf : np.ndarray
The expected cumulative distribution function.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK)
def tau_davies(Z, fmax, t, y, dy, normalization='standard', dH=1, dK=3):
"""tau factor for estimating Davies bound (Baluev 2008, Table 1)"""
N = len(t)
NH = N - dH # DOF for null hypothesis
NK = N - dK # DOF for periodic hypothesis
Dt = _weighted_var(t, dy)
Teff = np.sqrt(4 * np.pi * Dt) # Effective baseline
W = fmax * Teff
Z = np.asarray(Z)
if normalization == 'psd':
        # 'psd' normalization is the same as Baluev's z
return W * np.exp(-Z) * np.sqrt(Z)
elif normalization == 'standard':
# 'standard' normalization is Z = 2/NH * z_1
return (_gamma(NH) * W * (1 - Z) ** (0.5 * (NK - 1))
* np.sqrt(0.5 * NH * Z))
elif normalization == 'model':
# 'model' normalization is Z = 2/NK * z_2
return (_gamma(NK) * W * (1 + Z) ** (-0.5 * NK)
* np.sqrt(0.5 * NK * Z))
elif normalization == 'log':
# 'log' normalization is Z = 2/NK * z_3
return (_gamma(NK) * W * np.exp(-0.5 * Z * (NK - 0.5))
* np.sqrt(NK * np.sinh(0.5 * Z)))
else:
raise NotImplementedError(f"normalization={normalization}")
def fap_naive(Z, fmax, t, y, dy, normalization='standard'):
"""False Alarm Probability based on estimated number of indep frequencies"""
N = len(t)
T = max(t) - min(t)
N_eff = fmax * T
fap_s = fap_single(Z, N, normalization=normalization)
# result is 1 - (1 - fap_s) ** N_eff
# this is much more precise for small Z / large N
    # Ignore divide-by-zero in np.log1p - fine to let it return -inf.
with np.errstate(divide='ignore'):
return -np.expm1(N_eff * np.log1p(-fap_s))
def inv_fap_naive(fap, fmax, t, y, dy, normalization='standard'):
"""Inverse FAP based on estimated number of indep frequencies"""
fap = np.asarray(fap)
N = len(t)
T = max(t) - min(t)
N_eff = fmax * T
# fap_s = 1 - (1 - fap) ** (1 / N_eff)
    # Ignore divide-by-zero in np.log - fine to let it return -inf.
with np.errstate(divide='ignore'):
fap_s = -np.expm1(np.log(1 - fap) / N_eff)
return inv_fap_single(fap_s, N, normalization)
def fap_davies(Z, fmax, t, y, dy, normalization='standard'):
"""Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008)
"""
N = len(t)
fap_s = fap_single(Z, N, normalization=normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return fap_s + tau
@vectorize_first_argument
def inv_fap_davies(p, fmax, t, y, dy, normalization='standard'):
"""Inverse of the davies upper-bound"""
from scipy import optimize
args = (fmax, t, y, dy, normalization)
z0 = inv_fap_naive(p, *args)
func = lambda z, *args: fap_davies(z, *args) - p
res = optimize.root(func, z0, args=args, method='lm')
if not res.success:
        raise ValueError(f'inv_fap_davies did not converge for p={p}')
return res.x
def fap_baluev(Z, fmax, t, y, dy, normalization='standard'):
"""Alias-free approximation to false alarm probability
(Eqn 6 of Baluev 2008)
"""
fap_s = fap_single(Z, len(t), normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
# result is 1 - (1 - fap_s) * np.exp(-tau)
# this is much more precise for small numbers
return -np.expm1(-tau) + fap_s * np.exp(-tau)
@vectorize_first_argument
def inv_fap_baluev(p, fmax, t, y, dy, normalization='standard'):
"""Inverse of the Baluev alias-free approximation"""
from scipy import optimize
args = (fmax, t, y, dy, normalization)
z0 = inv_fap_naive(p, *args)
func = lambda z, *args: fap_baluev(z, *args) - p
res = optimize.root(func, z0, args=args, method='lm')
if not res.success:
raise ValueError(f'inv_fap_baluev did not converge for p={p}')
return res.x
def _bootstrap_max(t, y, dy, fmax, normalization, random_seed, n_bootstrap=1000):
"""Generate a sequence of bootstrap estimates of the max"""
from .core import LombScargle
rng = np.random.default_rng(random_seed)
power_max = []
for _ in range(n_bootstrap):
s = rng.integers(0, len(y), len(y)) # sample with replacement
ls_boot = LombScargle(t, y[s], dy if dy is None else dy[s],
normalization=normalization)
freq, power = ls_boot.autopower(maximum_frequency=fmax)
power_max.append(power.max())
power_max = u.Quantity(power_max)
power_max.sort()
return power_max
def fap_bootstrap(Z, fmax, t, y, dy, normalization='standard',
n_bootstraps=1000, random_seed=None):
"""Bootstrap estimate of the false alarm probability"""
pmax = _bootstrap_max(t, y, dy, fmax, normalization, random_seed,
n_bootstraps)
return 1 - np.searchsorted(pmax, Z) / len(pmax)
def inv_fap_bootstrap(fap, fmax, t, y, dy, normalization='standard',
n_bootstraps=1000, random_seed=None):
"""Bootstrap estimate of the inverse false alarm probability"""
fap = np.asarray(fap)
pmax = _bootstrap_max(t, y, dy, fmax, normalization, random_seed,
n_bootstraps)
return pmax[np.clip(np.floor((1 - fap) * len(pmax)).astype(int),
0, len(pmax) - 1)]
METHODS = {'single': fap_single,
'naive': fap_naive,
'davies': fap_davies,
'baluev': fap_baluev,
'bootstrap': fap_bootstrap}
def false_alarm_probability(Z, fmax, t, y, dy, normalization='standard',
method='baluev', method_kwds=None):
"""Compute the approximate false alarm probability for periodogram peaks Z
This gives an estimate of the false alarm probability for the largest value
in a periodogram, based on the null hypothesis of non-varying data with
Gaussian noise. The true probability cannot be computed analytically, so
each method available here is an approximation to the true value.
Parameters
----------
Z : array-like
The periodogram value.
fmax : float
The maximum frequency of the periodogram.
t, y, dy : array-like
The data times, values, and errors.
normalization : {'standard', 'model', 'log', 'psd'}, optional
The periodogram normalization.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
false_alarm_probability : np.ndarray
The false alarm probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
See Also
--------
false_alarm_level : compute the periodogram level for a particular fap
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if method == 'single':
return fap_single(Z, len(t), normalization)
elif method not in METHODS:
raise ValueError(f"Unrecognized method: {method}")
method = METHODS[method]
method_kwds = method_kwds or {}
return method(Z, fmax, t, y, dy, normalization, **method_kwds)
INV_METHODS = {'single': inv_fap_single,
'naive': inv_fap_naive,
'davies': inv_fap_davies,
'baluev': inv_fap_baluev,
'bootstrap': inv_fap_bootstrap}
def false_alarm_level(p, fmax, t, y, dy, normalization,
method='baluev', method_kwds=None):
"""Compute the approximate periodogram level given a false alarm probability
This gives an estimate of the periodogram level corresponding to a specified
false alarm probability for the largest peak, assuming a null hypothesis
of non-varying data with Gaussian noise. The true level cannot be computed
analytically, so each method available here is an approximation to the true
value.
Parameters
----------
p : array-like
The false alarm probability (0 < p < 1).
fmax : float
The maximum frequency of the periodogram.
t, y, dy : arrays
The data times, values, and errors.
    normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
z : np.ndarray
The periodogram level.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
See Also
--------
false_alarm_probability : compute the fap for a given periodogram level
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if method == 'single':
return inv_fap_single(p, len(t), normalization)
elif method not in INV_METHODS:
raise ValueError(f"Unrecognized method: {method}")
method = INV_METHODS[method]
method_kwds = method_kwds or {}
return method(p, fmax, t, y, dy, normalization, **method_kwds)
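# A minimal demonstration sketch (illustration only, not part of the
# public API): compare the approximation methods on synthetic Gaussian
# noise.  The variable names and numbers below are arbitrary, and the
# 'davies' and 'baluev' methods additionally require scipy.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    t_demo = np.sort(100 * rng.random(50))
    y_demo = rng.standard_normal(50)
    for m in ('single', 'naive', 'davies', 'baluev'):
        fap = false_alarm_probability(0.3, fmax=1.0, t=t_demo, y=y_demo,
                                      dy=None, normalization='standard',
                                      method=m)
        print(f"{m:>7}: {float(fap):.3g}")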
|
594e77cc167826d021fb04c7263124173ef018052100eec9e87c78f8c8c38f86 | """Main Lomb-Scargle Implementation"""
import numpy as np
from .implementations import lombscargle, available_methods
from .implementations.mle import periodic_fit, design_matrix
from . import _statistics
from astropy import units
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.timeseries.periodograms.base import BasePeriodogram
def has_units(obj):
return hasattr(obj, 'unit')
def get_unit(obj):
return getattr(obj, 'unit', 1)
def strip_units(*arrs):
strip = lambda a: None if a is None else np.asarray(a)
if len(arrs) == 1:
return strip(arrs[0])
else:
return map(strip, arrs)
class LombScargle(BasePeriodogram):
"""Compute the Lomb-Scargle Periodogram.
    The implementation here is based on code presented in [1]_ and [2]_;
if you use this functionality in an academic application, citation of
those works would be appreciated.
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
sequence of observation times
y : array-like or `~astropy.units.Quantity`
sequence of observations associated with times t
dy : float, array-like, or `~astropy.units.Quantity`, optional
error or sequence of observational errors associated with times t
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if fit_mean = False
nterms : int, optional
number of terms to use in the Fourier fit
normalization : {'standard', 'model', 'log', 'psd'}, optional
Normalization to use for the periodogram.
Examples
--------
Generate noisy periodic data:
>>> rand = np.random.default_rng(42)
>>> t = 100 * rand.random(100)
>>> y = np.sin(2 * np.pi * t) + rand.standard_normal(100)
Compute the Lomb-Scargle periodogram on an automatically-determined
frequency grid & find the frequency of max power:
>>> frequency, power = LombScargle(t, y).autopower()
>>> frequency[np.argmax(power)] # doctest: +FLOAT_CMP
1.0007641728995051
Compute the Lomb-Scargle periodogram at a user-specified frequency grid:
>>> freq = np.arange(0.8, 1.3, 0.1)
>>> LombScargle(t, y).power(freq) # doctest: +FLOAT_CMP
array([0.0792948 , 0.01778874, 0.25328167, 0.01064157, 0.01471387])
If the inputs are astropy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.s
>>> y = y * u.mag
>>> frequency, power = LombScargle(t, y).autopower()
>>> frequency.unit
Unit("1 / s")
>>> power.unit
Unit(dimensionless)
Note here that the Lomb-Scargle power is always a unitless quantity,
because it is related to the :math:`\\chi^2` of the best-fit periodic
model at each frequency.
References
----------
.. [1] Vanderplas, J., Connolly, A. Ivezic, Z. & Gray, A. *Introduction to
astroML: Machine learning for astrophysics*. Proceedings of the
Conference on Intelligent Data Understanding (2012)
.. [2] VanderPlas, J. & Ivezic, Z. *Periodograms for Multiband Astronomical
Time Series*. ApJ 812.1:18 (2015)
"""
available_methods = available_methods()
def __init__(self, t, y, dy=None, fit_mean=True, center_data=True,
nterms=1, normalization='standard'):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
# so can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to('day')
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, Time):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy)
self.fit_mean = fit_mean
self.center_data = center_data
self.nterms = nterms
self.normalization = normalization
def _validate_inputs(self, t, y, dy):
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if any(has_units(arr) for arr in (t, y, dy)):
t, y = map(units.Quantity, (t, y))
if dy is not None:
dy = units.Quantity(dy)
try:
dy = units.Quantity(dy, unit=y.unit)
except units.UnitConversionError:
raise ValueError("Units of dy not equivalent "
"to units of y")
return t, y, dy
def _validate_frequency(self, frequency):
frequency = np.asanyarray(frequency)
if has_units(self._trel):
frequency = units.Quantity(frequency)
try:
frequency = units.Quantity(frequency, unit=1./self._trel.unit)
except units.UnitConversionError:
raise ValueError("Units of frequency not equivalent to "
"units of 1/t")
else:
if has_units(frequency):
raise ValueError("frequency have units while 1/t doesn't.")
return frequency
def _validate_t(self, t):
t = np.asanyarray(t)
if has_units(self._trel):
t = units.Quantity(t)
try:
t = units.Quantity(t, unit=self._trel.unit)
except units.UnitConversionError:
raise ValueError("Units of t not equivalent to "
"units of input self.t")
return t
def _power_unit(self, norm):
if has_units(self.y):
if self.dy is None and norm == 'psd':
return self.y.unit ** 2
else:
return units.dimensionless_unscaled
else:
return 1
def autofrequency(self, samples_per_peak=5, nyquist_factor=5,
minimum_frequency=None, maximum_frequency=None,
return_freq_limits=False):
"""Determine a suitable frequency grid for data.
Note that this assumes the peak width is driven by the observational
baseline, which is generally a good assumption when the baseline is
much larger than the oscillation period.
If you are searching for periods longer than the baseline of your
observations, this may not perform well.
Even with a large baseline, be aware that the maximum frequency
returned is based on the concept of "average Nyquist frequency", which
may not be useful for irregularly-sampled data. The maximum frequency
can be adjusted via the nyquist_factor argument, or through the
maximum_frequency argument.
Parameters
----------
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float, optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float, optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
return_freq_limits : bool, optional
if True, return only the frequency limits rather than the full
frequency grid.
Returns
-------
frequency : ndarray or `~astropy.units.Quantity` ['frequency']
            The heuristically-determined optimal frequency grid.
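        Examples
        --------
        A hedged sketch with synthetic times (the exact limits depend on
        the data):
        >>> rand = np.random.default_rng(42)
        >>> t = 100 * rand.random(100)
        >>> ls = LombScargle(t, np.sin(t))
        >>> fmin, fmax = ls.autofrequency(return_freq_limits=True)
        >>> bool(fmin < fmax)
        True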
"""
baseline = self._trel.max() - self._trel.min()
n_samples = self._trel.size
df = 1.0 / baseline / samples_per_peak
if minimum_frequency is None:
minimum_frequency = 0.5 * df
if maximum_frequency is None:
avg_nyquist = 0.5 * n_samples / baseline
maximum_frequency = nyquist_factor * avg_nyquist
Nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df))
if return_freq_limits:
return minimum_frequency, minimum_frequency + df * (Nf - 1)
else:
return minimum_frequency + df * np.arange(Nf)
def autopower(self, method='auto', method_kwds=None,
normalization=None, samples_per_peak=5,
nyquist_factor=5, minimum_frequency=None,
maximum_frequency=None):
"""Compute Lomb-Scargle power at automatically-determined frequencies.
Parameters
----------
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
samples_per_peak : float, optional
The approximate number of desired samples across the typical peak
nyquist_factor : float, optional
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
maximum_frequency : float or `~astropy.units.Quantity` ['frequency'], optional
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency. Should be `~astropy.units.Quantity`
if inputs to LombScargle are `~astropy.units.Quantity`.
Returns
-------
frequency, power : ndarray
The frequency and Lomb-Scargle power
"""
frequency = self.autofrequency(samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency)
power = self.power(frequency,
normalization=normalization,
method=method, method_kwds=method_kwds,
assume_regular_frequency=True)
return frequency, power
def power(self, frequency, normalization=None, method='auto',
assume_regular_frequency=False, method_kwds=None):
"""Compute the Lomb-Scargle power at the given frequencies.
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly-spaced.
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool, optional
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
Returns
-------
power : ndarray
The Lomb-Scargle power at the specified frequency
"""
if normalization is None:
normalization = self.normalization
frequency = self._validate_frequency(frequency)
power = lombscargle(*strip_units(self._trel, self.y, self.dy),
frequency=strip_units(frequency),
center_data=self.center_data,
fit_mean=self.fit_mean,
nterms=self.nterms,
normalization=normalization,
method=method, method_kwds=method_kwds,
assume_regular_frequency=assume_regular_frequency)
return power * self._power_unit(normalization)
def _as_relative_time(self, name, times):
"""
Convert the provided times (if absolute) to relative times using the
current _tstart value. If the times provided are relative, they are
returned without conversion (though we still do some checks).
"""
if isinstance(times, TimeDelta):
times = times.to('day')
if self._tstart is None:
if isinstance(times, Time):
raise TypeError('{} was provided as an absolute time but '
'the LombScargle class was initialized '
'with relative times.'.format(name))
else:
if isinstance(times, Time):
times = (times - self._tstart).to(u.day)
else:
raise TypeError('{} was provided as a relative time but '
'the LombScargle class was initialized '
'with absolute times.'.format(name))
return times
def model(self, t, frequency):
"""Compute the Lomb-Scargle model at the given frequency.
The model at a particular frequency is a linear model:
model = offset + dot(design_matrix, model_parameters)
Parameters
----------
t : array-like or `~astropy.units.Quantity` ['time']
Times (length ``n_samples``) at which to compute the model.
frequency : float
the frequency for the model
Returns
-------
y : np.ndarray
The model fit corresponding to the input times
(will have length ``n_samples``).
See Also
--------
design_matrix
offset
model_parameters
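        Examples
        --------
        A sketch verifying the linear decomposition above on synthetic
        data (``offset``, ``design_matrix``, and ``model_parameters`` are
        the other methods of this class):
        >>> rand = np.random.default_rng(42)
        >>> t = 100 * rand.random(100)
        >>> y = np.sin(2 * np.pi * t) + rand.standard_normal(100)
        >>> ls = LombScargle(t, y)
        >>> y_fit = ls.model(t, frequency=1.0)
        >>> X = ls.design_matrix(1.0, t)
        >>> np.allclose(y_fit, ls.offset() + X.dot(ls.model_parameters(1.0)))
        True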
"""
frequency = self._validate_frequency(frequency)
t = self._validate_t(self._as_relative_time('t', t))
y_fit = periodic_fit(*strip_units(self._trel, self.y, self.dy),
frequency=strip_units(frequency),
t_fit=strip_units(t),
center_data=self.center_data,
fit_mean=self.fit_mean,
nterms=self.nterms)
return y_fit * get_unit(self.y)
def offset(self):
"""Return the offset of the model
The offset of the model is the (weighted) mean of the y values.
Note that if self.center_data is False, the offset is 0 by definition.
Returns
-------
offset : scalar
See Also
--------
design_matrix
model
model_parameters
"""
y, dy = strip_units(self.y, self.dy)
if dy is None:
dy = 1
dy = np.broadcast_to(dy, y.shape)
if self.center_data:
w = dy ** -2.0
y_mean = np.dot(y, w) / w.sum()
else:
y_mean = 0
return y_mean * get_unit(self.y)
def model_parameters(self, frequency, units=True):
r"""Compute the best-fit model parameters at the given frequency.
The model described by these parameters is:
.. math::
y(t; f, \vec{\theta}) = \theta_0 + \sum_{n=1}^{\tt nterms} [\theta_{2n-1}\sin(2\pi n f t) + \theta_{2n}\cos(2\pi n f t)]
where :math:`\vec{\theta}` is the array of parameters returned by this function.
Parameters
----------
frequency : float
the frequency for the model
        units : bool
            If True (default), return the parameters in the units of the
            input data.
Returns
-------
theta : np.ndarray (n_parameters,)
The best-fit model parameters at the given frequency.
See Also
--------
design_matrix
model
offset
"""
frequency = self._validate_frequency(frequency)
t, y, dy = strip_units(self._trel, self.y, self.dy)
if self.center_data:
y = y - strip_units(self.offset())
dy = np.ones_like(y) if dy is None else np.asarray(dy)
X = self.design_matrix(frequency)
parameters = np.linalg.solve(np.dot(X.T, X),
np.dot(X.T, y / dy))
if units:
parameters = get_unit(self.y) * parameters
return parameters
def design_matrix(self, frequency, t=None):
"""Compute the design matrix for a given frequency
Parameters
----------
frequency : float
the frequency for the model
        t : array-like, `~astropy.units.Quantity`, or `~astropy.time.Time`, optional
Times (length ``n_samples``) at which to compute the model.
If not specified, then the times and uncertainties of the input
data are used.
Returns
-------
X : array
The design matrix for the model at the given frequency.
This should have a shape of (``len(t)``, ``n_parameters``).
See Also
--------
model
model_parameters
offset
"""
if t is None:
t, dy = strip_units(self._trel, self.dy)
else:
t, dy = strip_units(self._validate_t(self._as_relative_time('t', t)), None)
return design_matrix(t, frequency, dy,
nterms=self.nterms,
bias=self.fit_mean)
def distribution(self, power, cumulative=False):
"""Expected periodogram distribution under the null hypothesis.
This computes the expected probability distribution or cumulative
probability distribution of periodogram power, under the null
hypothesis of a non-varying signal with Gaussian noise. Note that
this is not the same as the expected distribution of peak values;
for that see the ``false_alarm_probability()`` method.
Parameters
----------
power : array-like
The periodogram power at which to compute the distribution.
cumulative : bool, optional
If True, then return the cumulative distribution.
See Also
--------
false_alarm_probability
false_alarm_level
Returns
-------
dist : np.ndarray
The probability density or cumulative probability associated with
the provided powers.
"""
dH = 1 if self.fit_mean or self.center_data else 0
dK = dH + 2 * self.nterms
dist = _statistics.cdf_single if cumulative else _statistics.pdf_single
return dist(power, len(self._trel), self.normalization, dH=dH, dK=dK)
def false_alarm_probability(self, power, method='baluev',
samples_per_peak=5, nyquist_factor=5,
minimum_frequency=None, maximum_frequency=None,
method_kwds=None):
"""False alarm probability of periodogram maxima under the null hypothesis.
This gives an estimate of the false alarm probability given the height
of the largest peak in the periodogram, based on the null hypothesis
of non-varying data with Gaussian noise.
Parameters
----------
power : array-like
The periodogram value.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use.
        samples_per_peak : float, optional
            The approximate number of desired samples across the typical peak.
        nyquist_factor : float, optional
            The multiple of the average nyquist frequency used to choose the
            maximum frequency if maximum_frequency is not provided.
        minimum_frequency : float, optional
            If specified, the minimum frequency of the periodogram.
        maximum_frequency : float, optional
            If specified, the maximum frequency of the periodogram.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
false_alarm_probability : np.ndarray
The false alarm probability
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data.
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_level
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
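        Examples
        --------
        A hedged usage sketch on noise-only data (the default ``'baluev'``
        method relies on scipy):
        >>> rand = np.random.default_rng(42)
        >>> t = 100 * rand.random(100)
        >>> y = rand.standard_normal(100)
        >>> ls = LombScargle(t, y)
        >>> freq, power = ls.autopower()
        >>> fap = ls.false_alarm_probability(power.max())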
"""
if self.nterms != 1:
raise NotImplementedError("false alarm probability is not "
"implemented for multiterm periodograms.")
if not (self.fit_mean or self.center_data):
raise NotImplementedError("false alarm probability is implemented "
"only for periodograms of centered data.")
fmin, fmax = self.autofrequency(samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
return_freq_limits=True)
return _statistics.false_alarm_probability(power,
fmax=fmax,
t=self._trel, y=self.y, dy=self.dy,
normalization=self.normalization,
method=method,
method_kwds=method_kwds)
def false_alarm_level(self, false_alarm_probability, method='baluev',
samples_per_peak=5, nyquist_factor=5,
minimum_frequency=None, maximum_frequency=None,
method_kwds=None):
"""Level of maximum at a given false alarm probability.
This gives an estimate of the periodogram level corresponding to a
specified false alarm probability for the largest peak, assuming a
null hypothesis of non-varying data with Gaussian noise.
Parameters
----------
false_alarm_probability : array-like
The false alarm probability (0 < fap < 1).
        samples_per_peak : float, optional
            The approximate number of desired samples across the typical peak.
        nyquist_factor : float, optional
            The multiple of the average nyquist frequency used to choose the
            maximum frequency if maximum_frequency is not provided.
        minimum_frequency : float, optional
            If specified, the minimum frequency of the periodogram.
        maximum_frequency : float, optional
            If specified, the maximum frequency of the periodogram.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use; default='baluev'.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
power : np.ndarray
The periodogram peak height corresponding to the specified
false alarm probability.
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data.
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_probability
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if self.nterms != 1:
raise NotImplementedError("false alarm probability is not "
"implemented for multiterm periodograms.")
if not (self.fit_mean or self.center_data):
raise NotImplementedError("false alarm probability is implemented "
"only for periodograms of centered data.")
fmin, fmax = self.autofrequency(samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
return_freq_limits=True)
return _statistics.false_alarm_level(false_alarm_probability,
fmax=fmax,
t=self._trel, y=self.y, dy=self.dy,
normalization=self.normalization,
method=method,
method_kwds=method_kwds)
|
6ce7ca07943f560bf64f4ded335691e4797fd15e3b6123d3c75de7af8c2d38fd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
lombscargle
===========
AstroPy-compatible implementation of the Lomb-Scargle periodogram.
"""
from .core import LombScargle
|
4ea1bb879f1ae65fce81e87a53b8ad094bab8158abae5dae4ddd47eda3968b93 | import numpy as np
NORMALIZATIONS = ['standard', 'psd', 'model', 'log']
def compute_chi2_ref(y, dy=None, center_data=True, fit_mean=True):
"""Compute the reference chi-square for a particular dataset.
    Note: this is not valid for center_data=False and fit_mean=False.
Parameters
----------
y : array-like
data values
dy : float, array, or None, optional
data uncertainties
center_data : bool
specify whether data should be pre-centered
fit_mean : bool
specify whether model should fit the mean of the data
Returns
-------
chi2_ref : float
The reference chi-square for the periodogram of this data
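    Examples
    --------
    A minimal sketch with centered, unit-error data:
    >>> float(compute_chi2_ref(np.array([1.0, -1.0, 1.0, -1.0])))
    4.0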
"""
if dy is None:
dy = 1
y, dy = np.broadcast_arrays(y, dy)
w = dy ** -2.0
if center_data or fit_mean:
mu = np.dot(w, y) / w.sum()
else:
mu = 0
yw = (y - mu) / dy
return np.dot(yw, yw)
def convert_normalization(Z, N, from_normalization, to_normalization,
chi2_ref=None):
"""Convert power from one normalization to another.
This currently only works for standard & floating-mean models.
Parameters
----------
Z : array-like
the periodogram output
N : int
the number of data points
from_normalization, to_normalization : str
the normalization to convert from and to. Options are
['standard', 'model', 'log', 'psd']
chi2_ref : float
The reference chi-square, required for converting to or from the
psd normalization.
Returns
-------
Z_out : ndarray
The periodogram in the new normalization
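    Examples
    --------
    A round-trip sketch between two normalizations (the values here are
    arbitrary):
    >>> Z = np.array([0.1, 0.5, 0.9])
    >>> Z_model = convert_normalization(Z, N=100,
    ...                                 from_normalization='standard',
    ...                                 to_normalization='model')
    >>> np.allclose(Z, convert_normalization(Z_model, N=100,
    ...                                      from_normalization='model',
    ...                                      to_normalization='standard'))
    True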
"""
Z = np.asarray(Z)
from_to = (from_normalization, to_normalization)
for norm in from_to:
if norm not in NORMALIZATIONS:
raise ValueError(f"{from_normalization} is not a valid normalization")
if from_normalization == to_normalization:
return Z
if "psd" in from_to and chi2_ref is None:
raise ValueError("must supply reference chi^2 when converting "
"to or from psd normalization")
if from_to == ('log', 'standard'):
return 1 - np.exp(-Z)
elif from_to == ('standard', 'log'):
return -np.log(1 - Z)
elif from_to == ('log', 'model'):
return np.exp(Z) - 1
elif from_to == ('model', 'log'):
return np.log(Z + 1)
elif from_to == ('model', 'standard'):
return Z / (1 + Z)
elif from_to == ('standard', 'model'):
return Z / (1 - Z)
elif from_normalization == "psd":
return convert_normalization(2 / chi2_ref * Z, N,
from_normalization='standard',
to_normalization=to_normalization)
elif to_normalization == "psd":
Z_standard = convert_normalization(Z, N,
from_normalization=from_normalization,
to_normalization='standard')
return 0.5 * chi2_ref * Z_standard
else:
raise NotImplementedError("conversion from '{}' to '{}'"
"".format(from_normalization,
to_normalization))
|
2e5bdcc9a96a9a3aa10d56623a94de696ce44ce38ad0320d6c6358c11214567e | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms.bls import BoxLeastSquares
from astropy.timeseries.periodograms.lombscargle.core import has_units
def assert_allclose_blsresults(blsresult, other, **kwargs):
"""Assert that another BoxLeastSquaresResults object is consistent
This method loops over all attributes and compares the values using
:func:`~astropy.tests.helper.assert_quantity_allclose` function.
Parameters
----------
other : BoxLeastSquaresResults
The other results object to compare.
"""
for k, v in blsresult.items():
if k not in other:
raise AssertionError(f"missing key '{k}'")
if k == "objective":
assert v == other[k], (
f"Mismatched objectives. Expected '{v}', got '{other[k]}'"
)
continue
assert_quantity_allclose(v, other[k], **kwargs)
# NOTE: PR 10644 replaced deprecated usage of RandomState but could not
# find a new seed that did not cause test failure, resorted to hardcoding.
@pytest.fixture
def data():
t = np.array([
6.96469186, 2.86139335, 2.26851454, 5.51314769, 7.1946897,
4.2310646, 9.80764198, 6.84829739, 4.80931901, 3.92117518,
3.43178016, 7.29049707, 4.38572245, 0.59677897, 3.98044255,
7.37995406, 1.8249173, 1.75451756, 5.31551374, 5.31827587,
6.34400959, 8.49431794, 7.24455325, 6.11023511, 7.22443383,
3.22958914, 3.61788656, 2.28263231, 2.93714046, 6.30976124,
0.9210494, 4.33701173, 4.30862763, 4.93685098, 4.2583029,
3.12261223, 4.26351307, 8.93389163, 9.44160018, 5.01836676,
6.23952952, 1.15618395, 3.17285482, 4.14826212, 8.66309158,
2.50455365, 4.83034264, 9.85559786, 5.19485119, 6.12894526,
1.20628666, 8.26340801, 6.03060128, 5.45068006, 3.42763834,
3.04120789, 4.17022211, 6.81300766, 8.75456842, 5.10422337,
6.69313783, 5.85936553, 6.24903502, 6.74689051, 8.42342438,
0.83194988, 7.63682841, 2.43666375, 1.94222961, 5.72456957,
0.95712517, 8.85326826, 6.27248972, 7.23416358, 0.16129207,
5.94431879, 5.56785192, 1.58959644, 1.53070515, 6.95529529,
3.18766426, 6.91970296, 5.5438325, 3.88950574, 9.2513249,
8.41669997, 3.57397567, 0.43591464, 3.04768073, 3.98185682,
7.0495883, 9.95358482, 3.55914866, 7.62547814, 5.93176917,
6.91701799, 1.51127452, 3.98876293, 2.40855898, 3.43456014,
5.13128154, 6.6662455, 1.05908485, 1.30894951, 3.21980606,
6.61564337, 8.46506225, 5.53257345, 8.54452488, 3.84837811,
3.16787897, 3.54264676, 1.71081829, 8.29112635, 3.38670846,
5.52370075, 5.78551468, 5.21533059, 0.02688065, 9.88345419,
9.05341576, 2.07635861, 2.92489413, 5.20010153, 9.01911373,
9.83630885, 2.57542064, 5.64359043, 8.06968684, 3.94370054,
7.31073036, 1.61069014, 6.00698568, 8.65864458, 9.83521609,
0.7936579, 4.28347275, 2.0454286, 4.50636491, 5.47763573,
0.9332671, 2.96860775, 9.2758424, 5.69003731, 4.57411998,
7.53525991, 7.41862152, 0.48579033, 7.08697395, 8.39243348,
1.65937884, 7.80997938, 2.86536617, 3.06469753, 6.65261465,
1.11392172, 6.64872449, 8.87856793, 6.96311268, 4.40327877,
4.38214384, 7.65096095, 5.65642001, 0.84904163, 5.82671088,
8.14843703, 3.37066383, 9.2757658, 7.50717, 5.74063825,
7.51643989, 0.79148961, 8.59389076, 8.21504113, 9.0987166,
1.28631198, 0.81780087, 1.38415573, 3.9937871, 4.24306861,
5.62218379, 1.2224355, 2.01399501, 8.11644348, 4.67987574,
8.07938209, 0.07426379, 5.51592726, 9.31932148, 5.82175459,
2.06095727, 7.17757562, 3.7898585, 6.68383947, 0.29319723,
6.35900359, 0.32197935, 7.44780655, 4.72913002, 1.21754355,
5.42635926, 0.66774443, 6.53364871, 9.96086327, 7.69397337,
5.73774114, 1.02635259, 6.99834075, 6.61167867, 0.49097131,
7.92299302, 5.18716591, 4.25867694, 7.88187174, 4.11569223,
4.81026276, 1.81628843, 3.213189, 8.45532997, 1.86903749,
4.17291061, 9.89034507, 2.36599812, 9.16832333, 9.18397468,
0.91296342, 4.63652725, 5.02216335, 3.1366895, 0.47339537,
2.41685637, 0.95529642, 2.38249906, 8.07791086, 8.94978288,
0.43222892, 3.01946836, 9.80582199, 5.39504823, 6.26309362,
0.05545408, 4.84909443, 9.88328535, 3.75185527, 0.97038159,
4.61908762, 9.63004466, 3.41830614, 7.98922733, 7.98846331,
2.08248297, 4.43367702, 7.15601275, 4.10519785, 1.91006955,
9.67494307, 6.50750366, 8.65459852, 2.52423578e-01, 2.66905815,
5.02071100, 6.74486351e-01, 9.93033261, 2.36462396, 3.74292182,
2.14011915, 1.05445866, 2.32479786, 3.00610136, 6.34442268,
2.81234781, 3.62276761, 5.94284372e-02, 3.65719126, 5.33885982,
1.62015837, 5.97433108, 2.93152469, 6.32050495, 2.61966053e-01,
8.87593460, 1.61186304e-01, 1.26958031, 7.77162462, 4.58952322e-01,
7.10998694, 9.71046141, 8.71682933, 7.10161651, 9.58509743,
4.29813338, 8.72878914, 3.55957668, 9.29763653, 1.48777656,
9.40029015, 8.32716197, 8.46054838, 1.23923010, 5.96486898,
1.63924809e-01, 7.21184366, 7.73751413e-02, 8.48222774e-01, 2.25498410,
8.75124534, 3.63576318, 5.39959935, 5.68103214, 2.25463360,
5.72146768, 6.60951795, 2.98245393, 4.18626859, 4.53088925,
9.32350662, 5.87493747, 9.48252372, 5.56034754, 5.00561421,
3.53221097e-02, 4.80889044, 9.27454999, 1.98365689, 5.20911344e-01,
4.06778893, 3.72396481, 8.57153058, 2.66111156e-01, 9.20149230,
6.80902999, 9.04225994, 6.07529071, 8.11953312, 3.35543874,
3.49566228, 3.89874230, 7.54797082, 3.69291174, 2.42219806,
9.37668357, 9.08011084, 3.48797316, 6.34638070, 2.73842212,
2.06115129, 3.36339529, 3.27099893, 8.82276101, 8.22303815,
7.09623229, 9.59345225, 4.22543353, 2.45033039, 1.17398437,
3.01053358, 1.45263734, 9.21860974e-01, 6.02932197, 3.64187450,
5.64570343, 1.91335721, 6.76905860, 2.15505447, 2.78023594,
7.41760422, 5.59737896, 3.34836413, 5.42988783, 6.93984703,
9.12132121, 5.80713213, 2.32686379, 7.46697631, 7.77769018,
2.00401315, 8.20574220, 4.64934855, 7.79766662, 2.37478220,
3.32580270, 9.53697119, 6.57815073, 7.72877831, 6.88374343,
2.04304118, 4.70688748, 8.08963873, 6.75035127, 6.02788565e-02,
8.74077427e-01, 3.46794720, 9.44365540, 4.91190481, 2.70176267,
3.60423719, 2.10652628, 4.21200057, 2.18035440, 8.45752507,
4.56270599, 2.79802018, 9.32891648, 3.14351354, 9.09714662,
4.34180910e-01, 7.07115060, 4.83889039, 4.44221061, 3.63233444e-01,
4.06831905e-01, 3.32753617, 9.47119540, 6.17659977, 3.68874842,
6.11977039, 2.06131536, 1.65066443, 3.61817266, 8.63353352,
5.09401727, 2.96901516, 9.50251625, 8.15966090, 3.22973943,
9.72098245, 9.87351098, 4.08660134, 6.55923103, 4.05653198,
2.57348106, 8.26526760e-01, 2.63610346, 2.71479854, 3.98639080,
1.84886031, 9.53818403, 1.02879885, 6.25208533, 4.41697388,
4.23518049, 3.71991783, 8.68314710, 2.80476981, 2.05761574e-01,
9.18097016, 8.64480278, 2.76901790, 5.23487548, 1.09088197,
9.34270688e-01, 8.37466108, 4.10265718, 6.61716540, 9.43200558,
2.45130592, 1.31598313e-01, 2.41484058e-01, 7.09385692, 9.24551885,
4.67330273, 3.75109148, 5.42860425, 8.58916838, 6.52153874,
2.32979897, 7.74580205, 1.34613497, 1.65559971, 6.12682283,
2.38783406, 7.04778548, 3.49518527, 2.77423960, 9.98918406,
4.06161246e-01, 6.45822522, 3.86995850e-01, 7.60210258, 2.30089957,
8.98318671e-01, 6.48449712, 7.32601217, 6.78095315, 5.19009471e-01,
2.94306946, 4.51088346, 2.87103290, 8.10513456, 1.31115105,
6.12179362, 9.88214944, 9.02556539, 2.22157062, 8.18876137e-04,
9.80597342, 8.82712985, 9.19472466, 4.15503551, 7.44615462])
y = np.ones_like(t)
dy = np.array([
0.00606416, 0.00696152, 0.00925774, 0.00563806, 0.00946933,
0.00748254, 0.00713048, 0.00652823, 0.00958424, 0.00758812,
0.00902013, 0.00928826, 0.00961191, 0.0065169, 0.00669905,
0.00797537, 0.00720662, 0.00966421, 0.00698782, 0.00738889,
0.00808593, 0.0070237, 0.00996239, 0.00549426, 0.00610302,
0.00661328, 0.00573861, 0.0064211, 0.00889623, 0.00761446,
0.00516977, 0.00991311, 0.00808003, 0.0052947, 0.00830584,
0.00689185, 0.00567837, 0.00781832, 0.0086354, 0.00835563,
0.00623757, 0.00762433, 0.00768832, 0.00858402, 0.00679934,
0.00898866, 0.00813961, 0.00519166, 0.0077324, 0.00930956,
0.00783787, 0.00587914, 0.00755188, 0.00878473, 0.00555053,
0.0090855, 0.00583741, 0.00767038, 0.00692872, 0.00624312,
0.00823716, 0.00518696, 0.00880023, 0.0076347, 0.00937886,
0.00760359, 0.00517517, 0.005718, 0.00897802, 0.00745988,
0.0072094, 0.00659217, 0.00642275, 0.00982943, 0.00716485,
0.00942002, 0.00824082, 0.00929214, 0.00926225, 0.00978156,
0.00848971, 0.00902698, 0.00866564, 0.00802613, 0.00858677,
0.00857875, 0.00520454, 0.00758055, 0.00896326, 0.00621481,
0.00732574, 0.00717493, 0.00701394, 0.0056092, 0.00762856,
0.00723124, 0.00831696, 0.00774707, 0.00513771, 0.00515959,
0.0085068, 0.00853791, 0.0097997, 0.00938352, 0.0073403,
0.00812953, 0.00728591, 0.00611473, 0.00688338, 0.00551942,
0.00833264, 0.00596015, 0.00737734, 0.00983718, 0.00515834,
0.00575865, 0.0064929, 0.00970903, 0.00954421, 0.00581,
0.00990559, 0.00875374, 0.00769989, 0.00965851, 0.00940304,
0.00695658, 0.00828172, 0.00823693, 0.00663484, 0.00589695,
0.00733405, 0.00631641, 0.00677533, 0.00977072, 0.00730569,
0.00842446, 0.00668115, 0.00997931, 0.00829384, 0.00598005,
0.00549092, 0.0097159, 0.00972389, 0.00810664, 0.00508496,
0.00612767, 0.00900638, 0.0093773, 0.00726995, 0.0068276,
0.00637113, 0.00558485, 0.00557872, 0.00976301, 0.00904313,
0.0058239, 0.00603525, 0.00827776, 0.00882332, 0.00905157,
0.00581669, 0.00992064, 0.00613901, 0.00794708, 0.00793808,
0.00983681, 0.00828834, 0.00792452, 0.00759386, 0.00882329,
0.00553028, 0.00501046, 0.00976244, 0.00749329, 0.00664168,
0.00684027, 0.00901922, 0.00691185, 0.00885085, 0.00720231,
0.00922039, 0.00538102, 0.00740564, 0.00733425, 0.00632164,
0.00971807, 0.00952514, 0.00721798, 0.0054858, 0.00603392,
0.00635746, 0.0074211, 0.00669189, 0.00887068, 0.00738013,
0.00935185, 0.00997891, 0.00609918, 0.00805836, 0.00923751,
0.00972618, 0.00645043, 0.00863521, 0.00507508, 0.00939571,
0.00531969, 0.00866698, 0.00997305, 0.00750595, 0.00604667,
0.00797322, 0.00812075, 0.00834036, 0.00586306, 0.00949356,
0.00810496, 0.00521784, 0.00842021, 0.00598042, 0.0051367,
0.00775477, 0.00906657, 0.00929971, 0.0055176, 0.00831521,
0.00855038, 0.00647258, 0.00985682, 0.00639344, 0.00534991,
0.0075964, 0.00847157, 0.0062233, 0.00669291, 0.00781814,
0.00943339, 0.00873663, 0.00604796, 0.00625889, 0.0076194,
0.00884479, 0.00809381, 0.00750662, 0.00798563, 0.0087803,
0.0076854, 0.00948876, 0.00973534, 0.00957677, 0.00877259,
0.00623161, 0.00692636, 0.0064, 0.0082883, 0.00662111,
0.00877196, 0.00556755, 0.00887682, 0.00792951, 0.00917694,
0.00715438, 0.00812482, 0.00777206, 0.00987836, 0.00877737,
0.00772407, 0.00587016, 0.00952057, 0.00602919, 0.00825022,
0.00968236, 0.0061179, 0.00612962, 0.00925909, 0.00913828,
0.00675852, 0.00632548, 0.00563694, 0.00993968, 0.00917672,
0.00949696, 0.0075684, 0.00557192, 0.0052629, 0.00665291,
0.00960165, 0.00973791, 0.00920582, 0.0057934, 0.00709962,
0.00623121, 0.00602675, 0.00842413, 0.00743056, 0.00662455,
0.00550107, 0.00772382, 0.00673513, 0.00695548, 0.00655254,
0.00693598, 0.0077793, 0.00507072, 0.00923823, 0.0096096,
0.00775265, 0.00634011, 0.0099512, 0.00691597, 0.00846828,
0.00844976, 0.00717155, 0.00599579, 0.0098329, 0.00531845,
0.00742575, 0.00610365, 0.00646987, 0.00914264, 0.00683633,
0.00541674, 0.00598155, 0.00930187, 0.00988514, 0.00633991,
0.00837704, 0.00540599, 0.00861733, 0.00708218, 0.0095908,
0.00655768, 0.00970733, 0.00751624, 0.00674446, 0.0082351,
0.00624873, 0.00614882, 0.00598173, 0.0097995, 0.00746457,
0.00875807, 0.00736996, 0.0079377, 0.00792069, 0.00989943,
0.00834217, 0.00619885, 0.00507599, 0.00609341, 0.0072776,
0.0069671, 0.00906163, 0.00892778, 0.00544548, 0.00976005,
0.00763728, 0.00798202, 0.00702528, 0.0082475, 0.00935663,
0.00836968, 0.00985049, 0.00850561, 0.0091086, 0.0052252,
0.00836349, 0.00827376, 0.00550873, 0.00921194, 0.00807086,
0.00549164, 0.00797234, 0.00739208, 0.00616647, 0.00509878,
0.00682784, 0.00809926, 0.0066464, 0.00653627, 0.00875561,
0.00879312, 0.00859383, 0.00550591, 0.00758083, 0.00778899,
0.00872402, 0.00951589, 0.00684519, 0.00714332, 0.00866384,
0.00831318, 0.00778935, 0.0067507, 0.00597676, 0.00591904,
0.00540792, 0.005406, 0.00922899, 0.00691836, 0.0053037,
0.00948213, 0.00611635, 0.00634062, 0.00597249, 0.00983751,
0.0055627, 0.00861082, 0.00966044, 0.00834001, 0.00929363,
0.00621224, 0.00836964, 0.00850436, 0.00729166, 0.00935273,
0.00847193, 0.00947439, 0.00876602, 0.00760145, 0.00749344,
0.00726864, 0.00510823, 0.00767571, 0.00711487, 0.00578767,
0.00559535, 0.00724676, 0.00519957, 0.0099329, 0.0068906,
0.00691055, 0.00525563, 0.00713336, 0.00507873, 0.00515047,
0.0066955, 0.00910484, 0.00729411, 0.0050742, 0.0058161,
0.00869961, 0.00869147, 0.00877261, 0.00675835, 0.00676138,
0.00901038, 0.00699069, 0.00863596, 0.00790562, 0.00682171,
0.00540003, 0.00558063, 0.00944779, 0.0072617, 0.00997002,
0.00681948, 0.00624977, 0.0067527, 0.00671543, 0.00818678,
0.00506369, 0.00881634, 0.00708207, 0.0071612, 0.00740558,
0.00724606, 0.00748735, 0.00672952, 0.00726673, 0.00702326,
0.00759121, 0.00811635, 0.0062052, 0.00754219, 0.00797311,
0.00508474, 0.00760247, 0.00619647, 0.00702269, 0.00913265,
0.00663118, 0.00741608, 0.00512371, 0.00654375, 0.00819861,
0.00657581, 0.00602899, 0.00645328, 0.00977189, 0.00543401,
0.00731679, 0.00529193, 0.00769329, 0.00573018, 0.00817042,
0.00632199, 0.00845458, 0.00673573, 0.00502084, 0.00647447])
period = 2.0
transit_time = 0.5
duration = 0.16
depth = 0.2
m = np.abs((t-transit_time+0.5*period) % period-0.5*period) < 0.5*duration
y[m] = 1.0 - depth
randn_arr = np.array([
-1.00326528e-02, -8.45644428e-01, 9.11460610e-01, -1.37449688e+00,
-5.47065645e-01, -7.55266106e-05, -1.21166803e-01, -2.00858547e+00,
-9.20646543e-01, 1.68234342e-01, -1.31989156e+00, 1.26642930e+00,
4.95180889e-01, -5.14240391e-01, -2.20292465e-01, 1.86156412e+00,
9.35988451e-01, 3.80219145e-01, -1.41551877e+00, 1.62961132e+00,
1.05240107e+00, -1.48405388e-01, -5.49698069e-01, -1.87903939e-01,
-1.20193668e+00, -4.70785558e-01, 7.63160514e-01, -1.80762128e+00,
-3.14074374e-01, 1.13755973e-01, 1.03568037e-01, -1.17893695e+00,
-1.18215289e+00, 1.08916538e+00, -1.22452909e+00, 1.00865096e+00,
-4.82365315e-01, 1.07979635e+00, -4.21078505e-01, -1.16647132e+00,
8.56554856e-01, -1.73912222e-02, 1.44857659e+00, 8.92200085e-01,
-2.29426629e-01, -4.49667602e-01, 2.33723433e-02, 1.90210018e-01,
-8.81748527e-01, 8.41939573e-01, -3.97363492e-01, -4.23027745e-01,
-5.40688337e-01, 2.31017267e-01, -6.92052602e-01, 1.34970110e-01,
2.76660307e+00, -5.36094601e-02, -4.34004738e-01, -1.66768923e+00,
5.02219248e-02, -1.10923094e+00, -3.75558119e-01, 1.51607594e-01,
-1.73098945e+00, 1.57462752e-01, 3.04515175e-01, -1.29710002e+00,
-3.92309192e-01, -1.83066636e+00, 1.57550094e+00, 3.30563277e-01,
-1.79588501e-01, -1.63435831e-01, 1.13144361e+00, -9.41655519e-02,
3.30816771e-01, 1.51862956e+00, -3.46167148e-01, -1.09263532e+00,
-8.24500575e-01, 1.42866383e+00, 9.14283085e-02, -5.02331288e-01,
9.73644380e-01, 9.97957386e-01, -4.75647768e-01, -9.71936837e-01,
-1.57052860e+00, -1.79388892e+00, -2.64986452e-01, -8.93195947e-01,
1.85847441e+00, 5.85377547e-02, -1.94214954e+00, 1.41872928e+00,
1.61710309e-01, 7.04979480e-01, 6.82034777e-01, 2.96556567e-01,
5.23342630e-01, 2.38760672e-01, -1.10638591e+00, 3.66732198e-01,
1.02390550e+00, -2.10056413e-01, 5.51302218e-01, 4.19589145e-01,
1.81565206e+00, -2.52750301e-01, -2.92004163e-01, -1.16931740e-01,
-1.02391075e-01, -2.27261771e+00, -6.42609841e-01, 2.99885067e-01,
-8.25651467e-03, -7.99339154e-01, -6.64779252e-01, -3.55613128e-01,
-8.01571781e-01, -5.13050610e-01, -5.39390119e-01, 8.95370847e-01,
1.01639127e+00, 9.33585094e-01, 4.26701799e-01, -7.08322484e-01,
9.59830450e-01, -3.14250587e-01, 2.30522083e-02, 1.33822053e+00,
8.39928561e-02, 2.47284030e-01, -1.41277949e+00, 4.87009294e-01,
-9.80006647e-01, 1.01193966e+00, -1.84599177e-01, -2.23616884e+00,
-3.58020103e-01, -2.28034538e-01, 4.85475226e-01, 6.70512391e-01,
-3.27764245e-01, 1.01286819e+00, -3.16705533e+00, -7.13988998e-01,
-1.11236427e+00, -1.25418351e+00, 9.59706371e-01, 8.29170399e-01,
-7.75770020e-01, 1.17805700e+00, 1.01466892e-01, -4.21684101e-01,
-6.92922796e-01, -7.78271726e-01, 4.72774857e-01, 6.50154901e-01,
2.38501212e-01, -2.05021768e+00, 2.96358656e-01, 5.65396564e-01,
-6.69205605e-01, 4.32505429e-02, -1.86388430e+00, -1.22996906e+00,
-3.24235348e-01, -3.09751144e-01, 3.51679372e-01, -1.18692539e+00,
-3.41206065e-01, -4.89779780e-01, 5.28010474e-01, 1.42104277e+00,
1.72092032e+00, -1.56844005e+00, -4.80141918e-02, -1.11252931e+00,
-6.47449515e-02, 4.22919280e-01, 8.14908987e-02, -4.90116988e-02,
1.48303917e+00, 7.20989392e-01, -2.72654462e-01, 2.42113609e-02,
8.70897807e-01, 6.09790506e-01, -4.25076104e-01, -1.77524284e+00,
-1.18465749e+00, 1.45979225e-01, -1.78652685e+00, -1.52394498e-01,
-4.53569176e-01, 9.99252803e-01, -1.31804382e+00, -1.93176898e+00,
-4.19640742e-01, 6.34763132e-01, 1.06991860e+00, -9.09327017e-01,
4.70263748e-01, -1.11143045e+00, -7.48827466e-01, 5.67594726e-01,
7.18150543e-01, -9.99380749e-01, 4.74898323e-01, -1.86849981e+00,
-2.02658907e-01, -1.13424803e+00, -8.07699340e-01, -1.27607735e+00,
5.53626395e-01, 5.53874470e-01, -6.91200445e-01, 3.75582306e-01,
2.61272553e-01, -1.28451754e-01, 2.15817020e+00, -8.40878617e-01,
1.43050907e-02, -3.82387029e-01, -3.71780015e-01, 1.59412004e-01,
-2.94395700e-01, -8.60426760e-01, 1.24227498e-01, 1.18233165e+00,
9.42766380e-01, 2.03044488e-01, -7.35396814e-01, 1.86429600e-01,
1.08464302e+00, 1.19118926e+00, 3.59687060e-01, -3.64357200e-01,
-2.02752749e-01, 7.72045927e-01, 6.86346215e-01, -1.75769961e+00,
6.58617565e-01, 7.11288340e-01, -8.87191425e-01, -7.64981116e-01,
-7.57164098e-01, -6.80262803e-01, -1.41674959e+00, 3.13091930e-01,
-7.85719399e-01, -7.03838361e-02, -4.97568783e-01, 2.55177521e-01,
-1.01061704e+00, 2.45265375e-01, 3.89781016e-01, 8.27594585e-01,
1.96776909e+00, -2.09210177e+00, 3.20314334e-01, -7.09162842e-01,
-1.92505867e+00, 8.41630623e-01, 1.33219988e+00, -3.91627710e-01,
2.10916296e-01, -6.40767402e-02, 4.34197668e-01, 8.80535749e-01,
3.44937336e-01, 3.45769929e-01, 1.25973654e+00, -1.64662222e-01,
9.23064571e-01, -8.22000422e-01, 1.60708495e+00, 7.37825392e-01,
-4.03759534e-01, -2.11454815e+00, -3.10717131e-04, -1.18180941e+00,
2.99634603e-01, 1.45116882e+00, 1.60059793e-01, -1.78012614e-01,
3.42205404e-01, 2.85650196e-01, -2.36286411e+00, 2.40936864e-01,
6.20277356e-01, -2.59341634e-01, 9.78559078e-01, -1.27674575e-01,
7.66998762e-01, 2.27310511e+00, -9.63911290e-02, -1.94213217e+00,
-3.36591724e-01, -1.72589000e+00, 6.11237826e-01, 1.30935097e+00,
6.95879662e-01, 3.20308213e-01, -6.44925458e-01, 1.57564975e+00,
7.53276212e-01, 2.84469557e-01, 2.04860319e-01, 1.11627359e-01,
4.52216424e-01, -6.13327179e-01, 1.52524993e+00, 1.52339753e-01,
6.00054450e-01, -4.33567278e-01, 3.74918534e-01, -2.28175243e+00,
-1.11829888e+00, -3.14131532e-02, -1.32247311e+00, 2.43941406e+00,
-1.66808131e+00, 3.45900749e-01, 1.65577315e+00, 4.81287059e-01,
-3.10227553e-01, -5.52144084e-01, 6.73255489e-01, -8.00270681e-01,
-1.19486110e-01, 6.91198606e-01, -3.07879027e-01, 8.75100102e-02,
-3.04086293e-01, -9.69797604e-01, 1.18915048e+00, 1.39306624e+00,
-3.16699954e-01, -2.65576159e-01, -1.77899339e-01, 5.38803274e-01,
-9.05300265e-01, -8.85253056e-02, 2.62959055e-01, 6.42042149e-01,
-2.78083727e+00, 4.03403210e-01, 3.45846762e-01, 1.00772824e+00,
-5.26264015e-01, -5.18353205e-01, 1.20251659e+00, -1.56315671e+00,
1.62909029e+00, 2.55589446e+00, 4.77451685e-01, 8.14098474e-01,
-1.48958171e+00, -6.94559787e-01, 1.05786255e+00, 3.61815347e-01,
-1.81427463e-01, 2.32869132e-01, 5.06976484e-01, -2.93095701e-01,
-2.89459450e-02, -3.63073748e-02, -1.05227898e+00, 3.23594628e-01,
1.80358591e+00, 1.73196213e+00, -1.47639930e+00, 5.70631220e-01,
6.75503781e-01, -4.10510463e-01, -9.64200035e-01, -1.32081431e+00,
-4.44703779e-01, 3.50009137e-01, -1.58058176e-01, -6.10933088e-01,
-1.24915663e+00, 3.50716258e-01, 1.06654245e+00, -9.26921972e-01,
4.48428964e-01, -1.87947524e+00, -6.57466109e-01, 7.29604120e-01,
-1.11776721e+00, -6.04436725e-01, 1.41796683e+00, -7.32843980e-01,
-8.53944819e-01, 5.75848362e-01, 1.95473356e+00, -2.39669947e-01,
7.68735860e-01, 1.34576918e+00, 3.25552163e-01, -2.69917901e-01,
-8.76326739e-01, -1.42521096e+00, 1.11170175e+00, 1.80957146e-01,
1.33280094e+00, 9.88925316e-01, -6.16970520e-01, -1.18688670e+00,
4.12669583e-01, -6.32506884e-01, 3.76689141e-01, -7.31151938e-01,
-8.61225253e-01, -1.40990810e-01, 9.34100620e-01, 3.06539895e-01,
1.17837515e+00, -1.23356170e+00, -1.05707714e+00, -8.91636992e-02,
2.16570138e+00, 6.74286114e-01, -1.06661274e+00, -7.61404530e-02,
2.20714791e-01, -5.68685746e-01, 6.13274991e-01, -1.56446138e-01,
-2.99330718e-01, 1.26025679e+00, -1.70966090e+00, -9.61805342e-01,
-8.17308981e-01, -8.47681070e-01, -7.28753045e-01, 4.88475958e-01,
1.09653283e+00, 9.16041261e-01, -1.01956213e+00, -1.07417899e-01,
4.52265213e-01, 2.40002952e-01, 1.30574740e+00, -6.75334236e-01,
1.56319421e-01, -3.93230715e-01, 2.51075019e-01, -1.07889691e+00,
-9.28937721e-01, -7.30110860e-01, -5.63669311e-01, 1.54792327e+00,
1.17540191e+00, -2.12649671e-01, 1.72933294e-01, -1.59443602e+00,
-1.79292347e-01, 1.59614713e-01, 1.14568421e+00, 3.26804720e-01,
4.32890059e-01, 2.97762890e-01, 2.69001190e-01, -1.39675918e+00,
-4.16757668e-01, 1.43488680e+00, 8.23896443e-01, 4.94234499e-01,
6.67153092e-02, 6.59441396e-01, -9.44889409e-01, -1.58005956e+00,
-3.82086552e-01, 5.37923058e-01, 1.07829882e-01, 1.01395868e+00,
3.51450517e-01, 4.48421962e-02, 1.32748495e+00, 1.13237578e+00,
-9.80913012e-02, -1.10304986e+00, -9.07361492e-01, -1.61451138e-01,
-3.66811384e-01, 1.65776233e+00, -1.68013415e+00, -6.42577869e-02,
-1.06622649e+00, 1.16801869e-01, 3.82264833e-01, -4.04896974e-01,
5.30481414e-01, -1.98626941e-01, -1.79395613e-01, -4.17888725e-01])
y += dy * randn_arr
return t, y, dy, dict(period=period, transit_time=transit_time,
duration=duration, depth=depth)
def test_32bit_bug():
rand = np.random.default_rng(42)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
y[np.abs((t + 1.0) % 2.0-1) < 0.08] = 1.0 - 0.1
y += 0.01 * rand.standard_normal(len(t))
model = BoxLeastSquares(t, y)
results = model.autopower(0.16)
assert_allclose(results.period[np.argmax(results.power)],
2.000412388152837)
periods = np.linspace(1.9, 2.1, 5)
results = model.power(periods, 0.16)
assert_allclose(
results.power,
[0.01723948, 0.0643028, 0.1338783, 0.09428816, 0.03577543], rtol=1.1e-7)
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_correct_model(data, objective):
t, y, dy, params = data
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 0.1,
np.log(params["period"]) + 0.1, 1000))
results = model.power(periods, params["duration"], objective=objective)
ind = np.argmax(results.power)
for k, v in params.items():
assert_allclose(results[k][ind], v, atol=0.01)
chi = (results.depth[ind]-params["depth"]) / results.depth_err[ind]
assert np.abs(chi) < 1
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
@pytest.mark.parametrize("offset", [False, True])
def test_fast_method(data, objective, offset):
t, y, dy, params = data
if offset:
t = t - params["transit_time"] + params["period"]
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 1,
np.log(params["period"]) + 1, 10))
durations = params["duration"]
results = model.power(periods, durations, objective=objective)
assert_allclose_blsresults(results, model.power(periods, durations,
method="slow",
objective=objective))
def test_input_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * y_unit, dy * u.one)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * u.one, dy * y_unit)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y, dy * y_unit)
model = BoxLeastSquares(t*t_unit, y * u.one, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y * y_unit, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y*y_unit)
assert model.dy is None
def test_period_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
model = BoxLeastSquares(t * t_unit, y * y_unit, dy)
p = model.autoperiod(params["duration"])
assert p.unit == t_unit
p = model.autoperiod(params["duration"] * 24 * u.hour)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
model.autoperiod(params["duration"] * u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], minimum_period=0.5*u.mag)
p = model.autoperiod(params["duration"], maximum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], maximum_period=0.5*u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5,
maximum_period=1.5)
p2 = model.autoperiod(params["duration"], maximum_period=0.5,
minimum_period=1.5)
assert_quantity_allclose(p, p2)
@pytest.mark.parametrize("method", ["fast", "slow"])
@pytest.mark.parametrize("with_err", [True, False])
@pytest.mark.parametrize("t_unit", [None, u.day])
@pytest.mark.parametrize("y_unit", [None, u.mag])
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_results_units(data, method, with_err, t_unit, y_unit, objective):
t, y, dy, params = data
periods = np.linspace(params["period"]-1.0, params["period"]+1.0, 3)
if t_unit is not None:
t = t * t_unit
if y_unit is not None:
y = y * y_unit
dy = dy * y_unit
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(periods, params["duration"], method=method,
objective=objective)
if t_unit is None:
assert not has_units(results.period)
assert not has_units(results.duration)
assert not has_units(results.transit_time)
else:
assert results.period.unit == t_unit
assert results.duration.unit == t_unit
assert results.transit_time.unit == t_unit
if y_unit is None:
assert not has_units(results.power)
assert not has_units(results.depth)
assert not has_units(results.depth_err)
assert not has_units(results.depth_snr)
assert not has_units(results.log_likelihood)
else:
assert results.depth.unit == y_unit
assert results.depth_err.unit == y_unit
assert results.depth_snr.unit == u.one
if dy is None:
assert results.log_likelihood.unit == y_unit * y_unit
if objective == "snr":
assert results.power.unit == u.one
else:
assert results.power.unit == y_unit * y_unit
else:
assert results.log_likelihood.unit == u.one
assert results.power.unit == u.one
def test_autopower(data):
t, y, dy, params = data
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model = BoxLeastSquares(t, y, dy)
period = model.autoperiod(duration)
results1 = model.power(period, duration)
results2 = model.autopower(duration)
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize("with_units", [True, False])
def test_model(data, with_units):
t, y, dy, params = data
# Compute the model using linear regression
A = np.zeros((len(t), 2))
p = params["period"]
dt = np.abs((t-params["transit_time"]+0.5*p) % p-0.5*p)
m_in = dt < 0.5*params["duration"]
A[~m_in, 0] = 1.0
A[m_in, 1] = 1.0
w = np.linalg.solve(np.dot(A.T, A / dy[:, None]**2),
np.dot(A.T, y / dy**2))
model_true = np.dot(A, w)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
model_true = model_true * u.mag
# Compute the model using the periodogram
pgram = BoxLeastSquares(t, y, dy)
model = pgram.model(t, p, params["duration"], params["transit_time"])
# Make sure that the transit mask is consistent with the model
transit_mask = pgram.transit_mask(t, p, params["duration"],
params["transit_time"])
transit_mask0 = (model - model.max()) < 0.0
assert_allclose(transit_mask, transit_mask0)
assert_quantity_allclose(model, model_true)
@pytest.mark.parametrize("shape", [(1,), (2,), (3,), (2, 3)])
def test_shapes(data, shape):
t, y, dy, params = data
duration = params["duration"]
model = BoxLeastSquares(t, y, dy)
period = np.empty(shape)
period.flat = np.linspace(params["period"]-1, params["period"]+1,
period.size)
if len(period.shape) > 1:
with pytest.raises(ValueError):
results = model.power(period, duration)
else:
results = model.power(period, duration)
for k, v in results.items():
if k == "objective":
continue
assert v.shape == shape
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("with_err", [True, False])
def test_compute_stats(data, with_units, with_err):
t, y, dy, params = data
y_unit = 1
if with_units:
y_unit = u.mag
t = t * u.day
y = y * u.mag
dy = dy * u.mag
params["period"] = params["period"] * u.day
params["duration"] = params["duration"] * u.day
params["transit_time"] = params["transit_time"] * u.day
params["depth"] = params["depth"] * u.mag
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(params["period"], params["duration"],
oversample=1000)
stats = model.compute_stats(params["period"], params["duration"],
params["transit_time"])
# Test the calculated transit times
tt = params["period"] * np.arange(int(t.max() / params["period"]) + 1)
tt += params["transit_time"]
assert_quantity_allclose(tt, stats["transit_times"])
# Test that the other parameters are consistent with the periodogram
assert_allclose(stats["per_transit_count"], [9, 7, 7, 7, 8])
assert_quantity_allclose(np.sum(stats["per_transit_log_likelihood"]),
results["log_likelihood"])
assert_quantity_allclose(stats["depth"][0], results["depth"])
# Check the half period result
results_half = model.power(0.5*params["period"], params["duration"],
oversample=1000)
assert_quantity_allclose(stats["depth_half"][0], results_half["depth"])
# Skip the uncertainty tests when the input errors are None
if not with_err:
assert_quantity_allclose(stats["harmonic_amplitude"],
0.029945029964964204 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-0.5875918155223113 * y_unit * y_unit)
return
assert_quantity_allclose(stats["harmonic_amplitude"],
0.033027988742275853 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-12407.505922833765)
assert_quantity_allclose(stats["depth"][1], results["depth_err"])
assert_quantity_allclose(stats["depth_half"][1], results_half["depth_err"])
for f, k in zip((1.0, 1.0, 1.0, 0.0),
("depth", "depth_even", "depth_odd", "depth_phased")):
res = np.abs((stats[k][0]-f*params["depth"]) / stats[k][1])
assert res < 1, f'f={f}, k={k}, res={res}'
def test_negative_times(data):
t, y, dy, params = data
mu = np.mean(t)
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model1 = BoxLeastSquares(t, y, dy)
results1 = model1.autopower(duration)
# Compute the periodogram with offset (negative) times
model2 = BoxLeastSquares(t - mu, y, dy)
results2 = model2.autopower(duration)
# Shift the transit times back into the unshifted coordinates
results2.transit_time = (results2.transit_time + mu) % results2.period
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize('timedelta', [False, True])
def test_absolute_times(data, timedelta):
# Make sure that we handle absolute times correctly. We also check that
# TimeDelta works properly when timedelta is True.
# The example data uses relative times
t, y, dy, params = data
# Add units
t = t * u.day
y = y * u.mag
dy = dy * u.mag
# We now construct a set of absolute times but keeping the rest the same.
start = Time('2019-05-04T12:34:56')
trel = TimeDelta(t) if timedelta else t
t = trel + start
# and we set up two instances of BoxLeastSquares, one with absolute and one
# with relative times.
bls1 = BoxLeastSquares(t, y, dy)
bls2 = BoxLeastSquares(trel, y, dy)
results1 = bls1.autopower(0.16 * u.day)
results2 = bls2.autopower(0.16 * u.day)
# All the results should match except transit time which should be
# absolute instead of relative in the first case.
for key in results1:
if key == 'transit_time':
assert_quantity_allclose((results1[key] - start).to(u.day), results2[key])
elif key == 'objective':
assert results1[key] == results2[key]
else:
assert_allclose(results1[key], results2[key])
# Check that model evaluation works fine
model1 = bls1.model(t, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
model2 = bls2.model(trel, 0.2 * u.day, 0.05 * u.day, TimeDelta(1 * u.day))
assert_quantity_allclose(model1, model2)
# Check model validation
with pytest.raises(TypeError) as exc:
bls1.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls1.model(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('t_model was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.model(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
with pytest.raises(TypeError) as exc:
bls2.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('t_model was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
# Check compute_stats
stats1 = bls1.compute_stats(0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
stats2 = bls2.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
for key in stats1:
if key == 'transit_times':
assert_quantity_allclose((stats1[key] - start).to(u.day), stats2[key], atol=1e-10 * u.day) # noqa: E501
elif key.startswith('depth'):
for value1, value2 in zip(stats1[key], stats2[key]):
assert_quantity_allclose(value1, value2)
else:
assert_allclose(stats1[key], stats2[key])
# Check compute_stats validation
with pytest.raises(TypeError) as exc:
bls1.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.compute_stats(0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
# Check transit_mask
mask1 = bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
mask2 = bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert_equal(mask1, mask2)
# Check transit_mask validation
with pytest.raises(TypeError) as exc:
bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls1.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('t was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
with pytest.raises(TypeError) as exc:
bls2.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('t was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
def test_transit_time_in_range(data):
t, y, dy, params = data
t_ref = 10230.0
t2 = t + t_ref
bls1 = BoxLeastSquares(t, y, dy)
bls2 = BoxLeastSquares(t2, y, dy)
results1 = bls1.autopower(0.16)
results2 = bls2.autopower(0.16)
assert np.allclose(results1.transit_time, results2.transit_time - t_ref)
assert np.all(results1.transit_time >= t.min())
assert np.all(results1.transit_time <= t.max())
assert np.all(results2.transit_time >= t2.min())
assert np.all(results2.transit_time <= t2.max())
|
a9ec4c87cd041e43e1b8c79abdbe42cba080c874ca989c4bd4b1e8aea64630f8 |
import numpy as np
from .mle import design_matrix
def lombscargle_chi2(t, y, dy, frequency, normalization='standard',
fit_mean=True, center_data=True, nterms=1):
"""Lomb-Scargle Periodogram
This implements a chi-squared-based periodogram, which is relatively slow
but useful for validating the faster algorithms in the package.
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
broadcastable to the same shape. None should be `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
nterms : int, optional
Number of Fourier terms in the fit
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. 1982, ApJ 263:835-853
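Examples
--------
A minimal sketch with synthetic data (illustrative values, added for
clarity; not reference outputs from the original source):
>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> t = 10 * rng.random(50)
>>> y = np.sin(2 * np.pi * t) + 0.1 * rng.standard_normal(50)
>>> freq = np.linspace(0.5, 1.5, 100)
>>> power = lombscargle_chi2(t, y, 0.1, freq)
>>> power.shape
(100,)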
"""
if dy is None:
dy = 1
t, y, dy = np.broadcast_arrays(t, y, dy)
frequency = np.asarray(frequency)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
w = dy ** -2.0
w /= w.sum()
# if fit_mean is true, centering the data now simplifies the math below.
if center_data or fit_mean:
yw = (y - np.dot(w, y)) / dy
else:
yw = y / dy
chi2_ref = np.dot(yw, yw)
# compute the unnormalized model chi2 at each frequency
def compute_power(f):
X = design_matrix(t, f, dy=dy, bias=fit_mean, nterms=nterms)
XTX = np.dot(X.T, X)
XTy = np.dot(X.T, yw)
return np.dot(XTy.T, np.linalg.solve(XTX, XTy))
p = np.array([compute_power(f) for f in frequency])
if normalization == 'psd':
p *= 0.5
elif normalization == 'model':
p /= (chi2_ref - p)
elif normalization == 'log':
p = -np.log(1 - p / chi2_ref)
elif normalization == 'standard':
p /= chi2_ref
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
|
49eb977a7c24b30a58ae2b9c402686bd2d25bc0ecfa0a88f03c77d790b1bb629 | """
Main Lomb-Scargle Implementation
The ``lombscargle`` function here is essentially a sophisticated switch
statement for the various implementations available in this submodule
"""
__all__ = ['lombscargle', 'available_methods']
import numpy as np
from .slow_impl import lombscargle_slow
from .fast_impl import lombscargle_fast
from .scipy_impl import lombscargle_scipy
from .chi2_impl import lombscargle_chi2
from .fastchi2_impl import lombscargle_fastchi2
from .cython_impl import lombscargle_cython
METHODS = {'slow': lombscargle_slow,
'fast': lombscargle_fast,
'chi2': lombscargle_chi2,
'scipy': lombscargle_scipy,
'fastchi2': lombscargle_fastchi2,
'cython': lombscargle_cython}
def available_methods():
methods = ['auto', 'slow', 'chi2', 'cython', 'fast', 'fastchi2']
# Scipy required for scipy algorithm (obviously)
try:
import scipy
except ImportError:
pass
else:
methods.append('scipy')
return methods
def _is_regular(frequency):
frequency = np.asarray(frequency)
if frequency.ndim != 1:
return False
elif len(frequency) == 1:
return True
else:
diff = np.diff(frequency)
return np.allclose(diff[0], diff)
def _get_frequency_grid(frequency, assume_regular_frequency=False):
"""Utility to get grid parameters from a frequency array
Parameters
----------
frequency : array-like or `~astropy.units.Quantity` ['frequency']
input frequency grid
assume_regular_frequency : bool (default = False)
if True, then do not check whether frequency is a regular grid
Returns
-------
f0, df, N : scalar
Parameters such that all(frequency == f0 + df * np.arange(N))
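Examples
--------
Illustrative only (grid values chosen to be exactly representable):
>>> import numpy as np
>>> f0, df, N = _get_frequency_grid(np.array([1.0, 1.5, 2.0, 2.5]))
>>> print(f0, df, N)
1.0 0.5 4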
"""
frequency = np.asarray(frequency)
if frequency.ndim != 1:
raise ValueError("frequency grid must be 1 dimensional")
elif len(frequency) == 1:
return frequency[0], frequency[0], 1
elif not (assume_regular_frequency or _is_regular(frequency)):
raise ValueError("frequency must be a regular grid")
return frequency[0], frequency[1] - frequency[0], len(frequency)
def validate_method(method, dy, fit_mean, nterms,
frequency, assume_regular_frequency):
"""
Validate the method argument, and if method='auto'
choose the appropriate method
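Examples
--------
A sketch of the 'auto' selection logic (illustrative):
>>> import numpy as np
>>> validate_method('auto', dy=None, fit_mean=True, nterms=1,
...                 frequency=np.linspace(0, 1, 500),
...                 assume_regular_frequency=True)
'fast'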
"""
methods = available_methods()
prefer_fast = (len(frequency) > 200
and (assume_regular_frequency or _is_regular(frequency)))
prefer_scipy = 'scipy' in methods and dy is None and not fit_mean
# automatically choose the appropriate method
if method == 'auto':
if nterms != 1:
if prefer_fast:
method = 'fastchi2'
else:
method = 'chi2'
elif prefer_fast:
method = 'fast'
elif prefer_scipy:
method = 'scipy'
else:
method = 'cython'
if method not in METHODS:
raise ValueError(f"invalid method: {method}")
return method
def lombscargle(t, y, dy=None,
frequency=None,
method='auto',
assume_regular_frequency=False,
normalization='standard',
fit_mean=True, center_data=True,
method_kwds=None, nterms=1):
"""
Compute the Lomb-Scargle Periodogram with a given method.
Parameters
----------
t : array-like
sequence of observation times
y : array-like
sequence of observations associated with times t
dy : float or array-like, optional
error or sequence of observational errors associated with times t
frequency : array-like
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly spaced.
method : str, optional
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool, optional
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
method_kwds : dict, optional
additional keywords to pass to the lomb-scargle method
nterms : int, optional
number of Fourier terms to use in the periodogram.
Not supported with every method.
Returns
-------
PLS : array-like
Lomb-Scargle power associated with each frequency omega
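Examples
--------
A minimal end-to-end sketch (synthetic data; illustrative only):
>>> import numpy as np
>>> rng = np.random.default_rng(42)
>>> t = np.sort(10 * rng.random(100))
>>> y = np.sin(2 * np.pi * t) + 0.1 * rng.standard_normal(100)
>>> freq = np.linspace(0.1, 2.0, 500)
>>> power = lombscargle(t, y, frequency=freq, method='slow')
>>> power.shape
(500,)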
"""
# frequencies should be one-dimensional arrays
output_shape = frequency.shape
frequency = frequency.ravel()
# we'll need to adjust args and kwds for each method
args = (t, y, dy)
kwds = dict(frequency=frequency,
center_data=center_data,
fit_mean=fit_mean,
normalization=normalization,
nterms=nterms,
**(method_kwds or {}))
method = validate_method(method, dy=dy, fit_mean=fit_mean, nterms=nterms,
frequency=frequency,
assume_regular_frequency=assume_regular_frequency)
# scipy doesn't support dy or fit_mean=True
if method == 'scipy':
if kwds.pop('fit_mean'):
raise ValueError("scipy method does not support fit_mean=True")
if dy is not None:
dy = np.ravel(np.asarray(dy))
if not np.allclose(dy[0], dy):
raise ValueError("scipy method only supports "
"uniform uncertainties dy")
args = (t, y)
# fast methods require frequency expressed as a grid
if method.startswith('fast'):
f0, df, Nf = _get_frequency_grid(kwds.pop('frequency'),
assume_regular_frequency)
kwds.update(f0=f0, df=df, Nf=Nf)
# only chi2 methods support nterms
if not method.endswith('chi2'):
if kwds.pop('nterms') != 1:
raise ValueError("nterms != 1 only supported with 'chi2' "
"or 'fastchi2' methods")
PLS = METHODS[method](*args, **kwds)
return PLS.reshape(output_shape)
|
62848bb455ac328bdafd729b73570dc561bea8f794b3f9f29c1a084d63b87e87 | """Various implementations of the Lomb-Scargle Periodogram"""
from .main import lombscargle, available_methods
from .chi2_impl import lombscargle_chi2
from .scipy_impl import lombscargle_scipy
from .slow_impl import lombscargle_slow
from .fast_impl import lombscargle_fast
from .fastchi2_impl import lombscargle_fastchi2
|
a5ca3b3032782e5c0cb30f02b4dc637ed9819c287934655e2ca179e39e7691c0 |
import numpy as np
from .utils import trig_sum
def lombscargle_fast(t, y, dy, f0, df, Nf,
center_data=True, fit_mean=True,
normalization='standard',
use_fft=True, trig_sum_kwds=None):
"""Fast Lomb-Scargle Periodogram
This implements the Press & Rybicki method [1]_ for fast O[N log(N)]
Lomb-Scargle periodograms.
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
broadcastable to the same shape. None should be `~astropy.units.Quantity`.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
center_data : bool (default=True)
Specify whether to subtract the mean of the data before the fit
fit_mean : bool (default=True)
If True, then compute the floating-mean periodogram; i.e. let the mean
vary with the fit.
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
use_fft : bool (default=True)
If True, then use the Press & Rybicki O[NlogN] algorithm to compute
the result. Otherwise, use a slower O[N^2] algorithm
trig_sum_kwds : dict or None, optional
extra keyword arguments to pass to the ``trig_sum`` utility.
Options are ``oversampling`` and ``Mfft``. See documentation
of ``trig_sum`` for details.
Returns
-------
power : ndarray
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
Notes
-----
Note that the ``use_fft=True`` algorithm is an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may not be useful.
References
----------
.. [1] Press, W.H. and Rybicki, G.B., "Fast algorithm for spectral analysis
of unevenly sampled data". ApJ 338:277, 1989
.. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [3] W. Press et al, Numerical Recipes in C (2002)
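Examples
--------
A minimal sketch on a regular grid f = f0 + df * arange(Nf)
(synthetic data; illustrative only):
>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> t = np.sort(100 * rng.random(200))
>>> y = np.sin(2 * np.pi * 0.1 * t) + 0.5 * rng.standard_normal(200)
>>> power = lombscargle_fast(t, y, 1, f0=0.01, df=0.001, Nf=300)
>>> power.shape
(300,)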
"""
if dy is None:
dy = 1
# Validate and setup input data
t, y, dy = np.broadcast_arrays(t, y, dy)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
# Validate and setup frequency grid
if f0 < 0:
raise ValueError("Frequencies must be positive")
if df <= 0:
raise ValueError("Frequency steps must be positive")
if Nf <= 0:
raise ValueError("Number of frequencies must be positive")
w = dy ** -2.0
w /= w.sum()
# Center the data. Even if we're fitting the offset,
# this step makes the expressions below more succinct
if center_data or fit_mean:
y = y - np.dot(w, y)
# set up arguments to trig_sum
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
# ----------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Sh, Ch = trig_sum(t, w * y, **kwargs)
S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)
if fit_mean:
S, C = trig_sum(t, w, **kwargs)
tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
else:
tan_2omega_tau = S2 / C2
# This is what we're computing below; the straightforward way is slower
# and less stable, so we use trig identities instead
#
# omega_tau = 0.5 * np.arctan(tan_2omega_tau)
# S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
# Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)
S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
# ----------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
YY = np.dot(w, y ** 2)
YC = Ch * Cw + Sh * Sw
YS = Sh * Cw - Ch * Sw
CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
if fit_mean:
CC -= (C * Cw + S * Sw) ** 2
SS -= (S * Cw - C * Sw) ** 2
power = (YC * YC / CC + YS * YS / SS)
if normalization == 'standard':
power /= YY
elif normalization == 'model':
power /= YY - power
elif normalization == 'log':
power = -np.log(1 - power / YY)
elif normalization == 'psd':
power *= 0.5 * (dy ** -2.0).sum()
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return power
|
c8bc6deedebd38ef58563d67324541bd3f62d9d5affea2637f813d6dedc97564 |
import numpy as np
def lombscargle_scipy(t, y, frequency, normalization='standard',
center_data=True):
"""Lomb-Scargle Periodogram
This is a wrapper of ``scipy.signal.lombscargle`` for computation of the
Lomb-Scargle periodogram. This is a relatively fast version of the naive
O[N^2] algorithm, but cannot handle heteroskedastic errors.
Parameters
----------
t, y : array-like
times and values of the data points. These should be broadcastable
to the same shape. Neither should be a `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data.
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. 1982, ApJ 263:835-853
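Examples
--------
A minimal sketch (requires scipy; synthetic data, illustrative only):
>>> import numpy as np
>>> t = np.linspace(0, 10, 100)
>>> y = np.sin(2 * np.pi * t)
>>> power = lombscargle_scipy(t, y, np.linspace(0.5, 1.5, 11))
>>> power.shape
(11,)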
"""
try:
from scipy import signal
except ImportError:
raise ImportError("scipy must be installed to use lombscargle_scipy")
t, y = np.broadcast_arrays(t, y)
# Scipy requires floating-point input
t = np.asarray(t, dtype=float)
y = np.asarray(y, dtype=float)
frequency = np.asarray(frequency, dtype=float)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
if center_data:
y = y - y.mean()
# Note: scipy input accepts angular frequencies
p = signal.lombscargle(t, y, 2 * np.pi * frequency)
if normalization == 'psd':
pass
elif normalization == 'standard':
p *= 2 / (t.size * np.mean(y ** 2))
elif normalization == 'log':
p = -np.log(1 - 2 * p / (t.size * np.mean(y ** 2)))
elif normalization == 'model':
p /= 0.5 * t.size * np.mean(y ** 2) - p
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
|
7d67d1ccb9c0dc9eafa04ea381f1ec7c6bf7b31b29e403b47fb01a412a786506 | from math import factorial
import numpy as np
def bitceil(N):
"""
Find the bit (i.e. power of 2) immediately greater than or equal to N
Note: this works for numbers up to 2 ** 64.
Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
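Example
-------
Illustrative check of the rounding behavior:
>>> [bitceil(N) for N in (1, 2, 3, 7, 8, 9)]
[1, 2, 4, 8, 8, 16]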
"""
return 1 << int(N - 1).bit_length()
def extirpolate(x, y, N=None, M=4):
"""
Extirpolate the values (x, y) onto an integer grid range(N),
using lagrange polynomial weights on the M nearest points.
Parameters
----------
x : array-like
array of abscissas
y : array-like
array of ordinates
N : int
number of integer bins to use. For best performance, N should be larger
than the maximum of x
M : int
number of adjoining points on which to extirpolate.
Returns
-------
yN : ndarray
N extirpolated values associated with range(N)
Example
-------
>>> rng = np.random.default_rng(0)
>>> x = 100 * rng.random(20)
>>> y = np.sin(x)
>>> y_hat = extirpolate(x, y)
>>> x_hat = np.arange(len(y_hat))
>>> f = lambda x: np.sin(x / 10)
>>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
True
Notes
-----
This code is based on the C implementation of spread() presented in
Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
"""
x, y = map(np.ravel, np.broadcast_arrays(x, y))
if N is None:
N = int(np.max(x) + 0.5 * M + 1)
# Now use Lagrange polynomial weights to populate the results array;
# This is an efficient recursive implementation (See Press et al. 1989)
result = np.zeros(N, dtype=y.dtype)
# first take care of the easy cases where x is an integer
integers = (x % 1 == 0)
np.add.at(result, x[integers].astype(int), y[integers])
x, y = x[~integers], y[~integers]
# For each remaining x, find the index describing the extirpolation range.
# i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
# adjusted so that the limits are within the range 0...N
ilo = np.clip((x - M // 2).astype(int), 0, N - M)
numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
denominator = factorial(M - 1)
for j in range(M):
if j > 0:
denominator *= j / (j - M)
ind = ilo + (M - 1 - j)
np.add.at(result, ind, numerator / (denominator * (x - ind)))
return result
def trig_sum(t, h, df, N, f0=0, freq_factor=1,
oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies
This routine computes weighted sine and cosine sums::
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
where f_j = freq_factor * (f0 + j * df) for j in range(N).
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array-like
array of input times
h : array-like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float, optional
The low frequency to use
freq_factor : float, optional
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
oversampling factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the trade-off between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarray
summation arrays for frequencies f = freq_factor * (f0 + df * np.arange(N))
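Example
-------
An illustrative consistency check between the FFT-based approximation
and the brute-force path (the loose tolerance is an assumption about
the extirpolation accuracy, not a guaranteed bound):
>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> t = 10 * rng.random(50)
>>> h = rng.standard_normal(50)
>>> S1, C1 = trig_sum(t, h, df=0.1, N=20, use_fft=True)
>>> S2, C2 = trig_sum(t, h, df=0.1, N=20, use_fft=False)
>>> np.allclose(S1, S2, atol=1e-2) and np.allclose(C1, C2, atol=1e-2)
True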
"""
df *= freq_factor
f0 *= freq_factor
if df <= 0:
raise ValueError("df must be positive")
t, h = map(np.ravel, np.broadcast_arrays(t, h))
if use_fft:
Mfft = int(Mfft)
if Mfft <= 0:
raise ValueError("Mfft must be positive")
# required size of fft is the power of 2 above the oversampling rate
Nfft = bitceil(N * oversampling)
t0 = t.min()
if f0 > 0:
h = h * np.exp(2j * np.pi * f0 * (t - t0))
tnorm = ((t - t0) * Nfft * df) % Nfft
grid = extirpolate(tnorm, h, Nfft, Mfft)
fftgrid = np.fft.ifft(grid)[:N]
if t0 != 0:
f = f0 + df * np.arange(N)
fftgrid *= np.exp(2j * np.pi * t0 * f)
C = Nfft * fftgrid.real
S = Nfft * fftgrid.imag
else:
f = f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
return S, C
|
f741bb90fbeefc997e89afb9bd8369f04e4bfc5fc47a95a1fed611da71e52839 |
import numpy as np
def lombscargle_slow(t, y, dy, frequency, normalization='standard',
fit_mean=True, center_data=True):
"""Lomb-Scargle Periodogram
This is a pure-python implementation of the original Lomb-Scargle formalism
(e.g. [1]_, [2]_), with the addition of the floating mean (e.g. [3]_)
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
broadcastable to the same shape. None should be `~astropy.units.Quantity`.
frequency : array-like
frequencies (not angular frequencies) at which to calculate periodogram
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] W. Press et al, Numerical Recipes in C (2002)
.. [2] Scargle, J.D. 1982, ApJ 263:835-853
.. [3] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
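Examples
--------
A minimal sketch (synthetic data; illustrative only):
>>> import numpy as np
>>> rng = np.random.default_rng(1)
>>> t = 10 * rng.random(40)
>>> y = np.sin(t)
>>> power = lombscargle_slow(t, y, None, np.linspace(0.05, 0.3, 8))
>>> power.shape
(8,)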
"""
if dy is None:
dy = 1
t, y, dy = np.broadcast_arrays(t, y, dy)
frequency = np.asarray(frequency)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if frequency.ndim != 1:
raise ValueError("frequency should be one-dimensional")
w = dy ** -2.0
w /= w.sum()
# if fit_mean is true, centering the data now simplifies the math below.
if fit_mean or center_data:
y = y - np.dot(w, y)
omega = 2 * np.pi * frequency
omega = omega.ravel()[np.newaxis, :]
# make following arrays into column vectors
t, y, dy, w = map(lambda x: x[:, np.newaxis], (t, y, dy, w))
sin_omega_t = np.sin(omega * t)
cos_omega_t = np.cos(omega * t)
# compute time-shift tau
# S2 = np.dot(w.T, np.sin(2 * omega * t))
S2 = 2 * np.dot(w.T, sin_omega_t * cos_omega_t)
# C2 = np.dot(w.T, np.cos(2 * omega * t))
C2 = 2 * np.dot(w.T, 0.5 - sin_omega_t ** 2)
if fit_mean:
S = np.dot(w.T, sin_omega_t)
C = np.dot(w.T, cos_omega_t)
S2 -= (2 * S * C)
C2 -= (C * C - S * S)
# compute components needed for the fit
omega_t_tau = omega * t - 0.5 * np.arctan2(S2, C2)
sin_omega_t_tau = np.sin(omega_t_tau)
cos_omega_t_tau = np.cos(omega_t_tau)
Y = np.dot(w.T, y)
wy = w * y
YCtau = np.dot(wy.T, cos_omega_t_tau)
YStau = np.dot(wy.T, sin_omega_t_tau)
CCtau = np.dot(w.T, cos_omega_t_tau * cos_omega_t_tau)
SStau = np.dot(w.T, sin_omega_t_tau * sin_omega_t_tau)
if fit_mean:
Ctau = np.dot(w.T, cos_omega_t_tau)
Stau = np.dot(w.T, sin_omega_t_tau)
YCtau -= Y * Ctau
YStau -= Y * Stau
CCtau -= Ctau * Ctau
SStau -= Stau * Stau
p = (YCtau * YCtau / CCtau + YStau * YStau / SStau)
YY = np.dot(w.T, y * y)
if normalization == 'standard':
p /= YY
elif normalization == 'model':
p /= YY - p
elif normalization == 'log':
p = -np.log(1 - p / YY)
elif normalization == 'psd':
p *= 0.5 * (dy ** -2.0).sum()
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p.ravel()
|
303781ef1c7e09c552c66a6e7fb6190359355c9ad217858fe51df4afb0531cf6 |
import numpy as np
from .utils import trig_sum
def lombscargle_fastchi2(t, y, dy, f0, df, Nf, normalization='standard',
fit_mean=True, center_data=True, nterms=1,
use_fft=True, trig_sum_kwds=None):
"""Lomb-Scargle Periodogram
This implements a fast chi-squared periodogram using the algorithm
outlined in [4]_. For ``nterms=1`` the result is identical to the
standard Lomb-Scargle periodogram. The advantage of this algorithm is the
ability to compute multiterm periodograms relatively quickly.
Parameters
----------
t, y, dy : array-like
times, values, and errors of the data points. These should be
broadcastable to the same shape. None should be `~astropy.units.Quantity`.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
normalization : str, optional
Normalization to use for the periodogram.
Options are 'standard', 'model', 'log', or 'psd'.
fit_mean : bool, optional
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool, optional
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if ``fit_mean = False``
nterms : int, optional
Number of Fourier terms in the fit
Returns
-------
power : array-like
Lomb-Scargle power associated with each frequency.
Units of the result depend on the normalization.
References
----------
.. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [2] W. Press et al, Numerical Recipes in C (2002)
.. [3] Scargle, J.D. ApJ 263:835-853 (1982)
.. [4] Palmer, J. ApJ 695:496-502 (2009)
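Examples
--------
A minimal multiterm sketch (synthetic data; illustrative only):
>>> import numpy as np
>>> rng = np.random.default_rng(0)
>>> t = np.sort(50 * rng.random(100))
>>> y = np.sin(2 * np.pi * 0.2 * t) + 0.3 * rng.standard_normal(100)
>>> power = lombscargle_fastchi2(t, y, 1.0, f0=0.01, df=0.01, Nf=100,
...                              nterms=2)
>>> power.shape
(100,)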
"""
if nterms == 0 and not fit_mean:
raise ValueError("Cannot have nterms = 0 without fitting bias")
if dy is None:
dy = 1
# Validate and setup input data
t, y, dy = np.broadcast_arrays(t, y, dy)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
# Validate and setup frequency grid
if f0 < 0:
raise ValueError("Frequencies must be positive")
if df <= 0:
raise ValueError("Frequency steps must be positive")
if Nf <= 0:
raise ValueError("Number of frequencies must be positive")
w = dy ** -2.0
ws = np.sum(w)
# if fit_mean is true, centering the data now simplifies the math below.
if center_data or fit_mean:
y = y - np.dot(w, y) / ws
yw = y / dy
chi2_ref = np.dot(yw, yw)
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
# Here we build-up the matrices XTX and XTy using pre-computed
# sums. The relevant identities are
# 2 sin(mx) sin(nx) = cos(m-n)x - cos(m+n)x
# 2 cos(mx) cos(nx) = cos(m-n)x + cos(m+n)x
# 2 sin(mx) cos(nx) = sin(m-n)x + sin(m+n)x
yws = np.sum(y * w)
SCw = [(np.zeros(Nf), ws * np.ones(Nf))]
SCw.extend([trig_sum(t, w, freq_factor=i, **kwargs)
for i in range(1, 2 * nterms + 1)])
Sw, Cw = zip(*SCw)
SCyw = [(np.zeros(Nf), yws * np.ones(Nf))]
SCyw.extend([trig_sum(t, w * y, freq_factor=i, **kwargs)
for i in range(1, nterms + 1)])
Syw, Cyw = zip(*SCyw)
# Now create an indexing scheme so we can quickly
# build-up matrices at each frequency
order = [('C', 0)] if fit_mean else []
order.extend(sum([[('S', i), ('C', i)]
for i in range(1, nterms + 1)], []))
funcs = dict(S=lambda m, i: Syw[m][i],
C=lambda m, i: Cyw[m][i],
SS=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] - Cw[m + n][i]),
CC=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] + Cw[m + n][i]),
SC=lambda m, n, i: 0.5 * (np.sign(m - n) * Sw[abs(m - n)][i]
+ Sw[m + n][i]),
CS=lambda m, n, i: 0.5 * (np.sign(n - m) * Sw[abs(n - m)][i]
+ Sw[n + m][i]))
def compute_power(i):
XTX = np.array([[funcs[A[0] + B[0]](A[1], B[1], i)
for A in order]
for B in order])
XTy = np.array([funcs[A[0]](A[1], i) for A in order])
return np.dot(XTy.T, np.linalg.solve(XTX, XTy))
p = np.array([compute_power(i) for i in range(Nf)])
if normalization == 'psd':
p *= 0.5
elif normalization == 'standard':
p /= chi2_ref
elif normalization == 'log':
p = -np.log(1 - p / chi2_ref)
elif normalization == 'model':
p /= chi2_ref - p
else:
raise ValueError(f"normalization='{normalization}' not recognized")
return p
|
e6f9d6e8b8ff4757f20ca6bdb39b6f95447f534ee2c384ca80129545eae700b7 |
import numpy as np
def design_matrix(t, frequency, dy=None, bias=True, nterms=1):
"""Compute the Lomb-Scargle design matrix at the given frequency
This is the matrix X such that the periodic model at the given frequency
can be expressed :math:`\\hat{y} = X \\theta`.
Parameters
----------
t : array-like, shape=(n_times,)
times at which to compute the design matrix
frequency : float
frequency for the design matrix
dy : float or array-like, optional
data uncertainties: should be broadcastable with `t`
bias : bool (default=True)
If true, include a bias column in the matrix
nterms : int (default=1)
Number of Fourier terms to include in the model
Returns
-------
X : ndarray, shape=(n_times, n_parameters)
The design matrix, where n_parameters = bool(bias) + 2 * nterms
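Examples
--------
Illustrative only: with ``bias=True`` and ``nterms=1`` the columns are
[1, sin(2 pi f t), cos(2 pi f t)]:
>>> import numpy as np
>>> X = design_matrix(np.array([0.0, 0.25, 0.5]), 1.0)
>>> X.shape
(3, 3)
>>> np.round(X[0], 3)
array([1., 0., 1.])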
"""
t = np.asarray(t)
frequency = np.asarray(frequency)
if t.ndim != 1:
raise ValueError("t should be one dimensional")
if frequency.ndim != 0:
raise ValueError("frequency must be a scalar")
if nterms == 0 and not bias:
raise ValueError("cannot have nterms=0 and no bias")
if bias:
cols = [np.ones_like(t)]
else:
cols = []
for i in range(1, nterms + 1):
cols.append(np.sin(2 * np.pi * i * frequency * t))
cols.append(np.cos(2 * np.pi * i * frequency * t))
XT = np.vstack(cols)
if dy is not None:
XT /= dy
return np.transpose(XT)
def periodic_fit(t, y, dy, frequency, t_fit,
center_data=True, fit_mean=True, nterms=1):
"""Compute the Lomb-Scargle model fit at a given frequency
Parameters
----------
t, y, dy : float or array-like
The times, observations, and uncertainties to fit
frequency : float
The frequency at which to compute the model
t_fit : float or array-like
The times at which the fit should be computed
center_data : bool (default=True)
If True, center the input data before applying the fit
fit_mean : bool (default=True)
If True, include the bias as part of the model
nterms : int (default=1)
The number of Fourier terms to include in the fit
Returns
-------
y_fit : ndarray
The model fit evaluated at each value of t_fit
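Examples
--------
A sanity-check sketch: data generated exactly by a single-term model
is reproduced by the fit (illustrative only):
>>> import numpy as np
>>> t = np.linspace(0, 9, 10)
>>> y = np.sin(2 * np.pi * t / 5)
>>> y_fit = periodic_fit(t, y, dy=None, frequency=0.2, t_fit=t)
>>> np.allclose(y_fit, y)
True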
"""
t, y, frequency = map(np.asarray, (t, y, frequency))
if dy is None:
dy = np.ones_like(y)
else:
dy = np.asarray(dy)
t_fit = np.asarray(t_fit)
if t.ndim != 1:
raise ValueError("t, y, dy should be one dimensional")
if t_fit.ndim != 1:
raise ValueError("t_fit should be one dimensional")
if frequency.ndim != 0:
raise ValueError("frequency should be a scalar")
if center_data:
w = dy ** -2.0
y_mean = np.dot(y, w) / w.sum()
y = (y - y_mean)
else:
y_mean = 0
X = design_matrix(t, frequency, dy=dy, bias=fit_mean, nterms=nterms)
theta_MLE = np.linalg.solve(np.dot(X.T, X),
np.dot(X.T, y / dy))
X_fit = design_matrix(t_fit, frequency, bias=fit_mean, nterms=nterms)
return y_mean + np.dot(X_fit, theta_MLE)
|