hash stringlengths 64..64 | content stringlengths 0..1.51M |
---|---|
eaeeec226210e8b1f8a0a1ecbf9253b1b05b1701cce94c7015386dbb821ab1ef | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
from astropy import __minimum_asdf_version__
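# Skip this module entirely if asdf is missing or older than astropy's minimum supported version.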
asdf = pytest.importorskip('asdf', minversion=__minimum_asdf_version__)
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units
from astropy.coordinates import ICRS, FK5, Longitude, Latitude, Angle
from astropy.io.misc.asdf.extension import AstropyExtension
def test_hcrs_basic(tmpdir):
ra = Longitude(25, unit=units.deg)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_basic(tmpdir):
wrap_angle = Angle(1.5, unit=units.rad)
ra = Longitude(25, unit=units.deg, wrap_angle=wrap_angle)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_nodata(tmpdir):
tree = {'coord': ICRS()}
assert_roundtrip_tree(tree, tmpdir)
def test_icrs_compound(tmpdir):
icrs = ICRS(ra=[0, 1, 2]*units.deg, dec=[3, 4, 5]*units.deg)
tree = {'coord': icrs}
assert_roundtrip_tree(tree, tmpdir)
def test_fk5_time(tmpdir):
tree = {'coord': FK5(equinox="2011-01-01T00:00:00")}
assert_roundtrip_tree(tree, tmpdir)
|
7a3bdf74df05acb0e54be3da94a5b04629f901e4fe317dddd37901814bd54894 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units as u
from astropy.coordinates.angles import Longitude, Latitude
from astropy.coordinates.earth import EarthLocation, ELLIPSOIDS
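# Geodetic test positions (longitude, latitude, height) spanning the longitude wrap angle and both poles.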
@pytest.fixture
def position():
lon = Longitude([0., 45., 90., 135., 180., -180, -90, -45], u.deg,
wrap_angle=180*u.deg)
lat = Latitude([+0., 30., 60., +90., -90., -60., -30., 0.], u.deg)
h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11., -.1], u.m)
return lon, lat, h
def test_earthlocation_quantity(tmpdir):
location = EarthLocation(lat=34.4900*u.deg, lon=-104.221800*u.deg,
height=40*u.km)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
def test_earthlocation(position, tmpdir):
x, y, z = EarthLocation.from_geodetic(*position).to_geocentric()
geocentric = EarthLocation(x, y, z)
tree = dict(location=geocentric)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize('ellipsoid', ELLIPSOIDS)
def test_earthlocation_geodetic(position, ellipsoid, tmpdir):
location = EarthLocation.from_geodetic(*position, ellipsoid=ellipsoid)
tree = dict(location=location)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.remote_data
def test_earthlocation_site(tmpdir):
keck = EarthLocation.of_site('Keck Observatory')
tree = dict(location=keck)
assert_roundtrip_tree(tree, tmpdir)
|
367474b6e232c5daa0b12b17022b9af5d9efed9ae2d354bc202ee475829f8345 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord, ICRS, Galactic, FK4, FK5, Longitude
asdf = pytest.importorskip('asdf')
from asdf.tests.helpers import assert_roundtrip_tree
# These tests are cribbed directly from the Examples section of
# http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html
def test_scalar_skycoord(tmpdir):
c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_vector_skycoord(tmpdir):
c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_fk4(tmpdir):
coords = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
c = SkyCoord(coords, frame=FK4, unit=(u.deg, u.hourangle), obstime="J1992.21")
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.parametrize('coord', [
SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic), # Units from string
SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
])
def test_skycoord_galactic(coord, tmpdir):
tree = dict(coord=coord)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_ra_dec(tmpdir):
ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
c = SkyCoord(ra, dec, frame='icrs')
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_override_defaults(tmpdir):
c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_cartesian(tmpdir):
c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
representation_type='cartesian')
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
def test_skycoord_vector_frames(tmpdir):
c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason='Velocities are not properly serialized yet')
def test_skycoord_radial_velocity(tmpdir):
c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.xfail(reason='Velocities are not properly serialized yet')
def test_skycoord_proper_motion(tmpdir):
c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr,
pm_dec=1*u.mas/u.yr)
tree = dict(coord=c)
assert_roundtrip_tree(tree, tmpdir)
@pytest.mark.skip(reason='Apparent loss of precision during serialization')
def test_skycoord_extra_attribute(tmpdir):
sc = SkyCoord(10*u.deg, 20*u.deg, equinox="2011-01-01T00:00", frame="fk4")
tree = dict(coord=sc.transform_to("icrs"))
def check_asdf(asdffile):
assert hasattr(asdffile['coord'], 'equinox')
assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check_asdf)
def test_skycoord_2d_obstime(tmpdir):
sc = SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
obstime=['J1990.5', 'J1991.5'])
tree = dict(coord=sc)
assert_roundtrip_tree(tree, tmpdir)
|
f15ab23ecdf72f94ca08ce2d7dc48bd536479d1e2f70eb4f3af100c03c3a98fd | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
from astropy import __minimum_asdf_version__
asdf = pytest.importorskip('asdf', minversion=__minimum_asdf_version__)
import astropy.units as u
from asdf.tests.helpers import assert_roundtrip_tree
from astropy.coordinates import Longitude, Latitude, Angle
from astropy.io.misc.asdf.extension import AstropyExtension
def test_angle(tmpdir):
tree = {'angle': Angle(100, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_latitude(tmpdir):
tree = {'angle': Latitude(10, u.deg)}
assert_roundtrip_tree(tree, tmpdir)
def test_longitude(tmpdir):
tree = {'angle': Longitude(-100, u.deg, wrap_angle=180*u.deg)}
assert_roundtrip_tree(tree, tmpdir)
|
dd88b4633012d96651c2f844d03c7ce49dcdf79c1844cdc7aa1f93fe8876cc1f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
from numpy.random import random, randint
import astropy.units as u
from astropy.coordinates import Angle
import astropy.coordinates.representation as r
from astropy import __minimum_asdf_version__
asdf = pytest.importorskip('asdf', minversion=__minimum_asdf_version__)
from asdf.tests.helpers import assert_roundtrip_tree
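# Parametrize over every concrete representation class exported by
# astropy.coordinates.representation; each component gets a randomly sized
# array in an appropriate unit (degrees for Angle attributes, km otherwise).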
@pytest.fixture(params=filter(lambda x: "Base" not in x, r.__all__))
def representation(request):
rep = getattr(r, request.param)
angle_unit = u.deg
other_unit = u.km
kwargs = {}
arr_len = randint(1, 100)
for aname, atype in rep.attr_classes.items():
if issubclass(atype, Angle):
value = ([random()] * arr_len) * angle_unit
else:
value = ([random()] * arr_len) * other_unit
kwargs[aname] = value
return rep(**kwargs)
def test_representations(tmpdir, representation):
tree = {'representation': representation}
assert_roundtrip_tree(tree, tmpdir)
|
6e2c2e404224d1e43e86d4095f0c66bb81c14758b87306149a32d49da1be5637 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from astropy import units as u
from astropy import __minimum_asdf_version__
asdf = pytest.importorskip('asdf', minversion=__minimum_asdf_version__)
from asdf.tests import helpers
# TODO: Implement defunit
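# The serialized unit string should be equivalent to the Rydberg, both on the
# initial read and after re-writing the file.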
def test_unit():
yaml = """
unit: !unit/unit-1.0.0 "2.1798721 10-18kg m2 s-2"
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
|
568d1428ee497b99e9007ae3c7cc0cbdf7c48feb9d74fef57ca90e5ab0f6b15f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from astropy import units
from astropy import __minimum_asdf_version__
asdf = pytest.importorskip('asdf', minversion=__minimum_asdf_version__)
from asdf.tests import helpers
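# Read the quantity from inline YAML, then write it back out and re-read it to
# verify a full round trip.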
def roundtrip_quantity(yaml, quantity):
buff = helpers.yaml_to_asdf(yaml)
with asdf.open(buff) as ff:
assert (ff.tree['quantity'] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.open(buff2) as ff:
assert (ff.tree['quantity'] == quantity).all()
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = """
quantity: !unit/quantity-1.1.0
value: {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x*2.3081 for x in range(10)]
testunit = units.ampere
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
testval = [[1,2,3],[4,5,6]]
testunit = units.km
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{}
unit: {}
""".format(testval, testunit)
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
|
d751cdce980422500df4f5aedaa4c16eb1fb4aa4ef42325f58e1cc36ebb73327 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
from astropy import units as u
from astropy.units import equivalencies as eq
from astropy.cosmology import Planck15
asdf = pytest.importorskip('asdf', minversion='2.3.0.dev0')
from asdf.tests import helpers
def get_equivalencies():
"""
Return a list of example equivalencies for testing serialization.
"""
return [eq.plate_scale(.3 * u.deg/u.mm), eq.pixel_scale(.5 * u.deg/u.pix),
eq.spectral_density(3500 * u.Angstrom, factor=2),
eq.spectral_density(3500 * u.Angstrom), eq.spectral(),
eq.brightness_temperature(500 * u.GHz),
eq.brightness_temperature(500 * u.GHz, beam_area=23 * u.sr),
eq.with_H0(), eq.temperature_energy(), eq.temperature(),
eq.thermodynamic_temperature(300 * u.Hz),
eq.thermodynamic_temperature(140 * u.GHz, Planck15.Tcmb0),
eq.beam_angular_area(3 * u.sr), eq.mass_energy(),
eq.molar_mass_amu(), eq.doppler_relativistic(2 * u.m),
eq.doppler_optical(2 * u.nm), eq.doppler_radio(2 * u.Hz),
eq.parallax(), eq.logarithmic(), eq.dimensionless_angles(),
eq.spectral() + eq.temperature(),
(eq.spectral_density(35 * u.nm) +
eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr)),
(eq.spectral() + eq.spectral_density(35 * u.nm) +
eq.brightness_temperature(5 * u.Hz, beam_area=2 * u.sr))
]
@pytest.mark.parametrize('equiv', get_equivalencies())
def test_equivalencies(tmpdir, equiv):
tree = {'equiv': equiv}
helpers.assert_roundtrip_tree(tree, tmpdir)
|
527fd8f57d12fb7031069666b030c1a01e8f33d5cdbddb031afc9c01e7f9b100 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from astropy.tests.helper import catch_warnings
from astropy.io.votable import converters
from astropy.io.votable import exceptions
from astropy.io.votable import tree
def test_reraise():
def fail():
raise RuntimeError("This failed")
try:
try:
fail()
except RuntimeError as e:
exceptions.vo_reraise(e, additional="From here")
except RuntimeError as e:
assert "From here" in str(e)
else:
assert False
def test_parse_vowarning():
config = {'pedantic': True,
'filename': 'foo.xml'}
pos = (42, 64)
with catch_warnings(exceptions.W47) as w:
field = tree.Field(
None, name='c', datatype='char',
config=config, pos=pos)
c = converters.get_converter(field, config=config, pos=pos)
parts = exceptions.parse_vowarning(str(w[0].message))
match = {
'number': 47,
'is_exception': False,
'nchar': 64,
'warning': 'W47',
'is_something': True,
'message': 'Missing arraysize indicates length 1',
'doc_url': 'io/votable/api_exceptions.html#w47',
'nline': 42,
'is_warning': True
}
assert parts == match
|
35d13c41e3d9a40e8d665ffa3644bd6181d24396a0f6e91e41b6a98545286212 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A set of tests for the util.py module
"""
# LOCAL
from astropy.io.votable import util
from astropy.tests.helper import raises
def test_range_list():
assert util.coerce_range_list_param((5,)) == ("5.0", 1)
def test_range_list2():
assert util.coerce_range_list_param((5e-7, 8e-7)) == ("5e-07,8e-07", 2)
def test_range_list3():
assert util.coerce_range_list_param((5e-7, 8e-7, "FOO")) == (
"5e-07,8e-07;FOO", 3)
@raises(ValueError)
def test_range_list4a():
util.coerce_range_list_param(
(5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO"))
def test_range_list4():
assert (util.coerce_range_list_param(
(5e-7, (None, 8e-7), (4, None), (4, 5), "J", "FOO"), numeric=False) ==
("5e-07,/8e-07,4/,4/5,J;FOO", 6))
@raises(ValueError)
def test_range_list5():
util.coerce_range_list_param(('FOO', ))
@raises(ValueError)
def test_range_list6():
print(util.coerce_range_list_param((5, 'FOO'), util.stc_reference_frames))
def test_range_list7():
assert util.coerce_range_list_param(("J",), numeric=False) == ("J", 1)
def test_range_list8():
for s in ["5.0",
"5e-07,8e-07",
"5e-07,8e-07;FOO",
"5e-07,/8e-07,4.0/,4.0/5.0;FOO",
"J"]:
assert util.coerce_range_list_param(s, numeric=False)[0] == s
@raises(ValueError)
def test_range_list9a():
util.coerce_range_list_param("52,-27.8;FOO", util.stc_reference_frames)
def test_range_list9():
assert util.coerce_range_list_param(
"52,-27.8;GALACTIC", util.stc_reference_frames)
|
4b9de501488314c791ec32d65ea7523d392ccdea5f261eee505264a9c10e72f3 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a set of regression tests for vo.
"""
# STDLIB
import difflib
import io
import pathlib
import sys
import gzip
from unittest import mock
# THIRD-PARTY
import pytest
import numpy as np
from numpy.testing import assert_array_equal
# LOCAL
from astropy.io.votable.table import parse, parse_single_table, validate
from astropy.io.votable import tree
from astropy.io.votable.exceptions import VOTableSpecError, VOWarning
from astropy.io.votable.xmlutil import validate_schema
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_filenames
from astropy.tests.helper import raises, catch_warnings
# Determine the kind of float formatting in this build of Python
if hasattr(sys, 'float_repr_style'):
legacy_float_repr = (sys.float_repr_style == 'legacy')
else:
legacy_float_repr = sys.platform.startswith('win')
def assert_validate_schema(filename, version):
if sys.platform.startswith('win'):
return
try:
rc, stdout, stderr = validate_schema(filename, version)
except OSError:
# If xmllint is not installed, we want the test to pass anyway
return
assert rc == 0, 'File did not validate against VOTable schema'
def test_parse_single_table():
table = parse_single_table(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
assert isinstance(table, tree.Table)
assert len(table.array) == 5
def test_parse_single_table2():
table2 = parse_single_table(
get_pkg_data_filename('data/regression.xml'),
table_number=1,
pedantic=False)
assert isinstance(table2, tree.Table)
assert len(table2.array) == 1
assert len(table2.array.dtype.names) == 28
@raises(IndexError)
def test_parse_single_table3():
parse_single_table(
get_pkg_data_filename('data/regression.xml'),
table_number=3, pedantic=False)
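# Round-trip the regression table through tabledata and binary/binary2 output,
# validating against the VOTable schema and comparing with stored truth files.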
def _test_regression(tmpdir, _python_based=False, binary_mode=1):
# Read the VOTABLE
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False,
_debug_python_based_parser=_python_based)
table = votable.get_first_table()
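# Expected dtype for every column of the regression table (little-endian;
# byte order is swapped below on big-endian builds).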
dtypes = [
((str('string test'), str('string_test')), str('|O8')),
((str('fixed string test'), str('string_test_2')), str('|S10')),
(str('unicode_test'), str('|O8')),
((str('unicode test'), str('fixed_unicode_test')), str('<U10')),
((str('string array test'), str('string_array_test')), str('|S4')),
(str('unsignedByte'), str('|u1')),
(str('short'), str('<i2')),
(str('int'), str('<i4')),
(str('long'), str('<i8')),
(str('double'), str('<f8')),
(str('float'), str('<f4')),
(str('array'), str('|O8')),
(str('bit'), str('|b1')),
(str('bitarray'), str('|b1'), (3, 2)),
(str('bitvararray'), str('|O8')),
(str('bitvararray2'), str('|O8')),
(str('floatComplex'), str('<c8')),
(str('doubleComplex'), str('<c16')),
(str('doubleComplexArray'), str('|O8')),
(str('doubleComplexArrayFixed'), str('<c16'), (2,)),
(str('boolean'), str('|b1')),
(str('booleanArray'), str('|b1'), (4,)),
(str('nulls'), str('<i4')),
(str('nulls_array'), str('<i4'), (2, 2)),
(str('precision1'), str('<f8')),
(str('precision2'), str('<f8')),
(str('doublearray'), str('|O8')),
(str('bitarray2'), str('|b1'), (16,))
]
if sys.byteorder == 'big':
new_dtypes = []
for dtype in dtypes:
dtype = list(dtype)
dtype[1] = dtype[1].replace(str('<'), str('>'))
new_dtypes.append(tuple(dtype))
dtypes = new_dtypes
assert table.array.dtype == dtypes
votable.to_xml(str(tmpdir.join("regression.tabledata.xml")),
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.tabledata.xml")),
votable.version)
if binary_mode == 1:
votable.get_first_table().format = 'binary'
votable.version = '1.1'
elif binary_mode == 2:
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
votable.version = '1.3'
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "wb") as fd:
votable.to_xml(fd, _debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.binary.xml")),
votable.version)
# Also try passing a file handle
with open(str(tmpdir.join("regression.binary.xml")), "rb") as fd:
votable2 = parse(fd, pedantic=False,
_debug_python_based_parser=_python_based)
votable2.get_first_table().format = 'tabledata'
votable2.to_xml(str(tmpdir.join("regression.bin.tabledata.xml")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
assert_validate_schema(str(tmpdir.join("regression.bin.tabledata.xml")),
votable.version)
with open(
get_pkg_data_filename(
'data/regression.bin.tabledata.truth.{0}.xml'.format(
votable.version)),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
with open(str(tmpdir.join("regression.bin.tabledata.xml")),
'rt', encoding='utf-8') as fd:
output = fd.readlines()
# If the lines happen to be different, print a diff
# This is convenient for debugging
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
# Test implicit gzip saving
votable2.to_xml(
str(tmpdir.join("regression.bin.tabledata.xml.gz")),
_astropy_version="testing",
_debug_python_based_parser=_python_based)
with gzip.GzipFile(
str(tmpdir.join("regression.bin.tabledata.xml.gz")), 'rb') as gzfd:
output = gzfd.readlines()
output = [x.decode('utf-8').rstrip() for x in output]
truth = [x.rstrip() for x in truth]
assert truth == output
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression(tmpdir):
_test_regression(tmpdir, False)
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression_python_based_parser(tmpdir):
_test_regression(tmpdir, True)
@pytest.mark.xfail(str('legacy_float_repr'))
def test_regression_binary2(tmpdir):
_test_regression(tmpdir, False, 2)
class TestFixups:
def setup_class(self):
self.table = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False).get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_implicit_id(self):
assert_array_equal(self.array['string_test_2'],
self.array['fixed string test'])
class TestReferences:
def setup_class(self):
self.votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_fieldref(self):
fieldref = self.table.groups[1].entries[0]
assert isinstance(fieldref, tree.FieldRef)
assert fieldref.get_ref().name == 'boolean'
assert fieldref.get_ref().datatype == 'boolean'
def test_paramref(self):
paramref = self.table.groups[0].entries[0]
assert isinstance(paramref, tree.ParamRef)
assert paramref.get_ref().name == 'INPUT'
assert paramref.get_ref().datatype == 'float'
def test_iter_fields_and_params_on_a_group(self):
assert len(list(self.table.groups[1].iter_fields_and_params())) == 2
def test_iter_groups_on_a_group(self):
assert len(list(self.table.groups[1].iter_groups())) == 1
def test_iter_groups(self):
# Because of the ref'd table, there are more logical groups
# than actually exist in the file
assert len(list(self.votable.iter_groups())) == 9
def test_ref_table(self):
tables = list(self.votable.iter_tables())
for x, y in zip(tables[0].array.data[0], tables[1].array.data[0]):
assert_array_equal(x, y)
def test_iter_coosys(self):
assert len(list(self.votable.iter_coosys())) == 1
def test_select_columns_by_index():
columns = [0, 5, 13]
table = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False, columns=columns).get_first_table()
array = table.array
mask = table.array.mask
assert array['string_test'][0] == b"String & test"
columns = ['string_test', 'unsignedByte', 'bitarray']
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
def test_select_columns_by_name():
columns = ['string_test', 'unsignedByte', 'bitarray']
table = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False, columns=columns).get_first_table()
array = table.array
mask = table.array.mask
assert array['string_test'][0] == b"String & test"
for c in columns:
assert not np.all(mask[c])
assert np.all(mask['unicode_test'])
class TestParse:
def setup_class(self):
self.votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array['string_test'].dtype.type,
np.object_)
assert_array_equal(
self.array['string_test'],
[b'String & test', b'String & test', b'XXXX',
b'', b''])
def test_fixed_string_test(self):
assert issubclass(self.array['string_test_2'].dtype.type,
np.string_)
assert_array_equal(
self.array['string_test_2'],
[b'Fixed stri', b'0123456789', b'XXXX', b'', b''])
def test_unicode_test(self):
assert issubclass(self.array['unicode_test'].dtype.type,
np.object_)
assert_array_equal(self.array['unicode_test'],
["Ceçi n'est pas un pipe",
'வணக்கம்',
'XXXX', '', ''])
def test_fixed_unicode_test(self):
assert issubclass(self.array['fixed_unicode_test'].dtype.type,
np.unicode_)
assert_array_equal(self.array['fixed_unicode_test'],
["Ceçi n'est",
'வணக்கம்',
'0123456789', '', ''])
def test_unsignedByte(self):
assert issubclass(self.array['unsignedByte'].dtype.type,
np.uint8)
assert_array_equal(self.array['unsignedByte'],
[128, 255, 0, 255, 255])
assert not np.any(self.mask['unsignedByte'])
def test_short(self):
assert issubclass(self.array['short'].dtype.type,
np.int16)
assert_array_equal(self.array['short'],
[4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask['short'])
def test_int(self):
assert issubclass(self.array['int'].dtype.type,
np.int32)
assert_array_equal(
self.array['int'],
[268435456, 2147483647, -268435456, 268435455, 123456789])
assert_array_equal(self.mask['int'],
[False, False, False, False, True])
def test_long(self):
assert issubclass(self.array['long'].dtype.type,
np.int64)
assert_array_equal(
self.array['long'],
[922337203685477, 123456789, -1152921504606846976,
1152921504606846975, 123456789])
assert_array_equal(self.mask['long'],
[False, True, False, False, True])
def test_double(self):
assert issubclass(self.array['double'].dtype.type,
np.float64)
assert_array_equal(self.array['double'],
[8.9990234375, 0.0, np.inf, np.nan, -np.inf])
assert_array_equal(self.mask['double'],
[False, False, False, True, False])
def test_float(self):
assert issubclass(self.array['float'].dtype.type,
np.float32)
assert_array_equal(self.array['float'],
[1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask['float'],
[False, False, False, False, True])
def test_array(self):
assert issubclass(self.array['array'].dtype.type,
np.object_)
match = [[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]]]
for a, b in zip(self.array['array'], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
assert self.array.data['array'][3].mask[0][0]
assert self.array.data['array'][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array['bit'].dtype.type,
np.bool_)
assert_array_equal(self.array['bit'],
[True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array['bitarray'].dtype.type,
np.bool_)
assert self.array['bitarray'].shape == (5, 3, 2)
assert_array_equal(self.array['bitarray'],
[[[True, False],
[True, True],
[False, True]],
[[False, True],
[False, False],
[True, True]],
[[True, True],
[True, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]]])
def test_bitarray_mask(self):
assert_array_equal(self.mask['bitarray'],
[[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[False, False],
[False, False],
[False, False]],
[[True, True],
[True, True],
[True, True]],
[[True, True],
[True, True],
[True, True]]])
def test_bitvararray(self):
assert issubclass(self.array['bitvararray'].dtype.type,
np.object_)
match = [[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[], []]
for a, b in zip(self.array['bitvararray'], match):
assert_array_equal(a, b)
match_mask = [[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False, False]
for a, b in zip(self.array['bitvararray'], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array['bitvararray2'].dtype.type,
np.object_)
match = [[],
[[[False, True],
[False, False],
[True, False]],
[[True, False],
[True, False],
[True, False]]],
[[[True, True],
[True, True],
[True, True]]],
[],
[]]
for a, b in zip(self.array['bitvararray2'], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array['floatComplex'].dtype.type,
np.complex64)
assert_array_equal(self.array['floatComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+0j, np.nan+0j])
assert_array_equal(self.mask['floatComplex'],
[True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array['doubleComplex'].dtype.type,
np.complex128)
assert_array_equal(
self.array['doubleComplex'],
[np.nan+0j, 0+0j, 0+-1j, np.nan+(np.inf*1j), np.nan+0j])
assert_array_equal(self.mask['doubleComplex'],
[True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array['doubleComplexArray'].dtype.type,
np.object_)
assert ([len(x) for x in self.array['doubleComplexArray']] ==
[0, 2, 2, 0, 0])
def test_boolean(self):
assert issubclass(self.array['boolean'].dtype.type,
np.bool_)
assert_array_equal(self.array['boolean'],
[True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask['boolean'],
[False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array['booleanArray'].dtype.type,
np.bool_)
assert_array_equal(self.array['booleanArray'],
[[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False]])
def test_boolean_array_mask(self):
assert_array_equal(self.mask['booleanArray'],
[[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True]])
def test_nulls(self):
assert_array_equal(self.array['nulls'],
[0, -9, 2, -9, -9])
assert_array_equal(self.mask['nulls'],
[False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(self.array['nulls_array'],
[[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]]])
assert_array_equal(self.mask['nulls_array'],
[[[True, True],
[True, True]],
[[False, False],
[False, False]],
[[True, False],
[True, False]],
[[False, True],
[False, True]],
[[True, True],
[True, True]]])
def test_double_array(self):
assert issubclass(self.array['doublearray'].dtype.type,
np.object_)
assert len(self.array['doublearray'][0]) == 0
assert_array_equal(self.array['doublearray'][1],
[0, 1, np.inf, -np.inf, np.nan, 0, -1])
assert_array_equal(self.array.data['doublearray'][1].mask,
[False, False, False, False, False, False, True])
def test_bit_array2(self):
assert_array_equal(self.array['bitarray2'][0],
[True, True, True, True,
False, False, False, False,
True, True, True, True,
False, False, False, False])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'][0])
assert np.all(self.mask['bitarray2'][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id('J2000')
assert coosys.system == 'eq_FK5'
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id('QUERY_STATUS')
assert info.value == 'OK'
if self.votable.version != '1.1':
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..." # noqa
def test_repr(self):
assert '3 tables' in repr(self.votable)
assert repr(list(self.votable.iter_fields_and_params())[0]) == \
'<PARAM ID="awesome" arraysize="*" datatype="float" name="INPUT" unit="deg" value="[0.0 0.0]"/>' # noqa
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == '[</>]'
class TestThroughTableData(TestParse):
def setup_class(self):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio, pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_bit_mask(self):
assert_array_equal(self.mask['bit'],
[False, False, False, False, False])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
def test_schema(self, tmpdir):
# have to use an actual file because assert_validate_schema only works
# on filenames, not file-like objects
fn = str(tmpdir.join("test_through_tabledata.xml"))
with open(fn, 'wb') as f:
f.write(self.xmlout.getvalue())
assert_validate_schema(fn, '1.1')
class TestThroughBinary(TestParse):
def setup_class(self):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
votable.get_first_table().format = 'binary'
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio, pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
# Masked values in bit fields don't roundtrip through the binary
# representation -- that's not a bug, just a limitation, so
# override the mask array checks here.
def test_bit_mask(self):
assert not np.any(self.mask['bit'])
def test_bitarray_mask(self):
assert not np.any(self.mask['bitarray'])
def test_bit_array2_mask(self):
assert not np.any(self.mask['bitarray2'])
class TestThroughBinary2(TestParse):
def setup_class(self):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
votable.version = '1.3'
votable.get_first_table()._config['version_1_3_or_later'] = True
votable.get_first_table().format = 'binary2'
self.xmlout = bio = io.BytesIO()
votable.to_xml(bio)
bio.seek(0)
self.votable = parse(bio, pedantic=False)
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_get_coosys_by_id(self):
# No COOSYS in VOTable 1.2 or later
pass
def table_from_scratch():
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, ID="filename", datatype="char"),
Field(votable, ID="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
out = io.StringIO()
votable.to_xml(out)
def test_open_files():
for filename in get_pkg_data_filenames('data', pattern='*.xml'):
if filename.endswith('custom_datatype.xml'):
continue
parse(filename, pedantic=False)
@raises(VOTableSpecError)
def test_too_many_columns():
parse(
get_pkg_data_filename('data/too_many_columns.xml.gz'),
pedantic=False)
def test_build_from_scratch(tmpdir):
# Create a new VOTable file...
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
tree.Field(votable, ID="filename", datatype="char"),
tree.Field(votable, ID="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
# Now write the whole thing to a file.
# Note, we have to use the top-level votable file object
votable.to_xml(str(tmpdir.join("new_votable.xml")))
votable = parse(str(tmpdir.join("new_votable.xml")))
table = votable.get_first_table()
assert_array_equal(
table.array.mask, np.array([(False, [[False, False], [False, False]]),
(False, [[False, False], [False, False]])],
dtype=[(str('filename'), str('?')),
(str('matrix'), str('?'), (2, 2))]))
def test_validate(test_path_object=False):
"""
test_path_object is needed for the ``test_validate_path_object`` test below,
so that the file can be passed as a pathlib.Path object.
"""
output = io.StringIO()
fpath = get_pkg_data_filename('data/regression.xml')
if test_path_object:
fpath = pathlib.Path(fpath)
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
with catch_warnings():
result = validate(fpath,
output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('validation.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/validation.txt'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
@mock.patch('subprocess.Popen')
def test_validate_xmllint_true(mock_subproc_popen):
process_mock = mock.Mock()
attrs = {'communicate.return_value': ('ok', 'ko'),
'returncode': 0}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
assert validate(get_pkg_data_filename('data/empty_table.xml'),
xmllint=True)
def test_validate_path_object():
"""
Validating when source is passed as path object. (#4412)
"""
test_validate(test_path_object=True)
def test_gzip_filehandles(tmpdir):
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
with open(str(tmpdir.join("regression.compressed.xml")), 'wb') as fd:
votable.to_xml(
fd,
compressed=True,
_astropy_version="testing")
with open(str(tmpdir.join("regression.compressed.xml")), 'rb') as fd:
votable = parse(
fd,
pedantic=False)
def test_from_scratch_example():
with catch_warnings(VOWarning) as warning_lines:
try:
_run_test_from_scratch_example()
except ValueError as e:
warning_lines.append(str(e))
assert len(warning_lines) == 0
def _run_test_from_scratch_example():
from astropy.io.votable.tree import VOTableFile, Resource, Table, Field
# Create a new VOTable file...
votable = VOTableFile()
# ...with one resource...
resource = Resource()
votable.resources.append(resource)
# ... with one table
table = Table(votable)
resource.tables.append(table)
# Define some fields
table.fields.extend([
Field(votable, name="filename", datatype="char", arraysize="*"),
Field(votable, name="matrix", datatype="double", arraysize="2x2")])
# Now, use those field definitions to create the numpy record arrays, with
# the given number of rows
table.create_arrays(2)
# Now table.array can be filled with data
table.array[0] = ('test1.xml', [[1, 0], [0, 1]])
table.array[1] = ('test2.xml', [[0.5, 0.3], [0.2, 0.1]])
assert table.array[0][0] == 'test1.xml'
def test_fileobj():
# Assert that what we get back is a raw C file pointer
# so it will be super fast in the C extension.
from astropy.utils.xml import iterparser
filename = get_pkg_data_filename('data/regression.xml')
with iterparser._convert_to_fd_or_read_function(filename) as fd:
if sys.platform == 'win32':
fd()
else:
assert isinstance(fd, io.FileIO)
def test_nonstandard_units():
from astropy import units as u
votable = parse(
get_pkg_data_filename('data/nonstandard_units.xml'),
pedantic=False)
assert isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
votable = parse(
get_pkg_data_filename('data/nonstandard_units.xml'),
pedantic=False,
unit_format='generic')
assert not isinstance(
votable.get_first_table().fields[0].unit, u.UnrecognizedUnit)
def test_resource_structure():
# Based on issue #1223, as reported by @astro-friedel and @RayPlante
from astropy.io.votable import tree as vot
vtf = vot.VOTableFile()
r1 = vot.Resource()
vtf.resources.append(r1)
t1 = vot.Table(vtf)
t1.name = "t1"
t2 = vot.Table(vtf)
t2.name = 't2'
r1.tables.append(t1)
r1.tables.append(t2)
r2 = vot.Resource()
vtf.resources.append(r2)
t3 = vot.Table(vtf)
t3.name = "t3"
t4 = vot.Table(vtf)
t4.name = "t4"
r2.tables.append(t3)
r2.tables.append(t4)
r3 = vot.Resource()
vtf.resources.append(r3)
t5 = vot.Table(vtf)
t5.name = "t5"
t6 = vot.Table(vtf)
t6.name = "t6"
r3.tables.append(t5)
r3.tables.append(t6)
buff = io.BytesIO()
vtf.to_xml(buff)
buff.seek(0)
vtf2 = parse(buff)
assert len(vtf2.resources) == 3
for r in range(len(vtf2.resources)):
res = vtf2.resources[r]
assert len(res.tables) == 2
assert len(res.resources) == 0
def test_no_resource_check():
output = io.StringIO()
with catch_warnings():
# We can't test xmllint, because we can't rely on it being on the
# user's machine.
result = validate(get_pkg_data_filename('data/no_resource.xml'),
output, xmllint=False)
assert result is False
output.seek(0)
output = output.readlines()
# Uncomment to generate new groundtruth
# with open('no_resource.txt', 'wt', encoding='utf-8') as fd:
# fd.write(u''.join(output))
with open(
get_pkg_data_filename('data/no_resource.txt'),
'rt', encoding='utf-8') as fd:
truth = fd.readlines()
truth = truth[1:]
output = output[1:-1]
sys.stdout.writelines(
difflib.unified_diff(truth, output, fromfile='truth', tofile='output'))
assert truth == output
def test_instantiate_vowarning():
# This used to raise a deprecation exception.
# See https://github.com/astropy/astroquery/pull/276
VOWarning(())
def test_custom_datatype():
votable = parse(
get_pkg_data_filename('data/custom_datatype.xml'),
pedantic=False,
datatype_mapping={'bar': 'int'}
)
table = votable.get_first_table()
assert table.array.dtype['foo'] == np.int32
|
54031b87f8fd585ae5dbc71f6f1c474ba0675a862c1741193e41c2a98481262c | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.tests.helper import raises
# LOCAL
from astropy.io.votable import ucd
def test_none():
assert ucd.check_ucd(None)
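# Each UCD string maps to the list of (namespace, word) pairs that parse_ucd is
# expected to return.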
examples = {
'phys.temperature':
[('ivoa', 'phys.temperature')],
'pos.eq.ra;meta.main':
[('ivoa', 'pos.eq.ra'), ('ivoa', 'meta.main')],
'meta.id;src':
[('ivoa', 'meta.id'), ('ivoa', 'src')],
'phot.flux;em.radio;arith.ratio':
[('ivoa', 'phot.flux'), ('ivoa', 'em.radio'), ('ivoa', 'arith.ratio')],
'PHot.Flux;EM.Radio;ivoa:arith.Ratio':
[('ivoa', 'phot.flux'), ('ivoa', 'em.radio'), ('ivoa', 'arith.ratio')],
'pos.galactic.lat':
[('ivoa', 'pos.galactic.lat')],
'meta.code;phot.mag':
[('ivoa', 'meta.code'), ('ivoa', 'phot.mag')],
'stat.error;phot.mag':
[('ivoa', 'stat.error'), ('ivoa', 'phot.mag')],
'phys.temperature;instr;stat.max':
[('ivoa', 'phys.temperature'), ('ivoa', 'instr'),
('ivoa', 'stat.max')],
'stat.error;phot.mag;em.opt.V':
[('ivoa', 'stat.error'), ('ivoa', 'phot.mag'), ('ivoa', 'em.opt.V')],
}
def test_check():
for s, p in examples.items():
assert ucd.parse_ucd(s, True, True) == p
assert ucd.check_ucd(s, True, True)
@raises(ValueError)
def test_too_many_colons():
ucd.parse_ucd("ivoa:stsci:phot", True, True)
@raises(ValueError)
def test_invalid_namespace():
ucd.parse_ucd("_ivoa:phot.mag", True, True)
@raises(ValueError)
def test_invalid_word():
ucd.parse_ucd("-pho")
|
a972f74a09850ae91601ec1db48af94d44a020851114268677a36e73bc509b46 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the conversion to/from astropy.table
"""
import io
import os
import pathlib
import numpy as np
from astropy.utils.data import get_pkg_data_filename, get_pkg_data_fileobj
from astropy.io.votable.table import parse, writeto
from astropy.io.votable import tree
def test_table(tmpdir):
# Read the VOTABLE
votable = parse(
get_pkg_data_filename('data/regression.xml'),
pedantic=False)
table = votable.get_first_table()
astropy_table = table.to_table()
for name in table.array.dtype.names:
assert np.all(astropy_table.mask[name] == table.array.mask[name])
votable2 = tree.VOTableFile.from_table(astropy_table)
t = votable2.get_first_table()
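# Expected datatype and arraysize for each field after converting the astropy
# Table back into a VOTable.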
field_types = [
('string_test', {'datatype': 'char', 'arraysize': '*'}),
('string_test_2', {'datatype': 'char', 'arraysize': '10'}),
('unicode_test', {'datatype': 'unicodeChar', 'arraysize': '*'}),
('fixed_unicode_test', {'datatype': 'unicodeChar', 'arraysize': '10'}),
('string_array_test', {'datatype': 'char', 'arraysize': '4'}),
('unsignedByte', {'datatype': 'unsignedByte'}),
('short', {'datatype': 'short'}),
('int', {'datatype': 'int'}),
('long', {'datatype': 'long'}),
('double', {'datatype': 'double'}),
('float', {'datatype': 'float'}),
('array', {'datatype': 'long', 'arraysize': '2*'}),
('bit', {'datatype': 'bit'}),
('bitarray', {'datatype': 'bit', 'arraysize': '3x2'}),
('bitvararray', {'datatype': 'bit', 'arraysize': '*'}),
('bitvararray2', {'datatype': 'bit', 'arraysize': '3x2*'}),
('floatComplex', {'datatype': 'floatComplex'}),
('doubleComplex', {'datatype': 'doubleComplex'}),
('doubleComplexArray', {'datatype': 'doubleComplex', 'arraysize': '*'}),
('doubleComplexArrayFixed', {'datatype': 'doubleComplex', 'arraysize': '2'}),
('boolean', {'datatype': 'bit'}),
('booleanArray', {'datatype': 'bit', 'arraysize': '4'}),
('nulls', {'datatype': 'int'}),
('nulls_array', {'datatype': 'int', 'arraysize': '2x2'}),
('precision1', {'datatype': 'double'}),
('precision2', {'datatype': 'double'}),
('doublearray', {'datatype': 'double', 'arraysize': '*'}),
('bitarray2', {'datatype': 'bit', 'arraysize': '16'})]
for field, type in zip(t.fields, field_types):
name, d = type
assert field.ID == name
assert field.datatype == d['datatype']
if 'arraysize' in d:
assert field.arraysize == d['arraysize']
writeto(votable2, os.path.join(str(tmpdir), "through_table.xml"))
def test_read_through_table_interface(tmpdir):
from astropy.table import Table
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='main_table')
assert len(t) == 5
# Issue 8354
assert t['float'].format is None
fn = os.path.join(str(tmpdir), "table_interface.xml")
t.write(fn, table_id='FOO', format='votable')
with open(fn, 'rb') as fd:
t2 = Table.read(fd, format='votable', table_id='FOO')
assert len(t2) == 5
def test_read_through_table_interface2():
from astropy.table import Table
with get_pkg_data_fileobj('data/regression.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable', table_id='last_table')
assert len(t) == 0
def test_names_over_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=True)
assert table.colnames == [
'Name', 'GLON', 'GLAT', 'RAdeg', 'DEdeg', 'Jmag', 'Hmag', 'Kmag',
'G3.6mag', 'G4.5mag', 'G5.8mag', 'G8.0mag', '4.5mag', '8.0mag',
'Emag', '24mag', 'f_Name']
def test_explicit_ids():
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
votable = parse(fd)
table = votable.get_first_table().to_table(use_names_over_ids=False)
assert table.colnames == [
'col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8', 'col9',
'col10', 'col11', 'col12', 'col13', 'col14', 'col15', 'col16', 'col17']
def test_table_read_with_unnamed_tables():
"""
Issue #927
"""
from astropy.table import Table
with get_pkg_data_fileobj('data/names.xml', encoding='binary') as fd:
t = Table.read(fd, format='votable')
assert len(t) == 1
def test_votable_path_object():
"""
Testing when votable is passed as pathlib.Path object #4412.
"""
fpath = pathlib.Path(get_pkg_data_filename('data/names.xml'))
table = parse(fpath).get_first_table().to_table()
assert len(table) == 1
assert int(table[0][3]) == 266
def test_from_table_without_mask():
from astropy.table import Table, Column
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable')
def test_write_with_format():
from astropy.table import Table, Column
t = Table()
c = Column(data=[1, 2, 3], name='a')
t.add_column(c)
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary")
obuff = output.getvalue()
assert b'VOTABLE version="1.3"' in obuff
assert b'BINARY' in obuff
assert b'TABLEDATA' not in obuff
output = io.BytesIO()
t.write(output, format='votable', tabledata_format="binary2")
obuff = output.getvalue()
assert b'VOTABLE version="1.3"' in obuff
assert b'BINARY2' in obuff
assert b'TABLEDATA' not in obuff
def test_empty_table():
votable = parse(
get_pkg_data_filename('data/empty_table.xml'),
pedantic=False)
table = votable.get_first_table()
astropy_table = table.to_table() # noqa
|
34d10e2b6cb552aae2e0afec6c0773c97d4bef2f478d129a6dc7f76a7770240b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# LOCAL
from astropy.io.votable import exceptions
from astropy.io.votable import tree
from astropy.tests.helper import raises
@raises(exceptions.W07)
def test_check_astroyear_fail():
config = {'pedantic': True}
field = tree.Field(None, name='astroyear')
tree.check_astroyear('X2100', field, config)
@raises(exceptions.W08)
def test_string_fail():
config = {'pedantic': True}
tree.check_string(42, 'foo', config)
def test_make_Fields():
votable = tree.VOTableFile()
# ...with one resource...
resource = tree.Resource()
votable.resources.append(resource)
# ... with one table
table = tree.Table(votable)
resource.tables.append(table)
table.fields.extend([tree.Field(votable, name='Test', datatype="float", unit="mag")])
|
ca98694cfb959cf38edbaccb7da19d66aaccc0bd69765f7a2506db500204e46e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
# THIRD-PARTY
import numpy as np
from numpy.testing import assert_array_equal
# LOCAL
from astropy.io.votable import converters
from astropy.io.votable import exceptions
from astropy.io.votable import tree
from astropy.io.votable.table import parse_single_table
from astropy.tests.helper import raises, catch_warnings
from astropy.utils.data import get_pkg_data_filename
@raises(exceptions.E13)
def test_invalid_arraysize():
field = tree.Field(
None, name='broken', datatype='char', arraysize='foo')
converters.get_converter(field)
def test_oversize_char():
config = {'pedantic': True}
with catch_warnings(exceptions.W47) as w:
field = tree.Field(
None, name='c', datatype='char',
config=config)
c = converters.get_converter(field, config=config)
assert len(w) == 1
with catch_warnings(exceptions.W46) as w:
c.parse("XXX")
assert len(w) == 1
def test_char_mask():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='char',
config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ''
def test_oversize_unicode():
config = {'pedantic': True}
with catch_warnings(exceptions.W46) as w:
field = tree.Field(
None, name='c2', datatype='unicodeChar',
config=config)
c = converters.get_converter(field, config=config)
c.parse("XXX")
assert len(w) == 1
def test_unicode_mask():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='unicodeChar',
config=config)
c = converters.get_converter(field, config=config)
assert c.output("Foo", True) == ''
@raises(exceptions.E02)
def test_wrong_number_of_elements():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='int', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
c.parse("2 3 4 5 6")
@raises(ValueError)
def test_float_mask():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='float',
config=config)
c = converters.get_converter(field, config=config)
assert c.parse('') == (c.null, True)
c.parse('null')
def test_float_mask_permissive():
config = {'pedantic': False}
field = tree.Field(
None, name='c', datatype='float',
config=config)
c = converters.get_converter(field, config=config)
assert c.parse('null') == (c.null, True)
@raises(exceptions.E02)
def test_complex_array_vararray():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='floatComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
c.parse("2 3 4 5 6")
def test_complex_array_vararray2():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='floatComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("")
assert len(x[0]) == 0
def test_complex_array_vararray3():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='doubleComplex', arraysize='2x3*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4 5 6 7 8 9 10 11 12")
assert len(x) == 2
assert np.all(x[0][0][0] == complex(1, 2))
def test_complex_vararray():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='doubleComplex', arraysize='*',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3 4")
assert len(x) == 2
assert x[0][0] == complex(1, 2)
@raises(exceptions.E03)
def test_complex():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='doubleComplex',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("1 2 3")
@raises(exceptions.E04)
def test_bit():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='bit',
config=config)
c = converters.get_converter(field, config=config)
x = c.parse("T")
def test_bit_mask():
config = {'pedantic': True}
with catch_warnings(exceptions.W39) as w:
field = tree.Field(
None, name='c', datatype='bit',
config=config)
c = converters.get_converter(field, config=config)
c.output(True, True)
assert len(w) == 1
@raises(exceptions.E05)
def test_boolean():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='boolean',
config=config)
c = converters.get_converter(field, config=config)
c.parse('YES')
def test_boolean_array():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='boolean', arraysize='*',
config=config)
c = converters.get_converter(field, config=config)
r, mask = c.parse('TRUE FALSE T F 0 1')
assert_array_equal(r, [True, False, True, False, False, True])
@raises(exceptions.E06)
def test_invalid_type():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='foobar',
config=config)
c = converters.get_converter(field, config=config)
def test_precision():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='float', precision="E4",
config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == '266.2'
field = tree.Field(
None, name='c', datatype='float', precision="F4",
config=config)
c = converters.get_converter(field, config=config)
assert c.output(266.248, False) == '266.2480'
@raises(exceptions.W51)
def test_integer_overflow():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='int', config=config)
c = converters.get_converter(field, config=config)
c.parse('-2208988800', config=config)
def test_float_default_precision():
config = {'pedantic': True}
field = tree.Field(
None, name='c', datatype='float', arraysize="4",
config=config)
c = converters.get_converter(field, config=config)
assert (c.output([1, 2, 3, 8.9990234375], [False, False, False, False]) ==
'1 2 3 8.9990234375')
def test_vararray():
votable = tree.VOTableFile()
resource = tree.Resource()
votable.resources.append(resource)
table = tree.Table(votable)
resource.tables.append(table)
tabarr = []
heads = ['headA', 'headB', 'headC']
types = ["char", "double", "int"]
vals = [["A", 1.0, 2],
["B", 2.0, 3],
["C", 3.0, 4]]
for i in range(len(heads)):
tabarr.append(tree.Field(
votable, name=heads[i], datatype=types[i], arraysize="*"))
table.fields.extend(tabarr)
table.create_arrays(len(vals))
for i in range(len(vals)):
values = tuple(vals[i])
table.array[i] = values
buff = io.BytesIO()
votable.to_xml(buff)
def test_gemini_v1_2():
'''
see Pull Request 4782 or Issue 4781 for details
'''
table = parse_single_table(get_pkg_data_filename('data/gemini.xml'))
assert table is not None
|
40ab3c944a731dd30541aa6e8a9924f7cec0278c60a735773089228629c82ccc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains a class to handle a validation result for a single VOTable
file.
"""
# STDLIB
from xml.parsers.expat import ExpatError
import hashlib
import os
import shutil
import socket
import subprocess
import warnings
import pickle
import urllib.request
import urllib.error
import http.client
# VO
from astropy.io.votable import table
from astropy.io.votable import exceptions
from astropy.io.votable import xmlutil
class Result:
def __init__(self, url, root='results', timeout=10):
self.url = url
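# Results are cached on disk under a directory tree derived from the MD5 hash of the URL.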
m = hashlib.md5()
m.update(url)
self._hash = m.hexdigest()
self._root = root
self._path = os.path.join(
self._hash[0:2], self._hash[2:4], self._hash[4:])
if not os.path.exists(self.get_dirpath()):
os.makedirs(self.get_dirpath())
self.timeout = timeout
self.load_attributes()
def __enter__(self):
return self
def __exit__(self, *args):
self.save_attributes()
def get_dirpath(self):
return os.path.join(self._root, self._path)
def get_htmlpath(self):
return self._path
def get_attribute_path(self):
return os.path.join(self.get_dirpath(), "values.dat")
def get_vo_xml_path(self):
return os.path.join(self.get_dirpath(), "vo.xml")
# ATTRIBUTES
def load_attributes(self):
path = self.get_attribute_path()
if os.path.exists(path):
try:
with open(path, 'rb') as fd:
self._attributes = pickle.load(fd)
except Exception:
shutil.rmtree(self.get_dirpath())
os.makedirs(self.get_dirpath())
self._attributes = {}
else:
self._attributes = {}
def save_attributes(self):
path = self.get_attribute_path()
with open(path, 'wb') as fd:
pickle.dump(self._attributes, fd)
def __getitem__(self, key):
return self._attributes[key]
def __setitem__(self, key, val):
self._attributes[key] = val
def __contains__(self, key):
return key in self._attributes
# VO XML
def download_xml_content(self):
path = self.get_vo_xml_path()
if 'network_error' not in self._attributes:
self['network_error'] = None
if os.path.exists(path):
return
def fail(reason):
reason = str(reason)
with open(path, 'wb') as fd:
fd.write('FAILED: {0}\n'.format(reason).encode('utf-8'))
self['network_error'] = reason
r = None
try:
r = urllib.request.urlopen(
self.url.decode('ascii'), timeout=self.timeout)
except urllib.error.URLError as e:
if hasattr(e, 'reason'):
reason = e.reason
else:
reason = e.code
fail(reason)
return
except http.client.HTTPException as e:
fail("HTTPException: {}".format(str(e)))
return
except (socket.timeout, socket.error) as e:
fail("Timeout")
return
if r is None:
fail("Invalid URL")
return
try:
content = r.read()
except socket.timeout as e:
fail("Timeout")
return
else:
r.close()
with open(path, 'wb') as fd:
fd.write(content)
def get_xml_content(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
with open(path, 'rb') as fd:
content = fd.read()
return content
def validate_vo(self):
path = self.get_vo_xml_path()
if not os.path.exists(path):
self.download_xml_content()
self['version'] = ''
if 'network_error' in self and self['network_error'] is not None:
self['nwarnings'] = 0
self['nexceptions'] = 0
self['warnings'] = []
self['xmllint'] = None
self['warning_types'] = set()
return
nexceptions = 0
nwarnings = 0
t = None
lines = []
with open(path, 'rb') as input:
with warnings.catch_warnings(record=True) as warning_lines:
try:
t = table.parse(input, pedantic=False, filename=path)
except (ValueError, TypeError, ExpatError) as e:
lines.append(str(e))
nexceptions += 1
lines = [str(x.message) for x in warning_lines] + lines
if t is not None:
self['version'] = version = t.version
else:
self['version'] = version = "1.0"
if 'xmllint' not in self:
# Now check the VO schema based on the version in
# the file.
try:
success, stdout, stderr = xmlutil.validate_schema(path, version)
# OSError is raised when the XML file is so large that the process
# runs out of memory and the system kills it.
except OSError as e:
self['xmllint'] = None
self['xmllint_content'] = str(e)
else:
self['xmllint'] = (success == 0)
self['xmllint_content'] = stderr
warning_types = set()
for line in lines:
w = exceptions.parse_vowarning(line)
if w['is_warning']:
nwarnings += 1
if w['is_exception']:
nexceptions += 1
warning_types.add(w['warning'])
self['nwarnings'] = nwarnings
self['nexceptions'] = nexceptions
self['warnings'] = lines
self['warning_types'] = warning_types
def has_warning(self, warning_code):
return warning_code in self['warning_types']
def match_expectations(self):
if 'network_error' not in self:
self['network_error'] = None
if self['expected'] == 'good':
return (not self['network_error'] and
self['nwarnings'] == 0 and
self['nexceptions'] == 0)
elif self['expected'] == 'incorrect':
return (not self['network_error'] and
(self['nwarnings'] > 0 or
self['nexceptions'] > 0))
elif self['expected'] == 'broken':
return self['network_error'] is not None
def validate_with_votlint(self, path_to_stilts_jar):
filename = self.get_vo_xml_path()
p = subprocess.Popen(
"java -jar {} votlint validate=false {}".format(
path_to_stilts_jar, filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if len(stdout) or p.returncode:
self['votlint'] = False
else:
self['votlint'] = True
self['votlint_content'] = stdout
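# Group a sequence of URLs (or ``Result`` instances) into the named subsets
# used by the HTML report; returns a list of (basename, title, results[, tags])
# tuples consumed by the HTML index and subindex writers.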
def get_result_subsets(results, root, s=None):
all_results = []
correct = []
not_expected = []
fail_schema = []
schema_mismatch = []
fail_votlint = []
votlint_mismatch = []
network_failures = []
version_10 = []
version_11 = []
version_12 = []
version_unknown = []
has_warnings = []
warning_set = {}
has_exceptions = []
exception_set = {}
for url in results:
if s:
next(s)
if isinstance(url, Result):
x = url
else:
x = Result(url, root=root)
all_results.append(x)
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['xmllint'] is True):
correct.append(x)
if not x.match_expectations():
not_expected.append(x)
if x['xmllint'] is False:
fail_schema.append(x)
if (x['xmllint'] is False and
x['nwarnings'] == 0 and
x['nexceptions'] == 0):
schema_mismatch.append(x)
if 'votlint' in x and x['votlint'] is False:
fail_votlint.append(x)
if 'network_error' not in x:
x['network_error'] = None
if (x['nwarnings'] == 0 and
x['nexceptions'] == 0 and
x['network_error'] is None):
votlint_mismatch.append(x)
if 'network_error' in x and x['network_error'] is not None:
network_failures.append(x)
version = x['version']
if version == '1.0':
version_10.append(x)
elif version == '1.1':
version_11.append(x)
elif version == '1.2':
version_12.append(x)
else:
version_unknown.append(x)
if x['nwarnings'] > 0:
has_warnings.append(x)
for warning in x['warning_types']:
if (warning is not None and
len(warning) == 3 and
warning.startswith('W')):
warning_set.setdefault(warning, [])
warning_set[warning].append(x)
if x['nexceptions'] > 0:
has_exceptions.append(x)
for exc in x['warning_types']:
if exc is not None and len(exc) == 3 and exc.startswith('E'):
exception_set.setdefault(exc, [])
exception_set[exc].append(x)
warning_set = list(warning_set.items())
warning_set.sort()
exception_set = list(exception_set.items())
exception_set.sort()
tables = [
('all', 'All tests', all_results),
('correct', 'Correct', correct),
('unexpected', 'Unexpected', not_expected),
('schema', 'Invalid against schema', fail_schema),
('schema_mismatch', 'Invalid against schema/Passed vo.table',
schema_mismatch, ['ul']),
('fail_votlint', 'Failed votlint', fail_votlint),
('votlint_mismatch', 'Failed votlint/Passed vo.table',
votlint_mismatch, ['ul']),
('network_failures', 'Network failures', network_failures),
('version1.0', 'Version 1.0', version_10),
('version1.1', 'Version 1.1', version_11),
('version1.2', 'Version 1.2', version_12),
('version_unknown', 'Version unknown', version_unknown),
('warnings', 'Warnings', has_warnings)]
for warning_code, warning in warning_set:
if s:
next(s)
warning_class = getattr(exceptions, warning_code, None)
if warning_class:
warning_descr = warning_class.get_short_name()
tables.append(
(warning_code,
'{}: {}'.format(warning_code, warning_descr),
warning, ['ul', 'li']))
tables.append(
('exceptions', 'Exceptions', has_exceptions))
for exception_code, exc in exception_set:
if s:
next(s)
exception_class = getattr(exceptions, exception_code, None)
if exception_class:
exception_descr = exception_class.get_short_name()
tables.append(
(exception_code,
'{}: {}'.format(exception_code, exception_descr),
exc, ['ul', 'li']))
return tables
|
ef6d890486e30e714949424b15ea29ac01d85afea82de46fa61a2301d0a9097f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Validates a large collection of web-accessible VOTable files,
and generates a report as a directory tree of HTML files.
"""
# STDLIB
import os
# LOCAL
from astropy.utils.data import get_pkg_data_filename
from . import html
from . import result
__all__ = ['make_validation_report']
def get_srcdir():
return os.path.dirname(__file__)
def get_urls(destdir, s):
import gzip
types = ['good', 'broken', 'incorrect']
seen = set()
urls = []
for type in types:
filename = get_pkg_data_filename(
'data/urls/cone.{0}.dat.gz'.format(type))
with gzip.open(filename, 'rb') as fd:
for url in fd.readlines():
next(s)
url = url.strip()
if url not in seen:
with result.Result(url, root=destdir) as r:
r['expected'] = type
urls.append(url)
seen.add(url)
return urls
def download(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
r.download_xml_content()
def validate_vo(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
r.validate_vo()
def votlint_validate(args):
path_to_stilts_jar, url, destdir = args
with result.Result(url, root=destdir) as r:
if r['network_error'] is None:
r.validate_with_votlint(path_to_stilts_jar)
def write_html_result(args):
url, destdir = args
with result.Result(url, root=destdir) as r:
html.write_result(r)
def write_subindex(args):
subset, destdir, total = args
html.write_index_table(destdir, *subset, total=total)
def make_validation_report(
urls=None, destdir='astropy.io.votable.validator.results',
multiprocess=True, stilts=None):
"""
Validates a large collection of web-accessible VOTable files.
Generates a report as a directory tree of HTML files.
Parameters
----------
urls : list of strings, optional
If provided, a list of HTTP URLs to download VOTable files
from. If not provided, a built-in set of ~22,000 URLs
compiled by HEASARC will be used.
destdir : path, optional
The directory to write the report to. By default, this is a
directory called ``'astropy.io.votable.validator.results'`` in the
current directory. If the directory does not exist, it will be created.
multiprocess : bool, optional
If `True` (default), perform validations in parallel using all
of the cores on this machine.
stilts : path, optional
To perform validation with ``votlint`` from the Java-based
`STILTS <http://www.star.bris.ac.uk/~mbt/stilts/>`_ VOTable
parser, in addition to `astropy.io.votable`, set this to the
path of the ``'stilts.jar'`` file. ``java`` on the system shell
path will be used to run it.
Notes
-----
Downloads of each given URL will be performed only once and cached
locally in *destdir*. To refresh the cache, remove *destdir*
first.
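Examples
--------
A minimal, illustrative invocation; the URL below is hypothetical and the
run needs network access, so it is skipped in doctests::
>>> from astropy.io.votable.validator import make_validation_report
>>> make_validation_report(urls=['http://example.com/votable.xml'],
...                        destdir='results')  # doctest: +SKIP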
"""
from astropy.utils.console import (color_print, ProgressBar, Spinner)
if stilts is not None:
if not os.path.exists(stilts):
raise ValueError(
'{0} does not exist.'.format(stilts))
destdir = os.path.abspath(destdir)
if urls is None:
with Spinner('Loading URLs', 'green') as s:
urls = get_urls(destdir, s)
else:
color_print('Marking URLs', 'green')
for url in ProgressBar.iterate(urls):
with result.Result(url, root=destdir) as r:
# User-supplied URLs carry no expectation category, so assume they
# are expected to validate cleanly.
r['expected'] = 'good'
args = [(url, destdir) for url in urls]
color_print('Downloading VO files', 'green')
ProgressBar.map(
download, args, multiprocess=multiprocess)
color_print('Validating VO files', 'green')
ProgressBar.map(
validate_vo, args, multiprocess=multiprocess)
if stilts is not None:
color_print('Validating with votlint', 'green')
votlint_args = [(stilts, x, destdir) for x in urls]
ProgressBar.map(
votlint_validate, votlint_args, multiprocess=multiprocess)
color_print('Generating HTML files', 'green')
ProgressBar.map(
write_html_result, args, multiprocess=multiprocess)
with Spinner('Grouping results', 'green') as s:
subsets = result.get_result_subsets(urls, destdir, s)
color_print('Generating index', 'green')
html.write_index(subsets, urls, destdir)
color_print('Generating subindices', 'green')
subindex_args = [(subset, destdir, len(urls)) for subset in subsets]
ProgressBar.map(
write_subindex, subindex_args, multiprocess=multiprocess)
|
11346ba18113042fb2575bfe950ff980b25b0c27f0352688e4a1f569cff19113 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# STDLIB
import contextlib
from math import ceil
import os
import re
# ASTROPY
from astropy.utils.xml.writer import XMLWriter, xml_escape
from astropy import online_docs_root
# VO
from astropy.io.votable import exceptions
html_header = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html
PUBLIC "-//W3C//DTD XHTML Basic 1.0//EN"
"http://www.w3.org/TR/xhtml-basic/xhtml-basic10.dtd">
"""
default_style = """
body {
font-family: sans-serif
}
a {
text-decoration: none
}
.highlight {
color: red;
font-weight: bold;
text-decoration: underline;
}
.green { background-color: #ddffdd }
.red { background-color: #ffdddd }
.yellow { background-color: #ffffdd }
tr:hover { background-color: #dddddd }
table {
border-width: 1px;
border-spacing: 0px;
border-style: solid;
border-color: gray;
border-collapse: collapse;
background-color: white;
padding: 5px;
}
table th {
border-width: 1px;
padding: 5px;
border-style: solid;
border-color: gray;
}
table td {
border-width: 1px;
padding: 5px;
border-style: solid;
border-color: gray;
}
"""
@contextlib.contextmanager
def make_html_header(w):
w.write(html_header)
with w.tag('html', xmlns="http://www.w3.org/1999/xhtml", lang="en-US"):
with w.tag('head'):
w.element('title', 'VO Validation results')
w.element('style', default_style)
with w.tag('body'):
yield
def write_source_line(w, line, nchar=0):
part1 = xml_escape(line[:nchar].decode('utf-8'))
char = xml_escape(line[nchar:nchar+1].decode('utf-8'))
part2 = xml_escape(line[nchar+1:].decode('utf-8'))
w.write(' ')
w.write(part1)
w.write('<span class="highlight">{}</span>'.format(char))
w.write(part2)
w.write('\n\n')
def write_warning(w, line, xml_lines):
warning = exceptions.parse_vowarning(line)
if not warning['is_something']:
w.data(line)
else:
w.write('Line {:d}: '.format(warning['nline']))
if warning['warning']:
w.write('<a href="{}/{}">{}</a>: '.format(
online_docs_root, warning['doc_url'], warning['warning']))
msg = warning['message']
if not isinstance(warning['message'], str):
msg = msg.decode('utf-8')
w.write(xml_escape(msg))
w.write('\n')
if 1 <= warning['nline'] < len(xml_lines):
write_source_line(w, xml_lines[warning['nline'] - 1], warning['nchar'])
def write_votlint_warning(w, line, xml_lines):
match = re.search(r"(WARNING|ERROR|INFO) \(l.(?P<line>[0-9]+), c.(?P<column>[0-9]+)\): (?P<rest>.*)", line)
if match:
w.write('Line {:d}: {}\n'.format(
int(match.group('line')), xml_escape(match.group('rest'))))
write_source_line(
w, xml_lines[int(match.group('line')) - 1],
int(match.group('column')) - 1)
else:
w.data(line)
w.data('\n')
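# Write the per-URL detail page (parser warnings plus any xmllint/votlint
# output) to ``index.html`` inside the result's directory.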
def write_result(result):
if 'network_error' in result and result['network_error'] is not None:
return
xml = result.get_xml_content()
xml_lines = xml.splitlines()
path = os.path.join(result.get_dirpath(), 'index.html')
with open(path, 'w', encoding='utf-8') as fd:
w = XMLWriter(fd)
with make_html_header(w):
with w.tag('p'):
with w.tag('a', href='vo.xml'):
w.data(result.url.decode('ascii'))
w.element('hr')
with w.tag('pre'):
w._flush()
for line in result['warnings']:
write_warning(w, line, xml_lines)
if result['xmllint'] is False:
w.element('hr')
w.element('p', 'xmllint results:')
content = result['xmllint_content']
if not isinstance(content, str):
content = content.decode('ascii')
content = content.replace(result.get_dirpath() + '/', '')
with w.tag('pre'):
w.data(content)
if 'votlint' in result:
if result['votlint'] is False:
w.element('hr')
w.element('p', 'votlint results:')
content = result['votlint_content']
if not isinstance(content, str):
content = content.decode('ascii')
with w.tag('pre'):
w._flush()
for line in content.splitlines():
write_votlint_warning(w, line, xml_lines)
def write_result_row(w, result):
with w.tag('tr'):
with w.tag('td'):
if ('network_error' in result and
result['network_error'] is not None):
w.data(result.url.decode('ascii'))
else:
w.element('a', result.url.decode('ascii'),
href='{}/index.html'.format(result.get_htmlpath()))
if 'network_error' in result and result['network_error'] is not None:
w.element('td', str(result['network_error']),
attrib={'class': 'red'})
w.element('td', '-')
w.element('td', '-')
w.element('td', '-')
w.element('td', '-')
else:
w.element('td', '-', attrib={'class': 'green'})
if result['nexceptions']:
cls = 'red'
msg = 'Fatal'
elif result['nwarnings']:
cls = 'yellow'
msg = str(result['nwarnings'])
else:
cls = 'green'
msg = '-'
w.element('td', msg, attrib={'class': cls})
msg = result['version']
if result['xmllint'] is None:
cls = ''
elif result['xmllint'] is False:
cls = 'red'
else:
cls = 'green'
w.element('td', msg, attrib={'class': cls})
if result['expected'] == 'good':
cls = 'green'
msg = '-'
elif result['expected'] == 'broken':
cls = 'red'
msg = 'net'
elif result['expected'] == 'incorrect':
cls = 'yellow'
msg = 'invalid'
w.element('td', msg, attrib={'class': cls})
if 'votlint' in result:
if result['votlint']:
cls = 'green'
msg = 'Passed'
else:
cls = 'red'
msg = 'Failed'
else:
cls = ''
msg = '?'
w.element('td', msg, attrib={'class': cls})
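# Render one subset of results as a paginated sequence of HTML tables,
# with ``chunk_size`` rows per page and previous/next links between pages.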
def write_table(basename, name, results, root="results", chunk_size=500):
def write_page_links(j):
if npages <= 1:
return
with w.tag('center'):
if j > 0:
w.element('a', '<< ', href='{}_{:02d}.html'.format(basename, j-1))
for i in range(npages):
if i == j:
w.data(str(i+1))
else:
w.element(
'a', str(i+1),
href='{}_{:02d}.html'.format(basename, i))
w.data(' ')
if j < npages - 1:
w.element('a', '>>', href='{}_{:02d}.html'.format(basename, j+1))
npages = int(ceil(float(len(results)) / chunk_size))
for i, j in enumerate(range(0, max(len(results), 1), chunk_size)):
subresults = results[j:j+chunk_size]
path = os.path.join(root, '{}_{:02d}.html'.format(basename, i))
with open(path, 'w', encoding='utf-8') as fd:
w = XMLWriter(fd)
with make_html_header(w):
write_page_links(i)
w.element('h2', name)
with w.tag('table'):
with w.tag('tr'):
w.element('th', 'URL')
w.element('th', 'Network')
w.element('th', 'Warnings')
w.element('th', 'Schema')
w.element('th', 'Expected')
w.element('th', 'votlint')
for result in subresults:
write_result_row(w, result)
write_page_links(i)
def add_subset(w, basename, name, subresults, inside=['p'], total=None):
with w.tag('tr'):
subresults = list(subresults)
if total is None:
total = len(subresults)
if total == 0: # pragma: no cover
percentage = 0.0
else:
percentage = (float(len(subresults)) / total)
with w.tag('td'):
for element in inside:
w.start(element)
w.element('a', name, href='{}_00.html'.format(basename))
for element in reversed(inside):
w.end(element)
numbers = '{:d} ({:.2%})'.format(len(subresults), percentage)
with w.tag('td'):
w.data(numbers)
def write_index(subsets, results, root='results'):
path = os.path.join(root, 'index.html')
with open(path, 'w', encoding='utf-8') as fd:
w = XMLWriter(fd)
with make_html_header(w):
w.element('h1', 'VO Validation results')
with w.tag('table'):
for subset in subsets:
add_subset(w, *subset, total=len(results))
def write_index_table(root, basename, name, subresults, inside=None,
total=None, chunk_size=500):
if total is None:
total = len(subresults)
percentage = (float(len(subresults)) / total)
numbers = '{:d} ({:.2%})'.format(len(subresults), percentage)
write_table(basename, name + ' ' + numbers, subresults, root, chunk_size)
|
5150a3fd7867f71102e42c00a1077009ebc1fc1db84919ead80094933e0b6a86 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This plugin provides customization of the header displayed by pytest for
reporting purposes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import datetime
import locale
import math
from collections import OrderedDict
from astropy.tests.helper import ignore_warnings
from astropy.utils.introspection import resolve_name
PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'),
('Scipy', 'scipy'),
('Matplotlib', 'matplotlib'),
('h5py', 'h5py'),
('Pandas', 'pandas')])
# TESTED_VERSIONS always includes Astropy's own version
from astropy import __version__
TESTED_VERSIONS = OrderedDict([('Astropy', __version__)])
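# Assemble the informational header (package versions, platform, encodings,
# enabled options) that pytest prints before the test run starts.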
def pytest_report_header(config):
try:
stdoutencoding = sys.stdout.encoding or 'ascii'
except AttributeError:
stdoutencoding = 'ascii'
args = config.args
# TESTED_VERSIONS can contain the affiliated package version, too
if len(TESTED_VERSIONS) > 1:
for pkg, version in TESTED_VERSIONS.items():
if pkg not in ['Astropy', 'astropy_helpers']:
s = "\nRunning tests with {0} version {1}.\n".format(
pkg, version)
else:
s = "\nRunning tests with Astropy version {0}.\n".format(
TESTED_VERSIONS['Astropy'])
# Per https://github.com/astropy/astropy/pull/4204, strip the rootdir from
# each directory argument
if hasattr(config, 'rootdir'):
rootdir = str(config.rootdir)
if not rootdir.endswith(os.sep):
rootdir += os.sep
dirs = [arg[len(rootdir):] if arg.startswith(rootdir) else arg
for arg in args]
else:
dirs = args
s += "Running tests in {0}.\n\n".format(" ".join(dirs))
s += "Date: {0}\n\n".format(datetime.datetime.now().isoformat()[:19])
from platform import platform
plat = platform()
if isinstance(plat, bytes):
plat = plat.decode(stdoutencoding, 'replace')
s += "Platform: {0}\n\n".format(plat)
s += "Executable: {0}\n\n".format(sys.executable)
s += "Full Python Version: \n{0}\n\n".format(sys.version)
s += "encodings: sys: {0}, locale: {1}, filesystem: {2}".format(
sys.getdefaultencoding(),
locale.getpreferredencoding(),
sys.getfilesystemencoding())
s += '\n'
s += "byteorder: {0}\n".format(sys.byteorder)
s += "float info: dig: {0.dig}, mant_dig: {0.dig}\n\n".format(
sys.float_info)
for module_display, module_name in PYTEST_HEADER_MODULES.items():
try:
with ignore_warnings(DeprecationWarning):
module = resolve_name(module_name)
except ImportError:
s += "{0}: not available\n".format(module_display)
else:
try:
version = module.__version__
except AttributeError:
version = 'unknown (no __version__ attribute)'
s += "{0}: {1}\n".format(module_display, version)
# Helpers version
if 'astropy_helpers' in TESTED_VERSIONS:
astropy_helpers_version = TESTED_VERSIONS['astropy_helpers']
else:
try:
from astropy.version import astropy_helpers_version
except ImportError:
astropy_helpers_version = None
if astropy_helpers_version:
s += "astropy_helpers: {0}\n".format(astropy_helpers_version)
special_opts = ["remote_data", "pep8"]
opts = []
for op in special_opts:
op_value = getattr(config.option, op, None)
if op_value:
if isinstance(op_value, str):
op = ': '.join((op, op_value))
opts.append(op)
if opts:
s += "Using Astropy options: {0}.\n".format(", ".join(opts))
return s
def pytest_terminal_summary(terminalreporter):
"""Output a warning to IPython users in case any tests failed."""
try:
get_ipython()
except NameError:
return
if not terminalreporter.stats.get('failed'):
# Only issue the warning when there are actually failures
return
terminalreporter.ensure_newline()
terminalreporter.write_line(
'Some tests are known to fail when run from the IPython prompt; '
'especially, but not limited to tests involving logging and warning '
'handling. Unless you are certain as to the cause of the failure, '
'please check that the failure occurs outside IPython as well. See '
'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
'tests-when-running-the-tests-in-ipython for more information.',
yellow=True, bold=True)
|
0d14ee5753a27bea5348f743de970628b07fff421dd1e048ffceb3e77cf98527 | from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose, pytest
def test_assert_quantity_allclose():
assert_quantity_allclose([1, 2], [1, 2])
assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm)
assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=2 * u.cm)
with pytest.raises(AssertionError) as exc:
assert_quantity_allclose([1, 2] * u.m, [90, 200] * u.cm)
assert exc.value.args[0].startswith("\nNot equal to tolerance")
with pytest.raises(AssertionError):
assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=0.5 * u.cm)
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2] * u.m, [100, 200])
assert exc.value.args[0] == "Units for 'desired' () and 'actual' (m) are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [100, 200] * u.cm)
assert exc.value.args[0] == "Units for 'desired' (cm) and 'actual' () are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm, atol=0.3)
assert exc.value.args[0] == "Units for 'atol' () and 'actual' (m) are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [1, 2], atol=0.3 * u.m)
assert exc.value.args[0] == "Units for 'atol' (m) and 'actual' () are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [1, 2], rtol=0.3 * u.m)
assert exc.value.args[0] == "`rtol` should be dimensionless"
|
87d71269a53a8eb09287b986a512d2691ce354ed4fe7a890e72dfc29ea7f8ec8 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import doctest
from textwrap import dedent
import pytest
# test helper.run_tests function
from astropy import test as run_tests
from astropy.tests import helper
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
run_tests(package='fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
run_tests(pastebin='not_an_option')
# TODO: Temporarily disabled, as this seems to non-deterministically fail
# def test_deprecation_warning():
# with pytest.raises(DeprecationWarning):
# warnings.warn('test warning', DeprecationWarning)
def test_unicode_literal_conversion():
assert isinstance('ångström', str)
def test_doctest_float_replacement(tmpdir):
test1 = dedent("""
This will demonstrate a doctest that fails due to a few extra decimal
places::
>>> 1.0 / 3.0
0.333333333333333311
""")
test2 = dedent("""
This is the same test, but it should pass with use of
+FLOAT_CMP::
>>> 1.0 / 3.0 # doctest: +FLOAT_CMP
0.333333333333333311
""")
test1_rst = tmpdir.join('test1.rst')
test2_rst = tmpdir.join('test2.rst')
test1_rst.write(test1)
test2_rst.write(test2)
with pytest.raises(doctest.DocTestFailure):
doctest.testfile(str(test1_rst), module_relative=False,
raise_on_error=True, verbose=False, encoding='utf-8')
doctest.testfile(str(test2_rst), module_relative=False,
raise_on_error=True, verbose=False, encoding='utf-8')
|
57fe1b39e3e287cc6ca46df10a9a72fe288ef404f8c7564fe36113d19dd4b5e0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pkgutil
import os
import types
def test_imports():
"""
This just imports all modules in astropy, making sure they don't pull in
any unexpected dependencies.
"""
from astropy.utils import find_current_module
pkgornm = find_current_module(1).__name__.split('.')[0]
if isinstance(pkgornm, str):
package = pkgutil.get_loader(pkgornm).load_module(pkgornm)
elif (isinstance(pkgornm, types.ModuleType) and
'__init__' in pkgornm.__file__):
package = pkgornm
else:
msg = 'test_imports is not determining a valid package/package name'
raise TypeError(msg)
if hasattr(package, '__path__'):
pkgpath = package.__path__
elif hasattr(package, '__file__'):
pkgpath = os.path.split(package.__file__)[0]
else:
raise AttributeError('package to generate config items for does not '
'have __file__ or __path__')
prefix = package.__name__ + '.'
def onerror(name):
# A legitimate error occurred in a module that wasn't excluded
raise
for imper, nm, ispkg in pkgutil.walk_packages(pkgpath, prefix,
onerror=onerror):
imper.find_module(nm)
def test_toplevel_namespace():
import astropy
d = dir(astropy)
assert 'os' not in d
assert 'log' in d
assert 'test' in d
assert 'sys' not in d
|
85618dc3c4c4f6845db01655d3c9c5098ec850e0971b52dc547c2b3047d9d27e | import abc
import numpy as np
from astropy.timeseries import TimeSeries, BinnedTimeSeries
__all__ = ['BasePeriodogram']
class BasePeriodogram:
@abc.abstractmethod
def __init__(self, t, y, dy=None):
pass
@classmethod
def from_timeseries(cls, timeseries, signal_column_name=None, uncertainty=None, **kwargs):
"""
Initialize a periodogram from a time series object.
If a binned time series is passed, the time at the center of the bins is
used. Also note that this method automatically discards any NaN/undefined
values when initializing the periodogram.
Parameters
----------
timeseries : `~astropy.timeseries.TimeSeries` or `~astropy.timeseries.BinnedTimeSeries`
The time series to read the times and signal values from.
signal_column_name : str
The name of the column containing the signal values to use.
uncertainty : str or float or `~astropy.units.Quantity`, optional
The name of the column containing the errors on the signal, or the
value to use for the error, if a scalar.
**kwargs
Additional keyword arguments are passed to the initializer for this
periodogram class.
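Examples
--------
A minimal sketch, assuming a sampled time series with a column named
``'flux'``; the column name and values below are purely illustrative::
>>> from astropy import units as u
>>> from astropy.timeseries import TimeSeries, LombScargle
>>> ts = TimeSeries(time_start='2019-01-01T00:00:00', time_delta=3 * u.s,
...                 data={'flux': [1., 4., 3.]})
>>> periodogram = LombScargle.from_timeseries(ts, 'flux')  # doctest: +SKIP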
"""
if signal_column_name is None:
raise ValueError('signal_column_name should be set to a valid column name')
y = timeseries[signal_column_name]
keep = ~np.isnan(y)
if isinstance(uncertainty, str):
dy = timeseries[uncertainty]
keep &= ~np.isnan(dy)
dy = dy[keep]
else:
dy = uncertainty
if isinstance(timeseries, TimeSeries):
time = timeseries.time
elif isinstance(timeseries, BinnedTimeSeries):
time = timeseries.time_bin_center
else:
raise TypeError('Input time series should be an instance of '
'TimeSeries or BinnedTimeSeries')
return cls(time[keep], y[keep], dy=dy, **kwargs)
|
673e175d7ed3412879d850631258803a98e02bac38d3a171bd9bf45d77a7789f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy.io import registry, fits
from astropy.table import Table
from astropy.time import Time, TimeDelta
from astropy.timeseries.sampled import TimeSeries
__all__ = ["kepler_fits_reader"]
def kepler_fits_reader(filename):
"""
This serves as the FITS reader for KEPLER or TESS files within
astropy-timeseries.
This function should generally not be called directly, and instead this
time series reader should be accessed with the
:meth:`~astropy.timeseries.TimeSeries.read` method::
>>> from astropy.timeseries import TimeSeries
>>> ts = TimeSeries.read('kplr33122.fits', format='kepler.fits') # doctest: +SKIP
Parameters
----------
filename : `str` or `pathlib.Path`
File to load.
Returns
-------
ts : `~astropy.timeseries.TimeSeries`
Data converted into a TimeSeries.
"""
hdulist = fits.open(filename)
# Get the lightcurve HDU
telescope = hdulist[0].header['telescop'].lower()
if telescope == 'tess':
hdu = hdulist['LIGHTCURVE']
elif telescope == 'kepler':
hdu = hdulist[1]
else:
raise NotImplementedError("{} is not implemented, only KEPLER or TESS are "
"supported through this reader".format(hdulist[0].header['telescop']))
if hdu.header['EXTVER'] > 1:
raise NotImplementedError("Support for {0} v{1} files not yet "
"implemented".format(hdu.header['TELESCOP'], hdu.header['EXTVER']))
# Check time scale
if hdu.header['TIMESYS'] != 'TDB':
raise NotImplementedError("Support for {0} time scale not yet "
"implemented in {1} reader".format(hdu.header['TIMESYS'], hdu.header['TELESCOP']))
tab = Table.read(hdu, format='fits')
# Some KEPLER files have a T column instead of TIME.
if "T" in tab.colnames:
tab.rename_column("T", "TIME")
for colname in tab.colnames:
# Fix units
if tab[colname].unit == 'e-/s':
tab[colname].unit = 'electron/s'
if tab[colname].unit == 'pixels':
tab[colname].unit = 'pixel'
# Rename columns to lowercase
tab.rename_column(colname, colname.lower())
# Filter out NaN rows
nans = np.isnan(tab['time'].data)
if np.any(nans):
warnings.warn('Ignoring {0} rows with NaN times'.format(np.sum(nans)))
tab = tab[~nans]
# Time column is dependent on source and we correct it here
reference_date = Time(hdu.header['BJDREFI'], hdu.header['BJDREFF'],
scale=hdu.header['TIMESYS'].lower(), format='jd')
time = reference_date + TimeDelta(tab['time'].data)
time.format = 'isot'
# Remove original time column
tab.remove_column('time')
return TimeSeries(time=time, data=tab)
registry.register_reader('kepler.fits', TimeSeries, kepler_fits_reader)
registry.register_reader('tess.fits', TimeSeries, kepler_fits_reader)
|
63b5bc4314c3d80b12b20b50b69388618f5e562bf58dc1debc4d525edbebd2e7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_equal
from astropy import units as u
from astropy.table import Table, QTable, vstack
from astropy.time import Time
from astropy.timeseries.sampled import TimeSeries
from astropy.timeseries.binned import BinnedTimeSeries
INPUT_TIME = Time(['2016-03-22T12:30:31', '2015-01-21T12:30:32', '2016-03-22T12:30:40'])
PLAIN_TABLE = Table([[1., 2., 11.], [3, 4, 1], ['x', 'y', 'z']], names=['a', 'b', 'c'])
class CommonTimeSeriesTests:
def test_stacking(self):
ts = vstack([self.series, self.series])
assert isinstance(ts, self.series.__class__)
def test_row_slicing(self):
ts = self.series[:2]
assert isinstance(ts, self.series.__class__)
def test_row_indexing(self):
self.series[0][self.time_attr] == Time('2015-01-21T12:30:32')
self.series[self.time_attr][0] == Time('2015-01-21T12:30:32')
def test_column_indexing(self):
assert_equal(self.series['a'], [1, 2, 11])
def test_column_slicing_notime(self):
tab = self.series['a', 'b']
assert not isinstance(tab, self.series.__class__)
assert isinstance(tab, QTable)
def test_add_column(self):
self.series['d'] = [1, 2, 3]
def test_add_row(self):
self.series.add_row(self._row)
def test_required_after_stacking(self):
# When stacking, we have to temporarily relax the checking of the
# columns in the time series, but we need to make sure that the
# checking works again afterwards
ts = vstack([self.series, self.series])
with pytest.raises(ValueError) as exc:
ts.remove_columns(ts.colnames)
assert 'TimeSeries object is invalid' in exc.value.args[0]
class TestTimeSeries(CommonTimeSeriesTests):
_row = {'time': '2016-03-22T12:30:40', 'a': 1., 'b': 2, 'c': 'a'}
def setup_method(self, method):
self.series = TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE)
self.time_attr = 'time'
def test_column_slicing(self):
ts = self.series['time', 'a']
assert isinstance(ts, TimeSeries)
class TestBinnedTimeSeries(CommonTimeSeriesTests):
_row = {'time_bin_start': '2016-03-22T12:30:40',
'time_bin_size': 2 * u.s, 'a': 1., 'b': 2, 'c': 'a'}
def setup_method(self, method):
self.series = BinnedTimeSeries(time_bin_start=INPUT_TIME,
time_bin_size=3 * u.s,
data=PLAIN_TABLE)
self.time_attr = 'time_bin_start'
def test_column_slicing(self):
ts = self.series['time_bin_start', 'time_bin_size', 'a']
assert isinstance(ts, BinnedTimeSeries)
|
53a93b9fac4ae88a9ac62a8971b0b94f3093756cdf8a82b824412444bd67d593 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_equal
from astropy import units as u
from astropy.time import Time
from astropy.timeseries.sampled import TimeSeries
from astropy.timeseries.downsample import aggregate_downsample, reduceat
INPUT_TIME = Time(['2016-03-22T12:30:31', '2016-03-22T12:30:32',
'2016-03-22T12:30:33', '2016-03-22T12:30:34'])
ts = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4]], names=['a'])
ts_units = TimeSeries(time=INPUT_TIME, data=[[1, 2, 3, 4] * u.count], names=['a'])
def test_reduceat():
add_output = np.add.reduceat(np.arange(8), [0, 4, 1, 5, 2, 6, 3, 7])
# Similar to np.add for an array input.
sum_output = reduceat(np.arange(8), [0, 4, 1, 5, 2, 6, 3, 7], np.sum)
assert_equal(sum_output, add_output)
mean_output = reduceat(np.arange(8), np.arange(8)[::2], np.mean)
assert_equal(mean_output, np.array([0.5, 2.5, 4.5, 6.5]))
nanmean_output = reduceat(np.arange(8), [0, 4, 1, 5, 2, 6, 3, 7], np.nanmean)
assert_equal(nanmean_output, np.array([1.5, 4, 2.5, 5, 3.5, 6, 4.5, 7.]))
assert_equal(reduceat(np.arange(8), np.arange(8)[::2], np.mean),
reduceat(np.arange(8), np.arange(8)[::2], np.nanmean))
def test_timeseries_invalid():
with pytest.raises(TypeError) as exc:
aggregate_downsample(None)
assert exc.value.args[0] == ("time_series should be a TimeSeries")
with pytest.raises(TypeError) as exc:
aggregate_downsample(TimeSeries())
assert exc.value.args[0] == ("time_bin_size should be a astropy.unit quantity")
def test_downsample():
down_1 = aggregate_downsample(ts, time_bin_size=1*u.second)
assert np.all(u.isclose(down_1.time_bin_size, [1, 1, 1, 1]*u.second))
assert_equal(down_1.time_bin_start.isot, Time(['2016-03-22T12:30:31.000', '2016-03-22T12:30:32.000',
'2016-03-22T12:30:33.000', '2016-03-22T12:30:34.000']))
assert_equal(down_1["a"].data, np.array([1, 2, 3, 4]))
down_2 = aggregate_downsample(ts, time_bin_size=2*u.second)
assert np.all(u.isclose(down_2.time_bin_size, [2, 2]*u.second))
assert_equal(down_2.time_bin_start.isot, Time(['2016-03-22T12:30:31.000', '2016-03-22T12:30:33.000']))
assert_equal(down_2["a"].data, np.array([1, 3]))
down_3 = aggregate_downsample(ts, time_bin_size=3*u.second)
assert np.all(u.isclose(down_3.time_bin_size, [3, 3]*u.second))
assert_equal(down_3.time_bin_start.isot, Time(['2016-03-22T12:30:31.000', '2016-03-22T12:30:34.000']))
assert_equal(down_3["a"].data, np.array([2, 4]))
down_4 = aggregate_downsample(ts, time_bin_size=4*u.second)
assert np.all(u.isclose(down_4.time_bin_size, [4]*u.second))
assert_equal(down_4.time_bin_start.isot, Time(['2016-03-22T12:30:31.000']))
assert_equal(down_4["a"].data, np.array([2]))
down_units = aggregate_downsample(ts_units, time_bin_size=4*u.second)
assert np.all(u.isclose(down_units.time_bin_size, [4]*u.second))
assert_equal(down_units.time_bin_start.isot, Time(['2016-03-22T12:30:31.000']))
assert down_units["a"].unit.name == 'ct'
assert_equal(down_units["a"].data, np.array([2.5]))
|
ec1d9ac34ca9a35fd566f5603f9070aa041415e604bf6203427735724d427da8 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import datetime
import pytest
from numpy.testing import assert_equal, assert_allclose
from astropy.table import Table, Column
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.utils.data import get_pkg_data_filename
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.timeseries.sampled import TimeSeries
INPUT_TIME = Time(['2016-03-22T12:30:31',
'2015-01-21T12:30:32',
'2016-03-22T12:30:40'])
PLAIN_TABLE = Table([[1, 2, 11], [3, 4, 1], [1, 1, 1]], names=['a', 'b', 'c'])
CSV_FILE = get_pkg_data_filename('data/sampled.csv')
def test_empty_initialization():
ts = TimeSeries()
ts['time'] = Time([1, 2, 3], format='mjd')
def test_empty_initialization_invalid():
# Make sure things crash when the first column added is not a time column
ts = TimeSeries()
with pytest.raises(ValueError) as exc:
ts['flux'] = [1, 2, 3]
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'flux'")
def test_initialize_only_time():
ts = TimeSeries(time=INPUT_TIME)
assert ts['time'] is ts.time
# NOTE: the object in the table is a copy
assert_equal(ts.time.isot, INPUT_TIME.isot)
def test_initialization_with_data():
ts = TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert_equal(ts.time.isot, INPUT_TIME.isot)
assert_equal(ts['a'], [10, 2, 3])
assert_equal(ts['b'], [4, 5, 6])
def test_initialize_only_data():
with pytest.raises(TypeError) as exc:
TimeSeries(data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "Either 'time' or 'time_start' should be specified"
def test_initialization_with_table():
ts = TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE)
assert ts.colnames == ['time', 'a', 'b', 'c']
def test_initialization_with_time_delta():
ts = TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
time_delta=TimeDelta(3, format='sec'),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert_equal(ts.time.isot, ['2018-07-01T10:10:10.000',
'2018-07-01T10:10:13.000',
'2018-07-01T10:10:16.000'])
def test_initialization_missing_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "'time' is scalar, so 'time_delta' is required"
def test_initialization_invalid_time_and_time_start():
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, time_start=datetime(2018, 7, 1, 10, 10, 10),
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "Cannot specify both 'time' and 'time_start'"
def test_initialization_invalid_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time_start=datetime(2018, 7, 1, 10, 10, 10),
time_delta=[1, 4, 3],
data=[[10, 2, 3], [4, 5, 6]], names=['a', 'b'])
assert exc.value.args[0] == "'time_delta' should be a Quantity or a TimeDelta"
def test_initialization_with_time_in_data():
data = PLAIN_TABLE.copy()
data['time'] = INPUT_TIME
ts1 = TimeSeries(data=data)
assert set(ts1.colnames) == set(['time', 'a', 'b', 'c'])
assert all(ts1.time == INPUT_TIME)
ts2 = TimeSeries(data=[[10, 2, 3], INPUT_TIME], names=['a', 'time'])
assert set(ts2.colnames) == set(['time', 'a'])
assert all(ts2.time == INPUT_TIME)
with pytest.raises(TypeError) as exc:
# Don't allow ambiguous cases of passing multiple 'time' columns
TimeSeries(data=data, time=INPUT_TIME)
assert exc.value.args[0] == "'time' has been given both in the table and as a keyword argument"
with pytest.raises(TypeError) as exc:
# 'time' is a protected name, don't allow ambiguous cases
TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], INPUT_TIME], names=['a', 'time'])
assert exc.value.args[0] == "'time' has been given both in the table and as a keyword argument"
def test_initialization_n_samples():
# Make sure things crash with incorrect n_samples
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE, n_samples=1000)
assert exc.value.args[0] == ("'n_samples' has been given both and it is not the "
"same length as the input data.")
def test_initialization_length_mismatch():
with pytest.raises(ValueError) as exc:
TimeSeries(time=INPUT_TIME, data=[[10, 2], [4, 5]], names=['a', 'b'])
assert exc.value.args[0] == "Length of 'time' (3) should match data length (2)"
def test_initialization_invalid_both_time_and_time_delta():
with pytest.raises(TypeError) as exc:
TimeSeries(time=INPUT_TIME, time_delta=TimeDelta(3, format='sec'))
assert exc.value.args[0] == ("'time_delta' should not be specified since "
"'time' is an array")
def test_fold():
times = Time([1, 2, 3, 8, 9, 12], format='unix')
ts = TimeSeries(time=times)
ts['flux'] = [1, 4, 4, 3, 2, 3]
# Try without midpoint epoch, as it should default to the first time
tsf = ts.fold(period=3 * u.s)
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [0, 1, -1, 1, -1, -1], rtol=1e-6)
# Try with midpoint epoch
tsf = ts.fold(period=4 * u.s, midpoint_epoch=Time(2.5, format='unix'))
assert isinstance(tsf.time, TimeDelta)
assert_allclose(tsf.time.sec, [-1.5, -0.5, 0.5, 1.5, -1.5, 1.5], rtol=1e-6)
def test_pandas():
pandas = pytest.importorskip("pandas")
df1 = pandas.DataFrame()
df1['a'] = [1, 2, 3]
df1.set_index(pandas.DatetimeIndex(INPUT_TIME.datetime64), inplace=True)
ts = TimeSeries.from_pandas(df1)
assert_equal(ts.time.isot, INPUT_TIME.isot)
assert ts.colnames == ['time', 'a']
assert len(ts.indices) == 1
assert (ts.indices['time'].columns[0] == INPUT_TIME).all()
ts_tcb = TimeSeries.from_pandas(df1, time_scale='tcb')
assert ts_tcb.time.scale == 'tcb'
df2 = ts.to_pandas()
assert (df2.index.values == pandas.Index(INPUT_TIME.datetime64).values).all()
assert df2.columns == pandas.Index(['a'])
assert (df1['a'] == df2['a']).all()
with pytest.raises(TypeError) as exc:
TimeSeries.from_pandas(None)
assert exc.value.args[0] == 'Input should be a pandas DataFrame'
df4 = pandas.DataFrame()
df4['a'] = [1, 2, 3]
with pytest.raises(TypeError) as exc:
TimeSeries.from_pandas(df4)
assert exc.value.args[0] == 'DataFrame does not have a DatetimeIndex'
def test_read_time_missing():
with pytest.raises(ValueError) as exc:
TimeSeries.read(CSV_FILE, format='csv')
assert exc.value.args[0] == '``time_column`` should be provided since the default Table readers are being used.'
def test_read_time_wrong():
with pytest.raises(ValueError) as exc:
TimeSeries.read(CSV_FILE, time_column='abc', format='csv')
assert exc.value.args[0] == "Time column 'abc' not found in the input data."
def test_read():
timeseries = TimeSeries.read(CSV_FILE, time_column='Date', format='csv')
assert timeseries.colnames == ['time', 'A', 'B', 'C', 'D', 'E', 'F', 'G']
assert len(timeseries) == 11
assert timeseries['time'].format == 'iso'
assert timeseries['A'].sum() == 266.5
@pytest.mark.remote_data(source='astropy')
def test_kepler_astropy():
filename = get_pkg_data_filename('timeseries/kplr010666592-2009131110544_slc.fits')
timeseries = TimeSeries.read(filename, format='kepler.fits')
assert timeseries["time"].format == 'isot'
assert timeseries["time"].scale == 'tdb'
assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
assert len(timeseries) == 14280
assert len(timeseries.columns) == 20
@pytest.mark.remote_data(source='astropy')
def test_tess_astropy():
filename = get_pkg_data_filename('timeseries/hlsp_tess-data-alerts_tess_phot_00025155310-s01_tess_v1_lc.fits')
timeseries = TimeSeries.read(filename, format='tess.fits')
assert timeseries["time"].format == 'isot'
assert timeseries["time"].scale == 'tdb'
assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
assert len(timeseries) == 19261
assert len(timeseries.columns) == 20
def test_required_columns():
# Test the machinery that makes sure that the required columns are present
ts = TimeSeries(time=INPUT_TIME,
data=[[10, 2, 3], [4, 5, 6]],
names=['a', 'b'])
# In the examples below, the operation (e.g. remove_column) is actually
# carried out before the checks are made, so we need to use copy() so that
# we don't change the main version of the time series.
# Make sure copy works fine
ts.copy()
with pytest.raises(ValueError) as exc:
ts.copy().add_column(Column([3, 4, 5], name='c'), index=0)
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'c'")
with pytest.raises(ValueError) as exc:
ts.copy().add_columns([Column([3, 4, 5], name='d'),
Column([3, 4, 5], name='e')], indexes=[0, 1])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'd'")
with pytest.raises(ValueError) as exc:
ts.copy().keep_columns(['a', 'b'])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'a'")
with pytest.raises(ValueError) as exc:
ts.copy().remove_column('time')
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'a'")
with pytest.raises(ValueError) as exc:
ts.copy().remove_columns(['time', 'a'])
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'b'")
with pytest.raises(ValueError) as exc:
ts.copy().rename_column('time', 'banana')
assert exc.value.args[0] == ("TimeSeries object is invalid - expected "
"'time' as the first column but found 'banana'")
@pytest.mark.parametrize('cls', [BoxLeastSquares, LombScargle])
def test_periodogram(cls):
# Note that we don't need to check the actual results from the periodogram
# classes here since these are tested extensively in
# astropy.timeseries.periodograms.
ts = TimeSeries(time=INPUT_TIME,
data=[[10, 2, 3], [4, 5, 6]],
names=['a', 'b'])
p1 = cls.from_timeseries(ts, 'a')
assert isinstance(p1, cls)
assert_allclose(p1.t.jd, ts.time.jd)
assert_equal(p1.y, ts['a'])
assert p1.dy is None
p2 = cls.from_timeseries(ts, 'a', uncertainty='b')
assert_quantity_allclose(p2.dy, ts['b'])
p3 = cls.from_timeseries(ts, 'a', uncertainty=0.1)
assert_allclose(p3.dy, 0.1)
|
e0a7c57b683ad28f4fc5f0a6a2521a07bf2908efac576fa7c00bd49f767180c0 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_equal, assert_allclose
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.utils.data import get_pkg_data_filename
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.timeseries.binned import BinnedTimeSeries
from astropy.tests.helper import assert_quantity_allclose
CSV_FILE = get_pkg_data_filename('data/binned.csv')
def test_empty_initialization():
ts = BinnedTimeSeries()
ts['time_bin_start'] = Time([1, 2, 3], format='mjd')
def test_empty_initialization_invalid():
# Make sure things crash when the first column added is not a time column
ts = BinnedTimeSeries()
with pytest.raises(ValueError) as exc:
ts['flux'] = [1, 2, 3]
assert exc.value.args[0] == ("BinnedTimeSeries object is invalid - expected "
"'time_bin_start' as the first column but found 'flux'")
def test_initialization_time_bin_invalid():
# Make sure things crash when time_bin_* is passed incorrectly.
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data=[[1, 4, 3]])
assert exc.value.args[0] == ("'time_bin_start' has not been specified")
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31', data=[[1, 4, 3]])
assert exc.value.args[0] == ("Either 'time_bin_size' or 'time_bin_end' should be specified")
def test_initialization_time_bin_both():
# Make sure things crash when time_bin_* is passed twice.
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time_bin_start": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31")
assert exc.value.args[0] == ("'time_bin_start' has been given both in the table "
"and as a keyword argument")
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time_bin_size": ["2016-03-22T12:30:31"]},
time_bin_size=[1]*u.s)
assert exc.value.args[0] == ("'time_bin_size' has been given both in the table "
"and as a keyword argument")
def test_initialization_time_bin_size():
# Make sure things crash when time_bin_size has no units
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31",
time_bin_size=1)
assert exc.value.args[0] == ("'time_bin_size' should be a Quantity or a TimeDelta")
# TimeDelta for time_bin_size
ts = BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start="2016-03-22T12:30:31",
time_bin_size=TimeDelta(1))
assert isinstance(ts.time_bin_size, u.quantity.Quantity)
def test_initialization_time_bin_start_scalar():
# Make sure things crash when time_bin_start is a scalar with no time_bin_size
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=Time(1, format='mjd'),
time_bin_end=Time(1, format='mjd'))
assert exc.value.args[0] == ("'time_bin_start' is scalar, so 'time_bin_size' is required")
def test_initialization_n_bins():
# Make sure things crash with incorrect n_bins
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=Time(1, format='mjd'),
time_bin_size=1*u.s,
time_bin_end=Time(1, format='mjd'),
n_bins=10)
assert exc.value.args[0] == ("'n_bins' has been given and it is not the "
"same length as the input data.")
def test_initialization_non_scalar_time():
# Make sure things crash with incorrect size of time_bin_start
with pytest.raises(ValueError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=["2016-03-22T12:30:31", "2016-03-22T12:30:32"],
time_bin_size=1*u.s,
time_bin_end=Time(1, format='mjd'))
assert exc.value.args[0] == ("Length of 'time_bin_start' (2) should match table length (1)")
with pytest.raises(TypeError) as exc:
BinnedTimeSeries(data={"time": ["2016-03-22T12:30:31"]},
time_bin_start=["2016-03-22T12:30:31"],
time_bin_size=None,
time_bin_end=None)
assert exc.value.args[0] == ("Either 'time_bin_size' or 'time_bin_end' should be specified")
def test_even_contiguous():
# Initialize a ``BinnedTimeSeries`` with even contiguous bins by specifying
# the bin width:
ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
time_bin_size=3 * u.s, data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:34.000',
'2016-03-22T12:30:37.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:32.500',
'2016-03-22T12:30:35.500',
'2016-03-22T12:30:38.500'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:34.000',
'2016-03-22T12:30:37.000',
'2016-03-22T12:30:40.000'])
def test_uneven_contiguous():
# Initialize a ``BinnedTimeSeries`` with uneven contiguous bins by giving an
# end time:
ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
'2016-03-22T12:30:32',
'2016-03-22T12:30:40'],
time_bin_end='2016-03-22T12:30:55',
data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:32.000',
'2016-03-22T12:30:40.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:31.500',
'2016-03-22T12:30:36.000',
'2016-03-22T12:30:47.500'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:32.000',
'2016-03-22T12:30:40.000',
'2016-03-22T12:30:55.000'])
def test_uneven_non_contiguous():
# Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins with
# lists of start times, bin sizes and data:
ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
'2016-03-22T12:30:38',
'2016-03-22T12:34:40'],
time_bin_size=[5, 100, 2]*u.s,
data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:38.000',
'2016-03-22T12:34:40.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:33.500',
'2016-03-22T12:31:28.000',
'2016-03-22T12:34:41.000'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:36.000',
'2016-03-22T12:32:18.000',
'2016-03-22T12:34:42.000'])
def test_uneven_non_contiguous_full():
# Initialize a ``BinnedTimeSeries`` with uneven non-contiguous bins by
# specifying the start and end times for the bins:
ts = BinnedTimeSeries(time_bin_start=['2016-03-22T12:30:31',
'2016-03-22T12:30:33',
'2016-03-22T12:30:40'],
time_bin_end=['2016-03-22T12:30:32',
'2016-03-22T12:30:35',
'2016-03-22T12:30:41'],
data=[[1, 4, 3]])
assert_equal(ts.time_bin_start.isot, ['2016-03-22T12:30:31.000',
'2016-03-22T12:30:33.000',
'2016-03-22T12:30:40.000'])
assert_equal(ts.time_bin_center.isot, ['2016-03-22T12:30:31.500',
'2016-03-22T12:30:34.000',
'2016-03-22T12:30:40.500'])
assert_equal(ts.time_bin_end.isot, ['2016-03-22T12:30:32.000',
'2016-03-22T12:30:35.000',
'2016-03-22T12:30:41.000'])
def test_read_empty():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, format='csv')
assert exc.value.args[0] == '``time_bin_start_column`` should be provided since the default Table readers are being used.'
def test_read_no_size_end():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', format='csv')
assert exc.value.args[0] == 'Either `time_bin_end_column` or `time_bin_size_column` should be provided.'
def test_read_both_extra_bins():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_end_column='END', time_bin_size_column='bin_size', format='csv')
assert exc.value.args[0] == "Cannot specify both `time_bin_end_column` and `time_bin_size_column`."
def test_read_size_no_unit():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column='bin_size', format='csv')
assert exc.value.args[0] == "The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``."
def test_read_start_time_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='abc', time_bin_size_column='bin_size', time_bin_size_unit=u.second, format='csv')
assert exc.value.args[0] == "Bin start time column 'abc' not found in the input data."
def test_read_end_time_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_end_column="missing", format='csv')
assert exc.value.args[0] == "Bin end time column 'missing' not found in the input data."
def test_read_size_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column="missing", time_bin_size_unit=u.second, format='csv')
assert exc.value.args[0] == "Bin size column 'missing' not found in the input data."
def test_read_time_unit_missing():
with pytest.raises(ValueError) as exc:
BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start', time_bin_size_column="bin_size", format='csv')
assert exc.value.args[0] == "The bin size unit should be specified as an astropy Unit using ``time_bin_size_unit``."
def test_read():
timeseries = BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
time_bin_end_column='time_end', format='csv')
assert timeseries.colnames == ['time_bin_start', 'time_bin_size', 'bin_size', 'A', 'B', 'C', 'D', 'E', 'F']
assert len(timeseries) == 10
assert timeseries['B'].sum() == 1151.54
timeseries = BinnedTimeSeries.read(CSV_FILE, time_bin_start_column='time_start',
time_bin_size_column='bin_size',
time_bin_size_unit=u.second, format='csv')
assert timeseries.colnames == ['time_bin_start', 'time_bin_size', 'time_end', 'A', 'B', 'C', 'D', 'E', 'F']
assert len(timeseries) == 10
assert timeseries['B'].sum() == 1151.54
@pytest.mark.parametrize('cls', [BoxLeastSquares, LombScargle])
def test_periodogram(cls):
# Note that we don't need to check the actual results from the periodogram
# classes here since these are tested extensively in
# astropy.timeseries.periodograms.
ts = BinnedTimeSeries(time_bin_start='2016-03-22T12:30:31',
time_bin_size=3 * u.s, data=[[1, 4, 3], [3, 4, 3]], names=['a', 'b'])
p1 = cls.from_timeseries(ts, 'a')
assert isinstance(p1, cls)
assert_allclose(p1.t.jd, ts.time_bin_center.jd)
assert_equal(p1.y, ts['a'])
assert p1.dy is None
p2 = cls.from_timeseries(ts, 'a', uncertainty='b')
assert_quantity_allclose(p2.dy, ts['b'])
p3 = cls.from_timeseries(ts, 'a', uncertainty=0.1)
assert_allclose(p3.dy, 0.1)
|
bd61cf90b214f0ea5e859ba6d54b3b8764e55a0b05ec14eb203baeb22c980b91 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ["BoxLeastSquares", "BoxLeastSquaresResults"]
import numpy as np
from astropy import units
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms.lombscargle.core import has_units, strip_units
from astropy import units as u
from . import methods
from astropy.timeseries.periodograms.base import BasePeriodogram
def validate_unit_consistency(reference_object, input_object):
if has_units(reference_object):
input_object = units.Quantity(input_object, unit=reference_object.unit)
else:
if has_units(input_object):
input_object = units.Quantity(input_object, unit=units.one)
input_object = input_object.value
return input_object
class BoxLeastSquares(BasePeriodogram):
"""Compute the box least squares periodogram
This method is a commonly used tool for discovering transiting exoplanets
or eclipsing binaries in photometric time series datasets. This
implementation is based on the "box least squares (BLS)" method described
in [1]_ and [2]_.
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times ``t``.
dy : float, array-like or `~astropy.units.Quantity`, optional
Error or sequence of observational errors associated with times ``t``.
Examples
--------
Generate noisy data with a transit:
>>> rand = np.random.RandomState(42)
>>> t = rand.uniform(0, 10, 500)
>>> y = np.ones_like(t)
>>> y[np.abs((t + 1.0)%2.0-1)<0.08] = 1.0 - 0.1
>>> y += 0.01 * rand.randn(len(t))
Compute the transit periodogram on a heuristically determined period grid
and find the period with maximum power:
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16)
>>> results.period[np.argmax(results.power)] # doctest: +FLOAT_CMP
1.9923406038842544
Compute the periodogram on a user-specified period grid:
>>> periods = np.linspace(1.9, 2.1, 5)
>>> results = model.power(periods, 0.16)
>>> results.power # doctest: +FLOAT_CMP
array([0.01421067, 0.02842475, 0.10867671, 0.05117755, 0.01783253])
If the inputs are AstroPy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.day
>>> y = y * u.dimensionless_unscaled
>>> model = BoxLeastSquares(t, y)
>>> results = model.autopower(0.16 * u.day)
>>> results.period.unit
Unit("d")
>>> results.power.unit
Unit(dimensionless)
References
----------
.. [1] Kovacs, Zucker, & Mazeh (2002), A&A, 391, 369
(arXiv:astro-ph/0206099)
.. [2] Hartman & Bakos (2016), Astronomy & Computing, 17, 1
(arXiv:1605.06811)
"""
def __init__(self, t, y, dy=None):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
# so can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to('day')
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, Time):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy)
def autoperiod(self, duration,
minimum_period=None, maximum_period=None,
minimum_n_transit=3, frequency_factor=1.0):
"""Determine a suitable grid of periods
This method uses a set of heuristics to select a conservative period
grid that is uniform in frequency. This grid might be too fine for
        some users' needs depending on the precision requirements or the
sampling of the data. The grid can be made coarser by increasing
``frequency_factor``.
Parameters
----------
duration : float, array-like or `~astropy.units.Quantity`
The set of durations that will be considered.
minimum_period, maximum_period : float or `~astropy.units.Quantity`, optional
The minimum/maximum periods to search. If not provided, these will
be computed as described in the notes below.
        minimum_n_transit : int, optional
            If ``maximum_period`` is not provided, this is used to compute the
            maximum period to search by asserting that any systems with at
            least ``minimum_n_transit`` transits will be within the range of
            searched periods. Note that this is not the same as requiring that
            ``minimum_n_transit`` transits be observed for detection. The
            default value is ``3``.
frequency_factor : float, optional
A factor to control the frequency spacing as described in the
notes below. The default value is ``1.0``.
Returns
-------
period : array-like or `~astropy.units.Quantity`
The set of periods computed using these heuristics with the same
units as ``t``.
Notes
-----
The default minimum period is chosen to be twice the maximum duration
because there won't be much sensitivity to periods shorter than that.
        The default maximum period is computed as
        .. code-block:: python
            maximum_period = (max(t) - min(t)) / (minimum_n_transit - 1)
        ensuring that any systems with at least ``minimum_n_transit`` transits
        are within the range of searched periods.
The frequency spacing is given by
.. code-block:: python
df = frequency_factor * min(duration) / (max(t) - min(t))**2
so the grid can be made finer by decreasing ``frequency_factor`` or
coarser by increasing ``frequency_factor``.
"""
duration = self._validate_duration(duration)
baseline = strip_units((self._trel.max() - self._trel.min()))
min_duration = strip_units(np.min(duration))
# Estimate the required frequency spacing
# Because of the sparsity of a transit, this must be much finer than
# the frequency resolution for a sinusoidal fit. For a sinusoidal fit,
# df would be 1/baseline (see LombScargle), but here this should be
# scaled proportionally to the duration in units of baseline.
df = frequency_factor * min_duration / baseline**2
# If a minimum period is not provided, choose one that is twice the
# maximum duration because we won't be sensitive to any periods
# shorter than that.
if minimum_period is None:
minimum_period = 2.0 * strip_units(np.max(duration))
else:
minimum_period = validate_unit_consistency(self._trel, minimum_period)
minimum_period = strip_units(minimum_period)
# If no maximum period is provided, choose one by requiring that
# all signals with at least minimum_n_transit should be detectable.
if maximum_period is None:
if minimum_n_transit <= 1:
raise ValueError("minimum_n_transit must be greater than 1")
maximum_period = baseline / (minimum_n_transit-1)
else:
maximum_period = validate_unit_consistency(self._trel, maximum_period)
maximum_period = strip_units(maximum_period)
if maximum_period < minimum_period:
minimum_period, maximum_period = maximum_period, minimum_period
if minimum_period <= 0.0:
raise ValueError("minimum_period must be positive")
# Convert bounds to frequency
minimum_frequency = 1.0/strip_units(maximum_period)
maximum_frequency = 1.0/strip_units(minimum_period)
# Compute the number of frequencies and the frequency grid
nf = 1 + int(np.round((maximum_frequency - minimum_frequency)/df))
return 1.0/(maximum_frequency-df*np.arange(nf)) * self._t_unit()
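    # A minimal sketch of the heuristics above, assuming ``model`` is the
    # ``BoxLeastSquares(t, y)`` instance from the class docstring (``t``
    # spanning roughly 10 units of unitless relative time):
    #
    #     >>> periods = model.autoperiod(0.16)
    #     >>> periods.min()   # ~2 * max(duration) = 0.32
    #     >>> periods.max()   # ~baseline / (minimum_n_transit - 1)
    #     >>> coarse = model.autoperiod(0.16, frequency_factor=2.0)
    #     >>> len(coarse) < len(periods)   # doubling frequency_factor coarsens the grid
    #     True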
def autopower(self, duration, objective=None, method=None, oversample=10,
minimum_n_transit=3, minimum_period=None,
maximum_period=None, frequency_factor=1.0):
"""Compute the periodogram at set of heuristically determined periods
This method calls :func:`BoxLeastSquares.autoperiod` to determine
the period grid and then :func:`BoxLeastSquares.power` to compute
the periodogram. See those methods for documentation of the arguments.
"""
period = self.autoperiod(duration,
minimum_n_transit=minimum_n_transit,
minimum_period=minimum_period,
maximum_period=maximum_period,
frequency_factor=frequency_factor)
return self.power(period, duration, objective=objective, method=method,
oversample=oversample)
def power(self, period, duration, objective=None, method=None,
oversample=10):
"""Compute the periodogram for a set of periods
Parameters
----------
period : array-like or `~astropy.units.Quantity`
The periods where the power should be computed
duration : float, array-like or `~astropy.units.Quantity`
The set of durations to test
objective : {'likelihood', 'snr'}, optional
The scalar that should be optimized to find the best fit phase,
duration, and depth. This can be either ``'likelihood'`` (default)
to optimize the log-likelihood of the model, or ``'snr'`` to
optimize the signal-to-noise with which the transit depth is
measured.
method : {'fast', 'slow'}, optional
The computational method used to compute the periodogram. This is
mainly included for the purposes of testing and most users will
want to use the optimized ``'fast'`` method (default) that is
implemented in Cython. ``'slow'`` is a brute-force method that is
used to test the results of the ``'fast'`` method.
oversample : int, optional
The number of bins per duration that should be used. This sets the
time resolution of the phase fit with larger values of
``oversample`` yielding a finer grid and higher computational cost.
Returns
-------
results : BoxLeastSquaresResults
The periodogram results as a :class:`BoxLeastSquaresResults`
object.
Raises
------
ValueError
If ``oversample`` is not an integer greater than 0 or if
``objective`` or ``method`` are not valid.
"""
period, duration = self._validate_period_and_duration(period, duration)
# Check for absurdities in the ``oversample`` choice
try:
oversample = int(oversample)
except TypeError:
raise ValueError("oversample must be an int, got {0}"
.format(oversample))
if oversample < 1:
raise ValueError("oversample must be greater than or equal to 1")
# Select the periodogram objective
if objective is None:
objective = "likelihood"
allowed_objectives = ["snr", "likelihood"]
if objective not in allowed_objectives:
raise ValueError(("Unrecognized method '{0}'\n"
"allowed methods are: {1}")
.format(objective, allowed_objectives))
use_likelihood = (objective == "likelihood")
# Select the computational method
if method is None:
method = "fast"
allowed_methods = ["fast", "slow"]
if method not in allowed_methods:
raise ValueError(("Unrecognized method '{0}'\n"
"allowed methods are: {1}")
.format(method, allowed_methods))
# Format and check the input arrays
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# Make sure that the period and duration arrays are C-order
period_fmt = np.ascontiguousarray(strip_units(period),
dtype=np.float64)
duration = np.ascontiguousarray(strip_units(duration),
dtype=np.float64)
# Select the correct implementation for the chosen method
if method == "fast":
bls = methods.bls_fast
else:
bls = methods.bls_slow
# Run the implementation
results = bls(
t, y - np.median(y), ivar, period_fmt, duration,
oversample, use_likelihood)
return self._format_results(objective, period, results)
def _as_relative_time(self, name, times):
"""
Convert the provided times (if absolute) to relative times using the
current _tstart value. If the times provided are relative, they are
returned without conversion (though we still do some checks).
"""
if isinstance(times, TimeDelta):
times = times.to('day')
if self._tstart is None:
if isinstance(times, Time):
raise TypeError('{0} was provided as an absolute time but '
'the BoxLeastSquares class was initialized '
'with relative times.'.format(name))
else:
if isinstance(times, Time):
times = (times - self._tstart).to(u.day)
else:
raise TypeError('{0} was provided as a relative time but '
'the BoxLeastSquares class was initialized '
'with absolute times.'.format(name))
times = validate_unit_consistency(self._trel, times)
return times
def _as_absolute_time_if_needed(self, name, times):
"""
Convert the provided times to absolute times using the current _tstart
value, if needed.
"""
if self._tstart is not None:
# Some time formats/scales can't represent dates/times too far
# off from the present, so we need to mask values offset by
# more than 100,000 yr (the periodogram algorithm can return
# transit times of e.g 1e300 for some periods).
reset = np.abs(times.to_value(u.year)) > 100000
times[reset] = 0
times = self._tstart + times
times[reset] = np.nan
return times
def model(self, t_model, period, duration, transit_time):
"""Compute the transit model at the given period, duration, and phase
Parameters
----------
t_model : array-like or `~astropy.units.Quantity` or `~astropy.time.Time`
Times at which to compute the model.
period : float or `~astropy.units.Quantity`
The period of the transits.
duration : float or `~astropy.units.Quantity`
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
y_model : array-like or `~astropy.units.Quantity`
The model evaluated at the times ``t_model`` with units of ``y``.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
t_model = strip_units(self._as_relative_time('t_model', t_model))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# Compute the depth
hp = 0.5*period
m_in = np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
m_out = ~m_in
y_in = np.sum(y[m_in] * ivar[m_in]) / np.sum(ivar[m_in])
y_out = np.sum(y[m_out] * ivar[m_out]) / np.sum(ivar[m_out])
# Evaluate the model
y_model = y_out + np.zeros_like(t_model)
m_model = np.abs((t_model-transit_time+hp) % period-hp) < 0.5*duration
y_model[m_model] = y_in
return y_model * self._y_unit()
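    # A minimal usage sketch, assuming ``model`` and ``results`` are the
    # objects from the class docstring example (unitless relative times, so
    # plain floats and arrays are accepted here):
    #
    #     >>> best = np.argmax(results.power)
    #     >>> t_fit = np.linspace(0, 10, 1000)
    #     >>> y_fit = model.model(t_fit, results.period[best],
    #     ...                     results.duration[best],
    #     ...                     results.transit_time[best])
    #     >>> y_fit.shape
    #     (1000,)
    #
    # ``y_fit`` is the box model: ``y_out`` outside transit, ``y_in`` inside,
    # in the same units as ``y``.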
def compute_stats(self, period, duration, transit_time):
"""Compute descriptive statistics for a given transit model
These statistics are commonly used for vetting of transit candidates.
Parameters
----------
period : float or `~astropy.units.Quantity`
The period of the transits.
duration : float or `~astropy.units.Quantity`
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
stats : dict
A dictionary containing several descriptive statistics:
- ``depth``: The depth and uncertainty (as a tuple with two
values) on the depth for the fiducial model.
- ``depth_odd``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period.
- ``depth_even``: The depth and uncertainty on the depth for a
model where the period is twice the fiducial period and the
phase is offset by one orbital period.
- ``depth_half``: The depth and uncertainty for a model with a
period of half the fiducial period.
- ``depth_phased``: The depth and uncertainty for a model with the
fiducial period and the phase offset by half a period.
- ``harmonic_amplitude``: The amplitude of the best fit sinusoidal
model.
- ``harmonic_delta_log_likelihood``: The difference in log
likelihood between a sinusoidal model and the transit model.
If ``harmonic_delta_log_likelihood`` is greater than zero, the
sinusoidal model is preferred.
- ``transit_times``: The mid-transit time for each transit in the
baseline.
- ``per_transit_count``: An array with a count of the number of
data points in each unique transit included in the baseline.
- ``per_transit_log_likelihood``: An array with the value of the
log likelihood for each unique transit included in the
baseline.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
t = np.ascontiguousarray(strip_units(self._trel), dtype=np.float64)
y = np.ascontiguousarray(strip_units(self.y), dtype=np.float64)
if self.dy is None:
ivar = np.ones_like(y)
else:
ivar = 1.0 / np.ascontiguousarray(strip_units(self.dy),
dtype=np.float64)**2
# This a helper function that will compute the depth for several
# different hypothesized transit models with different parameters
def _compute_depth(m, y_out=None, var_out=None):
if np.any(m) and (var_out is None or np.isfinite(var_out)):
var_m = 1.0 / np.sum(ivar[m])
y_m = np.sum(y[m] * ivar[m]) * var_m
if y_out is None:
return y_m, var_m
return y_out - y_m, np.sqrt(var_m + var_out)
return 0.0, np.inf
# Compute the depth of the fiducial model and the two models at twice
# the period
hp = 0.5*period
m_in = np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
m_out = ~m_in
m_odd = np.abs((t-transit_time) % (2*period) - period) \
< 0.5*duration
m_even = np.abs((t-transit_time+period) % (2*period) - period) \
< 0.5*duration
y_out, var_out = _compute_depth(m_out)
depth = _compute_depth(m_in, y_out, var_out)
depth_odd = _compute_depth(m_odd, y_out, var_out)
depth_even = _compute_depth(m_even, y_out, var_out)
y_in = y_out - depth[0]
# Compute the depth of the model at a phase of 0.5*period
m_phase = np.abs((t-transit_time) % period - hp) < 0.5*duration
depth_phase = _compute_depth(m_phase,
*_compute_depth((~m_phase) & m_out))
# Compute the depth of a model with a period of 0.5*period
m_half = np.abs((t-transit_time+0.25*period) % (0.5*period)
- 0.25*period) < 0.5*duration
depth_half = _compute_depth(m_half, *_compute_depth(~m_half))
# Compute the number of points in each transit
transit_id = np.round((t[m_in]-transit_time) / period).astype(int)
transit_times = period * np.arange(transit_id.min(),
transit_id.max()+1) + transit_time
unique_ids, unique_counts = np.unique(transit_id,
return_counts=True)
unique_ids -= np.min(transit_id)
transit_id -= np.min(transit_id)
counts = np.zeros(np.max(transit_id) + 1, dtype=int)
counts[unique_ids] = unique_counts
# Compute the per-transit log likelihood
ll = -0.5 * ivar[m_in] * ((y[m_in] - y_in)**2 - (y[m_in] - y_out)**2)
lls = np.zeros(len(counts))
for i in unique_ids:
lls[i] = np.sum(ll[transit_id == i])
full_ll = -0.5*np.sum(ivar[m_in] * (y[m_in] - y_in)**2)
full_ll -= 0.5*np.sum(ivar[m_out] * (y[m_out] - y_out)**2)
# Compute the log likelihood of a sine model
A = np.vstack((
np.sin(2*np.pi*t/period), np.cos(2*np.pi*t/period),
np.ones_like(t)
)).T
w = np.linalg.solve(np.dot(A.T, A * ivar[:, None]),
np.dot(A.T, y * ivar))
mod = np.dot(A, w)
sin_ll = -0.5*np.sum((y-mod)**2*ivar)
# Format the results
y_unit = self._y_unit()
ll_unit = 1
if self.dy is None:
ll_unit = y_unit * y_unit
return dict(
transit_times=self._as_absolute_time_if_needed('transit_times', transit_times * self._t_unit()),
per_transit_count=counts,
per_transit_log_likelihood=lls * ll_unit,
depth=(depth[0] * y_unit, depth[1] * y_unit),
depth_phased=(depth_phase[0] * y_unit, depth_phase[1] * y_unit),
depth_half=(depth_half[0] * y_unit, depth_half[1] * y_unit),
depth_odd=(depth_odd[0] * y_unit, depth_odd[1] * y_unit),
depth_even=(depth_even[0] * y_unit, depth_even[1] * y_unit),
harmonic_amplitude=np.sqrt(np.sum(w[:2]**2)) * y_unit,
harmonic_delta_log_likelihood=(sin_ll - full_ll) * ll_unit,
)
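    # A minimal vetting sketch, assuming ``model`` and ``results`` from the
    # class docstring example:
    #
    #     >>> best = np.argmax(results.power)
    #     >>> stats = model.compute_stats(results.period[best],
    #     ...                             results.duration[best],
    #     ...                             results.transit_time[best])
    #     >>> sorted(stats)[:3]
    #     ['depth', 'depth_even', 'depth_half']
    #
    # A transit-like signal should typically show consistent ``depth_odd`` and
    # ``depth_even`` values and a negative ``harmonic_delta_log_likelihood``.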
def transit_mask(self, t, period, duration, transit_time):
"""Compute which data points are in transit for a given parameter set
Parameters
----------
        t : array-like or `~astropy.units.Quantity` or `~astropy.time.Time`
Times where the mask should be evaluated.
period : float or `~astropy.units.Quantity`
The period of the transits.
duration : float or `~astropy.units.Quantity`
The duration of the transit.
transit_time : float or `~astropy.units.Quantity` or `~astropy.time.Time`
The mid-transit time of a reference transit.
Returns
-------
transit_mask : array-like
            A boolean array where ``True`` indicates an in-transit point and
            ``False`` indicates an out-of-transit point.
"""
period, duration = self._validate_period_and_duration(period, duration)
transit_time = self._as_relative_time('transit_time', transit_time)
t = strip_units(self._as_relative_time('t', t))
period = float(strip_units(period))
duration = float(strip_units(duration))
transit_time = float(strip_units(transit_time))
hp = 0.5*period
return np.abs((t-transit_time+hp) % period - hp) < 0.5*duration
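    # A minimal sketch, assuming ``model``, ``results``, ``t`` and ``y`` from
    # the class docstring example; the mask selects in-transit points, e.g.
    # for excluding them from a baseline fit:
    #
    #     >>> best = np.argmax(results.power)
    #     >>> in_transit = model.transit_mask(t, results.period[best],
    #     ...                                 results.duration[best],
    #     ...                                 results.transit_time[best])
    #     >>> in_transit.dtype
    #     dtype('bool')
    #     >>> t_out, y_out = t[~in_transit], y[~in_transit]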
def _validate_inputs(self, t, y, dy):
"""Private method used to check the consistency of the inputs
Parameters
----------
t : array-like, `~astropy.units.Quantity`, `~astropy.time.Time`, or `~astropy.time.TimeDelta`
Sequence of observation times.
y : array-like or `~astropy.units.Quantity`
Sequence of observations associated with times t.
dy : float, array-like or `~astropy.units.Quantity`
Error or sequence of observational errors associated with times t.
Returns
-------
t, y, dy : array-like or `~astropy.units.Quantity` or `~astropy.time.Time`
The inputs with consistent shapes and units.
Raises
------
ValueError
If the dimensions are incompatible or if the units of dy cannot be
converted to the units of y.
"""
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if dy is not None:
dy = validate_unit_consistency(y, dy)
return t, y, dy
def _validate_duration(self, duration):
"""Private method used to check a set of test durations
Parameters
----------
duration : float, array-like or `~astropy.units.Quantity`
The set of durations that will be considered.
Returns
-------
duration : array-like or `~astropy.units.Quantity`
The input reformatted with the correct shape and units.
Raises
------
ValueError
If the units of duration cannot be converted to the units of t.
"""
duration = np.atleast_1d(np.abs(duration))
if duration.ndim != 1 or duration.size == 0:
raise ValueError("duration must be 1-dimensional")
return validate_unit_consistency(self._trel, duration)
def _validate_period_and_duration(self, period, duration):
"""Private method used to check a set of periods and durations
Parameters
----------
period : float, array-like or `~astropy.units.Quantity`
The set of test periods.
duration : float, array-like or `~astropy.units.Quantity`
The set of durations that will be considered.
Returns
-------
period, duration : array-like or `~astropy.units.Quantity`
The inputs reformatted with the correct shapes and units.
Raises
------
ValueError
If the units of period or duration cannot be converted to the
units of t.
"""
duration = self._validate_duration(duration)
period = np.atleast_1d(np.abs(period))
if period.ndim != 1 or period.size == 0:
raise ValueError("period must be 1-dimensional")
period = validate_unit_consistency(self._trel, period)
if not np.min(period) > np.max(duration):
raise ValueError("The maximum transit duration must be shorter "
"than the minimum period")
return period, duration
def _format_results(self, objective, period, results):
"""A private method used to wrap and add units to the periodogram
Parameters
----------
objective : string
The name of the objective used in the optimization.
period : array-like or `~astropy.units.Quantity`
The set of trial periods.
results : tuple
The output of one of the periodogram implementations.
"""
(power, depth, depth_err, duration, transit_time, depth_snr,
log_likelihood) = results
if has_units(self._trel):
transit_time = units.Quantity(transit_time, unit=self._trel.unit)
transit_time = self._as_absolute_time_if_needed('transit_time', transit_time)
duration = units.Quantity(duration, unit=self._trel.unit)
if has_units(self.y):
depth = units.Quantity(depth, unit=self.y.unit)
depth_err = units.Quantity(depth_err, unit=self.y.unit)
depth_snr = units.Quantity(depth_snr, unit=units.one)
if self.dy is None:
if objective == "likelihood":
power = units.Quantity(power, unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood,
unit=self.y.unit**2)
else:
power = units.Quantity(power, unit=units.one)
log_likelihood = units.Quantity(log_likelihood, unit=units.one)
return BoxLeastSquaresResults(
objective, period, power, depth, depth_err, duration, transit_time,
depth_snr, log_likelihood)
def _t_unit(self):
if has_units(self._trel):
return self._trel.unit
else:
return 1
def _y_unit(self):
if has_units(self.y):
return self.y.unit
else:
return 1
class BoxLeastSquaresResults(dict):
"""The results of a BoxLeastSquares search
Attributes
----------
objective : string
The scalar used to optimize to find the best fit phase, duration, and
depth. See :func:`BoxLeastSquares.power` for more information.
period : array-like or `~astropy.units.Quantity`
The set of test periods.
power : array-like or `~astropy.units.Quantity`
The periodogram evaluated at the periods in ``period``. If
``objective`` is:
* ``'likelihood'``: the values of ``power`` are the
log likelihood maximized over phase, depth, and duration, or
* ``'snr'``: the values of ``power`` are the signal-to-noise with
which the depth is measured maximized over phase, depth, and
duration.
depth : array-like or `~astropy.units.Quantity`
The estimated depth of the maximum power model at each period.
depth_err : array-like or `~astropy.units.Quantity`
The 1-sigma uncertainty on ``depth``.
duration : array-like or `~astropy.units.Quantity`
The maximum power duration at each period.
transit_time : array-like or `~astropy.units.Quantity` or `~astropy.time.Time`
The maximum power phase of the transit in units of time. This
indicates the mid-transit time and it will always be in the range
(0, period).
depth_snr : array-like or `~astropy.units.Quantity`
The signal-to-noise with which the depth is measured at maximum power.
log_likelihood : array-like or `~astropy.units.Quantity`
The log likelihood of the maximum power model.
"""
def __init__(self, *args):
super().__init__(zip(
("objective", "period", "power", "depth", "depth_err",
"duration", "transit_time", "depth_snr", "log_likelihood"),
args
))
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __repr__(self):
if self.keys():
m = max(map(len, list(self.keys()))) + 1
return '\n'.join([k.rjust(m) + ': ' + repr(v)
for k, v in sorted(self.items())])
else:
return self.__class__.__name__ + "()"
def __dir__(self):
return list(self.keys())
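# A minimal sketch of working with the results container above, assuming
# ``results`` was produced by ``BoxLeastSquares.autopower`` as in the class
# docstring: every field is available both as a key and as an attribute.
#
#     >>> results['period'] is results.period
#     True
#     >>> best = np.argmax(results.power)
#     >>> results.period[best], results.depth[best]   # best-fit period and depth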
|
8da5aea55cbf0b7a14a4d68ef57a3aa17cb363f0ba09e31bca13cef3064ca600 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from os.path import join
from distutils.core import Extension
BLS_ROOT = os.path.relpath(os.path.dirname(__file__))
def get_extensions():
ext = Extension(
"astropy.timeseries.periodograms.bls._impl",
sources=[
join(BLS_ROOT, "bls.c"),
join(BLS_ROOT, "_impl.pyx"),
],
include_dirs=["numpy"],
)
return [ext]
|
eca2013c8d737860562b7f9c453c9e4c9ef9d7068d3e534df2a7b451414cc63f | """
Utilities for computing periodogram statistics.
This is an internal module; users should access this functionality via the
``false_alarm_probability`` and ``false_alarm_level`` methods of the
``astropy.timeseries.LombScargle`` API.
"""
from functools import wraps
import numpy as np
def _weighted_sum(val, dy):
if dy is not None:
return (val / dy ** 2).sum()
else:
return val.sum()
def _weighted_mean(val, dy):
if dy is None:
return val.mean()
else:
return _weighted_sum(val, dy) / _weighted_sum(np.ones_like(val), dy)
def _weighted_var(val, dy):
return _weighted_mean(val ** 2, dy) - _weighted_mean(val, dy) ** 2
def _gamma(N):
from scipy.special import gammaln
# Note: this is closely approximated by (1 - 0.75 / N) for large N
return np.sqrt(2 / N) * np.exp(gammaln(N / 2) - gammaln((N - 1) / 2))
def _log_gamma(N):
from scipy.special import gammaln
return 0.5 * np.log(2 / N) + gammaln(N / 2) - gammaln((N - 1) / 2)
def vectorize_first_argument(func):
@wraps(func)
def new_func(x, *args, **kwargs):
x = np.asarray(x)
return np.array([func(xi, *args, **kwargs)
for xi in x.flat]).reshape(x.shape)
return new_func
def pdf_single(z, N, normalization, dH=1, dK=3):
"""Probability density function for Lomb-Scargle periodogram
Compute the expected probability density function of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : integers, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
pdf : np.ndarray
The expected probability density function.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
z = np.asarray(z)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == 'psd':
return np.exp(-z)
elif normalization == 'standard':
return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1)
elif normalization == 'model':
return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1)
elif normalization == 'log':
return 0.5 * Nk * np.exp(-0.5 * Nk * z)
else:
raise ValueError("normalization='{0}' is not recognized"
"".format(normalization))
def fap_single(z, N, normalization, dH=1, dK=3):
"""Single-frequency false alarm probability for the Lomb-Scargle periodogram
This is equal to 1 - cdf, where cdf is the cumulative distribution.
The single-frequency false alarm probability should not be confused with
the false alarm probability for the largest peak.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : integers, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
false_alarm_probability : np.ndarray
The single-frequency false alarm probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
z = np.asarray(z)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == 'psd':
return np.exp(-z)
elif normalization == 'standard':
return (1 - z) ** (0.5 * Nk)
elif normalization == 'model':
return (1 + z) ** (-0.5 * Nk)
elif normalization == 'log':
return np.exp(-0.5 * Nk * z)
else:
raise ValueError("normalization='{0}' is not recognized"
"".format(normalization))
def inv_fap_single(fap, N, normalization, dH=1, dK=3):
"""Single-frequency inverse false alarm probability
This function computes the periodogram value associated with the specified
single-frequency false alarm probability. This should not be confused with
the false alarm level of the largest peak.
Parameters
----------
fap : array-like
The false alarm probability.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : integers, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
z : np.ndarray
The periodogram power corresponding to the single-peak false alarm
probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
fap = np.asarray(fap)
if dK - dH != 2:
raise NotImplementedError("Degrees of freedom != 2")
Nk = N - dK
if normalization == 'psd':
return -np.log(fap)
elif normalization == 'standard':
return 1 - fap ** (2 / Nk)
elif normalization == 'model':
return -1 + fap ** (-2 / Nk)
elif normalization == 'log':
return -2 / Nk * np.log(fap)
else:
raise ValueError("normalization='{0}' is not recognized"
"".format(normalization))
def cdf_single(z, N, normalization, dH=1, dK=3):
"""Cumulative distribution for the Lomb-Scargle periodogram
Compute the expected cumulative distribution of the periodogram
for the null hypothesis - i.e. data consisting of Gaussian noise.
Parameters
----------
z : array-like
The periodogram value.
N : int
The number of data points from which the periodogram was computed.
normalization : {'standard', 'model', 'log', 'psd'}
The periodogram normalization.
dH, dK : integers, optional
The number of parameters in the null hypothesis and the model.
Returns
-------
cdf : np.ndarray
The expected cumulative distribution function.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK)
def tau_davies(Z, fmax, t, y, dy, normalization='standard', dH=1, dK=3):
"""tau factor for estimating Davies bound (Baluev 2008, Table 1)"""
N = len(t)
NH = N - dH # DOF for null hypothesis
NK = N - dK # DOF for periodic hypothesis
Dt = _weighted_var(t, dy)
Teff = np.sqrt(4 * np.pi * Dt) # Effective baseline
W = fmax * Teff
Z = np.asarray(Z)
if normalization == 'psd':
# 'psd' normalization is same as Baluev's z
return W * np.exp(-Z) * np.sqrt(Z)
elif normalization == 'standard':
# 'standard' normalization is Z = 2/NH * z_1
return (_gamma(NH) * W * (1 - Z) ** (0.5 * (NK - 1))
* np.sqrt(0.5 * NH * Z))
elif normalization == 'model':
# 'model' normalization is Z = 2/NK * z_2
return (_gamma(NK) * W * (1 + Z) ** (-0.5 * NK)
* np.sqrt(0.5 * NK * Z))
elif normalization == 'log':
# 'log' normalization is Z = 2/NK * z_3
return (_gamma(NK) * W * np.exp(-0.5 * Z * (NK - 0.5))
* np.sqrt(NK * np.sinh(0.5 * Z)))
else:
raise NotImplementedError("normalization={0}".format(normalization))
def fap_naive(Z, fmax, t, y, dy, normalization='standard'):
"""False Alarm Probability based on estimated number of indep frequencies"""
N = len(t)
T = max(t) - min(t)
N_eff = fmax * T
fap_s = fap_single(Z, N, normalization=normalization)
# result is 1 - (1 - fap_s) ** N_eff
# this is much more precise for small Z / large N
return -np.expm1(N_eff * np.log1p(-fap_s))
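# The ``expm1``/``log1p`` form above equals ``1 - (1 - fap_s) ** N_eff`` but
# keeps precision when ``fap_s`` is tiny.  A minimal sketch with made-up
# numbers (not drawn from any dataset):
#
#     >>> fap_s, N_eff = 1e-12, 1000.0
#     >>> -np.expm1(N_eff * np.log1p(-fap_s))   # ~1e-9, computed stably
#     >>> 1 - (1 - fap_s) ** N_eff              # same quantity, loses digits
#     ...                                       # to cancellation near 1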
def inv_fap_naive(fap, fmax, t, y, dy, normalization='standard'):
"""Inverse FAP based on estimated number of indep frequencies"""
fap = np.asarray(fap)
N = len(t)
T = max(t) - min(t)
N_eff = fmax * T
#fap_s = 1 - (1 - fap) ** (1 / N_eff)
fap_s = -np.expm1(np.log(1 - fap) / N_eff)
return inv_fap_single(fap_s, N, normalization)
def fap_davies(Z, fmax, t, y, dy, normalization='standard'):
"""Davies upper-bound to the false alarm probability
(Eqn 5 of Baluev 2008)
"""
N = len(t)
fap_s = fap_single(Z, N, normalization=normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
return fap_s + tau
@vectorize_first_argument
def inv_fap_davies(p, fmax, t, y, dy, normalization='standard'):
"""Inverse of the davies upper-bound"""
from scipy import optimize
args = (fmax, t, y, dy, normalization)
z0 = inv_fap_naive(p, *args)
func = lambda z, *args: fap_davies(z, *args) - p
res = optimize.root(func, z0, args=args, method='lm')
if not res.success:
        raise ValueError('inv_fap_davies did not converge for p={0}'.format(p))
return res.x
def fap_baluev(Z, fmax, t, y, dy, normalization='standard'):
"""Alias-free approximation to false alarm probability
(Eqn 6 of Baluev 2008)
"""
fap_s = fap_single(Z, len(t), normalization)
tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
# result is 1 - (1 - fap_s) * np.exp(-tau)
# this is much more precise for small numbers
return -np.expm1(-tau) + fap_s * np.exp(-tau)
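# The two terms above are the precision-safe form of
# ``1 - (1 - fap_s) * exp(-tau)``.  A minimal sketch with made-up numbers:
#
#     >>> fap_s, tau = 1e-10, 1e-8
#     >>> -np.expm1(-tau) + fap_s * np.exp(-tau)   # ~1.01e-8
#     >>> 1 - (1 - fap_s) * np.exp(-tau)           # same quantity, less precise
#     ...                                          # for values this small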
@vectorize_first_argument
def inv_fap_baluev(p, fmax, t, y, dy, normalization='standard'):
"""Inverse of the Baluev alias-free approximation"""
from scipy import optimize
args = (fmax, t, y, dy, normalization)
z0 = inv_fap_naive(p, *args)
func = lambda z, *args: fap_baluev(z, *args) - p
res = optimize.root(func, z0, args=args, method='lm')
if not res.success:
raise ValueError('inv_fap_baluev did not converge for p={0}'.format(p))
return res.x
def _bootstrap_max(t, y, dy, fmax, normalization, random_seed):
"""Generate a sequence of bootstrap estimates of the max"""
from .core import LombScargle
rng = np.random.RandomState(random_seed)
while True:
s = rng.randint(0, len(y), len(y)) # sample with replacement
ls_boot = LombScargle(t, y[s], dy if dy is None else dy[s],
normalization=normalization)
freq, power = ls_boot.autopower(maximum_frequency=fmax)
yield power.max()
def fap_bootstrap(Z, fmax, t, y, dy, normalization='standard',
n_bootstraps=1000, random_seed=None):
"""Bootstrap estimate of the false alarm probability"""
pmax = np.fromiter(_bootstrap_max(t, y, dy, fmax,
normalization, random_seed),
float, n_bootstraps)
pmax.sort()
return 1 - np.searchsorted(pmax, Z) / len(pmax)
def inv_fap_bootstrap(fap, fmax, t, y, dy, normalization='standard',
n_bootstraps=1000, random_seed=None):
"""Bootstrap estimate of the inverse false alarm probability"""
fap = np.asarray(fap)
pmax = np.fromiter(_bootstrap_max(t, y, dy, fmax,
normalization, random_seed),
float, n_bootstraps)
pmax.sort()
return pmax[np.clip(np.floor((1 - fap) * len(pmax)).astype(int),
0, len(pmax) - 1)]
METHODS = {'single': fap_single,
'naive': fap_naive,
'davies': fap_davies,
'baluev': fap_baluev,
'bootstrap': fap_bootstrap}
def false_alarm_probability(Z, fmax, t, y, dy, normalization='standard',
method='baluev', method_kwds=None):
"""Compute the approximate false alarm probability for periodogram peaks Z
This gives an estimate of the false alarm probability for the largest value
in a periodogram, based on the null hypothesis of non-varying data with
Gaussian noise. The true probability cannot be computed analytically, so
each method available here is an approximation to the true value.
Parameters
----------
Z : array-like
The periodogram value.
fmax : float
The maximum frequency of the periodogram.
t, y, dy : array-like
The data times, values, and errors.
normalization : {'standard', 'model', 'log', 'psd'}, optional
The periodogram normalization.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
false_alarm_probability : np.ndarray
The false alarm probability.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
See Also
--------
false_alarm_level : compute the periodogram level for a particular fap
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if method == 'single':
return fap_single(Z, len(t), normalization)
elif method not in METHODS:
raise ValueError("Unrecognized method: {0}".format(method))
method = METHODS[method]
method_kwds = method_kwds or {}
return method(Z, fmax, t, y, dy, normalization, **method_kwds)
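# A minimal usage sketch, assuming ``t``, ``y``, ``dy`` are 1-d arrays and
# ``power`` / ``fmax`` come from an already-computed periodogram (most users
# would reach this through ``LombScargle.false_alarm_probability`` instead):
#
#     >>> fap = false_alarm_probability(power.max(), fmax, t, y, dy,
#     ...                               normalization='standard',
#     ...                               method='baluev')
#     >>> fap_boot = false_alarm_probability(power.max(), fmax, t, y, dy,
#     ...                                    method='bootstrap',
#     ...                                    method_kwds=dict(n_bootstraps=100))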
INV_METHODS = {'single': inv_fap_single,
'naive': inv_fap_naive,
'davies': inv_fap_davies,
'baluev': inv_fap_baluev,
'bootstrap': inv_fap_bootstrap}
def false_alarm_level(p, fmax, t, y, dy, normalization,
method='baluev', method_kwds=None):
"""Compute the approximate periodogram level given a false alarm probability
This gives an estimate of the periodogram level corresponding to a specified
false alarm probability for the largest peak, assuming a null hypothesis
of non-varying data with Gaussian noise. The true level cannot be computed
analytically, so each method available here is an approximation to the true
value.
Parameters
----------
p : array-like
The false alarm probability (0 < p < 1).
fmax : float
The maximum frequency of the periodogram.
t, y, dy : arrays
The data times, values, and errors.
normalization : {'standard', 'model', 'log', 'psd'}, optional
The periodogram normalization.
method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
The approximation method to use.
method_kwds : dict, optional
Additional method-specific keywords.
Returns
-------
z : np.ndarray
The periodogram level.
Notes
-----
For normalization='psd', the distribution can only be computed for
periodograms constructed with errors specified.
See Also
--------
false_alarm_probability : compute the fap for a given periodogram level
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if method == 'single':
return inv_fap_single(p, len(t), normalization)
elif method not in INV_METHODS:
raise ValueError("Unrecognized method: {0}".format(method))
method = INV_METHODS[method]
method_kwds = method_kwds or {}
return method(p, fmax, t, y, dy, normalization, **method_kwds)
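# A minimal sketch of the inverse relationship between the two helpers above,
# assuming the same ``t``, ``y``, ``dy`` and ``fmax`` as in the sketch after
# ``false_alarm_probability``: the level returned for a probability ``p``
# should map back to approximately ``p``.
#
#     >>> z = false_alarm_level(0.01, fmax, t, y, dy, 'standard', method='baluev')
#     >>> false_alarm_probability(z, fmax, t, y, dy, 'standard', method='baluev')  # ~0.01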
|
788524a147d7bd1198369ed483cb68bc503c33ad4c122d3e42a3de5a72a4b560 | """Main Lomb-Scargle Implementation"""
import numpy as np
from .implementations import lombscargle, available_methods
from .implementations.mle import periodic_fit, design_matrix
from . import _statistics
from astropy import units
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.timeseries.periodograms.base import BasePeriodogram
def has_units(obj):
return hasattr(obj, 'unit')
def get_unit(obj):
return getattr(obj, 'unit', 1)
def strip_units(*arrs):
strip = lambda a: None if a is None else np.asarray(a)
if len(arrs) == 1:
return strip(arrs[0])
else:
return map(strip, arrs)
class LombScargle(BasePeriodogram):
"""Compute the Lomb-Scargle Periodogram.
    The implementations here are based on code presented in [1]_ and [2]_;
if you use this functionality in an academic application, citation of
those works would be appreciated.
Parameters
----------
t : array_like or Quantity
sequence of observation times
y : array_like or Quantity
sequence of observations associated with times t
dy : float, array_like or Quantity (optional)
error or sequence of observational errors associated with times t
fit_mean : bool (optional, default=True)
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool (optional, default=True)
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if fit_mean = False
nterms : int (optional, default=1)
number of terms to use in the Fourier fit
normalization : {'standard', 'model', 'log', 'psd'}, optional
Normalization to use for the periodogram.
Examples
--------
Generate noisy periodic data:
>>> rand = np.random.RandomState(42)
>>> t = 100 * rand.rand(100)
>>> y = np.sin(2 * np.pi * t) + rand.randn(100)
Compute the Lomb-Scargle periodogram on an automatically-determined
frequency grid & find the frequency of max power:
>>> frequency, power = LombScargle(t, y).autopower()
>>> frequency[np.argmax(power)] # doctest: +FLOAT_CMP
1.0016662310392956
Compute the Lomb-Scargle periodogram at a user-specified frequency grid:
>>> freq = np.arange(0.8, 1.3, 0.1)
>>> LombScargle(t, y).power(freq) # doctest: +FLOAT_CMP
array([0.0204304 , 0.01393845, 0.35552682, 0.01358029, 0.03083737])
If the inputs are astropy Quantities with units, the units will be
validated and the outputs will also be Quantities with appropriate units:
>>> from astropy import units as u
>>> t = t * u.s
>>> y = y * u.mag
>>> frequency, power = LombScargle(t, y).autopower()
>>> frequency.unit
Unit("1 / s")
>>> power.unit
Unit(dimensionless)
Note here that the Lomb-Scargle power is always a unitless quantity,
because it is related to the :math:`\\chi^2` of the best-fit periodic
model at each frequency.
References
----------
.. [1] Vanderplas, J., Connolly, A. Ivezic, Z. & Gray, A. *Introduction to
astroML: Machine learning for astrophysics*. Proceedings of the
Conference on Intelligent Data Understanding (2012)
.. [2] VanderPlas, J. & Ivezic, Z. *Periodograms for Multiband Astronomical
Time Series*. ApJ 812.1:18 (2015)
"""
available_methods = available_methods()
def __init__(self, t, y, dy=None, fit_mean=True, center_data=True,
nterms=1, normalization='standard'):
# If t is a TimeDelta, convert it to a quantity. The units we convert
# to don't really matter since the user gets a Quantity back at the end
# so can convert to any units they like.
if isinstance(t, TimeDelta):
t = t.to('day')
# We want to expose self.t as being the times the user passed in, but
# if the times are absolute, we need to convert them to relative times
# internally, so we use self._trel and self._tstart for this.
self.t = t
if isinstance(self.t, Time):
self._tstart = self.t[0]
trel = (self.t - self._tstart).to(u.day)
else:
self._tstart = None
trel = self.t
self._trel, self.y, self.dy = self._validate_inputs(trel, y, dy)
self.fit_mean = fit_mean
self.center_data = center_data
self.nterms = nterms
self.normalization = normalization
def _validate_inputs(self, t, y, dy):
# Validate shapes of inputs
if dy is None:
t, y = np.broadcast_arrays(t, y, subok=True)
else:
t, y, dy = np.broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Inputs (t, y, dy) must be 1-dimensional")
# validate units of inputs if any is a Quantity
if any(has_units(arr) for arr in (t, y, dy)):
t, y = map(units.Quantity, (t, y))
if dy is not None:
dy = units.Quantity(dy)
try:
dy = units.Quantity(dy, unit=y.unit)
except units.UnitConversionError:
raise ValueError("Units of dy not equivalent "
"to units of y")
return t, y, dy
def _validate_frequency(self, frequency):
frequency = np.asanyarray(frequency)
if has_units(self._trel):
frequency = units.Quantity(frequency)
try:
frequency = units.Quantity(frequency, unit=1./self._trel.unit)
except units.UnitConversionError:
raise ValueError("Units of frequency not equivalent to "
"units of 1/t")
else:
if has_units(frequency):
raise ValueError("frequency have units while 1/t doesn't.")
return frequency
def _validate_t(self, t):
t = np.asanyarray(t)
if has_units(self._trel):
t = units.Quantity(t)
try:
t = units.Quantity(t, unit=self._trel.unit)
except units.UnitConversionError:
raise ValueError("Units of t not equivalent to "
"units of input self.t")
return t
def _power_unit(self, norm):
if has_units(self.y):
if self.dy is None and norm == 'psd':
return self.y.unit ** 2
else:
return units.dimensionless_unscaled
else:
return 1
def autofrequency(self, samples_per_peak=5, nyquist_factor=5,
minimum_frequency=None, maximum_frequency=None,
return_freq_limits=False):
"""Determine a suitable frequency grid for data.
Note that this assumes the peak width is driven by the observational
baseline, which is generally a good assumption when the baseline is
much larger than the oscillation period.
If you are searching for periods longer than the baseline of your
observations, this may not perform well.
Even with a large baseline, be aware that the maximum frequency
returned is based on the concept of "average Nyquist frequency", which
may not be useful for irregularly-sampled data. The maximum frequency
can be adjusted via the nyquist_factor argument, or through the
maximum_frequency argument.
Parameters
----------
samples_per_peak : float (optional, default=5)
The approximate number of desired samples across the typical peak
nyquist_factor : float (optional, default=5)
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float (optional)
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float (optional)
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
return_freq_limits : bool (optional)
if True, return only the frequency limits rather than the full
frequency grid.
Returns
-------
frequency : ndarray or Quantity
            The heuristically-determined optimal frequency grid
"""
baseline = self._trel.max() - self._trel.min()
n_samples = self._trel.size
df = 1.0 / baseline / samples_per_peak
if minimum_frequency is None:
minimum_frequency = 0.5 * df
if maximum_frequency is None:
avg_nyquist = 0.5 * n_samples / baseline
maximum_frequency = nyquist_factor * avg_nyquist
Nf = 1 + int(np.round((maximum_frequency - minimum_frequency) / df))
if return_freq_limits:
return minimum_frequency, minimum_frequency + df * (Nf - 1)
else:
return minimum_frequency + df * np.arange(Nf)
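    # A minimal sketch of the heuristic above, assuming ``ls = LombScargle(t, y)``
    # with ``t`` spanning ~100 time units as in the class docstring: the grid
    # spacing is 1 / (samples_per_peak * baseline) and the default maximum
    # frequency is nyquist_factor times the "average Nyquist" 0.5 * N / baseline.
    #
    #     >>> freq = ls.autofrequency()
    #     >>> fmin, fmax = ls.autofrequency(return_freq_limits=True)
    #     >>> freq[0] == fmin and freq[-1] == fmax
    #     True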
def autopower(self, method='auto', method_kwds=None,
normalization=None, samples_per_peak=5,
nyquist_factor=5, minimum_frequency=None,
maximum_frequency=None):
"""Compute Lomb-Scargle power at automatically-determined frequencies.
Parameters
----------
method : string (optional)
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
method_kwds : dict (optional)
additional keywords to pass to the lomb-scargle method
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
samples_per_peak : float (optional, default=5)
The approximate number of desired samples across the typical peak
nyquist_factor : float (optional, default=5)
The multiple of the average nyquist frequency used to choose the
maximum frequency if maximum_frequency is not provided.
minimum_frequency : float (optional)
If specified, then use this minimum frequency rather than one
chosen based on the size of the baseline.
maximum_frequency : float (optional)
If specified, then use this maximum frequency rather than one
chosen based on the average nyquist frequency.
Returns
-------
frequency, power : ndarrays
The frequency and Lomb-Scargle power
"""
frequency = self.autofrequency(samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency)
power = self.power(frequency,
normalization=normalization,
method=method, method_kwds=method_kwds,
assume_regular_frequency=True)
return frequency, power
def power(self, frequency, normalization=None, method='auto',
assume_regular_frequency=False, method_kwds=None):
"""Compute the Lomb-Scargle power at the given frequencies.
Parameters
----------
frequency : array_like or Quantity
frequencies (not angular frequencies) at which to evaluate the
periodogram. Note that in order to use method='fast', frequencies
must be regularly-spaced.
method : string (optional)
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
- 'slow': use the O[N^2] pure-python implementation
- 'cython': use the O[N^2] cython implementation. This is slightly
faster than method='slow', but much more memory efficient.
- 'chi2': use the O[N^2] chi2/linear-fitting implementation
- 'fastchi2': use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless ``assume_regular_frequency`` is set to True.
- 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool (optional)
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : {'standard', 'model', 'log', 'psd'}, optional
If specified, override the normalization specified at instantiation.
method_kwds : dict (optional)
additional keywords to pass to the lomb-scargle method
Returns
-------
power : ndarray
The Lomb-Scargle power at the specified frequency
"""
if normalization is None:
normalization = self.normalization
frequency = self._validate_frequency(frequency)
power = lombscargle(*strip_units(self._trel, self.y, self.dy),
frequency=strip_units(frequency),
center_data=self.center_data,
fit_mean=self.fit_mean,
nterms=self.nterms,
normalization=normalization,
method=method, method_kwds=method_kwds,
assume_regular_frequency=assume_regular_frequency)
return power * self._power_unit(normalization)
def _as_relative_time(self, name, times):
"""
Convert the provided times (if absolute) to relative times using the
current _tstart value. If the times provided are relative, they are
returned without conversion (though we still do some checks).
"""
if isinstance(times, TimeDelta):
times = times.to('day')
if self._tstart is None:
if isinstance(times, Time):
raise TypeError('{0} was provided as an absolute time but '
'the LombScargle class was initialized '
'with relative times.'.format(name))
else:
if isinstance(times, Time):
times = (times - self._tstart).to(u.day)
else:
raise TypeError('{0} was provided as a relative time but '
'the LombScargle class was initialized '
'with absolute times.'.format(name))
return times
def model(self, t, frequency):
"""Compute the Lomb-Scargle model at the given frequency.
The model at a particular frequency is a linear model:
model = offset + dot(design_matrix, model_parameters)
Parameters
----------
t : array_like or Quantity, length n_samples
times at which to compute the model
frequency : float
the frequency for the model
Returns
-------
y : np.ndarray, length n_samples
The model fit corresponding to the input times
See Also
--------
design_matrix
offset
model_parameters
"""
frequency = self._validate_frequency(frequency)
t = self._validate_t(self._as_relative_time('t', t))
y_fit = periodic_fit(*strip_units(self._trel, self.y, self.dy),
frequency=strip_units(frequency),
t_fit=strip_units(t),
center_data=self.center_data,
fit_mean=self.fit_mean,
nterms=self.nterms)
return y_fit * get_unit(self.y)
def offset(self):
"""Return the offset of the model
The offset of the model is the (weighted) mean of the y values.
Note that if self.center_data is False, the offset is 0 by definition.
Returns
-------
offset : scalar
See Also
--------
design_matrix
model
model_parameters
"""
y, dy = strip_units(self.y, self.dy)
if dy is None:
dy = 1
dy = np.broadcast_to(dy, y.shape)
if self.center_data:
w = dy ** -2.0
y_mean = np.dot(y, w) / w.sum()
else:
y_mean = 0
return y_mean * get_unit(self.y)
def model_parameters(self, frequency, units=True):
r"""Compute the best-fit model parameters at the given frequency.
The model described by these parameters is:
.. math::
y(t; f, \vec{\theta}) = \theta_0 + \sum_{n=1}^{\tt nterms} [\theta_{2n-1}\sin(2\pi n f t) + \theta_{2n}\cos(2\pi n f t)]
where :math:`\vec{\theta}` is the array of parameters returned by this function.
Parameters
----------
frequency : float
the frequency for the model
units : bool
If True (default), return design matrix with data units.
Returns
-------
theta : np.ndarray (n_parameters,)
The best-fit model parameters at the given frequency.
See Also
--------
design_matrix
model
offset
"""
frequency = self._validate_frequency(frequency)
t, y, dy = strip_units(self._trel, self.y, self.dy)
if self.center_data:
y = y - strip_units(self.offset())
dy = np.ones_like(y) if dy is None else np.asarray(dy)
X = self.design_matrix(frequency)
parameters = np.linalg.solve(np.dot(X.T, X),
np.dot(X.T, y / dy))
if units:
parameters = get_unit(self.y) * parameters
return parameters
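    # The pieces above fit together as follows (a sketch with hypothetical
    # names, mirroring the identity checked in the test suite):
    #
    #     theta = ls.model_parameters(frequency)
    #     X = ls.design_matrix(frequency, t=t_fit)
    #     y_model = ls.offset() + X.dot(theta)   # equivalent to ls.model(t_fit, frequency)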
def design_matrix(self, frequency, t=None):
"""Compute the design matrix for a given frequency
Parameters
----------
frequency : float
the frequency for the model
t : array_like or `~astropy.units.Quantity` or `~astropy.time.Time`, length n_samples
times at which to compute the model (optional). If not specified,
then the times and uncertainties of the input data are used
Returns
-------
X : np.ndarray (len(t), n_parameters)
The design matrix for the model at the given frequency.
See Also
--------
model
model_parameters
offset
"""
if t is None:
t, dy = strip_units(self._trel, self.dy)
else:
t, dy = strip_units(self._validate_t(self._as_relative_time('t', t)), None)
return design_matrix(t, frequency, dy,
nterms=self.nterms,
bias=self.fit_mean)
def distribution(self, power, cumulative=False):
"""Expected periodogram distribution under the null hypothesis.
This computes the expected probability distribution or cumulative
probability distribution of periodogram power, under the null
hypothesis of a non-varying signal with Gaussian noise. Note that
this is not the same as the expected distribution of peak values;
for that see the ``false_alarm_probability()`` method.
Parameters
----------
power : array_like
The periodogram power at which to compute the distribution.
cumulative : bool (optional)
If True, then return the cumulative distribution.
See Also
--------
false_alarm_probability
false_alarm_level
Returns
-------
dist : np.ndarray
The probability density or cumulative probability associated with
the provided powers.
"""
dH = 1 if self.fit_mean or self.center_data else 0
dK = dH + 2 * self.nterms
dist = _statistics.cdf_single if cumulative else _statistics.pdf_single
return dist(power, len(self._trel), self.normalization, dH=dH, dK=dK)
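    # Sketch of typical use (assuming ``power`` comes from ``ls.autopower()``):
    # the null-hypothesis density and cumulative distribution over a grid of
    # power values can be obtained as
    #
    #     z = np.linspace(0, power.max(), 1000)
    #     pdf = ls.distribution(z)
    #     cdf = ls.distribution(z, cumulative=True)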
def false_alarm_probability(self, power, method='baluev',
samples_per_peak=5, nyquist_factor=5,
minimum_frequency=None, maximum_frequency=None,
method_kwds=None):
"""False alarm probability of periodogram maxima under the null hypothesis.
This gives an estimate of the false alarm probability given the height
of the largest peak in the periodogram, based on the null hypothesis
of non-varying data with Gaussian noise.
Parameters
----------
        power : array-like
            The periodogram value(s) for which to compute the false alarm
            probability.
        method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
            The approximation method to use.
        samples_per_peak : int, optional
            The number of frequency samples per approximate peak width, used
            when determining the frequency limits via ``autofrequency``.
        nyquist_factor : float, optional
            The multiple of the average Nyquist frequency used to set the
            maximum frequency if ``maximum_frequency`` is not provided.
        minimum_frequency : float, optional
            The minimum frequency of the periodogram.
        maximum_frequency : float, optional
            The maximum frequency of the periodogram.
        method_kwds : dict, optional
            Additional method-specific keywords.
Returns
-------
false_alarm_probability : np.ndarray
The false alarm probability
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data.
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_level
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if self.nterms != 1:
raise NotImplementedError("false alarm probability is not "
"implemented for multiterm periodograms.")
if not (self.fit_mean or self.center_data):
raise NotImplementedError("false alarm probability is implemented "
"only for periodograms of centered data.")
fmin, fmax = self.autofrequency(samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
return_freq_limits=True)
return _statistics.false_alarm_probability(power,
fmax=fmax,
t=self._trel, y=self.y, dy=self.dy,
normalization=self.normalization,
method=method,
method_kwds=method_kwds)
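    # For example (a sketch; ``power`` is assumed to come from
    # ``ls.autopower()``), the false alarm probability of the highest peak is
    #
    #     fap = ls.false_alarm_probability(power.max(), method='baluev')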
def false_alarm_level(self, false_alarm_probability, method='baluev',
samples_per_peak=5, nyquist_factor=5,
minimum_frequency=None, maximum_frequency=None,
method_kwds=None):
"""Level of maximum at a given false alarm probability.
This gives an estimate of the periodogram level corresponding to a
specified false alarm probability for the largest peak, assuming a
null hypothesis of non-varying data with Gaussian noise.
Parameters
----------
        false_alarm_probability : array-like
            The false alarm probability (0 < fap < 1).
        method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
            The approximation method to use; default='baluev'.
        samples_per_peak : int, optional
            The number of frequency samples per approximate peak width, used
            when determining the frequency limits via ``autofrequency``.
        nyquist_factor : float, optional
            The multiple of the average Nyquist frequency used to set the
            maximum frequency if ``maximum_frequency`` is not provided.
        minimum_frequency : float, optional
            The minimum frequency of the periodogram.
        maximum_frequency : float, optional
            The maximum frequency of the periodogram.
        method_kwds : dict, optional
            Additional method-specific keywords.
Returns
-------
power : np.ndarray
The periodogram peak height corresponding to the specified
false alarm probability.
Notes
-----
The true probability distribution for the largest peak cannot be
determined analytically, so each method here provides an approximation
to the value. The available methods are:
- "baluev" (default): the upper-limit to the alias-free probability,
using the approach of Baluev (2008) [1]_.
- "davies" : the Davies upper bound from Baluev (2008) [1]_.
- "naive" : the approximate probability based on an estimated
effective number of independent frequencies.
- "bootstrap" : the approximate probability based on bootstrap
resamplings of the input data.
Note also that for normalization='psd', the distribution can only be
computed for periodograms constructed with errors specified.
See Also
--------
distribution
false_alarm_probability
References
----------
.. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
"""
if self.nterms != 1:
raise NotImplementedError("false alarm probability is not "
"implemented for multiterm periodograms.")
if not (self.fit_mean or self.center_data):
raise NotImplementedError("false alarm probability is implemented "
"only for periodograms of centered data.")
fmin, fmax = self.autofrequency(samples_per_peak=samples_per_peak,
nyquist_factor=nyquist_factor,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
return_freq_limits=True)
return _statistics.false_alarm_level(false_alarm_probability,
fmax=fmax,
t=self._trel, y=self.y, dy=self.dy,
normalization=self.normalization,
method=method,
method_kwds=method_kwds)
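    # Conversely (illustrative sketch), the power level that a peak must
    # exceed to reach a 1% false alarm probability:
    #
    #     level = ls.false_alarm_level(0.01, method='baluev')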
|
7cf8f1d6d913e6fdf6b1f902f048ce380e9082fb465acfa9fce518c8cd0ef58e | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.time import Time, TimeDelta
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms.bls import BoxLeastSquares
from astropy.timeseries.periodograms.lombscargle.core import has_units
def assert_allclose_blsresults(blsresult, other, **kwargs):
"""Assert that another BoxLeastSquaresResults object is consistent
    This function loops over all attributes and compares the values using
    :func:`~astropy.tests.helper.assert_quantity_allclose`.
Parameters
----------
other : BoxLeastSquaresResults
The other results object to compare.
"""
for k, v in blsresult.items():
if k not in other:
raise AssertionError("missing key '{0}'".format(k))
if k == "objective":
assert v == other[k], (
"Mismatched objectives. Expected '{0}', got '{1}'"
.format(v, other[k])
)
continue
assert_quantity_allclose(v, other[k], **kwargs)
@pytest.fixture
def data():
rand = np.random.RandomState(123)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
dy = rand.uniform(0.005, 0.01, len(t))
period = 2.0
transit_time = 0.5
duration = 0.16
depth = 0.2
m = np.abs((t-transit_time+0.5*period) % period-0.5*period) < 0.5*duration
y[m] = 1.0 - depth
y += dy * rand.randn(len(t))
return t, y, dy, dict(period=period, transit_time=transit_time,
duration=duration, depth=depth)
def test_32bit_bug():
rand = np.random.RandomState(42)
t = rand.uniform(0, 10, 500)
y = np.ones_like(t)
y[np.abs((t + 1.0) % 2.0-1) < 0.08] = 1.0 - 0.1
y += 0.01 * rand.randn(len(t))
model = BoxLeastSquares(t, y)
results = model.autopower(0.16)
assert np.allclose(results.period[np.argmax(results.power)],
1.9923406038842544)
periods = np.linspace(1.9, 2.1, 5)
results = model.power(periods, 0.16)
assert np.allclose(
results.power,
np.array([0.01421067, 0.02842475, 0.10867671, 0.05117755, 0.01783253])
)
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_correct_model(data, objective):
t, y, dy, params = data
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 0.1,
np.log(params["period"]) + 0.1, 1000))
results = model.power(periods, params["duration"], objective=objective)
ind = np.argmax(results.power)
for k, v in params.items():
assert_allclose(results[k][ind], v, atol=0.01)
chi = (results.depth[ind]-params["depth"]) / results.depth_err[ind]
assert np.abs(chi) < 1
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
@pytest.mark.parametrize("offset", [False, True])
def test_fast_method(data, objective, offset):
t, y, dy, params = data
if offset:
t = t - params["transit_time"] + params["period"]
model = BoxLeastSquares(t, y, dy)
periods = np.exp(np.linspace(np.log(params["period"]) - 1,
np.log(params["period"]) + 1, 10))
durations = params["duration"]
results = model.power(periods, durations, objective=objective)
assert_allclose_blsresults(results, model.power(periods, durations,
method="slow",
objective=objective))
def test_input_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * y_unit, dy * u.one)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y * u.one, dy * y_unit)
with pytest.raises(u.UnitConversionError):
BoxLeastSquares(t * t_unit, y, dy * y_unit)
model = BoxLeastSquares(t*t_unit, y * u.one, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y * y_unit, dy)
assert model.dy.unit == model.y.unit
model = BoxLeastSquares(t*t_unit, y*y_unit)
assert model.dy is None
def test_period_units(data):
t, y, dy, params = data
t_unit = u.day
y_unit = u.mag
model = BoxLeastSquares(t * t_unit, y * y_unit, dy)
p = model.autoperiod(params["duration"])
assert p.unit == t_unit
p = model.autoperiod(params["duration"] * 24 * u.hour)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
model.autoperiod(params["duration"] * u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], minimum_period=0.5*u.mag)
p = model.autoperiod(params["duration"], maximum_period=0.5)
assert p.unit == t_unit
with pytest.raises(u.UnitConversionError):
p = model.autoperiod(params["duration"], maximum_period=0.5*u.mag)
p = model.autoperiod(params["duration"], minimum_period=0.5,
maximum_period=1.5)
p2 = model.autoperiod(params["duration"], maximum_period=0.5,
minimum_period=1.5)
assert_quantity_allclose(p, p2)
@pytest.mark.parametrize("method", ["fast", "slow"])
@pytest.mark.parametrize("with_err", [True, False])
@pytest.mark.parametrize("t_unit", [None, u.day])
@pytest.mark.parametrize("y_unit", [None, u.mag])
@pytest.mark.parametrize("objective", ["likelihood", "snr"])
def test_results_units(data, method, with_err, t_unit, y_unit, objective):
t, y, dy, params = data
periods = np.linspace(params["period"]-1.0, params["period"]+1.0, 3)
if t_unit is not None:
t = t * t_unit
if y_unit is not None:
y = y * y_unit
dy = dy * y_unit
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(periods, params["duration"], method=method,
objective=objective)
if t_unit is None:
assert not has_units(results.period)
assert not has_units(results.duration)
assert not has_units(results.transit_time)
else:
assert results.period.unit == t_unit
assert results.duration.unit == t_unit
assert results.transit_time.unit == t_unit
if y_unit is None:
assert not has_units(results.power)
assert not has_units(results.depth)
assert not has_units(results.depth_err)
assert not has_units(results.depth_snr)
assert not has_units(results.log_likelihood)
else:
assert results.depth.unit == y_unit
assert results.depth_err.unit == y_unit
assert results.depth_snr.unit == u.one
if dy is None:
assert results.log_likelihood.unit == y_unit * y_unit
if objective == "snr":
assert results.power.unit == u.one
else:
assert results.power.unit == y_unit * y_unit
else:
assert results.log_likelihood.unit == u.one
assert results.power.unit == u.one
def test_autopower(data):
t, y, dy, params = data
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model = BoxLeastSquares(t, y, dy)
period = model.autoperiod(duration)
results1 = model.power(period, duration)
results2 = model.autopower(duration)
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize("with_units", [True, False])
def test_model(data, with_units):
t, y, dy, params = data
# Compute the model using linear regression
A = np.zeros((len(t), 2))
p = params["period"]
dt = np.abs((t-params["transit_time"]+0.5*p) % p-0.5*p)
m_in = dt < 0.5*params["duration"]
A[~m_in, 0] = 1.0
A[m_in, 1] = 1.0
w = np.linalg.solve(np.dot(A.T, A / dy[:, None]**2),
np.dot(A.T, y / dy**2))
model_true = np.dot(A, w)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
model_true = model_true * u.mag
# Compute the model using the periodogram
pgram = BoxLeastSquares(t, y, dy)
model = pgram.model(t, p, params["duration"], params["transit_time"])
# Make sure that the transit mask is consistent with the model
transit_mask = pgram.transit_mask(t, p, params["duration"],
params["transit_time"])
transit_mask0 = (model - model.max()) < 0.0
assert_allclose(transit_mask, transit_mask0)
assert_quantity_allclose(model, model_true)
@pytest.mark.parametrize("shape", [(1,), (2,), (3,), (2, 3)])
def test_shapes(data, shape):
t, y, dy, params = data
duration = params["duration"]
model = BoxLeastSquares(t, y, dy)
period = np.empty(shape)
period.flat = np.linspace(params["period"]-1, params["period"]+1,
period.size)
if len(period.shape) > 1:
with pytest.raises(ValueError):
results = model.power(period, duration)
else:
results = model.power(period, duration)
for k, v in results.items():
if k == "objective":
continue
assert v.shape == shape
@pytest.mark.parametrize("with_units", [True, False])
@pytest.mark.parametrize("with_err", [True, False])
def test_compute_stats(data, with_units, with_err):
t, y, dy, params = data
y_unit = 1
if with_units:
y_unit = u.mag
t = t * u.day
y = y * u.mag
dy = dy * u.mag
params["period"] = params["period"] * u.day
params["duration"] = params["duration"] * u.day
params["transit_time"] = params["transit_time"] * u.day
params["depth"] = params["depth"] * u.mag
if not with_err:
dy = None
model = BoxLeastSquares(t, y, dy)
results = model.power(params["period"], params["duration"],
oversample=1000)
stats = model.compute_stats(params["period"], params["duration"],
params["transit_time"])
# Test the calculated transit times
tt = params["period"] * np.arange(int(t.max() / params["period"]) + 1)
tt += params["transit_time"]
assert_quantity_allclose(tt, stats["transit_times"])
# Test that the other parameters are consistent with the periodogram
assert_allclose(stats["per_transit_count"], np.array([9, 7, 7, 7, 8]))
assert_quantity_allclose(np.sum(stats["per_transit_log_likelihood"]),
results["log_likelihood"])
assert_quantity_allclose(stats["depth"][0], results["depth"])
# Check the half period result
results_half = model.power(0.5*params["period"], params["duration"],
oversample=1000)
assert_quantity_allclose(stats["depth_half"][0], results_half["depth"])
# Skip the uncertainty tests when the input errors are None
if not with_err:
assert_quantity_allclose(stats["harmonic_amplitude"],
0.029945029964964204 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-0.5875918155223113 * y_unit * y_unit)
return
assert_quantity_allclose(stats["harmonic_amplitude"],
0.033027988742275853 * y_unit)
assert_quantity_allclose(stats["harmonic_delta_log_likelihood"],
-12407.505922833765)
assert_quantity_allclose(stats["depth"][1], results["depth_err"])
assert_quantity_allclose(stats["depth_half"][1], results_half["depth_err"])
for f, k in zip((1.0, 1.0, 1.0, 0.0),
("depth", "depth_even", "depth_odd", "depth_phased")):
assert np.abs((stats[k][0]-f*params["depth"]) / stats[k][1]) < 1.0
def test_negative_times(data):
t, y, dy, params = data
mu = np.mean(t)
duration = params["duration"] + np.linspace(-0.1, 0.1, 3)
model1 = BoxLeastSquares(t, y, dy)
results1 = model1.autopower(duration)
# Compute the periodogram with offset (negative) times
model2 = BoxLeastSquares(t - mu, y, dy)
results2 = model2.autopower(duration)
# Shift the transit times back into the unshifted coordinates
results2.transit_time = (results2.transit_time + mu) % results2.period
assert_allclose_blsresults(results1, results2)
@pytest.mark.parametrize('timedelta', [False, True])
def test_absolute_times(data, timedelta):
# Make sure that we handle absolute times correctly. We also check that
# TimeDelta works properly when timedelta is True.
# The example data uses relative times
t, y, dy, params = data
# FIXME: There seems to be a numerical stability issue in that if we run
# the algorithm with the same values but offset in time, the transit_time
# is not offset by a fixed amount. To avoid this issue in this test, we
# make sure the first time is also the smallest so that internally the
# values of the relative time should be the same.
t[0] = 0.
# Add units
t = t * u.day
y = y * u.mag
dy = dy * u.mag
# We now construct a set of absolute times but keeping the rest the same.
start = Time('2019-05-04T12:34:56')
trel = TimeDelta(t) if timedelta else t
t = trel + start
# and we set up two instances of BoxLeastSquares, one with absolute and one
# with relative times.
bls1 = BoxLeastSquares(t, y, dy)
bls2 = BoxLeastSquares(trel, y, dy)
results1 = bls1.autopower(0.16 * u.day)
results2 = bls2.autopower(0.16 * u.day)
# All the results should match except transit time which should be
# absolute instead of relative in the first case.
for key in results1:
if key == 'transit_time':
assert_quantity_allclose((results1[key] - start).to(u.day), results2[key])
elif key == 'objective':
assert results1[key] == results2[key]
else:
assert_allclose(results1[key], results2[key])
# Check that model evaluation works fine
model1 = bls1.model(t, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
model2 = bls2.model(trel, 0.2 * u.day, 0.05 * u.day, TimeDelta(1 * u.day))
assert_quantity_allclose(model1, model2)
# Check model validation
with pytest.raises(TypeError) as exc:
bls1.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls1.model(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('t_model was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.model(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
with pytest.raises(TypeError) as exc:
bls2.model(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('t_model was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
# Check compute_stats
stats1 = bls1.compute_stats(0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
stats2 = bls2.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
for key in stats1:
if key == 'transit_times':
assert_quantity_allclose((stats1[key] - start).to(u.day), stats2[key])
elif key.startswith('depth'):
for value1, value2 in zip(stats1[key], stats2[key]):
assert_quantity_allclose(value1, value2)
else:
assert_allclose(stats1[key], stats2[key])
# Check compute_stats validation
with pytest.raises(TypeError) as exc:
bls1.compute_stats(0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.compute_stats(0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
# Check transit_mask
mask1 = bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
mask2 = bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert_equal(mask1, mask2)
# Check transit_mask validation
with pytest.raises(TypeError) as exc:
bls1.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('transit_time was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls1.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('t was provided as a relative time '
'but the BoxLeastSquares class was initialized '
'with absolute times.')
with pytest.raises(TypeError) as exc:
bls2.transit_mask(trel, 0.2 * u.day, 0.05 * u.day, Time('2019-06-04T12:34:56'))
assert exc.value.args[0] == ('transit_time was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
with pytest.raises(TypeError) as exc:
bls2.transit_mask(t, 0.2 * u.day, 0.05 * u.day, 1 * u.day)
assert exc.value.args[0] == ('t was provided as an absolute time '
'but the BoxLeastSquares class was initialized '
'with relative times.')
|
6fdf11329b5acc2c7c3273525c74951159b722c32b614117add44717d2025e70 | """
Main Lomb-Scargle Implementation
The ``lombscargle`` function here is essentially a sophisticated switch
statement for the various implementations available in this submodule
"""
__all__ = ['lombscargle', 'available_methods']
import numpy as np
from .slow_impl import lombscargle_slow
from .fast_impl import lombscargle_fast
from .scipy_impl import lombscargle_scipy
from .chi2_impl import lombscargle_chi2
from .fastchi2_impl import lombscargle_fastchi2
from .cython_impl import lombscargle_cython
METHODS = {'slow': lombscargle_slow,
'fast': lombscargle_fast,
'chi2': lombscargle_chi2,
'scipy': lombscargle_scipy,
'fastchi2': lombscargle_fastchi2,
'cython': lombscargle_cython}
def available_methods():
methods = ['auto', 'slow', 'chi2', 'cython', 'fast', 'fastchi2']
# Scipy required for scipy algorithm (obviously)
try:
import scipy
except ImportError:
pass
else:
methods.append('scipy')
return methods
def _is_regular(frequency):
frequency = np.asarray(frequency)
if frequency.ndim != 1:
return False
elif len(frequency) == 1:
return True
else:
diff = np.diff(frequency)
return np.allclose(diff[0], diff)
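# For example: _is_regular(np.linspace(0, 1, 5)) is True, while
# _is_regular([0, 1, 3]) is False because the spacing is not constant.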
def _get_frequency_grid(frequency, assume_regular_frequency=False):
"""Utility to get grid parameters from a frequency array
Parameters
----------
frequency : array_like or Quantity
input frequency grid
assume_regular_frequency : bool (default = False)
if True, then do not check whether frequency is a regular grid
Returns
-------
f0, df, N : scalars
Parameters such that all(frequency == f0 + df * np.arange(N))
"""
frequency = np.asarray(frequency)
if frequency.ndim != 1:
raise ValueError("frequency grid must be 1 dimensional")
elif len(frequency) == 1:
return frequency[0], frequency[0], 1
elif not (assume_regular_frequency or _is_regular(frequency)):
raise ValueError("frequency must be a regular grid")
return frequency[0], frequency[1] - frequency[0], len(frequency)
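# For example (illustrative): _get_frequency_grid(np.linspace(1, 2, 11))
# returns (1.0, 0.1, 11), i.e. the grid is recovered as f0 + df * np.arange(N).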
def validate_method(method, dy, fit_mean, nterms,
frequency, assume_regular_frequency):
"""
Validate the method argument, and if method='auto'
choose the appropriate method
"""
methods = available_methods()
prefer_fast = (len(frequency) > 200
and (assume_regular_frequency or _is_regular(frequency)))
prefer_scipy = 'scipy' in methods and dy is None and not fit_mean
# automatically choose the appropriate method
if method == 'auto':
if nterms != 1:
if prefer_fast:
method = 'fastchi2'
else:
method = 'chi2'
elif prefer_fast:
method = 'fast'
elif prefer_scipy:
method = 'scipy'
else:
method = 'cython'
if method not in METHODS:
raise ValueError("invalid method: {0}".format(method))
return method
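# As a sketch of the 'auto' selection above: a regular grid with more than
# 200 frequencies prefers the fast O[N log N] path, e.g.
#
#     validate_method('auto', dy=None, fit_mean=True, nterms=1,
#                     frequency=np.linspace(0, 1, 500),
#                     assume_regular_frequency=True)   # -> 'fast'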
def lombscargle(t, y, dy=None,
frequency=None,
method='auto',
assume_regular_frequency=False,
normalization='standard',
fit_mean=True, center_data=True,
method_kwds=None, nterms=1):
"""
    Compute the Lomb-Scargle periodogram with a given method.
Parameters
----------
t : array_like
sequence of observation times
y : array_like
sequence of observations associated with times t
dy : float or array_like (optional)
error or sequence of observational errors associated with times t
frequency : array_like
frequencies (not angular frequencies) at which to evaluate the
periodogram. If not specified, optimal frequencies will be chosen using
a heuristic which will attempt to provide sufficient frequency range
and sampling so that peaks will not be missed. Note that in order to
use method='fast', frequencies must be regularly spaced.
method : string (optional)
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
``assume_regular_frequency`` is set to True.
        - 'slow': use the O[N^2] pure-Python implementation
        - 'chi2': use the O[N^2] chi2/linear-fitting implementation
        - 'fastchi2': use the O[N log N] chi2 implementation. Note that this
          requires evenly-spaced frequencies: by default this will be checked
          unless ``assume_regular_frequency`` is set to True.
        - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool (optional)
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : string (optional, default='standard')
Normalization to use for the periodogram.
Options are 'standard' or 'psd'.
fit_mean : bool (optional, default=True)
if True, include a constant offset as part of the model at each
frequency. This can lead to more accurate results, especially in the
case of incomplete phase coverage.
center_data : bool (optional, default=True)
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if `fit_mean = False`
method_kwds : dict (optional)
additional keywords to pass to the lomb-scargle method
nterms : int (default=1)
number of Fourier terms to use in the periodogram.
Not supported with every method.
Returns
-------
PLS : array_like
Lomb-Scargle power associated with each frequency omega
"""
# frequencies should be one-dimensional arrays
output_shape = frequency.shape
frequency = frequency.ravel()
# we'll need to adjust args and kwds for each method
args = (t, y, dy)
kwds = dict(frequency=frequency,
center_data=center_data,
fit_mean=fit_mean,
normalization=normalization,
nterms=nterms,
**(method_kwds or {}))
method = validate_method(method, dy=dy, fit_mean=fit_mean, nterms=nterms,
frequency=frequency,
assume_regular_frequency=assume_regular_frequency)
# scipy doesn't support dy or fit_mean=True
if method == 'scipy':
if kwds.pop('fit_mean'):
raise ValueError("scipy method does not support fit_mean=True")
if dy is not None:
dy = np.ravel(np.asarray(dy))
if not np.allclose(dy[0], dy):
raise ValueError("scipy method only supports "
"uniform uncertainties dy")
args = (t, y)
# fast methods require frequency expressed as a grid
if method.startswith('fast'):
f0, df, Nf = _get_frequency_grid(kwds.pop('frequency'),
assume_regular_frequency)
kwds.update(f0=f0, df=df, Nf=Nf)
# only chi2 methods support nterms
if not method.endswith('chi2'):
if kwds.pop('nterms') != 1:
raise ValueError("nterms != 1 only supported with 'chi2' "
"or 'fastchi2' methods")
PLS = METHODS[method](*args, **kwds)
return PLS.reshape(output_shape)
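# Minimal direct-call sketch (``t``, ``y``, ``dy`` are assumed data arrays;
# most users will go through the ``LombScargle`` class instead):
#
#     freq = np.linspace(0.1, 10, 1000)
#     PLS = lombscargle(t, y, dy, frequency=freq, method='auto')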
|
9db667ea5f97df6f07c951c7b45d0042d21944bf09d5216d0b91ac3bb0fcddf9 | from math import factorial
import numpy as np
def bitceil(N):
"""
Find the bit (i.e. power of 2) immediately greater than or equal to N
Note: this works for numbers up to 2 ** 64.
Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
"""
return 1 << int(N - 1).bit_length()
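# For example: bitceil(100) == 128, bitceil(128) == 128, bitceil(1) == 1.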
def extirpolate(x, y, N=None, M=4):
"""
Extirpolate the values (x, y) onto an integer grid range(N),
    using Lagrange polynomial weights on the M nearest points.
Parameters
----------
x : array_like
array of abscissas
y : array_like
array of ordinates
N : int
number of integer bins to use. For best performance, N should be larger
than the maximum of x
M : int
number of adjoining points on which to extirpolate.
Returns
-------
yN : ndarray
N extirpolated values associated with range(N)
Example
-------
>>> rng = np.random.RandomState(0)
>>> x = 100 * rng.rand(20)
>>> y = np.sin(x)
>>> y_hat = extirpolate(x, y)
>>> x_hat = np.arange(len(y_hat))
>>> f = lambda x: np.sin(x / 10)
>>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
True
Notes
-----
This code is based on the C implementation of spread() presented in
Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
"""
x, y = map(np.ravel, np.broadcast_arrays(x, y))
if N is None:
N = int(np.max(x) + 0.5 * M + 1)
    # Now use Lagrange polynomial weights to populate the results array;
# This is an efficient recursive implementation (See Press et al. 1989)
result = np.zeros(N, dtype=y.dtype)
# first take care of the easy cases where x is an integer
integers = (x % 1 == 0)
np.add.at(result, x[integers].astype(int), y[integers])
x, y = x[~integers], y[~integers]
# For each remaining x, find the index describing the extirpolation range.
# i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
# adjusted so that the limits are within the range 0...N
ilo = np.clip((x - M // 2).astype(int), 0, N - M)
numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
denominator = factorial(M - 1)
for j in range(M):
if j > 0:
denominator *= j / (j - M)
ind = ilo + (M - 1 - j)
np.add.at(result, ind, numerator / (denominator * (x - ind)))
return result
def trig_sum(t, h, df, N, f0=0, freq_factor=1,
oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies
This routine computes weighted sine and cosine sums:
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
    Where f_j = freq_factor * (f0 + j * df) for the values j in 0 ... N - 1.
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array_like
array of input times
h : array_like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float (optional, default=0)
The low frequency to use
freq_factor : float (optional, default=1)
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
        oversampling factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the trade-off between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarrays
        summation arrays for frequencies f = freq_factor * (f0 + df * np.arange(N))
"""
df *= freq_factor
f0 *= freq_factor
if df <= 0:
raise ValueError("df must be positive")
t, h = map(np.ravel, np.broadcast_arrays(t, h))
if use_fft:
Mfft = int(Mfft)
if Mfft <= 0:
raise ValueError("Mfft must be positive")
# required size of fft is the power of 2 above the oversampling rate
Nfft = bitceil(N * oversampling)
t0 = t.min()
if f0 > 0:
h = h * np.exp(2j * np.pi * f0 * (t - t0))
tnorm = ((t - t0) * Nfft * df) % Nfft
grid = extirpolate(tnorm, h, Nfft, Mfft)
fftgrid = np.fft.ifft(grid)[:N]
if t0 != 0:
f = f0 + df * np.arange(N)
fftgrid *= np.exp(2j * np.pi * t0 * f)
C = Nfft * fftgrid.real
S = Nfft * fftgrid.imag
else:
f = f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
return S, C
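# Usage sketch (with hypothetical ``t`` and ``h`` arrays): the FFT-based and
# brute-force paths agree to within the extirpolation accuracy, i.e.
#
#     S1, C1 = trig_sum(t, h, df=0.1, N=1000, use_fft=True, oversampling=10)
#     S2, C2 = trig_sum(t, h, df=0.1, N=1000, use_fft=False)
#     # np.allclose(S1, S2, atol=1e-2) and np.allclose(C1, C2, atol=1e-2)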
|
30cdf5aa45c71c3335002144b1cc9a36f0e6fd4f39988b9ada7e3c33c38ad0ea | import numpy as np
import pytest
from numpy.testing import assert_allclose
try:
import scipy
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
from astropy.timeseries.periodograms.lombscargle import LombScargle
from astropy.timeseries.periodograms.lombscargle._statistics import (cdf_single, pdf_single, fap_single, inv_fap_single,
METHODS)
from astropy.timeseries.periodograms.lombscargle.utils import convert_normalization, compute_chi2_ref
METHOD_KWDS = dict(bootstrap={'n_bootstraps': 20, 'random_seed': 42})
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']
def make_data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.RandomState(rseed)
t = 5 * period * rng.rand(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.rand(N))
y += dy * rng.randn(N)
return t, y, dy
@pytest.fixture
def null_data(N=1000, dy=1, rseed=0):
"""Generate null hypothesis data"""
rng = np.random.RandomState(rseed)
t = 100 * rng.rand(N)
dy = 0.5 * dy * (1 + rng.rand(N))
y = dy * rng.randn(N)
return t, y, dy
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('with_errors', [True, False])
def test_distribution(null_data, normalization, with_errors, fmax=40):
t, y, dy = null_data
if not with_errors:
dy = None
N = len(t)
ls = LombScargle(t, y, dy, normalization=normalization)
freq, power = ls.autopower(maximum_frequency=fmax)
z = np.linspace(0, power.max(), 1000)
# Test that pdf and cdf are consistent
dz = z[1] - z[0]
z_mid = z[:-1] + 0.5 * dz
pdf = ls.distribution(z_mid)
cdf = ls.distribution(z, cumulative=True)
assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8)
# psd normalization without specified errors produces bad results
if not (normalization == 'psd' and not with_errors):
# Test that observed power is distributed according to the theoretical pdf
        hist, bins = np.histogram(power, 30, density=True)
midpoints = 0.5 * (bins[1:] + bins[:-1])
pdf = ls.distribution(midpoints)
assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])
@pytest.mark.parametrize('N', [10, 100, 1000])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_inverse_single(N, normalization):
fap = np.linspace(0, 1, 100)
z = inv_fap_single(fap, N, normalization)
fap_out = fap_single(z, N, normalization)
assert_allclose(fap, fap_out)
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('use_errs', [True, False])
def test_inverse_bootstrap(null_data, normalization, use_errs, fmax=5):
t, y, dy = null_data
if not use_errs:
dy = None
fap = np.linspace(0, 1, 10)
method = 'bootstrap'
method_kwds = METHOD_KWDS['bootstrap']
ls = LombScargle(t, y, dy, normalization=normalization)
z = ls.false_alarm_level(fap, maximum_frequency=fmax,
method=method, method_kwds=method_kwds)
fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
method=method,
method_kwds=method_kwds)
# atol = 1 / n_bootstraps
assert_allclose(fap, fap_out, atol=0.05)
@pytest.mark.parametrize('method', sorted(set(METHODS) - {'bootstrap'}))
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('N', [10, 100, 1000])
def test_inverses(method, normalization, use_errs, N, T=5, fmax=5):
if not HAS_SCIPY and method in ['baluev', 'davies']:
pytest.skip("SciPy required")
t, y, dy = make_data(N, rseed=543)
if not use_errs:
dy = None
method_kwds = METHOD_KWDS.get(method, None)
fap = np.logspace(-10, 0, 10)
ls = LombScargle(t, y, dy, normalization=normalization)
z = ls.false_alarm_level(fap, maximum_frequency=fmax,
method=method,
method_kwds=method_kwds)
fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
method=method,
method_kwds=method_kwds)
assert_allclose(fap, fap_out)
@pytest.mark.parametrize('method', sorted(METHODS))
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_false_alarm_smoketest(method, normalization):
if not HAS_SCIPY and method in ['baluev', 'davies']:
pytest.skip("SciPy required")
kwds = METHOD_KWDS.get(method, None)
t, y, dy = make_data()
fmax = 5
ls = LombScargle(t, y, dy, normalization=normalization)
freq, power = ls.autopower(maximum_frequency=fmax)
Z = np.linspace(power.min(), power.max(), 30)
fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
method=method, method_kwds=kwds)
assert len(fap) == len(Z)
if method != 'davies':
assert np.all(fap <= 1)
assert np.all(fap[:-1] >= fap[1:]) # monotonically decreasing
@pytest.mark.parametrize('method', sorted(METHODS))
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('normalization', sorted(set(NORMALIZATIONS) - {'psd'}))
def test_false_alarm_equivalence(method, normalization, use_errs):
# Note: the PSD normalization is not equivalent to the others, in that it
# depends on the absolute errors rather than relative errors. Because the
# scaling contributes to the distribution, it cannot be converted directly
# from any of the three normalized versions.
if not HAS_SCIPY and method in ['baluev', 'davies']:
pytest.skip("SciPy required")
kwds = METHOD_KWDS.get(method, None)
t, y, dy = make_data()
if not use_errs:
dy = None
fmax = 5
ls = LombScargle(t, y, dy, normalization=normalization)
freq, power = ls.autopower(maximum_frequency=fmax)
Z = np.linspace(power.min(), power.max(), 30)
fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
method=method, method_kwds=kwds)
# Compute the equivalent Z values in the standard normalization
# and check that the FAP is consistent
Z_std = convert_normalization(Z, len(t),
from_normalization=normalization,
to_normalization='standard',
chi2_ref=compute_chi2_ref(y, dy))
ls = LombScargle(t, y, dy, normalization='standard')
fap_std = ls.false_alarm_probability(Z_std, maximum_frequency=fmax,
method=method, method_kwds=kwds)
assert_allclose(fap, fap_std, rtol=0.1)
|
2141300a45c64efa6dc4ac897fde4f555fd69f99caf75c36c4d2956839e152af | import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.timeseries.periodograms.lombscargle.utils import convert_normalization, compute_chi2_ref
from astropy.timeseries.periodograms.lombscargle.core import LombScargle
NORMALIZATIONS = ['standard', 'model', 'log', 'psd']
@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.RandomState(rseed)
t = 5 * period * rng.rand(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.rand(N))
y += dy * rng.randn(N)
return t, y, dy
@pytest.mark.parametrize('norm_in', NORMALIZATIONS)
@pytest.mark.parametrize('norm_out', NORMALIZATIONS)
def test_convert_normalization(norm_in, norm_out, data):
t, y, dy = data
_, power_in = LombScargle(t, y, dy).autopower(maximum_frequency=5,
normalization=norm_in)
_, power_out = LombScargle(t, y, dy).autopower(maximum_frequency=5,
normalization=norm_out)
power_in_converted = convert_normalization(power_in, N=len(t),
from_normalization=norm_in,
to_normalization=norm_out,
chi2_ref = compute_chi2_ref(y, dy))
assert_allclose(power_in_converted, power_out)
|
16bc38e642ad5f6931417b6149be161ecf12d85d7bd7bf57d4c88e351429441b | import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms.lombscargle import LombScargle
ALL_METHODS = LombScargle.available_methods
ALL_METHODS_NO_AUTO = [method for method in ALL_METHODS if method != 'auto']
FAST_METHODS = [method for method in ALL_METHODS if 'fast' in method]
NTERMS_METHODS = [method for method in ALL_METHODS if 'chi2' in method]
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']
@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.RandomState(rseed)
t = 20 * period * rng.rand(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.rand(N))
y += dy * rng.randn(N)
return t, y, dy
@pytest.mark.parametrize('minimum_frequency', [None, 1.0])
@pytest.mark.parametrize('maximum_frequency', [None, 5.0])
@pytest.mark.parametrize('nyquist_factor', [1, 10])
@pytest.mark.parametrize('samples_per_peak', [1, 5])
def test_autofrequency(data, minimum_frequency, maximum_frequency,
nyquist_factor, samples_per_peak):
t, y, dy = data
baseline = t.max() - t.min()
freq = LombScargle(t, y, dy).autofrequency(samples_per_peak,
nyquist_factor,
minimum_frequency,
maximum_frequency)
df = freq[1] - freq[0]
# Check sample spacing
assert_allclose(df, 1. / baseline / samples_per_peak)
# Check minimum frequency
if minimum_frequency is None:
assert_allclose(freq[0], 0.5 * df)
else:
assert_allclose(freq[0], minimum_frequency)
if maximum_frequency is None:
avg_nyquist = 0.5 * len(t) / baseline
assert_allclose(freq[-1], avg_nyquist * nyquist_factor, atol=0.5*df)
else:
assert_allclose(freq[-1], maximum_frequency, atol=0.5*df)
@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_all_methods(data, method, center_data, fit_mean,
errors, with_units, normalization):
if method == 'scipy' and (fit_mean or errors != 'none'):
return
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
frequency = frequency / t.unit
if errors == 'none':
dy = None
elif errors == 'partial':
dy = dy[0]
elif errors == 'full':
pass
else:
raise ValueError("Unrecognized error type: '{0}'".format(errors))
kwds = {}
ls = LombScargle(t, y, dy, center_data=center_data, fit_mean=fit_mean,
normalization=normalization)
P_expected = ls.power(frequency)
# don't use the fft approximation here; we'll test this elsewhere
if method in FAST_METHODS:
kwds['method_kwds'] = dict(use_fft=False)
P_method = ls.power(frequency, method=method, **kwds)
if with_units:
if normalization == 'psd' and errors == 'none':
assert P_method.unit == y.unit ** 2
else:
assert P_method.unit == u.dimensionless_unscaled
else:
assert not hasattr(P_method, 'unit')
assert_quantity_allclose(P_expected, P_method)
@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_integer_inputs(data, method, center_data, fit_mean, with_errors,
normalization):
if method == 'scipy' and (fit_mean or with_errors):
return
t, y, dy = data
t = np.floor(100 * t)
t_int = t.astype(int)
y = np.floor(100 * y)
y_int = y.astype(int)
dy = np.floor(100 * dy)
dy_int = dy.astype('int32')
frequency = 1E-2 * (0.8 + 0.01 * np.arange(40))
if not with_errors:
dy = None
dy_int = None
kwds = dict(center_data=center_data,
fit_mean=fit_mean,
normalization=normalization)
    P_float = LombScargle(t, y, dy, **kwds).power(frequency, method=method)
P_int = LombScargle(t_int, y_int, dy_int,
**kwds).power(frequency, method=method)
assert_allclose(P_float, P_int)
@pytest.mark.parametrize('method', NTERMS_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('nterms', [0, 2, 4])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_nterms_methods(method, center_data, fit_mean, errors,
nterms, normalization, data):
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if errors == 'none':
dy = None
elif errors == 'partial':
dy = dy[0]
elif errors == 'full':
pass
else:
raise ValueError("Unrecognized error type: '{0}'".format(errors))
ls = LombScargle(t, y, dy, center_data=center_data,
fit_mean=fit_mean, nterms=nterms,
normalization=normalization)
if nterms == 0 and not fit_mean:
with pytest.raises(ValueError) as err:
ls.power(frequency, method=method)
assert 'nterms' in str(err.value) and 'bias' in str(err.value)
else:
P_expected = ls.power(frequency)
# don't use fast fft approximations here
kwds = {}
if 'fast' in method:
kwds['method_kwds'] = dict(use_fft=False)
P_method = ls.power(frequency, method=method, **kwds)
assert_allclose(P_expected, P_method, rtol=1E-7, atol=1E-25)
@pytest.mark.parametrize('method', FAST_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('nterms', [0, 1, 2])
def test_fast_approximations(method, center_data, fit_mean,
errors, nterms, data):
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if errors == 'none':
dy = None
elif errors == 'partial':
dy = dy[0]
elif errors == 'full':
pass
else:
raise ValueError("Unrecognized error type: '{0}'".format(errors))
ls = LombScargle(t, y, dy, center_data=center_data,
fit_mean=fit_mean, nterms=nterms,
normalization='standard')
# use only standard normalization because we compare via absolute tolerance
kwds = dict(method=method)
if method == 'fast' and nterms != 1:
with pytest.raises(ValueError) as err:
ls.power(frequency, **kwds)
assert 'nterms' in str(err.value)
elif nterms == 0 and not fit_mean:
with pytest.raises(ValueError) as err:
ls.power(frequency, **kwds)
assert 'nterms' in str(err.value) and 'bias' in str(err.value)
else:
P_fast = ls.power(frequency, **kwds)
kwds['method_kwds'] = dict(use_fft=False)
P_slow = ls.power(frequency, **kwds)
assert_allclose(P_fast, P_slow, atol=0.008)
@pytest.mark.parametrize('method', LombScargle.available_methods)
@pytest.mark.parametrize('shape', [(), (1,), (2,), (3,), (2, 3)])
def test_output_shapes(method, shape, data):
t, y, dy = data
freq = np.asarray(np.zeros(shape))
freq.flat = np.arange(1, freq.size + 1)
PLS = LombScargle(t, y, fit_mean=False).power(freq, method=method)
assert PLS.shape == shape
@pytest.mark.parametrize('method', LombScargle.available_methods)
def test_errors_on_unit_mismatch(method, data):
t, y, dy = data
t = t * u.second
y = y * u.mag
frequency = np.linspace(0.5, 1.5, 10)
# this should fail because frequency and 1/t units do not match
with pytest.raises(ValueError) as err:
LombScargle(t, y, fit_mean=False).power(frequency, method=method)
assert str(err.value).startswith('Units of frequency not equivalent')
# this should fail because dy and y units do not match
with pytest.raises(ValueError) as err:
LombScargle(t, y, dy, fit_mean=False).power(frequency / t.unit)
assert str(err.value).startswith('Units of dy not equivalent')
# we don't test all normalizations here because they are tested above
# only test method='auto' because unit handling does not depend on method
@pytest.mark.parametrize('with_error', [True, False])
def test_unit_conversions(data, with_error):
t, y, dy = data
t_day = t * u.day
t_hour = u.Quantity(t_day, 'hour')
y_meter = y * u.meter
y_millimeter = u.Quantity(y_meter, 'millimeter')
# sanity check on inputs
assert_quantity_allclose(t_day, t_hour)
assert_quantity_allclose(y_meter, y_millimeter)
if with_error:
dy = dy * u.meter
else:
dy = None
freq_day, P1 = LombScargle(t_day, y_meter, dy).autopower()
freq_hour, P2 = LombScargle(t_hour, y_millimeter, dy).autopower()
# Check units of frequency
assert freq_day.unit == 1. / u.day
assert freq_hour.unit == 1. / u.hour
# Check that results match
assert_quantity_allclose(freq_day, freq_hour)
assert_quantity_allclose(P1, P2)
# Check that switching frequency units doesn't change things
P3 = LombScargle(t_day, y_meter, dy).power(freq_hour)
P4 = LombScargle(t_hour, y_meter, dy).power(freq_day)
assert_quantity_allclose(P3, P4)
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('freq', [1.0, 2.0])
def test_model(fit_mean, with_units, freq):
rand = np.random.RandomState(0)
t = 10 * rand.rand(40)
params = 10 * rand.rand(3)
y = np.zeros_like(t)
if fit_mean:
y += params[0]
y += params[1] * np.sin(2 * np.pi * freq * (t - params[2]))
if with_units:
t = t * u.day
y = y * u.mag
freq = freq / u.day
ls = LombScargle(t, y, center_data=False, fit_mean=fit_mean)
y_fit = ls.model(t, freq)
assert_quantity_allclose(y_fit, y)
@pytest.mark.parametrize('t_unit', [u.second, u.day])
@pytest.mark.parametrize('frequency_unit', [u.Hz, 1. / u.second])
@pytest.mark.parametrize('y_unit', [u.mag, u.jansky])
def test_model_units_match(data, t_unit, frequency_unit, y_unit):
t, y, dy = data
t_fit = t[:5]
frequency = 1.0
t = t * t_unit
t_fit = t_fit * t_unit
y = y * y_unit
dy = dy * y_unit
frequency = frequency * frequency_unit
ls = LombScargle(t, y, dy)
y_fit = ls.model(t_fit, frequency)
assert y_fit.unit == y_unit
def test_model_units_mismatch(data):
t, y, dy = data
frequency = 1.0
t_fit = t[:5]
t = t * u.second
t_fit = t_fit * u.second
y = y * u.mag
frequency = 1.0 / t.unit
# this should fail because frequency and 1/t units do not match
with pytest.raises(ValueError) as err:
LombScargle(t, y).model(t_fit, frequency=1.0)
assert str(err.value).startswith('Units of frequency not equivalent')
# this should fail because t and t_fit units do not match
with pytest.raises(ValueError) as err:
LombScargle(t, y).model([1, 2], frequency)
assert str(err.value).startswith('Units of t not equivalent')
# this should fail because dy and y units do not match
with pytest.raises(ValueError) as err:
LombScargle(t, y, dy).model(t_fit, frequency)
assert str(err.value).startswith('Units of dy not equivalent')
def test_autopower(data):
t, y, dy = data
ls = LombScargle(t, y, dy)
kwargs = dict(samples_per_peak=6, nyquist_factor=2,
minimum_frequency=2, maximum_frequency=None)
freq1 = ls.autofrequency(**kwargs)
power1 = ls.power(freq1)
freq2, power2 = ls.autopower(**kwargs)
assert_allclose(freq1, freq2)
assert_allclose(power1, power2)
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('nterms', [0, 1, 2])
def test_model_parameters(data, nterms, fit_mean, center_data,
errors, with_units):
if nterms == 0 and not fit_mean:
return
t, y, dy = data
frequency = 1.5
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
frequency = frequency / t.unit
if errors == 'none':
dy = None
elif errors == 'partial':
dy = dy[0]
elif errors == 'full':
pass
else:
raise ValueError("Unrecognized error type: '{0}'".format(errors))
ls = LombScargle(t, y, dy,
nterms=nterms,
fit_mean=fit_mean,
center_data=center_data)
tfit = np.linspace(0, 20, 10)
if with_units:
tfit = tfit * u.day
model = ls.model(tfit, frequency)
params = ls.model_parameters(frequency)
design = ls.design_matrix(frequency, t=tfit)
offset = ls.offset()
assert len(params) == int(fit_mean) + 2 * nterms
assert_quantity_allclose(offset + design.dot(params), model)
@pytest.mark.parametrize('timedelta', [False, True])
def test_absolute_times(data, timedelta):
# Make sure that we handle absolute times correctly. We also check that
# TimeDelta works properly when timedelta is True.
# The example data uses relative times
t, y, dy = data
# FIXME: There seems to be a numerical stability issue in that if we run
# the algorithm with the same values but offset in time, the transit_time
# is not offset by a fixed amount. To avoid this issue in this test, we
# make sure the first time is also the smallest so that internally the
# values of the relative time should be the same.
t[0] = 0.
# Add units
t = t * u.day
y = y * u.mag
dy = dy * u.mag
# We now construct a set of absolute times but keeping the rest the same
start = Time('2019-05-04T12:34:56')
trel = TimeDelta(t) if timedelta else t
t = trel + start
# and we set up two instances of LombScargle, one with absolute and one
# with relative times.
ls1 = LombScargle(t, y, dy)
ls2 = LombScargle(trel, y, dy)
kwargs = dict(samples_per_peak=6, nyquist_factor=2,
minimum_frequency=2 / u.day, maximum_frequency=None)
freq1 = ls1.autofrequency(**kwargs)
freq2 = ls2.autofrequency(**kwargs)
assert_quantity_allclose(freq1, freq2)
power1 = ls1.power(freq1)
power2 = ls2.power(freq2)
assert_quantity_allclose(power1, power2)
freq1, power1 = ls1.autopower(**kwargs)
freq2, power2 = ls2.autopower(**kwargs)
assert_quantity_allclose(freq1, freq2)
assert_quantity_allclose(power1, power2)
model1 = ls1.model(t, 2 / u.day)
model2 = ls2.model(trel, 2 / u.day)
assert_quantity_allclose(model1, model2)
# Check model validation
with pytest.raises(TypeError) as exc:
ls1.model(trel, 2 / u.day)
assert exc.value.args[0] == ('t was provided as a relative time but the '
'LombScargle class was initialized with '
'absolute times.')
with pytest.raises(TypeError) as exc:
ls2.model(t, 2 / u.day)
assert exc.value.args[0] == ('t was provided as an absolute time but the '
'LombScargle class was initialized with '
'relative times.')
# Check design matrix
design1 = ls1.design_matrix(2 / u.day, t=t)
design2 = ls2.design_matrix(2 / u.day, t=trel)
assert_quantity_allclose(design1, design2)
# Check design matrix validation
with pytest.raises(TypeError) as exc:
ls1.design_matrix(2 / u.day, t=trel)
assert exc.value.args[0] == ('t was provided as a relative time but the '
'LombScargle class was initialized with '
'absolute times.')
with pytest.raises(TypeError) as exc:
ls2.design_matrix(2 / u.day, t=t)
assert exc.value.args[0] == ('t was provided as an absolute time but the '
'LombScargle class was initialized with '
'relative times.')
|
01c400ef7794bd955bbdef345bddb6eef6146eac18a865bd76d44aada8a98e29 |
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy.timeseries.periodograms.lombscargle.implementations.utils import extirpolate, bitceil, trig_sum
@pytest.mark.parametrize('N', 2 ** np.arange(1, 12))
@pytest.mark.parametrize('offset', [-1, 0, 1])
def test_bitceil(N, offset):
assert_equal(bitceil(N + offset),
int(2 ** np.ceil(np.log2(N + offset))))
@pytest.fixture
def extirpolate_data():
rng = np.random.RandomState(0)
x = 100 * rng.rand(50)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate(N, M, extirpolate_data):
x, y, f = extirpolate_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat))
@pytest.fixture
def extirpolate_int_data():
rng = np.random.RandomState(0)
x = 100 * rng.rand(50)
x[:25] = x[:25].astype(int)
y = np.sin(x)
f = lambda x: np.sin(x / 10)
return x, y, f
@pytest.mark.parametrize('N', [100, None])
@pytest.mark.parametrize('M', [5])
def test_extirpolate_with_integers(N, M, extirpolate_int_data):
x, y, f = extirpolate_int_data
y_hat = extirpolate(x, y, N, M)
x_hat = np.arange(len(y_hat))
assert_allclose(np.dot(f(x), y), np.dot(f(x_hat), y_hat))
@pytest.fixture
def trig_sum_data():
rng = np.random.RandomState(0)
t = 10 * rng.rand(50)
h = np.sin(t)
return t, h
@pytest.mark.parametrize('f0', [0, 1])
@pytest.mark.parametrize('adjust_t', [True, False])
@pytest.mark.parametrize('freq_factor', [1, 2])
@pytest.mark.parametrize('df', [0.1])
def test_trig_sum(f0, adjust_t, freq_factor, df, trig_sum_data):
t, h = trig_sum_data
tfit = t - t.min() if adjust_t else t
S1, C1 = trig_sum(tfit, h, df, N=1000, use_fft=True,
f0=f0, freq_factor=freq_factor, oversampling=10)
S2, C2 = trig_sum(tfit, h, df, N=1000, use_fft=False,
f0=f0, freq_factor=freq_factor, oversampling=10)
assert_allclose(S1, S2, atol=1E-2)
assert_allclose(C1, C2, atol=1E-2)
|
5579bdfaaf74a563845eae93eb4568ae107ec579f0779a1b227ebfbe2fc99395 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.timeseries.periodograms.lombscargle.implementations.mle import design_matrix, periodic_fit
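# design_matrix builds the least-squares matrix for the Lomb-Scargle model:
# an optional bias column equal to 1/dy, followed by sin/cos column pairs at
# each harmonic of the frequency, all scaled by 1/dy (see the assertions
# below). periodic_fit then solves for the best-fit model at a frequency.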
@pytest.fixture
def t():
rand = np.random.RandomState(42)
return 10 * rand.rand(10)
@pytest.mark.parametrize('freq', [1.0, 2])
@pytest.mark.parametrize('dy', [None, 2.0])
@pytest.mark.parametrize('bias', [True, False])
def test_design_matrix(t, freq, dy, bias):
X = design_matrix(t, freq, dy, bias=bias)
assert X.shape == (t.shape[0], 2 + bool(bias))
if bias:
assert_allclose(X[:, 0], 1. / (dy or 1.0))
assert_allclose(X[:, -2], np.sin(2 * np.pi * freq * t) / (dy or 1.0))
assert_allclose(X[:, -1], np.cos(2 * np.pi * freq * t) / (dy or 1.0))
@pytest.mark.parametrize('nterms', range(4))
def test_multiterm_design_matrix(t, nterms):
dy = 2.0
freq = 1.5
X = design_matrix(t, freq, dy=dy, bias=True, nterms=nterms)
assert X.shape == (t.shape[0], 1 + 2 * nterms)
assert_allclose(X[:, 0], 1. / dy)
for i in range(1, nterms + 1):
assert_allclose(X[:, 2 * i - 1], np.sin(2 * np.pi * i * freq * t) / dy)
assert_allclose(X[:, 2 * i], np.cos(2 * np.pi * i * freq * t) / dy)
@pytest.mark.parametrize('nterms', range(1, 4))
@pytest.mark.parametrize('freq', [1, 2])
@pytest.mark.parametrize('fit_mean', [True, False])
def test_exact_mle_fit(nterms, freq, fit_mean):
rand = np.random.RandomState(42)
t = 10 * rand.rand(30)
theta = -1 + rand.rand(2 * nterms + 1)
y = np.zeros(t.shape)
if fit_mean:
y = theta[0] * np.ones(t.shape)
for i in range(1, nterms + 1):
y += theta[2 * i - 1] * np.sin(2 * np.pi * i * freq * t)
y += theta[2 * i] * np.cos(2 * np.pi * i * freq * t)
y_fit = periodic_fit(t, y, dy=1, frequency=freq, t_fit=t, nterms=nterms,
center_data=False, fit_mean=fit_mean)
assert_allclose(y, y_fit)
|
22281b8b65eb4017ea26e2f385ba2601aeda5953cefa1c95b5795168bf030ecc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from unittest import mock
import pytest
from astropy.io.fits import HDUList, Header, PrimaryHDU, BinTableHDU
from astropy.utils.data import get_pkg_data_filename
from astropy.timeseries.io.kepler import kepler_fits_reader
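# The tests below patch astropy.io.fits.open with minimal fake HDU lists so
# that the validation/error paths of kepler_fits_reader can be exercised
# without real files; the remote-data tests at the end read actual Kepler
# and TESS light curves.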
def fake_header(extver, version, timesys, telescop):
return Header({"SIMPLE": "T",
"BITPIX": 8,
"NAXIS": 0,
"EXTVER": extver,
"VERSION": version,
'TIMESYS': "{}".format(timesys),
"TELESCOP": "{}".format(telescop)})
def fake_hdulist(extver=1, version=2, timesys="TDB", telescop="KEPLER"):
new_header = fake_header(extver, version, timesys, telescop)
return [HDUList(hdus=[PrimaryHDU(header=new_header),
BinTableHDU(header=new_header, name="LIGHTCURVE")])]
@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(telescop="MadeUp"))
def test_raise_telescop_wrong(mock_file):
with pytest.raises(NotImplementedError) as exc:
kepler_fits_reader(None)
assert exc.value.args[0] == ("MadeUp is not implemented, only KEPLER or TESS are "
"supported through this reader")
@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(extver=2))
def test_raise_extversion_kepler(mock_file):
with pytest.raises(NotImplementedError) as exc:
kepler_fits_reader(None)
assert exc.value.args[0] == ("Support for KEPLER v2 files not yet "
"implemented")
@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(extver=2, telescop="TESS"))
def test_raise_extversion_tess(mock_file):
with pytest.raises(NotImplementedError) as exc:
kepler_fits_reader(None)
assert exc.value.args[0] == ("Support for TESS v2 files not yet "
"implemented")
@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(timesys="TCB"))
def test_raise_timesys_kepler(mock_file):
with pytest.raises(NotImplementedError) as exc:
kepler_fits_reader(None)
assert exc.value.args[0] == ("Support for TCB time scale not yet "
"implemented in KEPLER reader")
@mock.patch("astropy.io.fits.open", side_effect=fake_hdulist(timesys="TCB", telescop="TESS"))
def test_raise_timesys_tess(mock_file):
with pytest.raises(NotImplementedError) as exc:
kepler_fits_reader(None)
assert exc.value.args[0] == ("Support for TCB time scale not yet "
"implemented in TESS reader")
@pytest.mark.remote_data(source='astropy')
def test_kepler_astropy():
filename = get_pkg_data_filename('timeseries/kplr010666592-2009131110544_slc.fits')
timeseries = kepler_fits_reader(filename)
assert timeseries["time"].format == 'isot'
assert timeseries["time"].scale == 'tdb'
assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
assert len(timeseries) == 14280
assert len(timeseries.columns) == 20
@pytest.mark.remote_data(source='astropy')
def test_tess_astropy():
filename = get_pkg_data_filename('timeseries/hlsp_tess-data-alerts_tess_phot_00025155310-s01_tess_v1_lc.fits')
timeseries = kepler_fits_reader(filename)
assert timeseries["time"].format == 'isot'
assert timeseries["time"].scale == 'tdb'
assert timeseries["sap_flux"].unit.to_string() == 'electron / s'
assert len(timeseries) == 19261
assert len(timeseries.columns) == 20
|
fff9d0d34ebc260c70f2fcc77b0d7e6e1aa2ba2b6a18ff7394a8f585b8c31318 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy import constants as const
from astropy.tests.helper import pickle_protocol, check_pickling_recovery # noqa
originals = [const.Constant('h_fake', 'Not Planck',
0.0, 'J s', 0.0, 'fakeref',
system='si'),
const.h,
const.e]
xfails = [True, True, True]
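# All three cases are flagged as expected failures, so the pickling check is
# effectively skipped for these Constant objects.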
@pytest.mark.parametrize(("original", "xfail"), zip(originals, xfails))
def test_new_constant(pickle_protocol, original, xfail):
if xfail:
pytest.xfail()
check_pickling_recovery(original, pickle_protocol)
|
64dfc453b5a5b8b1484657b885467671e5cc601a8525805ada9c05c60fd219eb | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import pytest
from astropy.constants import Constant
from astropy.units import Quantity as Q
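# These tests pin values from the CODATA 2010 / astropyconst13 constant sets
# explicitly and, for h, also check that the pinned value differs from the
# current default constants.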
def test_c():
from astropy.constants.codata2010 import c
# c is an exactly defined constant, so it shouldn't be changing
assert c.value == 2.99792458e8 # default is S.I.
assert c.si.value == 2.99792458e8
assert c.cgs.value == 2.99792458e10
# make sure it has the necessary attributes and they're not blank
assert c.uncertainty == 0 # c is a *defined* quantity
assert c.name
assert c.reference
assert c.unit
def test_h():
from astropy.constants.codata2010 import h
from astropy.constants import h as h_current
# check that the value is the CODATA2010 value
assert abs(h.value - 6.62606957e-34) < 1e-43
assert abs(h.si.value - 6.62606957e-34) < 1e-43
assert abs(h.cgs.value - 6.62606957e-27) < 1e-36
# Check it is different than the current value
assert abs(h.value - h_current.value) > 4e-42
# make sure it has the necessary attributes and they're not blank
assert h.uncertainty
assert h.name
assert h.reference
assert h.unit
def test_e():
from astropy.constants.astropyconst13 import e
# A test quantity
E = Q(100.00000348276221, 'V/m')
# e.cgs is too ambiguous and should not work at all
with pytest.raises(TypeError):
e.cgs * E
assert isinstance(e.si, Q)
assert isinstance(e.gauss, Q)
assert isinstance(e.esu, Q)
assert e.si * E == Q(100, 'eV/m')
assert e.gauss * E == Q(e.gauss.value * E.value, 'Fr V/m')
assert e.esu * E == Q(e.esu.value * E.value, 'Fr V/m')
def test_g0():
"""Tests for #1263 demonstrating how g0 constant should behave."""
from astropy.constants.astropyconst13 import g0
# g0 is an exactly defined constant, so it shouldn't be changing
assert g0.value == 9.80665 # default is S.I.
assert g0.si.value == 9.80665
assert g0.cgs.value == 9.80665e2
# make sure it has the necessary attributes and they're not blank
assert g0.uncertainty == 0 # g0 is a *defined* quantity
assert g0.name
assert g0.reference
assert g0.unit
    # Check that its unit has the correct physical type
assert g0.unit.physical_type == 'acceleration'
def test_b_wien():
"""b_wien should give the correct peak wavelength for
given blackbody temperature. The Sun is used in this test.
"""
from astropy.constants.astropyconst13 import b_wien
from astropy import units as u
t = 5778 * u.K
w = (b_wien / t).to(u.nm)
assert round(w.value) == 502
def test_unit():
from astropy import units as u
from astropy.constants import astropyconst13 as const
for key, val in vars(const).items():
if isinstance(val, Constant):
# Getting the unit forces the unit parser to run. Confirm
# that none of the constants defined in astropy have
# invalid unit.
assert not isinstance(val.unit, u.UnrecognizedUnit)
def test_copy():
from astropy import constants as const
cc = copy.deepcopy(const.c)
assert cc == const.c
cc = copy.copy(const.c)
assert cc == const.c
def test_view():
"""Check that Constant and Quantity views can be taken (#3537, #3538)."""
from astropy.constants import c
c2 = c.view(Constant)
assert c2 == c
assert c2.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c2.uncertainty == 0 # c is a *defined* quantity
assert c2.name == c.name
assert c2.reference == c.reference
assert c2.unit == c.unit
q1 = c.view(Q)
assert q1 == c
assert q1.value == c.value
assert type(q1) is Q
assert not hasattr(q1, 'reference')
q2 = Q(c)
assert q2 == c
assert q2.value == c.value
assert type(q2) is Q
assert not hasattr(q2, 'reference')
c3 = Q(c, subok=True)
assert c3 == c
assert c3.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c3.uncertainty == 0 # c is a *defined* quantity
assert c3.name == c.name
assert c3.reference == c.reference
assert c3.unit == c.unit
c4 = Q(c, subok=True, copy=False)
assert c4 is c
def test_context_manager():
from astropy import constants as const
with const.set_enabled_constants('astropyconst13'):
assert const.h.value == 6.62606957e-34 # CODATA2010
assert const.h.value == 6.626070040e-34 # CODATA2014
with pytest.raises(ValueError):
with const.set_enabled_constants('notreal'):
const.h
|
e89de87af302f85a84f5bffaa686630339caeb13317c31bd0b09873ddb6f63a3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import pytest
from astropy.constants import Constant
from astropy.units import Quantity as Q
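# Unlike the version-pinned tests above, these use the current default
# constants, so measured quantities such as h are only checked loosely
# (the defaults may move to a newer CODATA release).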
def test_c():
from astropy.constants import c
# c is an exactly defined constant, so it shouldn't be changing
assert c.value == 2.99792458e8 # default is S.I.
assert c.si.value == 2.99792458e8
assert c.cgs.value == 2.99792458e10
# make sure it has the necessary attributes and they're not blank
assert c.uncertainty == 0 # c is a *defined* quantity
assert c.name
assert c.reference
assert c.unit
def test_h():
from astropy.constants import h
# check that the value is fairly close to what it should be (not exactly
# checking because this might get updated in the future)
assert abs(h.value - 6.626e-34) < 1e-38
assert abs(h.si.value - 6.626e-34) < 1e-38
assert abs(h.cgs.value - 6.626e-27) < 1e-31
# make sure it has the necessary attributes and they're not blank
assert h.uncertainty
assert h.name
assert h.reference
assert h.unit
def test_e():
"""Tests for #572 demonstrating how EM constants should behave."""
from astropy.constants import e
# A test quantity
E = Q(100, 'V/m')
# Without specifying a system e should not combine with other quantities
pytest.raises(TypeError, lambda: e * E)
    # Try it again (as a regression test on a minor issue mentioned in #745 where
# repeated attempts to use e in an expression resulted in UnboundLocalError
# instead of TypeError)
pytest.raises(TypeError, lambda: e * E)
# e.cgs is too ambiguous and should not work at all
pytest.raises(TypeError, lambda: e.cgs * E)
assert isinstance(e.si, Q)
assert isinstance(e.gauss, Q)
assert isinstance(e.esu, Q)
assert e.si * E == Q(100, 'eV/m')
assert e.gauss * E == Q(e.gauss.value * E.value, 'Fr V/m')
assert e.esu * E == Q(e.esu.value * E.value, 'Fr V/m')
def test_g0():
"""Tests for #1263 demonstrating how g0 constant should behave."""
from astropy.constants import g0
# g0 is an exactly defined constant, so it shouldn't be changing
assert g0.value == 9.80665 # default is S.I.
assert g0.si.value == 9.80665
assert g0.cgs.value == 9.80665e2
# make sure it has the necessary attributes and they're not blank
assert g0.uncertainty == 0 # g0 is a *defined* quantity
assert g0.name
assert g0.reference
assert g0.unit
    # Check that its unit has the correct physical type
assert g0.unit.physical_type == 'acceleration'
def test_b_wien():
"""b_wien should give the correct peak wavelength for
given blackbody temperature. The Sun is used in this test.
"""
from astropy.constants import b_wien
from astropy import units as u
t = 5778 * u.K
w = (b_wien / t).to(u.nm)
assert round(w.value) == 502
def test_unit():
from astropy import units as u
from astropy import constants as const
for key, val in vars(const).items():
if isinstance(val, Constant):
# Getting the unit forces the unit parser to run. Confirm
# that none of the constants defined in astropy have
# invalid unit.
assert not isinstance(val.unit, u.UnrecognizedUnit)
def test_copy():
from astropy import constants as const
cc = copy.deepcopy(const.c)
assert cc == const.c
cc = copy.copy(const.c)
assert cc == const.c
def test_view():
"""Check that Constant and Quantity views can be taken (#3537, #3538)."""
from astropy.constants import c
c2 = c.view(Constant)
assert c2 == c
assert c2.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c2.uncertainty == 0 # c is a *defined* quantity
assert c2.name == c.name
assert c2.reference == c.reference
assert c2.unit == c.unit
q1 = c.view(Q)
assert q1 == c
assert q1.value == c.value
assert type(q1) is Q
assert not hasattr(q1, 'reference')
q2 = Q(c)
assert q2 == c
assert q2.value == c.value
assert type(q2) is Q
assert not hasattr(q2, 'reference')
c3 = Q(c, subok=True)
assert c3 == c
assert c3.value == c.value
# make sure it has the necessary attributes and they're not blank
assert c3.uncertainty == 0 # c is a *defined* quantity
assert c3.name == c.name
assert c3.reference == c.reference
assert c3.unit == c.unit
c4 = Q(c, subok=True, copy=False)
assert c4 is c
|
071ec161b6ec6c38abcedc9566569e7725f414a6593c46342bf55ad0fe03ede5 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from astropy.tests.helper import pickle_protocol, check_pickling_recovery
from astropy import cosmology as cosm
originals = [cosm.FLRW]
xfails = [False]
@pytest.mark.parametrize(("original", "xfail"),
zip(originals, xfails))
def test_flrw(pickle_protocol, original, xfail):
if xfail:
pytest.xfail()
check_pickling_recovery(original, pickle_protocol)
|
f06ba7e6c7372c7137ef5b3efd055a24719a480cce751e6be532974c3e4c7d04 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
import pytest
import numpy as np
from astropy.cosmology import core, funcs
from astropy.units import allclose
from astropy.utils.compat import NUMPY_LT_1_14
from astropy import units as u
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
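# Distance, age and volume calculations involve numerical integrals (e.g. of
# 1/E(z)), so the tests that call them carry HAS_SCIPY skip guards below.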
def test_init():
""" Tests to make sure the code refuses inputs it is supposed to"""
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=-0.27)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Neff=-1)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27,
Tcmb0=u.Quantity([0.0, 2], u.K))
with pytest.raises(ValueError):
h0bad = u.Quantity([70, 100], u.km / u.s / u.Mpc)
cosmo = core.FlatLambdaCDM(H0=h0bad, Om0=0.27)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=0.5)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([-0.3, 0.2, 0.1], u.eV)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([0.15, 0.2, 0.1], u.eV)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, Neff=2, m_nu=bad_mnu)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([-0.3, 0.2], u.eV) # 2, expecting 3
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=-0.04)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=0.4)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27)
cosmo.Ob(1)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27)
cosmo.Odm(1)
with pytest.raises(TypeError):
core.default_cosmology.validate(4)
def test_basic():
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0, Neff=3.04,
Ob0=0.05)
assert allclose(cosmo.Om0, 0.27)
assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
assert allclose(cosmo.Ob0, 0.05)
assert allclose(cosmo.Odm0, 0.27 - 0.05)
# This next test will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py
assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
assert allclose(cosmo.Ok0, 0.0)
assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
1.0, rtol=1e-6)
assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
cosmo.Onu(1), 1.0, rtol=1e-6)
assert allclose(cosmo.Tcmb0, 2.0 * u.K)
assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
assert allclose(cosmo.Neff, 3.04)
assert allclose(cosmo.h, 0.7)
assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
# Make sure setting them as quantities gives the same results
H0 = u.Quantity(70, u.km / (u.s * u.Mpc))
T = u.Quantity(2.0, u.K)
cosmo = core.FlatLambdaCDM(H0=H0, Om0=0.27, Tcmb0=T, Neff=3.04, Ob0=0.05)
assert allclose(cosmo.Om0, 0.27)
assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
assert allclose(cosmo.Ob0, 0.05)
assert allclose(cosmo.Odm0, 0.27 - 0.05)
assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
assert allclose(cosmo.Ok0, 0.0)
assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
1.0, rtol=1e-6)
assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
cosmo.Onu(1), 1.0, rtol=1e-6)
assert allclose(cosmo.Tcmb0, 2.0 * u.K)
assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
assert allclose(cosmo.Neff, 3.04)
assert allclose(cosmo.h, 0.7)
assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_units():
""" Test if the right units are being returned"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)
assert cosmo.comoving_distance(1.0).unit == u.Mpc
assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.luminosity_distance(1.0).unit == u.Mpc
assert cosmo.lookback_time(1.0).unit == u.Gyr
assert cosmo.lookback_distance(1.0).unit == u.Mpc
assert cosmo.H0.unit == u.km / u.Mpc / u.s
assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
assert cosmo.Tcmb0.unit == u.K
assert cosmo.Tcmb(1.0).unit == u.K
assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
assert cosmo.Tnu0.unit == u.K
assert cosmo.Tnu(1.0).unit == u.K
assert cosmo.Tnu([0.0, 1.0]).unit == u.K
assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3
assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3
assert cosmo.age(1.0).unit == u.Gyr
assert cosmo.distmod(1.0).unit == u.mag
@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_broadcast():
""" Test array shape broadcasting for functions with single
redshift inputs"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27,
m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))
z = np.linspace(0.1, 1, 6)
z_reshape2d = z.reshape(2, 3)
z_reshape3d = z.reshape(3, 2, 1)
# Things with units
methods = ['comoving_distance', 'luminosity_distance',
'comoving_transverse_distance', 'angular_diameter_distance',
'distmod', 'lookback_time', 'age', 'comoving_volume',
'differential_comoving_volume', 'kpc_comoving_per_arcmin']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert value_flat.unit == value_2d.unit
assert value_flat.unit == value_3d.unit
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Also test unitless ones
methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H',
'w', 'de_density_scale', 'Onu', 'Ogamma',
'nu_relative_density']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Test some dark energy models
methods = ['Om', 'Ode', 'w', 'de_density_scale']
for tcosmo in [core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),
core.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),
core.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),
core.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5,
wp=-1.2, wa=-0.2, zp=0.9),
core.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]:
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
@pytest.mark.skipif('not HAS_SCIPY')
def test_clone():
""" Test clone operation"""
cosmo = core.FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.27,
Tcmb0=3.0 * u.K)
z = np.linspace(0.1, 3, 15)
# First, test with no changes, which should return same object
newclone = cosmo.clone()
assert newclone is cosmo
# Now change H0
# Note that H0 affects Ode0 because it changes Ogamma0
newclone = cosmo.clone(H0=60 * u.km / u.s / u.Mpc)
assert newclone is not cosmo
assert newclone.__class__ == cosmo.__class__
assert newclone.name == cosmo.name
assert not allclose(newclone.H0.value, cosmo.H0.value)
assert allclose(newclone.H0, 60.0 * u.km / u.s / u.Mpc)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
assert not allclose(newclone.Onu0, cosmo.Onu0)
assert allclose(newclone.Tcmb0, cosmo.Tcmb0)
assert allclose(newclone.m_nu, cosmo.m_nu)
assert allclose(newclone.Neff, cosmo.Neff)
# Compare modified version with directly instantiated one
cmp = core.FlatLambdaCDM(H0=60 * u.km / u.s / u.Mpc, Om0=0.27,
Tcmb0=3.0 * u.K)
assert newclone.__class__ == cmp.__class__
assert newclone.name == cmp.name
assert allclose(newclone.H0, cmp.H0)
assert allclose(newclone.Om0, cmp.Om0)
assert allclose(newclone.Ode0, cmp.Ode0)
assert allclose(newclone.Ok0, cmp.Ok0)
assert allclose(newclone.Ogamma0, cmp.Ogamma0)
assert allclose(newclone.Onu0, cmp.Onu0)
assert allclose(newclone.Tcmb0, cmp.Tcmb0)
assert allclose(newclone.m_nu, cmp.m_nu)
assert allclose(newclone.Neff, cmp.Neff)
assert allclose(newclone.Om(z), cmp.Om(z))
assert allclose(newclone.H(z), cmp.H(z))
assert allclose(newclone.luminosity_distance(z),
cmp.luminosity_distance(z))
# Now try changing multiple things
newclone = cosmo.clone(name="New name", H0=65 * u.km / u.s / u.Mpc,
Tcmb0=2.8 * u.K)
assert newclone.__class__ == cosmo.__class__
assert not newclone.name == cosmo.name
assert not allclose(newclone.H0.value, cosmo.H0.value)
assert allclose(newclone.H0, 65.0 * u.km / u.s / u.Mpc)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
assert not allclose(newclone.Onu0, cosmo.Onu0)
assert not allclose(newclone.Tcmb0.value, cosmo.Tcmb0.value)
assert allclose(newclone.Tcmb0, 2.8 * u.K)
assert allclose(newclone.m_nu, cosmo.m_nu)
assert allclose(newclone.Neff, cosmo.Neff)
# And direct comparison
cmp = core.FlatLambdaCDM(name="New name", H0=65 * u.km / u.s / u.Mpc,
Om0=0.27, Tcmb0=2.8 * u.K)
assert newclone.__class__ == cmp.__class__
assert newclone.name == cmp.name
assert allclose(newclone.H0, cmp.H0)
assert allclose(newclone.Om0, cmp.Om0)
assert allclose(newclone.Ode0, cmp.Ode0)
assert allclose(newclone.Ok0, cmp.Ok0)
assert allclose(newclone.Ogamma0, cmp.Ogamma0)
assert allclose(newclone.Onu0, cmp.Onu0)
assert allclose(newclone.Tcmb0, cmp.Tcmb0)
assert allclose(newclone.m_nu, cmp.m_nu)
assert allclose(newclone.Neff, cmp.Neff)
assert allclose(newclone.Om(z), cmp.Om(z))
assert allclose(newclone.H(z), cmp.H(z))
assert allclose(newclone.luminosity_distance(z),
cmp.luminosity_distance(z))
# Try a dark energy class, make sure it can handle w params
cosmo = core.w0waCDM(name="test w0wa", H0=70 * u.km / u.s / u.Mpc,
Om0=0.27, Ode0=0.5, wa=0.1, Tcmb0=4.0 * u.K)
newclone = cosmo.clone(w0=-1.1, wa=0.2)
assert newclone.__class__ == cosmo.__class__
assert newclone.name == cosmo.name
assert allclose(newclone.H0, cosmo.H0)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ode0, cosmo.Ode0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.w0, cosmo.w0)
assert allclose(newclone.w0, -1.1)
assert not allclose(newclone.wa, cosmo.wa)
assert allclose(newclone.wa, 0.2)
# Now test exception if user passes non-parameter
with pytest.raises(AttributeError):
newclone = cosmo.clone(not_an_arg=4)
def test_xtfuncs():
""" Test of absorption and lookback integrand"""
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
z = np.array([2.0, 3.2])
assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378,
rtol=1e-4)
assert allclose(cosmo.lookback_time_integrand(z),
[0.10333179, 0.04644541], rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402,
rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(z),
[2.7899584, 3.44104758], rtol=1e-4)
def test_repr():
""" Test string representation of built in classes"""
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, '
'Ode0=0.5, Tcmb0=2.725 K, Neff=3.04, m_nu=[{}] eV, '
'Ob0=None)').format(' 0. 0. 0.' if NUMPY_LT_1_14 else
'0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725, m_nu=u.Quantity(0.01, u.eV))
expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Ode0=0.5, '
'Tcmb0=2.725 K, Neff=3.04, m_nu=[{}] eV, '
'Ob0=None)').format(' 0.01 0.01 0.01' if NUMPY_LT_1_14 else
'0.01 0.01 0.01')
assert str(cosmo) == expected
cosmo = core.FlatLambdaCDM(50.0, 0.27, Tcmb0=3, Ob0=0.05)
expected = ('FlatLambdaCDM(H0=50 km / (Mpc s), Om0=0.27, '
'Tcmb0=3 K, Neff=3.04, m_nu=[{}] eV, Ob0=0.05)').format(
' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.wCDM(60.0, 0.27, 0.6, Tcmb0=2.725, w0=-0.8, name='test1')
expected = ('wCDM(name="test1", H0=60 km / (Mpc s), Om0=0.27, '
'Ode0=0.6, w0=-0.8, Tcmb0=2.725 K, Neff=3.04, '
'm_nu=[{}] eV, Ob0=None)').format(
' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6, name='test2')
expected = ('FlatwCDM(name="test2", H0=65 km / (Mpc s), Om0=0.27, '
'w0=-0.6, Tcmb0=0 K, Neff=3.04, m_nu=None, Ob0=None)')
assert str(cosmo) == expected
cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, Tcmb0=2.725, wa=0.1, name='test3')
expected = ('w0waCDM(name="test3", H0=60 km / (Mpc s), Om0=0.25, '
'Ode0=0.4, w0=-0.6, wa=0.1, Tcmb0=2.725 K, Neff=3.04, '
'm_nu=[{}] eV, Ob0=None)').format(
' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2, name='test4',
Ob0=0.0456789)
expected = ('Flatw0waCDM(name="test4", H0=55 km / (Mpc s), Om0=0.35, '
'w0=-0.9, Tcmb0=0 K, Neff=3.04, m_nu=None, '
'Ob0=0.0457)')
assert str(cosmo) == expected
cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2,
zp=0.3, name='test5')
expected = ('wpwaCDM(name="test5", H0=50 km / (Mpc s), Om0=0.3, '
'Ode0=0.3, wp=-0.9, wa=-0.2, zp=0.3, Tcmb0=0 K, '
'Neff=3.04, m_nu=None, Ob0=None)')
assert str(cosmo) == expected
cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2, Tcmb0=2.725,
m_nu=u.Quantity([0.001, 0.01, 0.015], u.eV))
expected = ('w0wzCDM(H0=55 km / (Mpc s), Om0=0.4, Ode0=0.8, w0=-1.05, '
'wz=-0.2 Tcmb0=2.725 K, Neff=3.04, '
'm_nu=[{}] eV, Ob0=None)').format(
' 0.001 0.01 0.015' if NUMPY_LT_1_14 else
'0.001 0.01 0.015')
assert str(cosmo) == expected
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_z1():
""" Test a flat cosmology at z=1 against several other on-line
calculators.
"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
z = 1
# Test values were taken from the following web cosmology
# calculators on 27th Feb 2012:
# Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
# (http://adsabs.harvard.edu/abs/2006PASP..118.1711W)
# Kempner: http://www.kempner.net/cosmic.php
# iCosmos: http://www.icosmos.co.uk/index.html
# The order of values below is Wright, Kempner, iCosmos'
assert allclose(cosmo.comoving_distance(z),
[3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.angular_diameter_distance(z),
[1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.luminosity_distance(z),
[6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.lookback_time(z),
[7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3)
assert allclose(cosmo.lookback_distance(z),
[2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3)
def test_zeroing():
""" Tests if setting params to 0s always respects that"""
# Make sure Ode = 0 behaves that way
cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0)
assert allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
assert allclose(cosmo.Ode(1), 0)
# Ogamma0 and Onu
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
assert allclose(cosmo.Ogamma(1.5), [0, 0, 0, 0])
assert allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
assert allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
assert allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
# Obaryon
cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Ob0=0.0)
assert allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# This class is to test whether the routines work correctly
# if one only overloads w(z)
class test_cos_sub(core.FLRW):
def __init__(self):
core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=0.0,
name="test_cos")
self._w0 = -0.9
def w(self, z):
return self._w0 * np.ones_like(z)
# Similar, but with neutrinos
class test_cos_subnu(core.FLRW):
def __init__(self):
core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=3.0,
m_nu=0.1 * u.eV, name="test_cos_nu")
self._w0 = -0.8
def w(self, z):
return self._w0 * np.ones_like(z)
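# These minimal subclasses override only w(z); they let the tests exercise the
# generic FLRW machinery (e.g. de_density_scale, which evaluates an integral)
# rather than the analytic shortcuts of the built-in cosmologies.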
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_subclass():
# This is the comparison object
z = [0.2, 0.4, 0.6, 0.9]
cosmo = core.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)
    # Values taken from Ned Wright's advanced cosmology calculator, Aug 17 2012
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Now try the subclass that only gives w(z)
cosmo = test_cos_sub()
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Test efunc
assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
assert allclose(cosmo.efunc([0.5, 1.0]),
[1.31744953, 1.7489240754], rtol=1e-5)
assert allclose(cosmo.inv_efunc([0.5, 1.0]),
[0.75904236, 0.57178011], rtol=1e-5)
# Test de_density_scale
assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
assert allclose(cosmo.de_density_scale([0.5, 1.0]),
[1.12934694, 1.23114444], rtol=1e-4)
# Add neutrinos for efunc, inv_efunc
@pytest.mark.skipif('not HAS_SCIPY')
def test_varyde_lumdist_mathematica():
"""Tests a few varying dark energy EOS models against a mathematica
computation"""
# w0wa models
z = np.array([0.2, 0.4, 0.9, 1.2])
cosmo = core.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)
assert allclose(cosmo.w0, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.luminosity_distance(z),
[1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),
[1.0, 0.9246310669529021, 0.9184087000251957])
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5,
Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4)
# wpwa models
cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5,
Tcmb0=0.0)
assert allclose(cosmo.wp, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.5)
assert allclose(cosmo.luminosity_distance(z),
[1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4)
cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9,
Tcmb0=0.0)
assert allclose(cosmo.wp, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.9)
assert allclose(cosmo.luminosity_distance(z),
[1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_matter():
# Test non-relativistic matter evolution
tcos = core.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
assert allclose(tcos.Om0, 0.3)
assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
assert allclose(tcos.Om(0), 0.3)
assert allclose(tcos.Ob(0), 0.045)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455],
rtol=1e-4)
assert allclose(tcos.Ob(z),
[0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4)
assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636],
rtol=1e-4)
# Consistency of dark and baryonic matter evolution with all
# non-relativistic matter
assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_ocurv():
# Test Ok evolution
# Flat, boring case
tcos = core.FlatLambdaCDM(70.0, 0.3)
assert allclose(tcos.Ok0, 0.0)
assert allclose(tcos.Ok(0), 0.0)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],
rtol=1e-6)
# Not flat
tcos = core.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
assert allclose(tcos.Ok0, 0.2)
assert allclose(tcos.Ok(0), 0.2)
assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
rtol=1e-4)
# Test the sum; note that Ogamma/Onu are 0
assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
[1.0, 1.0, 1.0, 1.0], rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ode():
# Test Ode evolution, turn off neutrinos, cmb
tcos = core.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
assert allclose(tcos.Ode0, 0.7)
assert allclose(tcos.Ode(0), 0.7)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],
rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ogamma():
"""Tests the effects of changing the temperature of the CMB"""
# Tested against Ned Wright's advanced cosmology calculator,
    # Sep 7 2012. The accuracy of our comparison is limited by
    # how many digits it outputs, which limits our test to about
    # 0.2% accuracy. The NWACC does not allow one
    # to change the number of neutrino species, fixing that at 3.
# Also, inspection of the NWACC code shows it uses inaccurate
# constants at the 0.2% level (specifically, a_B),
# so we shouldn't expect to match it that well. The integral is
# also done rather crudely. Therefore, we should not expect
# the NWACC to be accurate to better than about 0.5%, which is
# unfortunate, but reflects a problem with it rather than this code.
# More accurate tests below using Mathematica
z = np.array([1.0, 10.0, 500.0, 1000.0])
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4)
# Next compare with doing the integral numerically in Mathematica,
# which allows more precision in the test. It is at least as
# good as 0.01%, possibly better
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5)
# Just to be really sure, we also do a version where the integral
# is analytic, which is a Ode = 0 flat universe. In this case
# Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
# Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
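    # In the expression below, 4 * sigma_SB / c**3 converts T**4 to the photon
    # mass density (kg / m^3), and 1.87837e-26 kg / m^3 is the critical density
    # for h = 1, so Ogamma0h2 is the photon density parameter times h**2.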
Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26
Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2
Om0 = 1.0 - Or0
hubdis = (299792.458 / 70.0) * u.Mpc
cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
# And integers for z
assert allclose(cosmo.comoving_distance(z.astype(int)),
targvals, rtol=1e-5)
# Try Tcmb0 = 4
Or0 *= (4.0 / 2.725) ** 4
Om0 = 1.0 - Or0
cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tcmb():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)
assert allclose(cosmo.Tcmb0, 2.5 * u.K)
assert allclose(cosmo.Tcmb(2), 7.5 * u.K)
z = [0.0, 1.0, 2.0, 3.0, 9.0]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
# Make sure it's the same for integers
z = [0, 1, 2, 3, 9]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tnu():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)
assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)
z = [0.0, 1.0, 2.0, 3.0]
expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
# Test for integers
z = [0, 1, 2, 3]
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
def test_efunc_vs_invefunc():
""" Test that efunc and inv_efunc give inverse values"""
    # Note that none of the cosmologies used here need scipy, because
    # they never need to call de_density_scale.
    # The test following this covers the case where that is needed.
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# Below are the 'standard' included cosmologies
# We do the non-standard case in test_efunc_vs_invefunc_flrw,
# since it requires scipy
cosmo = core.LambdaCDM(70, 0.3, 0.5)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.LambdaCDM(70, 0.3, 0.5, m_nu=u.Quantity(0.01, u.eV))
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.FlatLambdaCDM(50.0, 0.27)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.wCDM(60.0, 0.27, 0.6, w0=-0.8)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, wa=0.1)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2, zp=0.3)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_efunc_vs_invefunc_flrw():
""" Test that efunc and inv_efunc give inverse values"""
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# FLRW is abstract, so requires test_cos_sub defined earlier
# This requires scipy, unlike the built-ins, because it
# calls de_density_scale, which has an integral in it
cosmo = test_cos_sub()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# Add neutrinos
cosmo = test_cos_subnu()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_kpc_methods():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(cosmo.arcsec_per_kpc_comoving(3),
0.0317179167 * u.arcsec / u.kpc)
assert allclose(cosmo.arcsec_per_kpc_proper(3),
0.1268716668 * u.arcsec / u.kpc)
assert allclose(cosmo.kpc_comoving_per_arcmin(3),
1891.6753126 * u.kpc / u.arcmin)
assert allclose(cosmo.kpc_proper_per_arcmin(3),
472.918828 * u.kpc / u.arcmin)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_volume():
c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
    # test against Ned Wright's calculator (cubic Gpc)
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
    # The Wright calculator isn't very accurate, so we use a rather
    # modest precision
assert allclose(c_flat.comoving_volume(redshifts), wright_flat,
rtol=1e-2)
assert allclose(c_open.comoving_volume(redshifts),
wright_open, rtol=1e-2)
assert allclose(c_closed.comoving_volume(redshifts),
wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_differential_comoving_volume():
from scipy.integrate import quad
c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test that integration of differential_comoving_volume()
# yields same as comoving_volume()
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
    # The Wright calculator isn't very accurate, so we use a rather
    # modest precision.
ftemp = lambda x: c_flat.differential_comoving_volume(x).value
otemp = lambda x: c_open.differential_comoving_volume(x).value
ctemp = lambda x: c_closed.differential_comoving_volume(x).value
# Multiply by solid_angle (4 * pi)
assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_flat, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_open, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_open_closed_icosmo():
""" Test against the tabulated values generated from icosmo.org
with three example cosmologies (flat, open and closed).
"""
cosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 669.77536 576.15085 778.61386
0.32500000 1285.5964 970.26143 1703.4152
0.50000000 1888.6254 1259.0836 2832.9381
0.66250000 2395.5489 1440.9317 3982.6000
0.82500000 2855.5732 1564.6976 5211.4210
1.0000000 3303.8288 1651.9144 6607.6577
1.1625000 3681.1867 1702.2829 7960.5663
1.3250000 4025.5229 1731.4077 9359.3408
1.5000000 4363.8558 1745.5423 10909.640
1.6625000 4651.4830 1747.0359 12384.573
1.8250000 4916.5970 1740.3883 13889.387
2.0000000 5179.8621 1726.6207 15539.586
2.1625000 5406.0204 1709.4136 17096.540
2.3250000 5616.5075 1689.1752 18674.888
2.5000000 5827.5418 1665.0120 20396.396
2.6625000 6010.4886 1641.0890 22013.414
2.8250000 6182.1688 1616.2533 23646.796
3.0000000 6355.6855 1588.9214 25422.742
3.1625000 6507.2491 1563.3031 27086.425
3.3250000 6650.4520 1537.6768 28763.205
3.5000000 6796.1499 1510.2555 30582.674
3.6625000 6924.2096 1485.0852 32284.127
3.8250000 7045.8876 1460.2876 33996.408
4.0000000 7170.3664 1434.0733 35851.832
4.1625000 7280.3423 1410.2358 37584.767
4.3250000 7385.3277 1386.9160 39326.870
4.5000000 7493.2222 1362.4040 41212.722
4.6625000 7588.9589 1340.2135 42972.480
"""
cosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 643.08185 553.18868 747.58265
0.32500000 1200.9858 906.40441 1591.3062
0.50000000 1731.6262 1154.4175 2597.4393
0.66250000 2174.3252 1307.8648 3614.8157
0.82500000 2578.7616 1413.0201 4706.2399
1.0000000 2979.3460 1489.6730 5958.6920
1.1625000 3324.2002 1537.2024 7188.5829
1.3250000 3646.8432 1568.5347 8478.9104
1.5000000 3972.8407 1589.1363 9932.1017
1.6625000 4258.1131 1599.2913 11337.226
1.8250000 4528.5346 1603.0211 12793.110
2.0000000 4804.9314 1601.6438 14414.794
2.1625000 5049.2007 1596.5852 15968.097
2.3250000 5282.6693 1588.7727 17564.875
2.5000000 5523.0914 1578.0261 19330.820
2.6625000 5736.9813 1566.4113 21011.694
2.8250000 5942.5803 1553.6158 22730.370
3.0000000 6155.4289 1538.8572 24621.716
3.1625000 6345.6997 1524.4924 26413.975
3.3250000 6529.3655 1509.6799 28239.506
3.5000000 6720.2676 1493.3928 30241.204
3.6625000 6891.5474 1478.0799 32131.840
3.8250000 7057.4213 1462.6780 34052.058
4.0000000 7230.3723 1446.0745 36151.862
4.1625000 7385.9998 1430.7021 38130.224
4.3250000 7537.1112 1415.4199 40135.117
4.5000000 7695.0718 1399.1040 42322.895
4.6625000 7837.5510 1384.1150 44380.133
"""
cosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 601.80160 517.67879 699.59436
0.32500000 1057.9502 798.45297 1401.7840
0.50000000 1438.2161 958.81076 2157.3242
0.66250000 1718.6778 1033.7912 2857.3019
0.82500000 1948.2400 1067.5288 3555.5381
1.0000000 2152.7954 1076.3977 4305.5908
1.1625000 2312.3427 1069.2914 5000.4410
1.3250000 2448.9755 1053.3228 5693.8681
1.5000000 2575.6795 1030.2718 6439.1988
1.6625000 2677.9671 1005.8092 7130.0873
1.8250000 2768.1157 979.86398 7819.9270
2.0000000 2853.9222 951.30739 8561.7665
2.1625000 2924.8116 924.84161 9249.7167
2.3250000 2988.5333 898.80701 9936.8732
2.5000000 3050.3065 871.51614 10676.073
2.6625000 3102.1909 847.01459 11361.774
2.8250000 3149.5043 823.39982 12046.854
3.0000000 3195.9966 798.99915 12783.986
3.1625000 3235.5334 777.30533 13467.908
3.3250000 3271.9832 756.52790 14151.327
3.5000000 3308.1758 735.15017 14886.791
3.6625000 3339.2521 716.19347 15569.263
3.8250000 3368.1489 698.06195 16251.319
4.0000000 3397.0803 679.41605 16985.401
4.1625000 3422.1142 662.87926 17666.664
4.3250000 3445.5542 647.05243 18347.576
4.5000000 3469.1805 630.76008 19080.493
4.6625000 3489.7534 616.29199 19760.729
"""
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=1)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
@pytest.mark.skipif('not HAS_SCIPY')
def test_integral():
# Test integer vs. floating point inputs
cosmo = core.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)
assert allclose(cosmo.comoving_distance(3),
cosmo.comoving_distance(3.0), rtol=1e-7)
assert allclose(cosmo.comoving_distance([1, 2, 3, 5]),
cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),
rtol=1e-7)
assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
assert allclose(cosmo.efunc([1, 2, 6]),
cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7)
assert allclose(cosmo.inv_efunc([1, 2, 6]),
cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7)
def test_wz():
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
assert allclose(cosmo.w(1.0), -1.)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-1., -1, -1, -1, -1, -1])
cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-0.5)
assert allclose(cosmo.w(1.0), -0.5)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.5, -0.5, -0.5, -0.5, -0.5, -0.5])
assert allclose(cosmo.w0, -0.5)
cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wz=0.5)
assert allclose(cosmo.w(1.0), -0.5)
assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1.0, -0.75, -0.5, -0.25, 0.15])
assert allclose(cosmo.w0, -1.0)
assert allclose(cosmo.wz, 0.5)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
assert allclose(cosmo.w0, -1.0)
assert allclose(cosmo.wa, -0.5)
assert allclose(cosmo.w(1.0), -1.25)
assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1, -1.16666667, -1.25, -1.3, -1.34848485])
cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
assert allclose(cosmo.wp, -0.9)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.5)
assert allclose(cosmo.w(0.5), -0.9)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.94848485, -0.93333333, -0.9, -0.84666667,
-0.82380952, -0.78266667])
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_densityscale():
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
assert allclose(cosmo.de_density_scale(z),
[1.0, 1.0, 1.0, 1.0, 1.0])
# Integer check
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)
assert allclose(cosmo.de_density_scale(z),
[1.15369, 1.31453, 1.83712, 3.95285, 6.5479],
rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
assert allclose(cosmo.de_density_scale(z),
[0.746048, 0.5635595, 0.25712378, 0.026664129,
0.0035916468], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
assert allclose(cosmo.de_density_scale(z),
[0.9934201, 0.9767912, 0.897450,
0.622236, 0.4458753], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
assert allclose(cosmo.de_density_scale(z),
[1.012246048, 1.0280102, 1.087439,
1.324988, 1.565746], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
@pytest.mark.skipif('not HAS_SCIPY')
def test_age():
    # WMAP7 but with Omega_relativistic = 0
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
assert allclose(tcos.age([1., 5.]),
[5.97113193, 1.20553129] * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
# Add relativistic species
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)
# And massive neutrinos
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0,
m_nu=0.1 * u.eV)
assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distmod():
    # WMAP7 but with Omega_relativistic = 0
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
assert allclose(tcos.distmod([1, 5]),
[44.124857, 48.40167258] * u.mag)
assert allclose(tcos.distmod([1., 5.]),
[44.124857, 48.40167258] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_neg_distmod():
# Cosmology with negative luminosity distances (perfectly okay,
# if obscure)
tcos = core.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
assert allclose(tcos.luminosity_distance([50, 100]),
[16612.44047622, -46890.79092244] * u.Mpc)
assert allclose(tcos.distmod([50, 100]),
[46.102167189, 48.355437790944] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_critical_density():
# WMAP7 but with Omega_relativistic = 0
# These tests will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.critical_density0,
9.309668456020899e-30 * u.g / u.cm**3)
assert allclose(tcos.critical_density0,
tcos.critical_density(0))
assert allclose(tcos.critical_density([1, 5]),
[2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3)
assert allclose(tcos.critical_density([1., 5.]),
[2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_distance_z1z2():
tcos = core.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))
    # Comoving distances are antisymmetric: d(z1, z2) == -d(z2, z1)
assert allclose(tcos._comoving_distance_z1z2(1, 2),
-tcos._comoving_distance_z1z2(2, 1))
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (3767.90579253,
2386.25591391,
-1381.64987862,
2893.11776663,
174.1524683) * u.Mpc
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_age_in_special_cosmologies():
"""Check that age in de Sitter and Einstein-de Sitter Universes work.
Some analytic solutions fail at these critical points.
"""
c_dS = core.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.age(z=0), np.inf * u.Gyr)
assert allclose(c_dS.age(z=1), np.inf * u.Gyr)
assert allclose(c_dS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr)
c_EdS = core.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr)
assert allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_in_special_cosmologies():
"""Check that de Sitter and Einstein-de Sitter Universes both work.
Some analytic solutions fail at these critical points.
"""
c_dS = core.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = core.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
c_dS = core.LambdaCDM(100, 0, 1, Tcmb0=0)
assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = core.LambdaCDM(100, 1, 0, Tcmb0=0)
assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_transverse_distance_z1z2():
tcos = core.FlatLambdaCDM(100, 0.3, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))
# Tests that should actually work, target values computed with
# http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
# Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2),
1313.2232194828466 * u.Mpc)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test Flat Universe with Omega_M > 1. Rarely used, but perfectly valid.
tcos = core.FlatLambdaCDM(100, 1.5, Tcmb0=0.0)
results = (2202.72682564,
1559.51679971,
-643.21002593,
1408.36365679,
85.09286258) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test non-flat cases to avoid simply testing
# comoving_distance_z1z2. Test array, array case.
tcos = core.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0)
results = (3535.931375645655,
2226.430046551708,
-1208.6817970036532,
2595.567367601969,
151.36592003406884) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
# Test positive curvature with scalar, array combination.
tcos = core.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0)
z1 = 0.1
z2 = 0, 0.1, 0.2, 0.5, 1.1, 2
results = (-281.31602666724865,
0.,
248.58093707820436,
843.9331377460543,
1618.6104987686672,
2287.5626543279927) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_angular_diameter_distance_z1z2():
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5])
# Tests that should actually work
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
646.22968662822018 * u.Mpc)
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (1760.0628637762106,
1670.7497657219858,
-969.34452994,
1159.0970895962193,
115.72768186186921) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2),
results)
z1 = 0.1
z2 = 0.1, 0.2, 0.5, 1.1, 2
results = (0.,
332.09893173,
986.35635069,
1508.37010062,
1621.07937976) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2),
results)
# Non-flat (positive Ok0) test
tcos = core.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
620.1175337852428 * u.Mpc)
# Non-flat (negative Ok0) test
tcos = core.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
228.42914659246014 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_absorption_distance():
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.absorption_distance([1, 3]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance([1., 3.]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance(3), 7.98685853)
assert allclose(tcos.absorption_distance(3.), 7.98685853)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_basic():
# Test no neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Neff=4.05,
Tcmb0=2.725 * u.K, m_nu=u.Quantity(0, u.eV))
assert allclose(tcos.Neff, 4.05)
assert not tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 4
assert mnu.unit == u.eV
assert allclose(mnu, [0.0, 0.0, 0.0, 0.0] * u.eV)
assert allclose(tcos.nu_relative_density(1.), 0.22710731766 * 4.05,
rtol=1e-6)
assert allclose(tcos.nu_relative_density(1), 0.22710731766 * 4.05,
rtol=1e-6)
# Alternative no neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0 * u.K,
m_nu=u.Quantity(0.4, u.eV))
assert not tcos.has_massive_nu
assert tcos.m_nu is None
# Test basic setting, retrieval of values
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725 * u.K,
m_nu=u.Quantity([0.0, 0.01, 0.02], u.eV))
assert tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 3
assert mnu.unit == u.eV
assert allclose(mnu, [0.0, 0.01, 0.02] * u.eV)
# All massive neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725,
m_nu=u.Quantity(0.1, u.eV), Neff=3.1)
assert allclose(tcos.Neff, 3.1)
assert tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 3
assert mnu.unit == u.eV
assert allclose(mnu, [0.1, 0.1, 0.1] * u.eV)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distances():
# Test distance calculations for various special case
# scenarios (no relativistic species, normal, massive neutrinos)
# These do not come from external codes -- they are just internal
# checks to make sure nothing changes if we muck with the distance
# calculators
z = np.array([1.0, 2.0, 3.0, 4.0])
# The pattern here is: no relativistic species, the relativistic
# species with massless neutrinos, then massive neutrinos
cos = core.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2953.93001902, 4616.7134253, 5685.07765971,
6440.80611897] * u.Mpc, rtol=1e-4)
cos = core.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3037.12620424, 4776.86236327, 5889.55164479,
6671.85418235] * u.Mpc, rtol=1e-4)
cos = core.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2471.80626824, 3567.1902565, 4207.15995626,
4638.20476018] * u.Mpc, rtol=1e-4)
# Flat
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3180.83488552, 5060.82054204, 6253.6721173,
7083.5374303] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3180.42662867, 5059.60529655, 6251.62766102,
7080.71698117] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.54183142, 3371.91131264, 3988.40711188,
4409.09346922] * u.Mpc, rtol=1e-4)
# Add w
cos = core.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3216.8296894, 5117.2097601, 6317.05995437,
7149.68648536] * u.Mpc, rtol=1e-4)
cos = core.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3143.56537758, 5000.32196494, 6184.11444601,
7009.80166062] * u.Mpc, rtol=1e-4)
cos = core.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.76035371, 3372.1971387, 3988.71362289,
4409.40817174] * u.Mpc, rtol=1e-4)
# Non-flat w
cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2849.6163356, 4428.71661565, 5450.97862778,
6179.37072324] * u.Mpc, rtol=1e-4)
cos = core.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2904.35580229, 4511.11471267, 5543.43643353,
6275.9206788] * u.Mpc, rtol=1e-4)
cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2473.32522734, 3581.54519631, 4232.41674426,
4671.83818117] * u.Mpc, rtol=1e-4)
# w0wa
cos = core.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2937.7807638, 4572.59950903, 5611.52821924,
6339.8549956] * u.Mpc, rtol=1e-4)
cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2907.34722624, 4539.01723198, 5593.51611281,
6342.3228444] * u.Mpc, rtol=1e-4)
cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2507.18336722, 3633.33231695, 4292.44746919,
4736.35404638] * u.Mpc, rtol=1e-4)
# Flatw0wa
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3123.29892781, 4956.15204302, 6128.15563818,
6948.26480378] * u.Mpc, rtol=1e-4)
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3122.92671907, 4955.03768936, 6126.25719576,
6945.61856513] * u.Mpc, rtol=1e-4)
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.70072701, 3372.13719963, 3988.6571093,
4409.35399673] * u.Mpc, rtol=1e-4)
# wpwa
cos = core.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2954.68975298, 4599.83254834, 5643.04013201,
6373.36147627] * u.Mpc, rtol=1e-4)
cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2919.00656215, 4558.0218123, 5615.73412391,
6366.10224229] * u.Mpc, rtol=1e-4)
cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2629.48489827, 3874.13392319, 4614.31562397,
5116.51184842] * u.Mpc, rtol=1e-4)
# w0wz
cos = core.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3051.68786716, 4756.17714818, 5822.38084257,
6562.70873734] * u.Mpc, rtol=1e-4)
cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2997.8115653, 4686.45599916, 5764.54388557,
6524.17408738] * u.Mpc, rtol=1e-4)
cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2676.73467639, 3940.57967585, 4686.90810278,
5191.54178243] * u.Mpc, rtol=1e-4)
# Also test different numbers of massive neutrinos
# for FlatLambdaCDM to give the scalar nu density functions a
    # workout
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 0, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2777.71589173, 4186.91111666, 5046.0300719,
5636.10397302] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 5, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2636.48149391, 3913.14102091, 4684.59108974,
5213.07557084] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2563.5093049, 3776.63362071, 4506.83448243,
5006.50158829] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2,
m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2525.58017482, 3706.87633298, 4416.58398847,
4901.96669755] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_density():
# Testing neutrino density calculation
# Simple test cosmology, where we compare rho_nu and rho_gamma
# against the exact formula (eq 24/25 of Komatsu et al. 2011)
# computed using Mathematica. The approximation we use for f(y)
# is only good to ~ 0.5% (with some redshift dependence), so that's
# what we test to.
ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
# First try 3 massive neutrinos, all 100 eV -- note this is a universe
# seriously dominated by neutrinos!
tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(100.0, u.eV))
assert tcos.has_massive_nu
assert tcos.Neff == 3
nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323,
15633.5, 171.801])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
# Next, slightly less massive
tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.25, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312,
39.1005, 1.11086])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
# For this one also test Onu directly
onu_exp = np.array([0.01890217, 0.05244681, 0.0638236,
0.06999286, 0.1344951])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# And fairly light
tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.01, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348,
1.90671, 1.00021])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00066599, 0.00172677, 0.0020732,
0.00268404, 0.0978313])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],
rtol=1e-4)
assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],
rtol=1e-4)
# Now a mixture of neutrino masses, with non-integer Neff
tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04,
m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV))
nurel_exp = nuprefac * tcos.Neff * \
np.array([149.386233, 74.87915, 50.0518,
14.002403, 1.03702333])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00584959, 0.01493142, 0.01772291,
0.01963451, 0.10227728])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# Integer redshifts
ztest = ztest.astype(int)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value():
# These are tests of expected values, and hence have less precision
# than the roundtrip tests below (test_z_at_value_roundtrip);
# here we have to worry about the cosmological calculations
    # giving slightly different values on different architectures,
    # whereas there we are checking internal consistency on the same
    # architecture and so can be more demanding
z_at_value = funcs.z_at_value
cosmo = core.Planck13
d = cosmo.luminosity_distance(3)
assert allclose(z_at_value(cosmo.luminosity_distance, d), 3,
rtol=1e-8)
assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.198122684356,
rtol=1e-6)
assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc),
1.3685790653802761, rtol=1e-6)
assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr),
0.7951983674601507, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
zmax=2), 0.68127769625288614, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
zmin=2.5), 3.7914908028272083, rtol=1e-6)
assert allclose(z_at_value(cosmo.distmod, 46 * u.mag),
1.9913891680278133, rtol=1e-6)
# test behavior when the solution is outside z limits (should
# raise a CosmologyError)
with pytest.raises(core.CosmologyError):
with pytest.warns(UserWarning, match='fval is not bracketed'):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5)
with pytest.raises(core.CosmologyError):
with pytest.warns(UserWarning, match='fval is not bracketed'):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_roundtrip():
"""
Calculate values from a known redshift, and then check that
z_at_value returns the right answer.
"""
z = 0.5
# Skip Ok, w, de_density_scale because in the Planck13 cosmology
    # they are redshift independent and hence uninvertible.
    # The *_distance_z1z2 methods take multiple arguments, so they require
    # special handling.
    # clone isn't a redshift-dependent method.
skip = ('Ok',
'angular_diameter_distance_z1z2',
'clone',
'de_density_scale', 'w')
import inspect
methods = inspect.getmembers(core.Planck13, predicate=inspect.ismethod)
for name, func in methods:
if name.startswith('_') or name in skip:
continue
print('Round-trip testing {0}'.format(name))
fval = func(z)
# we need zmax here to pick the right solution for
# angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
rtol=2e-8)
# Test distance functions between two redshifts
z2 = 2.0
func_z1z2 = [lambda z1: core.Planck13._comoving_distance_z1z2(z1, z2),
lambda z1:
core.Planck13._comoving_transverse_distance_z1z2(z1, z2),
lambda z1:
core.Planck13.angular_diameter_distance_z1z2(z1, z2)]
for func in func_z1z2:
fval = func(z)
assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
rtol=2e-8)
@pytest.mark.skipif('not HAS_SCIPY')
def test_elliptic_comoving_distance_z1z2():
"""Regression test for #8388."""
cosmo = core.LambdaCDM(70., 2.3, 0.05, Tcmb0=0)
z = 0.2
assert allclose(cosmo.comoving_distance(z),
cosmo._integral_comoving_distance_z1z2(0., z))
assert allclose(cosmo._elliptic_comoving_distance_z1z2(0., z),
cosmo._integral_comoving_distance_z1z2(0., z))
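@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_transverse_distance_sketch():
    # Illustrative sketch (an added example, not one of the original tests):
    # in a flat cosmology the public comoving_transverse_distance should match
    # comoving_distance, mirroring the private _comoving_*_z1z2 checks above.
    cosmo = core.FlatLambdaCDM(70.0, 0.3, Tcmb0=0.0)
    z = np.array([0.5, 1.0, 2.0])
    assert allclose(cosmo.comoving_transverse_distance(z),
                    cosmo.comoving_distance(z))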
|
4ba76341ddabbb736b9551c0ebe9b2d1642984a7cbfa2046922a3067d289822e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file is the main file used when running tests with pytest directly,
# in particular if running e.g. ``pytest docs/``.
from importlib.util import find_spec
import os
import pkg_resources
import tempfile
from astropy.tests.plugins.display import PYTEST_HEADER_MODULES
import astropy
pytest_plugins = [
'astropy.tests.plugins.display',
]
if find_spec('asdf') is not None:
from asdf import __version__ as asdf_version
if asdf_version >= astropy.__minimum_asdf_version__:
entry_points = []
for entry_point in pkg_resources.iter_entry_points('pytest11'):
entry_points.append(entry_point.name)
if "asdf_schema_tester" not in entry_points:
pytest_plugins += ['asdf.tests.schema_tester']
PYTEST_HEADER_MODULES['Asdf'] = 'asdf'
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.
os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')
os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')
os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))
os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))
# Note that we don't need to change the environment variables back or remove
# them after testing, because they are only changed for the duration of the
# Python process, and this configuration only matters if running pytest
# directly, not from e.g. an IPython session.
|
ecc9c6b1fa6e53f91ffe90be4c11681bc39d32483486c306549e67c1810e03c2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
# Prior to Astropy 3.2, astropy was imported during setup.py commands. If we are
# in setup mode, then astropy-helpers defines an _ASTROPY_SETUP_ variable, which
# we used to use to conditionally import C extensions for example. However, the
# behavior of importing the package during the setup process is not good
# practice and we therefore now explicitly prevent the package from being
# imported in that case to prevent any regressions. We use _ASTROPY_CORE_SETUP_
# (defined in setup.py) rather than _ASTROPY_SETUP_ since the latter is also
# set up for affiliated packages, and those need to be able to import the
# (installed) core package during e.g. python setup.py test.
try:
_ASTROPY_CORE_SETUP_
except NameError:
pass
else:
raise RuntimeError("The astropy package cannot be imported during setup")
import sys
import os
from warnings import warn
__minimum_python_version__ = '3.5'
__minimum_numpy_version__ = '1.13.0'
# ASDF is an optional dependency, but this is the minimum version that is
# compatible with Astropy when it is installed.
__minimum_asdf_version__ = '2.3.0'
class UnsupportedPythonError(Exception):
pass
# This is the same check as the one at the top of setup.py
if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):
raise UnsupportedPythonError("Astropy does not support Python < {}".format(__minimum_python_version__))
def _is_astropy_source(path=None):
"""
Returns whether the source for this module is directly in an astropy
source distribution or checkout.
"""
# If this __init__.py file is in ./astropy/ then import is within a source
    # dir. The .astropy-root file is distributed with the source, but should
    # not be installed.
if path is None:
path = os.path.join(os.path.dirname(__file__), os.pardir)
elif os.path.isfile(path):
path = os.path.dirname(path)
source_dir = os.path.abspath(path)
return os.path.exists(os.path.join(source_dir, '.astropy-root'))
def _is_astropy_setup():
"""
Returns whether we are currently being imported in the context of running
Astropy's setup.py.
"""
main_mod = sys.modules.get('__main__')
if not main_mod:
return False
return (getattr(main_mod, '__file__', False) and
os.path.basename(main_mod.__file__).rstrip('co') == 'setup.py' and
_is_astropy_source(main_mod.__file__))
try:
from .version import version as __version__
except ImportError:
# TODO: Issue a warning using the logging framework
__version__ = ''
try:
from .version import githash as __githash__
except ImportError:
# TODO: Issue a warning using the logging framework
__githash__ = ''
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
if 'dev' in __version__:
online_docs_root = 'http://docs.astropy.org/en/latest/'
else:
online_docs_root = 'http://docs.astropy.org/en/{0}/'.format(__version__)
def _check_numpy():
"""
Check that Numpy is installed and it is of the minimum version we
require.
"""
# Note: We could have used distutils.version for this comparison,
# but it seems like overkill to import distutils at runtime.
requirement_met = False
try:
import numpy
except ImportError:
pass
else:
from .utils import minversion
requirement_met = minversion(numpy, __minimum_numpy_version__)
if not requirement_met:
msg = ("Numpy version {0} or later must be installed to use "
"Astropy".format(__minimum_numpy_version__))
raise ImportError(msg)
return numpy
_check_numpy()
from . import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
'When True, use Unicode characters when outputting values, and '
'displaying widgets at the console.')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when writing to the console.',
aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR'])
max_lines = _config.ConfigItem(
None,
description='Maximum number of lines in the display of pretty-printed '
'objects. If not provided, try to determine automatically from the '
'terminal size. Negative numbers mean no limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_lines'])
max_width = _config.ConfigItem(
None,
description='Maximum number of characters per line in the display of '
'pretty-printed objects. If not provided, try to determine '
'automatically from the terminal size. Negative numbers mean no '
'limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_width'])
conf = Conf()
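# Illustrative usage sketch (added example, not part of the original module):
# configuration items defined on Conf are read as attributes of ``conf`` and
# can be overridden temporarily with ConfigNamespace.set_temp.
def _conf_usage_example():  # hypothetical helper, never called at import time
    with conf.set_temp('max_lines', 10):
        return conf.max_lines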
# Define a base ScienceState for configuring constants and units
from .utils.state import ScienceState
class base_constants_version(ScienceState):
"""
Base class for the real version-setters below
"""
_value = 'test'
_versions = dict(test='test')
@classmethod
def validate(cls, value):
if value not in cls._versions:
raise ValueError('Must be one of {}'
.format(list(cls._versions.keys())))
return cls._versions[value]
@classmethod
def set(cls, value):
"""
Set the current constants value.
"""
import sys
if 'astropy.units' in sys.modules:
raise RuntimeError('astropy.units is already imported')
if 'astropy.constants' in sys.modules:
raise RuntimeError('astropy.constants is already imported')
class _Context:
def __init__(self, parent, value):
self._value = value
self._parent = parent
def __enter__(self):
pass
def __exit__(self, type, value, tb):
self._parent._value = self._value
def __repr__(self):
return ('<ScienceState {0}: {1!r}>'
.format(self._parent.__name__, self._parent._value))
ctx = _Context(cls, cls._value)
value = cls.validate(value)
cls._value = value
return ctx
class physical_constants(base_constants_version):
"""
The version of physical constants to use
"""
# Maintainers: update when new constants are added
_value = 'codata2014'
_versions = dict(codata2018='codata2018', codata2014='codata2014',
codata2010='codata2010', astropyconst40='codata2018',
astropyconst20='codata2014', astropyconst13='codata2010')
class astronomical_constants(base_constants_version):
"""
The version of astronomical constants to use
"""
# Maintainers: update when new constants are added
_value = 'iau2015'
_versions = dict(iau2015='iau2015', iau2012='iau2012',
astropyconst40='iau2015', astropyconst20='iau2015',
astropyconst13='iau2012')
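# Illustrative usage sketch (added example, not part of the original module):
# a constants version is chosen by calling set() on the ScienceState
# subclasses above; this must happen before astropy.units / astropy.constants
# are imported, otherwise set() raises RuntimeError (see base_constants_version).
def _constants_version_example():  # hypothetical helper, never called at import time
    physical_constants.set('codata2010')      # keys come from the _versions dicts
    astronomical_constants.set('iau2012')
    from astropy import constants as const    # import only after selecting
    return const.G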
# Create the test() function
from .tests.runner import TestRunner
test = TestRunner.make_test_runner_in(__path__[0])
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
from . import config
def _rollback_import(message):
log.error(message)
# Now disable exception logging to avoid an annoying error in the
# exception logger before we raise the import error:
_teardown_log()
# Roll back any astropy sub-modules that have been imported thus
# far
for key in list(sys.modules):
if key.startswith('astropy.'):
del sys.modules[key]
raise ImportError('astropy')
try:
from .utils import _compiler
except ImportError:
if _is_astropy_source():
log.warning('You appear to be trying to import astropy from '
'within a source checkout without building the '
'extension modules first. Attempting to (re)build '
'extension modules:')
try:
_rebuild_extensions()
except BaseException as exc:
_rollback_import(
'An error occurred while attempting to rebuild the '
'extension modules. Please try manually running '
'`./setup.py develop` or `./setup.py build_ext '
'--inplace` to see what the issue was. Extension '
'modules must be successfully compiled and importable '
'in order to import astropy.')
                # Re-raise only if this was not a regular Exception (for
                # example if a "SystemExit" or "KeyboardInterrupt" was
                # invoked).
if not isinstance(exc, Exception):
raise
else:
# Outright broken installation; don't be nice.
raise
    # add these here so we only need to clean up the namespace at the end
config_dir = os.path.dirname(__file__)
try:
config.configuration.update_default_config(__package__, config_dir)
except config.configuration.ConfigurationDefaultMissingError as e:
wmsg = (e.args[0] + " Cannot install default profile. If you are "
"importing from source, this is expected.")
warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
def _rebuild_extensions():
global __version__
global __githash__
import subprocess
import time
from .utils.console import Spinner
devnull = open(os.devnull, 'w')
old_cwd = os.getcwd()
os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
try:
sp = subprocess.Popen([sys.executable, 'setup.py', 'build_ext',
'--inplace'], stdout=devnull,
stderr=devnull)
with Spinner('Rebuilding extension modules') as spinner:
while sp.poll() is None:
next(spinner)
time.sleep(0.05)
finally:
os.chdir(old_cwd)
devnull.close()
if sp.returncode != 0:
raise OSError('Running setup.py build_ext --inplace failed '
'with error code {0}: try rerunning this command '
'manually to check what the error was.'.format(
sp.returncode))
# Try re-loading module-level globals from the astropy.version module,
# which may not have existed before this function ran
try:
from .version import version as __version__
except ImportError:
pass
try:
from .version import githash as __githash__
except ImportError:
pass
# Set the bibtex entry to the article referenced in CITATION.
def _get_bibtex():
citation_file = os.path.join(os.path.dirname(__file__), 'CITATION')
with open(citation_file, 'r') as citation:
refs = citation.read().split('@ARTICLE')[1:]
if len(refs) == 0: return ''
bibtexreference = "@ARTICLE{0}".format(refs[0])
return bibtexreference
__citation__ = __bibtex__ = _get_bibtex()
import logging
# Use the root logger as a dummy log before initializing Astropy's logger
log = logging.getLogger()
from .logger import _init_log, _teardown_log
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
from urllib.parse import urlencode
import webbrowser
version = __version__
if 'dev' in version:
version = 'latest'
else:
version = 'v' + version
url = 'http://docs.astropy.org/en/{0}/search.html?{1}'.format(
version, urlencode({'q': query}))
webbrowser.open(url)
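# Illustrative usage note (added comment, not part of the original module):
# e.g. ``online_help('coordinates')`` opens the documentation search page
# http://docs.astropy.org/en/<version>/search.html?q=coordinates in a browser.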
__dir_inc__ = ['__version__', '__githash__', '__minimum_numpy_version__',
'__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
'online_docs_root', 'conf', 'physical_constants',
'astronomical_constants']
from types import ModuleType as __module_type__
# Clean up top-level namespace--delete everything that isn't in __dir_inc__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __dir_inc__ or
(varname[0] != '_' and
isinstance(locals()[varname], __module_type__) and
locals()[varname].__name__.startswith(__name__ + '.'))):
        # The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
|
4b5245728dbee3f61ffb82c3f96a059d0eba7a820f778380a60768f8bb31c9ea | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains pytest configuration settings that are astropy-specific
(i.e. those that would not necessarily be shared by affiliated packages
making use of astropy's test runner).
"""
import os
import builtins
import tempfile
from astropy.tests.plugins.display import PYTEST_HEADER_MODULES
from astropy.tests.helper import enable_deprecations_as_exceptions
try:
import matplotlib
except ImportError:
HAS_MATPLOTLIB = False
else:
HAS_MATPLOTLIB = True
enable_deprecations_as_exceptions(
include_astropy_deprecations=False,
# This is a workaround for the OpenSSL deprecation warning that comes from
# the `requests` module. It only appears when both asdf and sphinx are
# installed. This can be removed once pyopenssl 1.7.20+ is released.
modules_to_ignore_on_import=['requests'])
if HAS_MATPLOTLIB:
matplotlib.use('Agg')
matplotlibrc_cache = {}
def pytest_configure(config):
builtins._pytest_running = True
# do not assign to matplotlibrc_cache in function scope
if HAS_MATPLOTLIB:
matplotlibrc_cache.update(matplotlib.rcParams)
matplotlib.rcdefaults()
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration. Note that this
# is also set in the test runner, but we need to also set it here for
# things to work properly in parallel mode
builtins._xdg_config_home_orig = os.environ.get('XDG_CONFIG_HOME')
builtins._xdg_cache_home_orig = os.environ.get('XDG_CACHE_HOME')
os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')
os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')
os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))
os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))
def pytest_unconfigure(config):
builtins._pytest_running = False
# do not assign to matplotlibrc_cache in function scope
if HAS_MATPLOTLIB:
matplotlib.rcParams.update(matplotlibrc_cache)
matplotlibrc_cache.clear()
if builtins._xdg_config_home_orig is None:
os.environ.pop('XDG_CONFIG_HOME')
else:
os.environ['XDG_CONFIG_HOME'] = builtins._xdg_config_home_orig
if builtins._xdg_cache_home_orig is None:
os.environ.pop('XDG_CACHE_HOME')
else:
os.environ['XDG_CACHE_HOME'] = builtins._xdg_cache_home_orig
PYTEST_HEADER_MODULES['Cython'] = 'cython'
PYTEST_HEADER_MODULES['Scikit-image'] = 'skimage'
|
03805ed64da383f132c8645eebd6b5abe892d3dd48427c8a286a145a9e9f8e4a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file needs to be included here to make sure commands such
# as ``python setup.py test ... -t docs/...`` work, since this
# will ignore the conftest.py file at the root of the repository
# and the one in astropy/conftest.py
import os
import tempfile
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.
os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('astropy_config')
os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('astropy_cache')
os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'astropy'))
os.mkdir(os.path.join(os.environ['XDG_CACHE_HOME'], 'astropy'))
# Note that we don't need to change the environment variables back or remove
# them after testing, because they are only changed for the duration of the
# Python process, and this configuration only matters if running pytest
# directly, not from e.g. an IPython session.
|
2dbd41a8166b3ca4f8163a1f7b8ed4f3ac8198c7fa4b1b5fedcecb1f39aacbd1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import weakref
import re
from copy import deepcopy
import numpy as np
from numpy import ma
# Remove this when Numpy no longer emits this warning and that Numpy version
# becomes the minimum required version for Astropy.
# https://github.com/astropy/astropy/issues/6285
try:
from numpy.ma.core import MaskedArrayFutureWarning
except ImportError:
# For Numpy versions that do not raise this warning.
MaskedArrayFutureWarning = None
from astropy.units import Unit, Quantity
from astropy.utils.console import color_print
from astropy.utils.metadata import MetaData
from astropy.utils.data_info import BaseColumnInfo, dtype_info_name
from astropy.utils.misc import dtype_bytes_or_chars
from . import groups
from . import pprint
from .np_utils import fix_column_name
# These "shims" provide __getitem__ implementations for Column and MaskedColumn
from ._column_mixins import _ColumnGetitemShim, _MaskedColumnGetitemShim
# Create a generic TableFormatter object for use by bare columns with no
# parent table.
FORMATTER = pprint.TableFormatter()
class StringTruncateWarning(UserWarning):
"""
Warning class for when a string column is assigned a value
that gets truncated because the base (numpy) string length
is too short.
This does not inherit from AstropyWarning because we want to use
stacklevel=2 to show the user where the issue occurred in their code.
"""
pass
# Always emit this warning, not just the first instance
warnings.simplefilter('always', StringTruncateWarning)
def _auto_names(n_cols):
from . import conf
return [str(conf.auto_colname).format(i) for i in range(n_cols)]
# list of one and two-dimensional comparison functions, which sometimes return
# a Column class and sometimes a plain array. Used in __array_wrap__ to ensure
# they only return plain (masked) arrays (see #1446 and #1685)
_comparison_functions = set(
[np.greater, np.greater_equal, np.less, np.less_equal,
np.not_equal, np.equal,
np.isfinite, np.isinf, np.isnan, np.sign, np.signbit])
def col_copy(col, copy_indices=True):
"""
Mixin-safe version of Column.copy() (with copy_data=True).
Parameters
----------
col : Column or mixin column
Input column
copy_indices : bool
Copy the column ``indices`` attribute
Returns
-------
col : Copy of input column
"""
if isinstance(col, BaseColumn):
return col.copy()
# The new column should have None for the parent_table ref. If the
    # original parent_table weakref is there at the point of copying then it
# generates an infinite recursion. Instead temporarily remove the weakref
# on the original column and restore after the copy in an exception-safe
# manner.
parent_table = col.info.parent_table
indices = col.info.indices
col.info.parent_table = None
col.info.indices = []
try:
newcol = col.copy() if hasattr(col, 'copy') else deepcopy(col)
newcol.info = col.info
newcol.info.indices = deepcopy(indices or []) if copy_indices else []
for index in newcol.info.indices:
index.replace_col(col, newcol)
finally:
col.info.parent_table = parent_table
col.info.indices = indices
return newcol
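# Illustrative sketch (added example, not part of the original module): col_copy
# falls back to Column.copy() for real Columns, while mixin columns (e.g.
# Quantity, Time) go through the info-preserving branch above.
def _col_copy_example():  # hypothetical helper, never called at import time
    new = col_copy(Column([1, 2, 3], name='a', unit='m'))
    assert new.name == 'a'
    return new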
class FalseArray(np.ndarray):
"""
Boolean mask array that is always False.
This is used to create a stub ``mask`` property which is a boolean array of
``False`` used by default for mixin columns and corresponding to the mixin
column data shape. The ``mask`` looks like a normal numpy array but an
exception will be raised if ``True`` is assigned to any element. The
consequences of the limitation are most obvious in the high-level table
operations.
Parameters
----------
shape : tuple
Data shape
"""
def __new__(cls, shape):
obj = np.zeros(shape, dtype=bool).view(cls)
return obj
def __setitem__(self, item, val):
val = np.asarray(val)
if np.any(val):
raise ValueError('Cannot set any element of {0} class to True'
.format(self.__class__.__name__))
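# Illustrative sketch (added example, not part of the original module):
# FalseArray behaves as a read-only, always-False boolean mask; assigning True
# to any element raises ValueError.
def _false_array_example():  # hypothetical helper, never called at import time
    stub_mask = FalseArray((3,))
    assert not stub_mask.any()
    try:
        stub_mask[0] = True
    except ValueError:
        pass
    return stub_mask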
class ColumnInfo(BaseColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information.
"""
attrs_from_parent = BaseColumnInfo.attr_names
_supports_indexing = True
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Column instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Column (or subclass)
New instance of this class consistent with ``cols``
"""
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'unit', 'format', 'description'))
return self._parent_cls(length=length, **attrs)
class BaseColumn(_ColumnGetitemShim, np.ndarray):
meta = MetaData()
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if data is None:
dtype = (np.dtype(dtype).str, shape)
self_data = np.zeros(length, dtype=dtype)
elif isinstance(data, BaseColumn) and hasattr(data, '_name'):
# When unpickling a MaskedColumn, ``data`` will be a bare
# BaseColumn with none of the expected attributes. In this case
# do NOT execute this block which initializes from ``data``
# attributes.
self_data = np.array(data.data, dtype=dtype, copy=copy)
if description is None:
description = data.description
if unit is None:
unit = unit or data.unit
if format is None:
format = data.format
if meta is None:
meta = data.meta
if name is None:
name = data.name
elif isinstance(data, Quantity):
if unit is None:
self_data = np.array(data, dtype=dtype, copy=copy)
unit = data.unit
else:
self_data = np.array(data.to(unit), dtype=dtype, copy=copy)
if description is None:
description = data.info.description
if format is None:
format = data.info.format
if meta is None:
meta = data.info.meta
else:
if np.dtype(dtype).char == 'S':
data = cls._encode_str(data)
self_data = np.array(data, dtype=dtype, copy=copy)
self = self_data.view(cls)
self._name = fix_column_name(name)
self._parent_table = None
self.unit = unit
self._format = format
self.description = description
self.meta = meta
self.indices = deepcopy(getattr(data, 'indices', [])) if copy_indices else []
for index in self.indices:
index.replace_col(data, self)
return self
@property
def data(self):
return self.view(np.ndarray)
@property
def parent_table(self):
# Note: It seems there are some cases where _parent_table is not set,
        # such as after restoring from a pickled Column. Perhaps that should be
# fixed, but this is also okay for now.
if getattr(self, '_parent_table', None) is None:
return None
else:
return self._parent_table()
@parent_table.setter
def parent_table(self, table):
if table is None:
self._parent_table = None
else:
self._parent_table = weakref.ref(table)
info = ColumnInfo()
def copy(self, order='C', data=None, copy_data=True):
"""
Return a copy of the current instance.
If ``data`` is supplied then a view (reference) of ``data`` is used,
and ``copy_data`` is ignored.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
            as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.) Default is 'C'.
data : array, optional
If supplied then use a view of ``data`` instead of the instance
data. This allows copying the instance attributes and meta.
copy_data : bool, optional
Make a copy of the internal numpy array instead of using a
reference. Default is True.
Returns
-------
col : Column or MaskedColumn
Copy of the current column (same type as original)
"""
if data is None:
data = self.data
if copy_data:
data = data.copy(order)
out = data.view(self.__class__)
out.__array_finalize__(self)
# If there is meta on the original column then deepcopy (since "copy" of column
# implies complete independence from original). __array_finalize__ will have already
# made a light copy. I'm not sure how to avoid that initial light copy.
if self.meta is not None:
out.meta = self.meta # MetaData descriptor does a deepcopy here
# for MaskedColumn, MaskedArray.__array_finalize__ also copies mask
# from self, which is not the idea here, so undo
if isinstance(self, MaskedColumn):
out._mask = data._mask
self._copy_groups(out)
return out
def __setstate__(self, state):
"""
Restore the internal state of the Column/MaskedColumn for pickling
purposes. This requires that the last element of ``state`` is a
5-tuple that has Column-specific state values.
"""
# Get the Column attributes
names = ('_name', '_unit', '_format', 'description', 'meta', 'indices')
attrs = {name: val for name, val in zip(names, state[-1])}
state = state[:-1]
# Using super().__setstate__(state) gives
# "TypeError 'int' object is not iterable", raised in
# astropy.table._column_mixins._ColumnGetitemShim.__setstate_cython__()
# Previously, it seems to have given an infinite recursion.
# Hence, manually call the right super class to actually set up
# the array object.
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
super_class.__setstate__(self, state)
# Set the Column attributes
for name, val in attrs.items():
setattr(self, name, val)
self._parent_table = None
def __reduce__(self):
"""
Return a 3-tuple for pickling a Column. Use the super-class
functionality but then add in a 5-tuple of Column-specific values
that get used in __setstate__.
"""
super_class = ma.MaskedArray if isinstance(self, ma.MaskedArray) else np.ndarray
reconstruct_func, reconstruct_func_args, state = super_class.__reduce__(self)
# Define Column-specific attrs and meta that gets added to state.
column_state = (self.name, self.unit, self.format, self.description,
self.meta, self.indices)
state = state + (column_state,)
return reconstruct_func, reconstruct_func_args, state
def __array_finalize__(self, obj):
        # Obj will be None for a direct call to the Column() creator
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
self.parent_table = None
if not hasattr(self, 'indices'): # may have been copied in __new__
self.indices = []
self._copy_attrs(obj)
def __array_wrap__(self, out_arr, context=None):
"""
__array_wrap__ is called at the end of every ufunc.
Normally, we want a Column object back and do not have to do anything
special. But there are two exceptions:
1) If the output shape is different (e.g. for reduction ufuncs
like sum() or mean()), a Column still linking to a parent_table
makes little sense, so we return the output viewed as the
column content (ndarray or MaskedArray).
For this case, we use "[()]" to select everything, and to ensure we
convert a zero rank array to a scalar. (For some reason np.sum()
           returns a zero rank scalar array while np.mean() returns a scalar,
           so the [()] is needed for this case.)
2) When the output is created by any function that returns a boolean
we also want to consistently return an array rather than a column
(see #1446 and #1685)
"""
out_arr = super().__array_wrap__(out_arr, context)
if (self.shape != out_arr.shape or
(isinstance(out_arr, BaseColumn) and
(context is not None and context[0] in _comparison_functions))):
return out_arr.data[()]
else:
return out_arr
@property
def name(self):
"""
The name of this column.
"""
return self._name
@name.setter
def name(self, val):
val = fix_column_name(val)
if self.parent_table is not None:
table = self.parent_table
table.columns._rename_column(self.name, val)
self._name = val
@property
def format(self):
"""
Format string for displaying values in this column.
"""
return self._format
@format.setter
def format(self, format_string):
prev_format = getattr(self, '_format', None)
self._format = format_string # set new format string
try:
            # test that the format string works without error on a sample value
self.pformat(max_lines=1)
except Exception as err:
# revert to restore previous format if there was one
self._format = prev_format
raise ValueError(
"Invalid format for column '{0}': could not display "
"values in this column using this format ({1})".format(
self.name, err.args[0]))
@property
def descr(self):
"""Array-interface compliant full description of the column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
"""
return (self.name, self.dtype.str, self.shape[1:])
def iter_str_vals(self):
"""
Return an iterator that yields the string-formatted values of this
column.
Returns
-------
str_vals : iterator
Column values formatted as strings
"""
# Iterate over formatted values with no max number of lines, no column
# name, no unit, and ignoring the returned header info in outs.
_pformat_col_iter = self._formatter._pformat_col_iter
for str_val in _pformat_col_iter(self, -1, show_name=False, show_unit=False,
show_dtype=False, outs={}):
yield str_val
def attrs_equal(self, col):
"""Compare the column attributes of ``col`` to this object.
The comparison attributes are: ``name``, ``unit``, ``dtype``,
``format``, ``description``, and ``meta``.
Parameters
----------
col : Column
Comparison column
Returns
-------
equal : boolean
True if all attributes are equal
"""
if not isinstance(col, BaseColumn):
raise ValueError('Comparison `col` must be a Column or '
'MaskedColumn object')
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
equal = all(getattr(self, x) == getattr(col, x) for x in attrs)
return equal
@property
def _formatter(self):
return FORMATTER if (self.parent_table is None) else self.parent_table.formatter
def pformat(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False,
html=False):
"""Return a list of formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum lines of output (header + data rows)
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
Include column dtype. Default is False.
html : bool
Format the output as an HTML table. Default is False.
Returns
-------
lines : list
List of lines with header and formatted column values
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype,
html=html)
return lines
def pprint(self, max_lines=None, show_name=True, show_unit=False, show_dtype=False):
"""Print a formatted string representation of column values.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default will be
determined using the ``astropy.conf.max_lines`` configuration
item. If a negative value of ``max_lines`` is supplied then
there is no line limit applied.
Parameters
----------
max_lines : int
Maximum number of values in output
show_name : bool
Include column name. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
show_dtype : bool
            Include column dtype. Default is False.
"""
_pformat_col = self._formatter._pformat_col
lines, outs = _pformat_col(self, max_lines, show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype)
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def more(self, max_lines=None, show_name=True, show_unit=False):
"""Interactively browse column with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is False.
"""
_more_tabcol = self._formatter._more_tabcol
_more_tabcol(self, max_lines=max_lines, show_name=show_name,
show_unit=show_unit)
@property
def unit(self):
"""
The unit associated with this column. May be a string or a
`astropy.units.UnitBase` instance.
Setting the ``unit`` property does not change the values of the
data. To perform a unit conversion, use ``convert_unit_to``.
"""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
else:
self._unit = Unit(unit, parse_strict='silent')
@unit.deleter
def unit(self):
self._unit = None
def convert_unit_to(self, new_unit, equivalencies=[]):
"""
Converts the values of the column in-place from the current
unit to the given unit.
To change the unit associated with this column without
actually changing the data values, simply set the ``unit``
property.
Parameters
----------
new_unit : str or `astropy.units.UnitBase` instance
The unit to convert to.
equivalencies : list of equivalence pairs, optional
            A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
Raises
------
astropy.units.UnitsError
If units are inconsistent
"""
if self.unit is None:
raise ValueError("No unit set on column")
self.data[:] = self.unit.to(
new_unit, self.data, equivalencies=equivalencies)
self.unit = new_unit
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.ColumnGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this column by the specified ``keys``
This effectively splits the column into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`Column` or `MaskedColumn` which contains a copy of this column but
sorted by row according to ``keys``.
The ``keys`` input to ``group_by`` must be a numpy array with the
same length as this column.
Parameters
----------
keys : numpy array
Key grouping object
Returns
-------
out : Column
New column with groups attribute set accordingly
"""
return groups.column_group_by(self, keys)
def _copy_groups(self, out):
"""
Copy current groups into a copy of self ``out``
"""
if self.parent_table:
if hasattr(self.parent_table, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self.parent_table._groups._indices)
elif hasattr(self, '_groups'):
out._groups = groups.ColumnGroups(out, indices=self._groups._indices)
# Strip off the BaseColumn-ness for repr and str so that
# MaskedColumn.data __repr__ does not include masked_BaseColumn(data =
# [1 2], ...).
def __repr__(self):
return np.asarray(self).__repr__()
@property
def quantity(self):
"""
A view of this table column as a `~astropy.units.Quantity` object with
units given by the Column's `unit` parameter.
"""
# the Quantity initializer is used here because it correctly fails
# if the column's values are non-numeric (like strings), while .view
# will happily return a quantity with gibberish for numerical values
return Quantity(self, self.unit, copy=False, dtype=self.dtype, order='A', subok=True)
def to(self, unit, equivalencies=[], **kwargs):
"""
Converts this table column to a `~astropy.units.Quantity` object with
the requested units.
Parameters
----------
unit : `~astropy.units.Unit` or str
The unit to convert to (i.e., a valid argument to the
:meth:`astropy.units.Quantity.to` method).
equivalencies : list of equivalence pairs, optional
Equivalencies to use for this conversion. See
:meth:`astropy.units.Quantity.to` for more details.
Returns
-------
quantity : `~astropy.units.Quantity`
A quantity object with the contents of this column in the units
``unit``.
"""
return self.quantity.to(unit, equivalencies)
def _copy_attrs(self, obj):
"""
Copy key column attributes from ``obj`` to self
"""
for attr in ('name', 'unit', '_format', 'description'):
val = getattr(obj, attr, None)
setattr(self, attr, val)
# Light copy of meta if it is not empty
obj_meta = getattr(obj, 'meta', None)
if obj_meta:
self.meta = obj_meta.copy()
@staticmethod
def _encode_str(value):
"""
Encode anything that is unicode-ish as utf-8. This method is only
called for Py3+.
"""
if isinstance(value, str):
value = value.encode('utf-8')
elif isinstance(value, bytes) or value is np.ma.masked:
pass
else:
arr = np.asarray(value)
if arr.dtype.char == 'U':
arr = np.char.encode(arr, encoding='utf-8')
if isinstance(value, np.ma.MaskedArray):
arr = np.ma.array(arr, mask=value.mask, copy=False)
value = arr
return value
def tolist(self):
if self.dtype.kind == 'S':
return np.chararray.decode(self, encoding='utf-8').tolist()
else:
return super().tolist()
class Column(BaseColumn):
"""Define a data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A Column can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = Column(data=[1, 2], name='name') # shape=(2,)
col = Column(data=[[1, 2], [3, 4]], name='name') # shape=(2, 2)
col = Column(data=[1, 2], name='name', dtype=float)
col = Column(data=np.array([1, 2]), name='name')
col = Column(data=['hello', 'world'], name='name')
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = Column(name='name', length=5)
col = Column(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
def __new__(cls, data=None, name=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if isinstance(data, MaskedColumn) and np.any(data.mask):
raise TypeError("Cannot convert a MaskedColumn with masked value to a Column")
self = super().__new__(
cls, data=data, name=name, dtype=dtype, shape=shape, length=length,
description=description, unit=unit, format=format, meta=meta,
copy=copy, copy_indices=copy_indices)
return self
def __setattr__(self, item, value):
if not isinstance(self, MaskedColumn) and item == "mask":
raise AttributeError("cannot set mask value to a column in non-masked Table")
super().__setattr__(item, value)
if item == 'unit' and issubclass(self.dtype.type, np.number):
try:
converted = self.parent_table._convert_col_for_table(self)
except AttributeError: # Either no parent table or parent table is None
pass
else:
if converted is not self:
self.parent_table.replace_column(self.name, converted)
def _base_repr_(self, html=False):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return repr(self.item())
descr_vals = [self.__class__.__name__]
unit = None if self.unit is None else str(self.unit)
shape = None if self.ndim <= 1 else self.shape[1:]
for attr, val in (('name', self.name),
('dtype', dtype_info_name(self.dtype)),
('shape', shape),
('unit', unit),
('format', self.format),
('description', self.description),
('length', len(self))):
if val is not None:
descr_vals.append('{0}={1!r}'.format(attr, val))
descr = '<' + ' '.join(descr_vals) + '>\n'
if html:
from astropy.utils.xml.writer import xml_escape
descr = xml_escape(descr)
data_lines, outs = self._formatter._pformat_col(
self, show_name=False, show_unit=False, show_length=False, html=html)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True)
def __repr__(self):
return self._base_repr_(html=False)
def __str__(self):
# If scalar then just convert to correct numpy type and use numpy repr
if self.ndim == 0:
return str(self.item())
lines, outs = self._formatter._pformat_col(self)
return '\n'.join(lines)
def __bytes__(self):
return str(self).encode('utf-8')
def _check_string_truncate(self, value):
"""
Emit a warning if any elements of ``value`` will be truncated when
``value`` is assigned to self.
"""
# Convert input ``value`` to the string dtype of this column and
# find the length of the longest string in the array.
value = np.asanyarray(value, dtype=self.dtype.type)
if value.size == 0:
return
value_str_len = np.char.str_len(value).max()
# Parse the array-protocol typestring (e.g. '|U15') of self.dtype which
# has the character repeat count on the right side.
self_str_len = dtype_bytes_or_chars(self.dtype)
if value_str_len > self_str_len:
warnings.warn('truncated right side string(s) longer than {} '
'character(s) during assignment'
.format(self_str_len),
StringTruncateWarning,
stacklevel=3)
def __setitem__(self, index, value):
if self.dtype.char == 'S':
value = self._encode_str(value)
# Issue warning for string assignment that truncates ``value``
if issubclass(self.dtype.type, np.character):
self._check_string_truncate(value)
# update indices
self.info.adjust_indices(index, value, len(self))
# Set items using a view of the underlying data, as it gives an
# order-of-magnitude speed-up. [#2994]
self.data[index] = value
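# Hedged illustration of the truncation warning path in ``__setitem__`` (the
# column and assigned value are hypothetical):
#
#   >>> from astropy.table import Column
#   >>> c = Column(['ab', 'cd'], name='s')   # dtype holds at most 2 characters
#   >>> c[0] = 'xyz'                         # emits StringTruncateWarning ...
#   >>> c[0]                                 # ... and stores the truncated value
#   'xy'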
def _make_compare(oper):
"""
Make comparison methods which encode the ``other`` object to utf-8
in the case of a bytestring dtype for Py3+.
"""
swapped_oper = {'__eq__': '__eq__',
'__ne__': '__ne__',
'__gt__': '__lt__',
'__lt__': '__gt__',
'__ge__': '__le__',
'__le__': '__ge__'}[oper]
def _compare(self, other):
op = oper # copy enclosed ref to allow swap below
# Special case to work around #6838. Other combinations work OK,
# see tests.test_column.test_unicode_sandwich_compare(). In this
# case just swap self and other.
#
# This is related to an issue in numpy that was addressed in np 1.13.
# However that fix does not make this problem go away, but maybe
# future numpy versions will do so. NUMPY_LT_1_13 to get the
# attention of future maintainers to check (by deleting or versioning
# the if block below). See #6899 discussion.
if (isinstance(self, MaskedColumn) and self.dtype.kind == 'U' and
isinstance(other, MaskedColumn) and other.dtype.kind == 'S'):
self, other = other, self
op = swapped_oper
if self.dtype.char == 'S':
other = self._encode_str(other)
return getattr(self.data, op)(other)
return _compare
__eq__ = _make_compare('__eq__')
__ne__ = _make_compare('__ne__')
__gt__ = _make_compare('__gt__')
__lt__ = _make_compare('__lt__')
__ge__ = _make_compare('__ge__')
__le__ = _make_compare('__le__')
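# Hedged example of the bytes/str comparison behaviour provided by the methods
# above (column values are made up):
#
#   >>> from astropy.table import Column
#   >>> c = Column([b'a', b'b'], name='x')
#   >>> c == 'a'                             # str operand is encoded to utf-8 first
#   array([ True, False])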
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.table.Column` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
from that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.Column`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new column is returned.
"""
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
data = np.insert(self, obj, None, axis=axis)
data[obj] = values
else:
# Explicitly convert to dtype of this column. Needed because numpy 1.7
# enforces safe casting by default, so inserting a value with a different
# dtype could otherwise fail. This isn't the case for 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
data = np.insert(self, obj, values, axis=axis)
out = data.view(self.__class__)
out.__array_finalize__(self)
return out
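# Hedged usage sketch for ``insert`` (values are illustrative only):
#
#   >>> from astropy.table import Column
#   >>> c = Column([1, 2, 3], name='a')
#   >>> c.insert(1, 10).tolist()             # original column is left untouched
#   [1, 10, 2, 3]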
# We do this to make the methods show up in the API docs
name = BaseColumn.name
unit = BaseColumn.unit
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
quantity = BaseColumn.quantity
to = BaseColumn.to
class MaskedColumnInfo(ColumnInfo):
"""
Container for meta information like name, description, format.
This is required when the object is used as a mixin column within a table,
but can be used as a general way to store meta information. In this case
it just adds the ``mask_val`` attribute.
"""
# Add `serialize_method` attribute to the attrs that MaskedColumnInfo knows
# about. This allows customization of the way that MaskedColumn objects
# get written to file depending on format. The default is to use whatever
# the writer would normally do, which in the case of FITS or ECSV is to use
# a NULL value within the data itself. If serialize_method is 'data_mask'
# then the mask is explicitly written out as a separate column if there
# are any masked values. See also code below.
attr_names = ColumnInfo.attr_names | {'serialize_method'}
# When `serialize_method` is 'data_mask', and data and mask are being written
# as separate columns, use column names <name> and <name>.mask (instead
# of default encoding as <name>.data and <name>.mask).
_represent_as_dict_primary_data = 'data'
mask_val = np.ma.masked
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
self.serialize_method = {'fits': 'null_value',
'ecsv': 'null_value',
'hdf5': 'data_mask',
None: 'null_value'}
def _represent_as_dict(self):
out = super()._represent_as_dict()
col = self._parent
# If the serialize method for this context (e.g. 'fits' or 'ecsv') is
# 'data_mask', that means to serialize using an explicit mask column.
method = self.serialize_method[self._serialize_context]
if method == 'data_mask':
# Note that adding to _represent_as_dict_attrs triggers later code which
# will add this to the '__serialized_columns__' meta YAML dict.
# Note also one driver here is a performance issue in #8443 where repr() of a
# np.ma.MaskedArray value is up to 10 times slower than repr of a normal array
# value. So regardless of whether there are masked elements it is useful to
# explicitly define this as a serialized column and use col.data.data (ndarray)
# instead of letting it fall through to the "standard" serialization machinery.
out['data'] = col.data.data
self._represent_as_dict_attrs += ('data',)
if np.any(col.mask):
# Only if there are actually masked elements do we add the ``mask`` column
out['mask'] = col.mask
self._represent_as_dict_attrs += ('mask',)
elif method == 'null_value':
pass
else:
raise ValueError('serialize method must be either "data_mask" or "null_value"')
return out
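# Hedged sketch of switching ``serialize_method`` for one output format so the
# mask is written as a separate column (the file name is hypothetical):
#
#   >>> from astropy.table import Table, MaskedColumn
#   >>> t = Table([MaskedColumn([1, 2], name='a', mask=[True, False])])
#   >>> t['a'].info.serialize_method['ecsv'] = 'data_mask'
#   >>> t.write('data.ecsv', format='ascii.ecsv')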
class MaskedColumn(Column, _MaskedColumnGetitemShim, ma.MaskedArray):
"""Define a masked data column for use in a Table object.
Parameters
----------
data : list, ndarray or None
Column data values
name : str
Column name and key for reference within Table
mask : list, ndarray or None
Boolean mask for which True indicates missing or invalid data
fill_value : float, int, str or None
Value used when filling masked column elements
dtype : numpy.dtype compatible value
Data type for column
shape : tuple or ()
Dimensions of a single row element in the column data
length : int or 0
Number of row elements in column data
description : str or None
Full description of column
unit : str or None
Physical unit
format : str or None or function or callable
Format string for outputting column values. This can be an
"old-style" (``format % value``) or "new-style" (`str.format`)
format specification string or a function or any callable object that
accepts a single value and returns a string.
meta : dict-like or None
Meta-data associated with the column
Examples
--------
A MaskedColumn is similar to a Column except that it includes ``mask`` and
``fill_value`` attributes. It can be created in two different ways:
- Provide a ``data`` value but not ``shape`` or ``length`` (which are
inferred from the data).
Examples::
col = MaskedColumn(data=[1, 2], name='name')
col = MaskedColumn(data=[1, 2], name='name', mask=[True, False])
col = MaskedColumn(data=[1, 2], name='name', dtype=float, fill_value=99)
The ``mask`` argument will be cast as a boolean array and specifies
which elements are considered to be missing or invalid.
The ``dtype`` argument can be any value which is an acceptable
fixed-size data-type initializer for the numpy.dtype() method. See
`<https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_.
Examples include:
- Python non-string type (float, int, bool)
- Numpy non-string type (e.g. np.float32, np.int64, np.bool\\_)
- Numpy.dtype array-protocol type strings (e.g. 'i4', 'f8', 'S15')
If no ``dtype`` value is provided then the type is inferred using
``np.array(data)``. When ``data`` is provided then the ``shape``
and ``length`` arguments are ignored.
- Provide ``length`` and optionally ``shape``, but not ``data``
Examples::
col = MaskedColumn(name='name', length=5)
col = MaskedColumn(name='name', dtype=int, length=10, shape=(3,4))
The default ``dtype`` is ``np.float64``. The ``shape`` argument is the
array shape of a single cell in the column.
"""
info = MaskedColumnInfo()
def __new__(cls, data=None, name=None, mask=None, fill_value=None,
dtype=None, shape=(), length=0,
description=None, unit=None, format=None, meta=None,
copy=False, copy_indices=True):
if mask is None:
# If mask is None then we need to determine the mask (if any) from the data.
# The naive method is looking for a mask attribute on data, but this can fail,
# see #8816. Instead use ``MaskedArray`` to do the work.
mask = ma.MaskedArray(data).mask
if mask is np.ma.nomask:
# Handle odd-ball issue with np.ma.nomask (numpy #13758), and see below.
mask = False
elif copy:
mask = mask.copy()
elif mask is np.ma.nomask:
# Force the creation of a full mask array as nomask is tricky to
# use and will fail in an unexpected manner when setting a value
# to the mask.
mask = False
else:
mask = deepcopy(mask)
# Create self using MaskedArray as a wrapper class, following the example of
# class MSubArray in
# https://github.com/numpy/numpy/blob/maintenance/1.8.x/numpy/ma/tests/test_subclassing.py
# This pattern makes it so that __array_finalize__ is called as expected (e.g. #1471 and
# https://github.com/astropy/astropy/commit/ff6039e8)
# First just pass through all args and kwargs to BaseColumn, then wrap that object
# with MaskedArray.
self_data = BaseColumn(data, dtype=dtype, shape=shape, length=length, name=name,
unit=unit, format=format, description=description,
meta=meta, copy=copy, copy_indices=copy_indices)
self = ma.MaskedArray.__new__(cls, data=self_data, mask=mask)
# Note: do not set fill_value in the MaskedArray constructor because this does not
# go through the fill_value workarounds.
if fill_value is None and getattr(data, 'fill_value', None) is not None:
# Coerce the fill_value to the correct type since `data` may be a
# different dtype than self.
fill_value = self.dtype.type(data.fill_value)
self.fill_value = fill_value
self.parent_table = None
# needs to be done here since self doesn't come from BaseColumn.__new__
for index in self.indices:
index.replace_col(self_data, self)
return self
@property
def fill_value(self):
return self.get_fill_value() # defer to native ma.MaskedArray method
@fill_value.setter
def fill_value(self, val):
"""Set fill value both in the masked column view and in the parent table
if it exists. Setting one or the other alone doesn't work."""
# another ma bug workaround: If the value of fill_value for a string array is
# requested but not yet set then it gets created as 'N/A'. From this point onward
# any new fill_values are truncated to 3 characters. Note that this does not
# occur if the masked array is a structured array (as in the previous block that
# deals with the parent table).
#
# >>> x = ma.array(['xxxx'])
# >>> x.fill_value # fill_value now gets represented as an 'S3' array
# 'N/A'
# >>> x.fill_value='yyyy'
# >>> x.fill_value
# 'yyy'
#
# To handle this we are forced to reset a private variable first:
self._fill_value = None
self.set_fill_value(val) # defer to native ma.MaskedArray method
@property
def data(self):
out = self.view(ma.MaskedArray)
# The following is necessary because of a bug in Numpy, which was
# fixed in numpy/numpy#2703. The fix should be included in Numpy 1.8.0.
out.fill_value = self.fill_value
return out
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled with a given value.
Parameters
----------
fill_value : scalar, optional
The value to use for invalid entries (`None` by default). If
`None`, the ``fill_value`` attribute of the array is used
instead.
Returns
-------
filled_column : Column
A copy of ``self`` with masked entries replaced by `fill_value`
(be it the function argument or the attribute of ``self``).
"""
if fill_value is None:
fill_value = self.fill_value
data = super().filled(fill_value)
# Use parent table definition of Column if available
column_cls = self.parent_table.Column if (self.parent_table is not None) else Column
out = column_cls(name=self.name, data=data, unit=self.unit,
format=self.format, description=self.description,
meta=deepcopy(self.meta))
return out
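# Hedged example of ``filled`` (values and fill_value are illustrative):
#
#   >>> from astropy.table import MaskedColumn
#   >>> mc = MaskedColumn([1, 2, 3], name='a', mask=[False, True, False],
#   ...                   fill_value=-1)
#   >>> mc.filled().tolist()                 # returns a plain Column
#   [1, -1, 3]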
def insert(self, obj, values, mask=None, axis=0):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.table.MaskedColumn` object.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array_like
Value(s) to insert. If the type of ``values`` is different
from that of the column, ``values`` is converted to the matching type.
``values`` should be shaped so that it can be broadcast appropriately.
mask : boolean array_like
Mask value(s) to insert. If not supplied then False is used.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the column array is flattened before insertion. Default is 0,
which will insert a row.
Returns
-------
out : `~astropy.table.MaskedColumn`
A copy of column with ``values`` and ``mask`` inserted. Note that the
insertion does not occur in-place: a new masked column is returned.
"""
self_ma = self.data # self viewed as MaskedArray
if self.dtype.kind == 'O':
# Even if values is array-like (e.g. [1,2,3]), insert as a single
# object. Numpy.insert instead inserts each element in an array-like
# input individually.
new_data = np.insert(self_ma.data, obj, None, axis=axis)
new_data[obj] = values
else:
# Explicitly convert to dtype of this column. Needed because numpy 1.7
# enforces safe casting by default, so inserting a value with a different
# dtype could otherwise fail. This isn't the case for 1.6 or 1.8+.
values = np.asarray(values, dtype=self.dtype)
new_data = np.insert(self_ma.data, obj, values, axis=axis)
if mask is None:
if self.dtype.kind == 'O':
mask = False
else:
mask = np.zeros(values.shape, dtype=bool)
new_mask = np.insert(self_ma.mask, obj, mask, axis=axis)
new_ma = np.ma.array(new_data, mask=new_mask, copy=False)
out = new_ma.view(self.__class__)
out.parent_table = None
out.indices = []
out._copy_attrs(self)
out.fill_value = self.fill_value
return out
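# Hedged usage sketch for the masked ``insert`` (values are illustrative):
#
#   >>> from astropy.table import MaskedColumn
#   >>> mc = MaskedColumn([1, 2], name='a', mask=[False, True])
#   >>> mc.insert(1, 10, mask=True)          # data [1, 10, 2], mask [F, T, T]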
def _copy_attrs_slice(self, out):
# Fixes issue #3023: when calling getitem with a MaskedArray subclass
# the original object attributes are not copied.
if out.__class__ is self.__class__:
out.parent_table = None
# we need this because __getitem__ does a shallow copy of indices
if out.indices is self.indices:
out.indices = []
out._copy_attrs(self)
return out
def __setitem__(self, index, value):
# Issue warning for string assignment that truncates ``value``
if self.dtype.char == 'S':
value = self._encode_str(value)
if issubclass(self.dtype.type, np.character):
# Account for a bug in np.ma.MaskedArray setitem.
# https://github.com/numpy/numpy/issues/8624
value = np.ma.asanyarray(value, dtype=self.dtype.type)
# Check for string truncation after filling masked items with
# empty (zero-length) string. Note that filled() does not make
# a copy if there are no masked items.
self._check_string_truncate(value.filled(''))
# update indices
self.info.adjust_indices(index, value, len(self))
# Remove this when Numpy no longer emits this warning and that
# Numpy version becomes the minimum required version for Astropy.
# https://github.com/astropy/astropy/issues/6285
if MaskedArrayFutureWarning is None:
ma.MaskedArray.__setitem__(self, index, value)
else:
with warnings.catch_warnings():
warnings.simplefilter('ignore', MaskedArrayFutureWarning)
ma.MaskedArray.__setitem__(self, index, value)
# We do this to make the methods show up in the API docs
name = BaseColumn.name
copy = BaseColumn.copy
more = BaseColumn.more
pprint = BaseColumn.pprint
pformat = BaseColumn.pformat
convert_unit_to = BaseColumn.convert_unit_to
|
f04bd37a1aa767986ee1267951a693822967b245a4c81a2835237378a114667e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines base classes for all models. The base class of all
models is `~astropy.modeling.Model`. `~astropy.modeling.FittableModel` is
the base class for all fittable models. Fittable models can be linear or
nonlinear in a regression analysis sense.
All models provide a `__call__` method which performs the transformation in
a purely mathematical way, i.e. the models are unitless. Model instances can
represent either a single model, or a "model set" representing multiple copies
of the same type of model, but with potentially different values of the
parameters in each model making up the set.
"""
import abc
import copy
import copyreg
import inspect
import functools
import operator
import types
import warnings
from collections import defaultdict, OrderedDict
from contextlib import suppress
from inspect import signature
from itertools import chain, islice
import numpy as np
from astropy.utils import indent, metadata
from astropy.table import Table
from astropy.units import Quantity, UnitsError, dimensionless_unscaled
from astropy.units.utils import quantity_asanyarray
from astropy.utils import (sharedmethod, find_current_module,
InheritDocstrings, OrderedDescriptorContainer,
check_broadcast, IncompatibleShapeError, isiterable)
from astropy.utils.codegen import make_function_with_signature
from astropy.utils.exceptions import AstropyDeprecationWarning
from .utils import (combine_labels, make_binary_operator_eval,
ExpressionTree, AliasDict, get_inputs_and_params,
_BoundingBox, _combine_equivalency_dict)
from astropy.nddata.utils import add_array, extract_array
from .parameters import Parameter, InputParameterError, param_repr_oneline
__all__ = ['Model', 'FittableModel', 'Fittable1DModel', 'Fittable2DModel',
'custom_model', 'ModelDefinitionError']
class ModelDefinitionError(TypeError):
"""Used for incorrect models definitions"""
def _model_oper(oper, **kwargs):
"""
Returns a function that evaluates a given Python arithmetic operator
between two models. The operator should be given as a string, like ``'+'``
or ``'**'``.
Any additional keyword arguments passed in are passed to
`_CompoundModelMeta._from_operator`.
"""
# Note: Originally this used functools.partial, but that won't work when
# used in the class definition of _CompoundModelMeta since
# _CompoundModelMeta has not been defined yet.
def _opfunc(left, right):
# Deprecation is for https://github.com/astropy/astropy/issues/8234
if not (isinstance(left, Model) and isinstance(right, Model)):
warnings.warn(
'Composition of model classes will be removed in 4.0 '
'(but composition of model instances is not affected)',
AstropyDeprecationWarning)
# Perform an arithmetic operation on two models.
return _CompoundModelMeta._from_operator(oper, left, right, **kwargs)
return _opfunc
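# Hedged illustration of the operator-based compound-model construction that
# this helper enables (the component models below are arbitrary choices):
#
#   >>> from astropy.modeling.models import Const1D, Gaussian1D, Shift, Scale
#   >>> m = Const1D(3) + Gaussian1D(amplitude=1, mean=0, stddev=1)
#   >>> m(0)
#   4.0
#   >>> pipeline = Shift(1) | Scale(2)       # '|' chains models: x -> 2 * (x + 1)
#   >>> pipeline(3)
#   8.0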
class _ModelMeta(OrderedDescriptorContainer, InheritDocstrings, abc.ABCMeta):
"""
Metaclass for Model.
Currently just handles auto-generating the param_names list based on
Parameter descriptors declared at the class-level of Model subclasses.
"""
_is_dynamic = False
"""
This flag signifies whether this class was created in the "normal" way,
with a class statement in the body of a module, as opposed to a call to
`type` or some other metaclass constructor, such that the resulting class
does not belong to a specific module. This is important for pickling of
dynamic classes.
This flag is always forced to False for new classes, so code that creates
dynamic classes should manually set it to True on those classes when
creating them.
"""
# Default empty dict for _parameters_, which will be empty on model
# classes that don't have any Parameters
_parameters_ = OrderedDict()
def __new__(mcls, name, bases, members):
# See the docstring for _is_dynamic above
if '_is_dynamic' not in members:
members['_is_dynamic'] = mcls._is_dynamic
return super().__new__(mcls, name, bases, members)
def __init__(cls, name, bases, members):
# Make sure OrderedDescriptorContainer gets to run before doing
# anything else
super().__init__(name, bases, members)
if cls._parameters_:
if hasattr(cls, '_param_names'):
# Slight kludge to support compound models, where
# cls.param_names is a property; could be improved with a
# little refactoring but fine for now
cls._param_names = tuple(cls._parameters_)
else:
cls.param_names = tuple(cls._parameters_)
cls._create_inverse_property(members)
cls._create_bounding_box_property(members)
cls._handle_special_methods(members)
def __repr__(cls):
"""
Custom repr for Model subclasses.
"""
return cls._format_cls_repr()
def _repr_pretty_(cls, p, cycle):
"""
Repr for IPython's pretty printer.
By default IPython "pretty prints" classes, so we need to implement
this so that IPython displays the custom repr for Models.
"""
p.text(repr(cls))
def __reduce__(cls):
if not cls._is_dynamic:
# Just return a string specifying where the class can be imported
# from
return cls.__name__
else:
members = dict(cls.__dict__)
# Delete any ABC-related attributes--these will be restored when
# the class is reconstructed:
for key in list(members):
if key.startswith('_abc_'):
del members[key]
# Delete custom __init__ and __call__ if they exist:
for key in ('__init__', '__call__'):
if key in members:
del members[key]
return (type(cls), (cls.__name__, cls.__bases__, members))
@property
def name(cls):
"""
The name of this model class--equivalent to ``cls.__name__``.
This attribute is provided for symmetry with the `Model.name` attribute
of model instances.
"""
return cls.__name__
@property
def n_inputs(cls):
return len(cls.inputs)
@property
def n_outputs(cls):
return len(cls.outputs)
@property
def _is_concrete(cls):
"""
A class-level property that determines whether the class is a concrete
implementation of a Model--i.e. it is not some abstract base class or
internal implementation detail (i.e. begins with '_').
"""
return not (cls.__name__.startswith('_') or inspect.isabstract(cls))
def rename(cls, name):
"""
Creates a copy of this model class with a new name.
The new class is technically a subclass of the original class, so that
instance and type checks will still work. For example::
>>> from astropy.modeling.models import Rotation2D
>>> SkyRotation = Rotation2D.rename('SkyRotation')
>>> SkyRotation
<class '__main__.SkyRotation'>
Name: SkyRotation (Rotation2D)
Inputs: ('x', 'y')
Outputs: ('x', 'y')
Fittable parameters: ('angle',)
>>> issubclass(SkyRotation, Rotation2D)
True
>>> r = SkyRotation(90)
>>> isinstance(r, Rotation2D)
True
"""
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
new_cls = type(name, (cls,), {})
new_cls.__module__ = modname
if hasattr(cls, '__qualname__'):
if new_cls.__module__ == '__main__':
# __main__ is not added to a class's qualified name
new_cls.__qualname__ = name
else:
new_cls.__qualname__ = '{0}.{1}'.format(modname, name)
return new_cls
def _create_inverse_property(cls, members):
inverse = members.get('inverse')
if inverse is None or cls.__bases__[0] is object:
# The latter clause is to prevent the code below from running on
# the Model base class, which implements the default getter and
# setter for .inverse
return
if isinstance(inverse, property):
# We allow the @property decorator to be omitted entirely from
# the class definition, though its use should be encouraged for
# clarity
inverse = inverse.fget
# Store the inverse getter internally, then delete the given .inverse
# attribute so that cls.inverse resolves to Model.inverse instead
cls._inverse = inverse
del cls.inverse
def _create_bounding_box_property(cls, members):
"""
Takes any bounding_box defined on a concrete Model subclass (either
as a fixed tuple or a property or method) and wraps it in the generic
getter/setter interface for the bounding_box attribute.
"""
# TODO: Much of this is verbatim from _create_inverse_property--I feel
# like there could be a way to generify properties that work this way,
# but for the time being that would probably only confuse things more.
bounding_box = members.get('bounding_box')
if bounding_box is None or cls.__bases__[0] is object:
return
if isinstance(bounding_box, property):
bounding_box = bounding_box.fget
if not callable(bounding_box):
# See if it's a hard-coded bounding_box (as a sequence) and
# normalize it
try:
bounding_box = _BoundingBox.validate(cls, bounding_box)
except ValueError as exc:
raise ModelDefinitionError(exc.args[0])
else:
sig = signature(bounding_box)
# May be a method that only takes 'self' as an argument (like a
# property, but the @property decorator was forgotten)
# TODO: Maybe warn in the above case?
#
# However, if the method takes additional arguments then this is a
# parameterized bounding box and should be callable
if len(sig.parameters) > 1:
bounding_box = \
cls._create_bounding_box_subclass(bounding_box, sig)
# See the Model.bounding_box getter definition for how this attribute
# is used
cls._bounding_box = bounding_box
del cls.bounding_box
def _create_bounding_box_subclass(cls, func, sig):
"""
For Models that take optional arguments for defining their bounding
box, we create a subclass of _BoundingBox with a ``__call__`` method
that supports those additional arguments.
Takes the function's Signature as an argument since that is already
computed in _create_bounding_box_property, so no need to duplicate that
effort.
"""
# TODO: Might be convenient if calling the bounding box also
# automatically sets the _user_bounding_box. So that
#
# >>> model.bounding_box(arg=1)
#
# in addition to returning the computed bbox, also sets it, so that
# it's a shortcut for
#
# >>> model.bounding_box = model.bounding_box(arg=1)
#
# Not sure if that would be non-obvious / confusing though...
def __call__(self, **kwargs):
return func(self._model, **kwargs)
kwargs = []
for idx, param in enumerate(sig.parameters.values()):
if idx == 0:
# Presumed to be a 'self' argument
continue
if param.default is param.empty:
raise ModelDefinitionError(
'The bounding_box method for {0} is not correctly '
'defined: If defined as a method all arguments to that '
'method (besides self) must be keyword arguments with '
'default values that can be used to compute a default '
'bounding box.'.format(cls.name))
kwargs.append((param.name, param.default))
__call__.__signature__ = sig
return type(str('_{0}BoundingBox'.format(cls.name)), (_BoundingBox,),
{'__call__': __call__})
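# Hedged sketch of the kind of model definition this machinery supports: a
# ``bounding_box`` method whose extra arguments all have defaults. The class
# and parameter names here are hypothetical:
#
#   class TopHat1D(Fittable1DModel):
#       center = Parameter(default=0)
#       width = Parameter(default=1)
#
#       @staticmethod
#       def evaluate(x, center, width):
#           return np.where(np.abs(x - center) < width / 2, 1.0, 0.0)
#
#       def bounding_box(self, factor=1.5):
#           return (self.center - factor * self.width,
#                   self.center + factor * self.width)
#
#   # model.bounding_box()          -> limits with the default ``factor``
#   # model.bounding_box(factor=3)  -> recomputed via the generated subclass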
def _handle_special_methods(cls, members):
# Handle init creation from inputs
def update_wrapper(wrapper, cls):
# Set up the new __call__'s metadata attributes as though it were
# manually defined in the class definition
# A bit like functools.update_wrapper but uses the class instead of
# the wrapped function
wrapper.__module__ = cls.__module__
wrapper.__doc__ = getattr(cls, wrapper.__name__).__doc__
if hasattr(cls, '__qualname__'):
wrapper.__qualname__ = '{0}.{1}'.format(
cls.__qualname__, wrapper.__name__)
if ('__call__' not in members and 'inputs' in members and
isinstance(members['inputs'], tuple)):
# Don't create a custom __call__ for classes that already have one
# explicitly defined (this includes the Model base class, and any
# other classes that manually override __call__)
def __call__(self, *inputs, **kwargs):
"""Evaluate this model on the supplied inputs."""
return super(cls, self).__call__(*inputs, **kwargs)
# When called, models can take two optional keyword arguments:
#
# * model_set_axis, which indicates (for multi-dimensional input)
# which axis is used to indicate different models
#
# * equivalencies, a dictionary of equivalencies to be applied to
# the input values, where each key should correspond to one of
# the inputs.
#
# The following code creates the __call__ function with these
# two keyword arguments.
inputs = members['inputs']
args = ('self',) + inputs
new_call = make_function_with_signature(
__call__, args, [('model_set_axis', None),
('with_bounding_box', False),
('fill_value', np.nan),
('equivalencies', None)])
# The following makes it look like __call__ was defined in the class
update_wrapper(new_call, cls)
cls.__call__ = new_call
if ('__init__' not in members and not inspect.isabstract(cls) and
cls._parameters_):
# If *all* the parameters have default values we can make them
# keyword arguments; otherwise they must all be positional arguments
if all(p.default is not None for p in cls._parameters_.values()):
args = ('self',)
kwargs = []
for param_name in cls.param_names:
default = cls._parameters_[param_name].default
unit = cls._parameters_[param_name].unit
# If the unit was specified in the parameter but the default
# is not a Quantity, attach the unit to the default.
if unit is not None:
default = Quantity(default, unit, copy=False)
kwargs.append((param_name, default))
else:
args = ('self',) + cls.param_names
kwargs = {}
def __init__(self, *params, **kwargs):
return super(cls, self).__init__(*params, **kwargs)
new_init = make_function_with_signature(
__init__, args, kwargs, varkwargs='kwargs')
update_wrapper(new_init, cls)
cls.__init__ = new_init
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
# *** Other utilities ***
def _format_cls_repr(cls, keywords=[]):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# For the sake of familiarity start the output with the standard class
# __repr__
parts = [super().__repr__()]
if not cls._is_concrete:
return parts[0]
def format_inheritance(cls):
bases = []
for base in cls.mro()[1:]:
if not issubclass(base, Model):
continue
elif (inspect.isabstract(base) or
base.__name__.startswith('_')):
break
bases.append(base.name)
if bases:
return '{0} ({1})'.format(cls.name, ' -> '.join(bases))
else:
return cls.name
try:
default_keywords = [
('Name', format_inheritance(cls)),
('Inputs', cls.inputs),
('Outputs', cls.outputs),
]
if cls.param_names:
default_keywords.append(('Fittable parameters',
cls.param_names))
for keyword, value in default_keywords + keywords:
if value is not None:
parts.append('{0}: {1}'.format(keyword, value))
return '\n'.join(parts)
except Exception:
# If any of the above formatting fails fall back on the basic repr
# (this is particularly useful in debugging)
return parts[0]
class Model(metaclass=_ModelMeta):
"""
Base class for all models.
This is an abstract class and should not be instantiated directly.
This class sets the constraints and other properties for all individual
parameters and performs parameter validation.
The following initialization arguments apply to the majority of Model
subclasses by default (exceptions include specialized utility models
like `~astropy.modeling.mappings.Mapping`). Parametric models take all
their parameters as arguments, followed by any of the following optional
keyword arguments:
Parameters
----------
name : str, optional
A human-friendly name associated with this model instance
(particularly useful for identifying the individual components of a
compound model).
meta : dict, optional
An optional dict of user-defined metadata to attach to this model.
How this is used and interpreted is up to the user or individual use
case.
n_models : int, optional
If given an integer greater than 1, a *model set* is instantiated
instead of a single model. This affects how the parameter arguments
are interpreted. In this case each parameter must be given as a list
or array--elements of this array are taken along the first axis (or
``model_set_axis`` if specified), such that the Nth element is the
value of that parameter for the Nth model in the set.
See the section on model sets in the documentation for more details.
model_set_axis : int, optional
This argument only applies when creating a model set (i.e. ``n_models >
1``). It changes how parameter values are interpreted. Normally the
first axis of each input parameter array (properly the 0th axis) is
taken as the axis corresponding to the model sets. However, any axis
of an input array may be taken as this "model set axis". This accepts
negative integers as well--for example use ``model_set_axis=-1`` if the
last (most rapidly changing) axis should be associated with the model
sets. Also, ``model_set_axis=False`` can be used to indicate that a given
input should be used to evaluate all the models in the model set.
fixed : dict, optional
Dictionary ``{parameter_name: bool}`` setting the fixed constraint
for one or more parameters. `True` means the parameter is held fixed
during fitting and is prevented from updates once an instance of the
model has been created.
Alternatively the `~astropy.modeling.Parameter.fixed` property of a
parameter may be used to lock or unlock individual parameters.
tied : dict, optional
Dictionary ``{parameter_name: callable}`` of parameters which are
linked to some other parameter. The dictionary values are callables
providing the linking relationship.
Alternatively the `~astropy.modeling.Parameter.tied` property of a
parameter may be used to set the ``tied`` constraint on individual
parameters.
bounds : dict, optional
A dictionary ``{parameter_name: value}`` of lower and upper bounds of
parameters. Keys are parameter names. Values are a list or a tuple
of length 2 giving the desired range for the parameter.
Alternatively the `~astropy.modeling.Parameter.min` and
`~astropy.modeling.Parameter.max` or
`~astropy.modeling.Parameter.bounds` properties of a parameter may be
used to set bounds on individual parameters.
eqcons : list, optional
List of functions of length n such that ``eqcons[j](x0, *args) == 0.0``
in a successfully optimized problem.
ineqcons : list, optional
List of functions of length n such that ``ieqcons[j](x0, *args) >=
0.0`` in a successfully optimized problem.
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that ``'mean'`` is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
"""
parameter_constraints = Parameter.constraints
"""
Primarily for informational purposes, these are the types of constraints
that can be set on a model's parameters.
"""
model_constraints = ('eqcons', 'ineqcons')
"""
Primarily for informational purposes, these are the types of constraints
that constrain model evaluation.
"""
param_names = ()
"""
Names of the parameters that describe models of this type.
The parameters in this tuple are in the same order they should be passed in
when initializing a model of a specific type. Some types of models, such
as polynomial models, have a different number of parameters depending on
some other property of the model, such as the degree.
When defining a custom model class the value of this attribute is
automatically set by the `~astropy.modeling.Parameter` attributes defined
in the class body.
"""
inputs = ()
"""The name(s) of the input variable(s) on which a model is evaluated."""
outputs = ()
"""The name(s) of the output(s) of the model."""
standard_broadcasting = True
fittable = False
linear = True
_separable = None
""" A boolean flag to indicate whether a model is separable."""
meta = metadata.MetaData()
"""A dict-like object to store optional information."""
# By default models either use their own inverse property or have no
# inverse at all, but users may also assign a custom inverse to a model,
# optionally; in that case it is of course up to the user to determine
# whether their inverse is *actually* an inverse to the model they assign
# it to.
_inverse = None
_user_inverse = None
_bounding_box = None
_user_bounding_box = None
# Default n_models attribute, so that __len__ is still defined even when a
# model hasn't completed initialization yet
_n_models = 1
# New classes can set this as a boolean value.
# It is converted to a dictionary mapping input name to a boolean value.
_input_units_strict = False
# Allow dimensionless input (and corresponding output). If this is True,
# input values to evaluate will gain the units specified in input_units. If
# this is a dictionary then it should map input name to a bool to allow
# dimensionless numbers for that input.
# Only has an effect if input_units is defined.
_input_units_allow_dimensionless = False
# Default equivalencies to apply to input values. If set, this should be a
# dictionary where each key is a string that corresponds to one of the
# model inputs. Only has an effect if input_units is defined.
input_units_equivalencies = None
def __init__(self, *args, meta=None, name=None, **kwargs):
super().__init__()
if meta is not None:
self.meta = meta
self._name = name
self._initialize_constraints(kwargs)
# Remaining keyword args are either parameter values or invalid
# Parameter values must be passed in as keyword arguments in order to
# distinguish them
self._initialize_parameters(args, kwargs)
self._initialize_unit_support()
def _initialize_unit_support(self):
"""
Convert self._input_units_strict and
self.input_units_allow_dimensionless to dictionaries
mapping input name to a boolean value.
"""
if isinstance(self._input_units_strict, bool):
self._input_units_strict = {key: self._input_units_strict for
key in self.__class__.inputs}
if isinstance(self._input_units_allow_dimensionless, bool):
self._input_units_allow_dimensionless = {key: self._input_units_allow_dimensionless
for key in self.__class__.inputs}
@property
def input_units_strict(self):
"""
Enforce strict units on inputs to evaluate. If this is set to True,
input values to evaluate will be in the exact units specified by
input_units. If the input quantities are convertible to input_units,
they are converted. If this is a dictionary then it should map input
name to a bool to set strict input units for that parameter.
"""
val = self._input_units_strict
if isinstance(val, bool):
return {key: val for key in self.__class__.inputs}
else:
return val
@property
def input_units_allow_dimensionless(self):
"""
Allow dimensionless input (and corresponding output). If this is True,
input values to evaluate will gain the units specified in input_units. If
this is a dictionary then it should map input name to a bool to allow
dimensionless numbers for that input.
Only has an effect if input_units is defined.
"""
val = self._input_units_allow_dimensionless
if isinstance(val, bool):
return {key: val for key in self.__class__.inputs}
else:
return val
@property
def uses_quantity(self):
"""
True if this model has been created with `~astropy.units.Quantity`
objects or if there are no parameters.
This can be used to determine if this model should be evaluated with
`~astropy.units.Quantity` or regular floats.
"""
pisq = [isinstance(p, Quantity) for p in self._param_sets(units=True)]
return (len(pisq) == 0) or any(pisq)
def __repr__(self):
return self._format_repr()
def __str__(self):
return self._format_str()
def __len__(self):
return self._n_models
def __call__(self, *inputs, **kwargs):
"""
Evaluate this model using the given input(s) and the parameter values
that were specified when the model was instantiated.
"""
inputs, format_info = self.prepare_inputs(*inputs, **kwargs)
parameters = self._param_sets(raw=True, units=True)
with_bbox = kwargs.pop('with_bounding_box', False)
fill_value = kwargs.pop('fill_value', np.nan)
bbox = None
if with_bbox:
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
if self.n_inputs > 1 and bbox is not None:
# bounding_box is in python order - convert it to the order of the inputs
bbox = bbox[::-1]
if bbox is None:
outputs = self.evaluate(*chain(inputs, parameters))
else:
if self.n_inputs == 1:
bbox = [bbox]
# indices where input is outside the bbox
# have a value of 1 in ``nan_ind``
nan_ind = np.zeros(inputs[0].shape, dtype=bool)
for ind, inp in enumerate(inputs):
# Pass an ``out`` array so that ``axis_ind`` is array for scalars as well.
axis_ind = np.zeros(inp.shape, dtype=bool)
axis_ind = np.logical_or(inp < bbox[ind][0], inp > bbox[ind][1], out=axis_ind)
nan_ind[axis_ind] = 1
# get an array with indices of valid inputs
valid_ind = np.logical_not(nan_ind).nonzero()
# inputs holds only inputs within the bbox
args = []
for input in inputs:
if not input.shape:
# shape is ()
if nan_ind:
outputs = [fill_value for a in args]
else:
args.append(input)
else:
args.append(input[valid_ind])
valid_result = self.evaluate(*chain(args, parameters))
if self.n_outputs == 1:
valid_result = [valid_result]
# combine the valid results with the ``fill_value`` values
# outside the bbox
result = [np.zeros(inputs[0].shape) + fill_value for i in range(len(valid_result))]
for ind, r in enumerate(valid_result):
if not result[ind].shape:
# shape is ()
result[ind] = r
else:
result[ind][valid_ind] = r
# format output
if self.n_outputs == 1:
outputs = np.asarray(result[0])
else:
outputs = [np.asarray(r) for r in result]
else:
outputs = self.evaluate(*chain(inputs, parameters))
if self.n_outputs == 1:
outputs = (outputs,)
outputs = self.prepare_outputs(format_info, *outputs, **kwargs)
outputs = self._process_output_units(inputs, outputs)
if self.n_outputs == 1:
return outputs[0]
else:
return outputs
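# Hedged example of the bounding-box-aware evaluation path above (the model
# and limits are arbitrary):
#
#   >>> import numpy as np
#   >>> from astropy.modeling.models import Gaussian1D
#   >>> g = Gaussian1D(amplitude=1, mean=0, stddev=1)
#   >>> g.bounding_box = (-1, 1)
#   >>> g(np.array([-2., 0., 2.]), with_bounding_box=True, fill_value=np.nan)
#   array([nan,  1., nan])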
# *** Arithmetic operators for creating compound models ***
__add__ = _model_oper('+')
__sub__ = _model_oper('-')
__mul__ = _model_oper('*')
__truediv__ = _model_oper('/')
__pow__ = _model_oper('**')
__or__ = _model_oper('|')
__and__ = _model_oper('&')
# *** Properties ***
@property
def name(self):
"""User-provided name for this model instance."""
return self._name
@name.setter
def name(self, val):
"""Assign a (new) name to this model."""
self._name = val
@property
def n_inputs(self):
"""
The number of inputs to this model.
Equivalent to ``len(model.inputs)``.
"""
return len(self.inputs)
@property
def n_outputs(self):
"""
The number of outputs from this model.
Equivalent to ``len(model.outputs)``.
"""
return len(self.outputs)
@property
def model_set_axis(self):
"""
The index of the model set axis--that is the axis of a parameter array
that pertains to which model a parameter value pertains to--as
specified when the model was initialized.
See the documentation on `Model Sets
<http://docs.astropy.org/en/stable/modeling/models.html#model-sets>`_
for more details.
"""
return self._model_set_axis
@property
def param_sets(self):
"""
Return parameters as a pset.
This is a list with one item per parameter set, which is an array of
that parameter's values across all parameter sets, with the last axis
associated with the parameter set.
"""
return self._param_sets()
@property
def parameters(self):
"""
A flattened array of all parameter values in all parameter sets.
Fittable parameters maintain this list and fitters modify it.
"""
# Currently the sequence of a model's parameters must be contiguous
# within the _parameters array (which may be a view of a larger array,
# for example when taking a sub-expression of a compound model), so
# the assumption here is reliable:
if not self.param_names:
# Trivial, but not unheard of
return self._parameters
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
return self._parameters[start:stop]
@parameters.setter
def parameters(self, value):
"""
Assigning to this attribute updates the parameters array rather than
replacing it.
"""
if not self.param_names:
return
start = self._param_metrics[self.param_names[0]]['slice'].start
stop = self._param_metrics[self.param_names[-1]]['slice'].stop
try:
value = np.array(value).flatten()
self._parameters[start:stop] = value
except ValueError as e:
raise InputParameterError(
"Input parameter values not compatible with the model "
"parameters array: {0}".format(e))
@property
def fixed(self):
"""
A `dict` mapping parameter names to their fixed constraint.
"""
return self._constraints['fixed']
@property
def tied(self):
"""
A `dict` mapping parameter names to their tied constraint.
"""
return self._constraints['tied']
@property
def bounds(self):
"""
A `dict` mapping parameter names to their upper and lower bounds as
``(min, max)`` tuples or ``[min, max]`` lists.
"""
return self._constraints['bounds']
@property
def eqcons(self):
"""List of parameter equality constraints."""
return self._constraints['eqcons']
@property
def ineqcons(self):
"""List of parameter inequality constraints."""
return self._constraints['ineqcons']
@property
def inverse(self):
"""
Returns a new `~astropy.modeling.Model` instance which performs the
inverse transform, if an analytic inverse is defined for this model.
Even on models that don't have an inverse defined, this property can be
set with a manually-defined inverse, such as a pre-computed or
experimentally determined inverse (often given as a
`~astropy.modeling.polynomial.PolynomialModel`, but not by
requirement).
A custom inverse can be deleted with ``del model.inverse``. In this
case the model's inverse is reset to its default, if a default exists
(otherwise the default is to raise `NotImplementedError`).
Note to authors of `~astropy.modeling.Model` subclasses: To define an
inverse for a model simply override this property to return the
appropriate model representing the inverse. The machinery that will
make the inverse manually-overridable is added automatically by the
base class.
"""
if self._user_inverse is not None:
return self._user_inverse
elif self._inverse is not None:
return self._inverse()
raise NotImplementedError("An analytical inverse transform has not "
"been implemented for this model.")
@inverse.setter
def inverse(self, value):
if not isinstance(value, (Model, type(None))):
raise ValueError(
"The ``inverse`` attribute may be assigned a `Model` "
"instance or `None` (where `None` explicitly forces the "
"model to have no inverse.")
self._user_inverse = value
@inverse.deleter
def inverse(self):
"""
Resets the model's inverse to its default (if one exists, otherwise
the model will have no inverse).
"""
del self._user_inverse
@property
def has_user_inverse(self):
"""
A flag indicating whether or not a custom inverse model has been
assigned to this model by a user, via assignment to ``model.inverse``.
"""
return self._user_inverse is not None
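# Hedged sketch of assigning and clearing a user-defined inverse (the model
# chosen is arbitrary):
#
#   >>> from astropy.modeling.models import Shift
#   >>> s = Shift(2)
#   >>> s.inverse                            # analytic default, Shift(offset=-2)
#   >>> s.inverse = Shift(-2)                # replace with a user-supplied model
#   >>> s.has_user_inverse
#   True
#   >>> del s.inverse                        # restore the default behaviour
#   >>> s.has_user_inverse
#   False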
@property
def bounding_box(self):
r"""
A `tuple` of length `n_inputs` defining the bounding box limits, or
`None` for no bounding box.
The default limits are given by a ``bounding_box`` property or method
defined in the class body of a specific model. If not defined then
this property just raises `NotImplementedError` by default (but may be
assigned a custom value by a user). ``bounding_box`` can be set
manually to an array-like object of shape ``(model.n_inputs, 2)``. For
further usage, see :ref:`bounding-boxes`
The limits are ordered according to the `numpy` indexing
convention, and are the reverse of the model input order,
e.g. for inputs ``('x', 'y', 'z')``, ``bounding_box`` is defined:
* for 1D: ``(x_low, x_high)``
* for 2D: ``((y_low, y_high), (x_low, x_high))``
* for 3D: ``((z_low, z_high), (y_low, y_high), (x_low, x_high))``
Examples
--------
Setting the ``bounding_box`` limits for a 1D and 2D model:
>>> from astropy.modeling.models import Gaussian1D, Gaussian2D
>>> model_1d = Gaussian1D()
>>> model_2d = Gaussian2D(x_stddev=1, y_stddev=1)
>>> model_1d.bounding_box = (-5, 5)
>>> model_2d.bounding_box = ((-6, 6), (-5, 5))
Setting the bounding_box limits for a user-defined 3D `custom_model`:
>>> from astropy.modeling.models import custom_model
>>> def const3d(x, y, z, amp=1):
... return amp
...
>>> Const3D = custom_model(const3d)
>>> model_3d = Const3D()
>>> model_3d.bounding_box = ((-6, 6), (-5, 5), (-4, 4))
To reset ``bounding_box`` to its default limits just delete the
user-defined value--this will reset it back to the default defined
on the class:
>>> del model_1d.bounding_box
To disable the bounding box entirely (including the default),
set ``bounding_box`` to `None`:
>>> model_1d.bounding_box = None
>>> model_1d.bounding_box # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "astropy\modeling\core.py", line 980, in bounding_box
"No bounding box is defined for this model (note: the "
NotImplementedError: No bounding box is defined for this model (note:
the bounding box was explicitly disabled for this model; use `del
model.bounding_box` to restore the default bounding box, if one is
defined for this model).
"""
if self._user_bounding_box is not None:
if self._user_bounding_box is NotImplemented:
raise NotImplementedError(
"No bounding box is defined for this model (note: the "
"bounding box was explicitly disabled for this model; "
"use `del model.bounding_box` to restore the default "
"bounding box, if one is defined for this model).")
return self._user_bounding_box
elif self._bounding_box is None:
raise NotImplementedError(
"No bounding box is defined for this model.")
elif isinstance(self._bounding_box, _BoundingBox):
# This typically implies a hard-coded bounding box. This will
# probably be rare, but it is an option
return self._bounding_box
elif isinstance(self._bounding_box, types.MethodType):
return self._bounding_box()
else:
# The only other allowed possibility is that it's a _BoundingBox
# subclass, so we call it with its default arguments and return an
# instance of it (that can be called to recompute the bounding box
# with any optional parameters)
# (In other words, in this case self._bounding_box is a *class*)
bounding_box = self._bounding_box((), _model=self)()
return self._bounding_box(bounding_box, _model=self)
@bounding_box.setter
def bounding_box(self, bounding_box):
"""
Assigns the bounding box limits.
"""
if bounding_box is None:
cls = None
# We use this to explicitly set an unimplemented bounding box (as
# opposed to no user bounding box defined)
bounding_box = NotImplemented
elif (isinstance(self._bounding_box, type) and
issubclass(self._bounding_box, _BoundingBox)):
cls = self._bounding_box
else:
cls = _BoundingBox
if cls is not None:
try:
bounding_box = cls.validate(self, bounding_box)
except ValueError as exc:
raise ValueError(exc.args[0])
self._user_bounding_box = bounding_box
@bounding_box.deleter
def bounding_box(self):
self._user_bounding_box = None
@property
def has_user_bounding_box(self):
"""
A flag indicating whether or not a custom bounding_box has been
assigned to this model by a user, via assignment to
``model.bounding_box``.
"""
return self._user_bounding_box is not None
@property
def separable(self):
""" A flag indicating whether a model is separable."""
if self._separable is not None:
return self._separable
else:
raise NotImplementedError(
'The "separable" property is not defined for '
'model {}'.format(self.__class__.__name__))
# *** Public methods ***
def without_units_for_data(self, **kwargs):
"""
Return an instance of the model for which the parameter values have been
converted to the right units for the data, then the units have been
stripped away.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters should be converted to are not necessarily
the units of the input data, but are derived from them. Model subclasses
that want fitting to work in the presence of quantities need to define a
_parameter_units_for_data_units method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)
for name, unit in parameter_units.items():
parameter = getattr(model, name)
if parameter.unit is not None:
parameter.value = parameter.quantity.to(unit).value
parameter._set_unit(None, force=True)
if isinstance(model, _CompoundModel):
model.strip_units_from_tree()
return model
def strip_units_from_tree(self):
for item in self._tree.traverse_inorder():
if isinstance(item.value, Model):
for parname in item.value.param_names:
par = getattr(item.value, parname)
par._set_unit(None, force=True)
setattr(item.value, parname, par)
def with_units_from_data(self, **kwargs):
"""
Return an instance of the model which has units for which the parameter
values are compatible with the data units specified.
The input and output Quantity objects should be given as keyword
arguments.
Notes
-----
This method is needed in order to be able to fit models with units in
the parameters, since we need to temporarily strip away the units from
the model during the fitting (which might be done by e.g. scipy
functions).
The units that the parameters will gain are not necessarily the units of
the input data, but are derived from them. Model subclasses that want
fitting to work in the presence of quantities need to define a
_parameter_units_for_data_units method that takes the input and output
units (as two dictionaries) and returns a dictionary giving the target
units for each parameter.
"""
model = self.copy()
inputs_unit = {inp: getattr(kwargs[inp], 'unit', dimensionless_unscaled)
for inp in self.inputs if kwargs[inp] is not None}
outputs_unit = {out: getattr(kwargs[out], 'unit', dimensionless_unscaled)
for out in self.outputs if kwargs[out] is not None}
parameter_units = self._parameter_units_for_data_units(inputs_unit, outputs_unit)
# We are adding units to parameters that already have a value, but we
# don't want to convert the parameter, just add the unit directly, hence
# the call to _set_unit.
for name, unit in parameter_units.items():
parameter = getattr(model, name)
parameter._set_unit(unit, force=True)
return model
@property
def _has_units(self):
# Returns True if any of the parameters have units
for param in self.param_names:
if getattr(self, param).unit is not None:
return True
else:
return False
@property
def _supports_unit_fitting(self):
# If the model has a '_parameter_units_for_data_units' method, this
# indicates that we have enough information to strip the units away
# and add them back after fitting, when fitting quantities
return hasattr(self, '_parameter_units_for_data_units')
@abc.abstractmethod
def evaluate(self, *args, **kwargs):
"""Evaluate the model on some input variables."""
def sum_of_implicit_terms(self, *args, **kwargs):
"""
Evaluate the sum of any implicit model terms on some input variables.
This includes any fixed terms used in evaluating a linear model that
do not have corresponding parameters exposed to the user. The
prototypical case is `astropy.modeling.functional_models.Shift`, which
corresponds to a function y = a + bx, where b=1 is intrinsically fixed
by the type of model, such that sum_of_implicit_terms(x) == x. This
method is needed by linear fitters to correct the dependent variable
for the implicit term(s) when solving for the remaining terms
        (i.e. a = y - bx).
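
        For example (an illustrative sketch; `~astropy.modeling.functional_models.Shift`
        evaluates ``x + offset``, so its implicit term is just ``x``)::

            >>> from astropy.modeling.models import Shift  # doctest: +SKIP
            >>> Shift(3.)(2.)  # doctest: +SKIP
            5.0
            >>> Shift(3.).sum_of_implicit_terms(2.)  # doctest: +SKIP
            2.0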
"""
def render(self, out=None, coords=None):
"""
Evaluate a model at fixed positions, respecting the ``bounding_box``.
The key difference relative to evaluating the model directly is that
this method is limited to a bounding box if the `Model.bounding_box`
attribute is set.
Parameters
----------
out : `numpy.ndarray`, optional
An array that the evaluated model will be added to. If this is not
given (or given as ``None``), a new array will be created.
coords : array-like, optional
An array to be used to translate from the model's input coordinates
to the ``out`` array. It should have the property that
``self(coords)`` yields the same shape as ``out``. If ``out`` is
not specified, ``coords`` will be used to determine the shape of the
returned array. If this is not provided (or None), the model will be
evaluated on a grid determined by `Model.bounding_box`.
Returns
-------
out : `numpy.ndarray`
The model added to ``out`` if ``out`` is not ``None``, or else a
new array from evaluating the model over ``coords``.
If ``out`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
            `Model.bounding_box` is `None`, ``out`` or ``coords`` must be passed.
Raises
------
ValueError
            If ``coords`` are not given and the `Model.bounding_box` of this
model is not set.
Examples
--------
:ref:`bounding-boxes`
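
        A minimal sketch (assuming a 2-D Gaussian with an explicit
        ``bounding_box`` that fits inside the output array)::

            >>> import numpy as np  # doctest: +SKIP
            >>> from astropy.modeling.models import Gaussian2D  # doctest: +SKIP
            >>> psf = Gaussian2D(x_mean=12, y_mean=12, x_stddev=1, y_stddev=1)  # doctest: +SKIP
            >>> psf.bounding_box = ((8, 16), (8, 16))  # (y, x) limits  # doctest: +SKIP
            >>> image = np.zeros((25, 25))  # doctest: +SKIP
            >>> image = psf.render(out=image)  # evaluated only within the box  # doctest: +SKIP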
"""
try:
bbox = self.bounding_box
except NotImplementedError:
bbox = None
ndim = self.n_inputs
if (coords is None) and (out is None) and (bbox is None):
raise ValueError('If no bounding_box is set, '
'coords or out must be input.')
# for consistent indexing
if ndim == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if coords is not None:
coords = np.asanyarray(coords, dtype=float)
# Check dimensions match out and model
assert len(coords) == ndim
if out is not None:
if coords[0].shape != out.shape:
raise ValueError('inconsistent shape of the output.')
else:
out = np.zeros(coords[0].shape)
if out is not None:
out = np.asanyarray(out, dtype=float)
if out.ndim != ndim:
raise ValueError('the array and model must have the same '
'number of dimensions.')
if bbox is not None:
# assures position is at center pixel, important when using add_array
pd = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
pos, delta = pd
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos)
for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if out is None:
out = self(*sub_coords)
else:
try:
out = add_array(out, self(*sub_coords), pos)
except ValueError:
raise ValueError(
'The `bounding_box` is larger than the input out in '
'one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = out.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
coords = coords[::-1]
out += self(*coords)
return out
@property
def input_units(self):
"""
This property is used to indicate what units or sets of units the
evaluate method expects, and returns a dictionary mapping inputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid input units, in which case this property should
not be overridden since it will return the input units based on the
annotations.
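
        As an illustrative sketch (``AngularModel`` is a hypothetical subclass;
        ``u`` is `astropy.units`), expected input units can be declared through
        annotations on ``evaluate``::

            import astropy.units as u
            from astropy.modeling import Fittable1DModel, Parameter

            class AngularModel(Fittable1DModel):
                amplitude = Parameter(default=1)

                def evaluate(self, x: u.deg, amplitude):
                    # input_units then reports {'x': u.deg}
                    return amplitude * x.to_value(u.deg)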
"""
if hasattr(self, '_input_units'):
return self._input_units
elif hasattr(self.evaluate, '__annotations__'):
annotations = self.evaluate.__annotations__.copy()
annotations.pop('return', None)
if annotations:
                # If annotations are missing for any input, this will raise a KeyError.
return dict((name, annotations[name]) for name in self.inputs)
else:
# None means any unit is accepted
return None
@property
def return_units(self):
"""
This property is used to indicate what units or sets of units the output
of evaluate should be in, and returns a dictionary mapping outputs to
units (or `None` if any units are accepted).
Model sub-classes can also use function annotations in evaluate to
indicate valid output units, in which case this property should not be
overridden since it will return the return units based on the
annotations.
"""
if hasattr(self, '_return_units'):
return self._return_units
elif hasattr(self.evaluate, '__annotations__'):
return self.evaluate.__annotations__.get('return', None)
else:
# None means any unit is accepted
return None
def prepare_inputs(self, *inputs, model_set_axis=None, equivalencies=None,
**kwargs):
"""
This method is used in `~astropy.modeling.Model.__call__` to ensure
that all the inputs to the model can be broadcast into compatible
shapes (if one or both of them are input as arrays), particularly if
        there is more than one parameter set. This also makes sure that (if
applicable) the units of the input will be compatible with the evaluate
method.
"""
# When we instantiate the model class, we make sure that __call__ can
# take the following two keyword arguments: model_set_axis and
# equivalencies.
if model_set_axis is None:
# By default the model_set_axis for the input is assumed to be the
# same as that for the parameters the model was defined with
# TODO: Ensure that negative model_set_axis arguments are respected
model_set_axis = self.model_set_axis
n_models = len(self)
params = [getattr(self, name) for name in self.param_names]
inputs = [np.asanyarray(_input, dtype=float) for _input in inputs]
_validate_input_shapes(inputs, self.inputs, n_models,
model_set_axis, self.standard_broadcasting)
inputs = self._validate_input_units(inputs, equivalencies)
# The input formatting required for single models versus a multiple
# model set are different enough that they've been split into separate
# subroutines
if n_models == 1:
return _prepare_inputs_single_model(self, params, inputs,
**kwargs)
else:
return _prepare_inputs_model_set(self, params, inputs, n_models,
model_set_axis, **kwargs)
def _validate_input_units(self, inputs, equivalencies=None):
inputs = list(inputs)
name = self.name or self.__class__.__name__
# Check that the units are correct, if applicable
if self.input_units is not None:
# We combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(self.inputs,
equivalencies,
self.input_units_equivalencies)
# We now iterate over the different inputs and make sure that their
# units are consistent with those specified in input_units.
for i in range(len(inputs)):
input_name = self.inputs[i]
input_unit = self.input_units.get(input_name, None)
if input_unit is None:
continue
if isinstance(inputs[i], Quantity):
# We check for consistency of the units with input_units,
# taking into account any equivalencies
if inputs[i].unit.is_equivalent(input_unit, equivalencies=input_units_equivalencies[input_name]):
# If equivalencies have been specified, we need to
# convert the input to the input units - this is because
# some equivalencies are non-linear, and we need to be
# sure that we evaluate the model in its own frame
# of reference. If input_units_strict is set, we also
# need to convert to the input units.
if len(input_units_equivalencies) > 0 or self.input_units_strict[input_name]:
inputs[i] = inputs[i].to(input_unit, equivalencies=input_units_equivalencies[input_name])
else:
# We consider the following two cases separately so as
# to be able to raise more appropriate/nicer exceptions
if input_unit is dimensionless_unscaled:
raise UnitsError("{0}: Units of input '{1}', {2} ({3}), could not be "
"converted to required dimensionless "
"input".format(name,
self.inputs[i],
inputs[i].unit,
inputs[i].unit.physical_type))
else:
raise UnitsError("{0}: Units of input '{1}', {2} ({3}), could not be "
"converted to required input units of "
"{4} ({5})".format(name, self.inputs[i],
inputs[i].unit,
inputs[i].unit.physical_type,
input_unit,
input_unit.physical_type))
else:
# If we allow dimensionless input, we add the units to the
# input values without conversion, otherwise we raise an
# exception.
if (not self.input_units_allow_dimensionless[input_name] and
input_unit is not dimensionless_unscaled and input_unit is not None):
if np.any(inputs[i] != 0):
raise UnitsError("{0}: Units of input '{1}', (dimensionless), could not be "
"converted to required input units of "
"{2} ({3})".format(name, self.inputs[i], input_unit,
input_unit.physical_type))
return inputs
def _process_output_units(self, inputs, outputs):
inputs_are_quantity = any([isinstance(i, Quantity) for i in inputs])
if self.return_units and inputs_are_quantity:
# We allow a non-iterable unit only if there is one output
if self.n_outputs == 1 and not isiterable(self.return_units):
return_units = {self.outputs[0]: self.return_units}
else:
return_units = self.return_units
outputs = tuple([Quantity(out, return_units.get(out_name, None), subok=True)
for out, out_name in zip(outputs, self.outputs)])
return outputs
def prepare_outputs(self, format_info, *outputs, **kwargs):
model_set_axis = kwargs.get('model_set_axis', None)
if len(self) == 1:
return _prepare_outputs_single_model(self, outputs, format_info)
else:
return _prepare_outputs_model_set(self, outputs, format_info, model_set_axis)
def copy(self):
"""
Return a copy of this model.
Uses a deep copy so that all model attributes, including parameter
values, are copied as well.
"""
return copy.deepcopy(self)
def deepcopy(self):
"""
Return a deep copy of this model.
"""
return copy.deepcopy(self)
@sharedmethod
def rename(self, name):
"""
Return a copy of this model with a new name.
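
        For example (illustrative)::

            >>> from astropy.modeling.models import Gaussian1D  # doctest: +SKIP
            >>> psf = Gaussian1D().rename('psf')  # doctest: +SKIP
            >>> psf.name  # doctest: +SKIP
            'psf'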
"""
new_model = self.copy()
new_model._name = name
return new_model
@sharedmethod
def n_submodels(self):
"""
Return the number of components in a single model, which is
obviously 1.
"""
return 1
# *** Internal methods ***
@sharedmethod
def _from_existing(self, existing, param_names):
"""
Creates a new instance of ``cls`` that shares its underlying parameter
values with an existing model instance given by ``existing``.
This is used primarily by compound models to return a view of an
individual component of a compound model. ``param_names`` should be
the names of the parameters in the *existing* model to use as the
parameters in this new model. Its length should equal the number of
parameters this model takes, so that it can map parameters on the
existing model to parameters on this model one-to-one.
"""
# Basically this is an alternative __init__
if isinstance(self, type):
# self is a class, not an instance
needs_initialization = True
dummy_args = (0,) * len(param_names)
self = self.__new__(self, *dummy_args)
else:
needs_initialization = False
self = self.copy()
aliases = dict(zip(self.param_names, param_names))
# This is basically an alternative _initialize_constraints
constraints = {}
for cons_type in self.parameter_constraints:
orig = existing._constraints[cons_type]
constraints[cons_type] = AliasDict(orig, aliases)
self._constraints = constraints
self._n_models = existing._n_models
self._model_set_axis = existing._model_set_axis
self._parameters = existing._parameters
self._param_metrics = defaultdict(dict)
for param_a, param_b in aliases.items():
            # Take the param metrics info for the given parameters in the
# existing model, and hand them to the appropriate parameters in
# the new model
self._param_metrics[param_a] = existing._param_metrics[param_b]
if needs_initialization:
self.__init__(*dummy_args)
return self
def _initialize_constraints(self, kwargs):
"""
Pop parameter constraint values off the keyword arguments passed to
`Model.__init__` and store them in private instance attributes.
"""
if hasattr(self, '_constraints'):
# Skip constraint initialization if it has already been handled via
# an alternate initialization
return
self._constraints = {}
# Pop any constraints off the keyword arguments
for constraint in self.parameter_constraints:
values = kwargs.pop(constraint, {})
self._constraints[constraint] = values.copy()
# Update with default parameter constraints
for param_name in self.param_names:
param = getattr(self, param_name)
# Parameters don't have all constraint types
value = getattr(param, constraint)
if value is not None:
self._constraints[constraint][param_name] = value
for constraint in self.model_constraints:
values = kwargs.pop(constraint, [])
self._constraints[constraint] = values
def _initialize_parameters(self, args, kwargs):
"""
Initialize the _parameters array that stores raw parameter values for
all parameter sets for use with vectorized fitting algorithms; on
FittableModels the _param_name attributes actually just reference
slices of this array.
"""
if hasattr(self, '_parameters'):
# Skip parameter initialization if it has already been handled via
# an alternate initialization
return
n_models = kwargs.pop('n_models', None)
if not (n_models is None or
(isinstance(n_models, (int, np.integer)) and n_models >= 1)):
raise ValueError(
"n_models must be either None (in which case it is "
"determined from the model_set_axis of the parameter initial "
"values) or it must be a positive integer "
"(got {0!r})".format(n_models))
model_set_axis = kwargs.pop('model_set_axis', None)
if model_set_axis is None:
if n_models is not None and n_models > 1:
# Default to zero
model_set_axis = 0
else:
# Otherwise disable
model_set_axis = False
else:
if not (model_set_axis is False or
(isinstance(model_set_axis, int) and
not isinstance(model_set_axis, bool))):
raise ValueError(
"model_set_axis must be either False or an integer "
"specifying the parameter array axis to map to each "
"model in a set of models (got {0!r}).".format(
model_set_axis))
# Process positional arguments by matching them up with the
# corresponding parameters in self.param_names--if any also appear as
# keyword arguments this presents a conflict
params = {}
if len(args) > len(self.param_names):
raise TypeError(
"{0}.__init__() takes at most {1} positional arguments ({2} "
"given)".format(self.__class__.__name__, len(self.param_names),
len(args)))
self._model_set_axis = model_set_axis
self._param_metrics = defaultdict(dict)
for idx, arg in enumerate(args):
if arg is None:
                # A value of None implies using the default value, if one exists
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
params[self.param_names[idx]] = quantity_asanyarray(arg, dtype=float)
# At this point the only remaining keyword arguments should be
# parameter names; any others are in error.
for param_name in self.param_names:
if param_name in kwargs:
if param_name in params:
raise TypeError(
"{0}.__init__() got multiple values for parameter "
"{1!r}".format(self.__class__.__name__, param_name))
value = kwargs.pop(param_name)
if value is None:
continue
# We use quantity_asanyarray here instead of np.asanyarray because
# if any of the arguments are quantities, we need to return a
# Quantity object not a plain Numpy array.
params[param_name] = quantity_asanyarray(value, dtype=float)
if kwargs:
# If any keyword arguments were left over at this point they are
# invalid--the base class should only be passed the parameter
# values, constraints, and param_dim
for kwarg in kwargs:
# Just raise an error on the first unrecognized argument
raise TypeError(
'{0}.__init__() got an unrecognized parameter '
'{1!r}'.format(self.__class__.__name__, kwarg))
# Determine the number of model sets: If the model_set_axis is
# None then there is just one parameter set; otherwise it is determined
# by the size of that axis on the first parameter--if the other
# parameters don't have the right number of axes or the sizes of their
# model_set_axis don't match an error is raised
if model_set_axis is not False and n_models != 1 and params:
max_ndim = 0
if model_set_axis < 0:
min_ndim = abs(model_set_axis)
else:
min_ndim = model_set_axis + 1
for name, value in params.items():
param_ndim = np.ndim(value)
if param_ndim < min_ndim:
raise InputParameterError(
"All parameter values must be arrays of dimension "
"at least {0} for model_set_axis={1} (the value "
"given for {2!r} is only {3}-dimensional)".format(
min_ndim, model_set_axis, name, param_ndim))
max_ndim = max(max_ndim, param_ndim)
if n_models is None:
# Use the dimensions of the first parameter to determine
# the number of model sets
n_models = value.shape[model_set_axis]
elif value.shape[model_set_axis] != n_models:
raise InputParameterError(
"Inconsistent dimensions for parameter {0!r} for "
"{1} model sets. The length of axis {2} must be the "
"same for all input parameter values".format(
name, n_models, model_set_axis))
self._check_param_broadcast(params, max_ndim)
else:
if n_models is None:
n_models = 1
self._check_param_broadcast(params, None)
self._n_models = n_models
self._initialize_parameter_values(params)
def _initialize_parameter_values(self, params):
# self._param_metrics should have been initialized in
# self._initialize_parameters
param_metrics = self._param_metrics
total_size = 0
for name in self.param_names:
unit = None
param_descr = getattr(self, name)
if params.get(name) is None:
default = param_descr.default
if default is None:
# No value was supplied for the parameter and the
# parameter does not have a default, therefore the model
# is underspecified
raise TypeError(
"{0}.__init__() requires a value for parameter "
"{1!r}".format(self.__class__.__name__, name))
value = params[name] = default
unit = param_descr.unit
else:
value = params[name]
if isinstance(value, Quantity):
unit = value.unit
else:
unit = None
param_size = np.size(value)
param_shape = np.shape(value)
param_slice = slice(total_size, total_size + param_size)
param_metrics[name]['slice'] = param_slice
param_metrics[name]['shape'] = param_shape
if unit is None and param_descr.unit is not None:
raise InputParameterError(
"{0}.__init__() requires a Quantity for parameter "
"{1!r}".format(self.__class__.__name__, name))
param_metrics[name]['orig_unit'] = unit
param_metrics[name]['raw_unit'] = None
if param_descr._setter is not None:
_val = param_descr._setter(value)
if isinstance(_val, Quantity):
param_metrics[name]['raw_unit'] = _val.unit
else:
param_metrics[name]['raw_unit'] = None
total_size += param_size
self._param_metrics = param_metrics
self._parameters = np.empty(total_size, dtype=np.float64)
# Now set the parameter values (this will also fill
# self._parameters)
# TODO: This is a bit ugly, but easier to deal with than how this was
# done previously. There's still lots of opportunity for refactoring
# though, in particular once we move the _get/set_model_value methods
# out of Parameter and into Model (renaming them
# _get/set_parameter_value)
for name, value in params.items():
# value here may be a Quantity object.
param_descr = getattr(self, name)
unit = param_descr.unit
value = np.array(value)
orig_unit = param_metrics[name]['orig_unit']
if param_descr._setter is not None:
if unit is not None:
value = np.asarray(param_descr._setter(value * orig_unit).value)
else:
value = param_descr._setter(value)
self._parameters[param_metrics[name]['slice']] = value.ravel()
# Finally validate all the parameters; we do this last so that
# validators that depend on one of the other parameters' values will
# work
for name in params:
param_descr = getattr(self, name)
param_descr.validator(param_descr.value)
def _check_param_broadcast(self, params, max_ndim):
"""
This subroutine checks that all parameter arrays can be broadcast
against each other, and determines the shapes parameters must have in
order to broadcast correctly.
If model_set_axis is None this merely checks that the parameters
broadcast and returns an empty dict if so. This mode is only used for
single model sets.
"""
all_shapes = []
param_names = []
model_set_axis = self._model_set_axis
for name in self.param_names:
# Previously this just used iteritems(params), but we loop over all
# param_names instead just to ensure some determinism in the
# ordering behavior
if name not in params:
continue
value = params[name]
param_names.append(name)
# We've already checked that each parameter array is compatible in
# the model_set_axis dimension, but now we need to check the
# dimensions excluding that axis
# Split the array dimensions into the axes before model_set_axis
# and after model_set_axis
param_shape = np.shape(value)
param_ndim = len(param_shape)
if max_ndim is not None and param_ndim < max_ndim:
# All arrays have the same number of dimensions up to the
# model_set_axis dimension, but after that they may have a
# different number of trailing axes. The number of trailing
# axes must be extended for mutual compatibility. For example
# if max_ndim = 3 and model_set_axis = 0, an array with the
# shape (2, 2) must be extended to (2, 1, 2). However, an
                # array with shape (2,) is extended to (2, 1, 1).
new_axes = (1,) * (max_ndim - param_ndim)
if model_set_axis < 0:
# Just need to prepend axes to make up the difference
broadcast_shape = new_axes + param_shape
else:
broadcast_shape = (param_shape[:model_set_axis + 1] +
new_axes +
param_shape[model_set_axis + 1:])
self._param_metrics[name]['broadcast_shape'] = broadcast_shape
all_shapes.append(broadcast_shape)
else:
all_shapes.append(param_shape)
# Now check mutual broadcastability of all shapes
try:
check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
param_a = param_names[shape_a_idx]
param_b = param_names[shape_b_idx]
raise InputParameterError(
"Parameter {0!r} of shape {1!r} cannot be broadcast with "
"parameter {2!r} of shape {3!r}. All parameter arrays "
"must have shapes that are mutually compatible according "
"to the broadcasting rules.".format(param_a, shape_a,
param_b, shape_b))
def _param_sets(self, raw=False, units=False):
"""
Implementation of the Model.param_sets property.
This internal implementation has a ``raw`` argument which controls
whether or not to return the raw parameter values (i.e. the values that
are actually stored in the ._parameters array, as opposed to the values
        displayed to users). In most cases these are one and the same, but there
        are currently a few exceptions.
Note: This is notably an overcomplicated device and may be removed
entirely in the near future.
"""
param_metrics = self._param_metrics
values = []
shapes = []
for name in self.param_names:
param = getattr(self, name)
if raw:
value = param._raw_value
else:
value = param.value
broadcast_shape = param_metrics[name].get('broadcast_shape')
if broadcast_shape is not None:
value = value.reshape(broadcast_shape)
shapes.append(np.shape(value))
if len(self) == 1:
# Add a single param set axis to the parameter's value (thus
# converting scalars to shape (1,) array values) for
# consistency
value = np.array([value])
if units:
if raw and self._param_metrics[name]['raw_unit'] is not None:
unit = self._param_metrics[name]['raw_unit']
else:
unit = param.unit
if unit is not None:
value = Quantity(value, unit)
values.append(value)
if len(set(shapes)) != 1 or units:
# If the parameters are not all the same shape, converting to an
# array is going to produce an object array
# However the way Numpy creates object arrays is tricky in that it
# will recurse into array objects in the list and break them up
# into separate objects. Doing things this way ensures a 1-D
# object array the elements of which are the individual parameter
# arrays. There's not much reason to do this over returning a list
# except for consistency
psets = np.empty(len(values), dtype=object)
psets[:] = values
return psets
# TODO: Returning an array from this method may be entirely pointless
# for internal use--perhaps only the external param_sets method should
# return an array (and just for backwards compat--I would prefer to
# maybe deprecate that method)
return np.array(values)
def _format_repr(self, args=[], kwargs={}, defaults={}):
"""
Internal implementation of ``__repr__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__repr__`` while keeping the same basic
formatting.
"""
# TODO: I think this could be reworked to preset model sets better
parts = [repr(a) for a in args]
parts.extend(
"{0}={1}".format(name,
param_repr_oneline(getattr(self, name)))
for name in self.param_names)
if self.name is not None:
parts.append('name={0!r}'.format(self.name))
for kwarg, value in kwargs.items():
if kwarg in defaults and defaults[kwarg] != value:
continue
parts.append('{0}={1!r}'.format(kwarg, value))
if len(self) > 1:
parts.append("n_models={0}".format(len(self)))
return '<{0}({1})>'.format(self.__class__.__name__, ', '.join(parts))
def _format_str(self, keywords=[]):
"""
Internal implementation of ``__str__``.
This is separated out for ease of use by subclasses that wish to
override the default ``__str__`` while keeping the same basic
formatting.
"""
default_keywords = [
('Model', self.__class__.__name__),
('Name', self.name),
('Inputs', self.inputs),
('Outputs', self.outputs),
('Model set size', len(self))
]
parts = ['{0}: {1}'.format(keyword, value)
for keyword, value in default_keywords + keywords
if value is not None]
parts.append('Parameters:')
if len(self) == 1:
columns = [[getattr(self, name).value]
for name in self.param_names]
else:
columns = [getattr(self, name).value
for name in self.param_names]
if columns:
param_table = Table(columns, names=self.param_names)
# Set units on the columns
for name in self.param_names:
param_table[name].unit = getattr(self, name).unit
parts.append(indent(str(param_table), width=4))
return '\n'.join(parts)
class FittableModel(Model):
"""
Base class for models that can be fitted using the built-in fitting
algorithms.
"""
linear = False
# derivative with respect to parameters
fit_deriv = None
"""
Function (similar to the model's `~Model.evaluate`) to compute the
derivatives of the model with respect to its parameters, for use by fitting
algorithms. In other words, this computes the Jacobian matrix with respect
to the model's parameters.
"""
# Flag that indicates if the model derivatives with respect to parameters
# are given in columns or rows
col_fit_deriv = True
fittable = True
class Fittable1DModel(FittableModel):
"""
Base class for one-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
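
    As a minimal illustrative sketch (the name ``Line`` is hypothetical), a
    straight-line model could be defined as::

        from astropy.modeling import Fittable1DModel, Parameter

        class Line(Fittable1DModel):
            slope = Parameter(default=1)
            intercept = Parameter(default=0)

            @staticmethod
            def evaluate(x, slope, intercept):
                return slope * x + intercept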
"""
inputs = ('x',)
outputs = ('y',)
_separable = True
class Fittable2DModel(FittableModel):
"""
Base class for two-dimensional fittable models.
This class provides an easier interface to defining new models.
Examples can be found in `astropy.modeling.functional_models`.
"""
inputs = ('x', 'y')
outputs = ('z',)
def _make_arithmetic_operator(oper):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
def op(f, g):
return (make_binary_operator_eval(oper, f[0], g[0]), f[1], f[2])
return op
def _composition_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: g[0](f[0](inputs, params), params),
f[1], g[2])
def _join_operator(f, g):
# We don't bother with tuple unpacking here for efficiency's sake, but for
# documentation purposes:
#
# f_eval, f_n_inputs, f_n_outputs = f
#
# and similarly for g
return (lambda inputs, params: (f[0](inputs[:f[1]], params) +
g[0](inputs[f[1]:], params)),
f[1] + g[1], f[2] + g[2])
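# Illustrative sketch of the (evaluate, n_inputs, n_outputs) tuples handled by
# the helpers above: given ``f = (f_eval, 1, 1)`` and ``g = (g_eval, 1, 1)``,
# ``_composition_operator(f, g)`` yields a callable computing
# ``g_eval(f_eval(inputs, params), params)`` with 1 input and 1 output, while
# ``_join_operator(f, g)`` evaluates both and concatenates the results, giving
# 2 inputs and 2 outputs.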
# TODO: Support a couple unary operators--at least negation?
BINARY_OPERATORS = {
'+': _make_arithmetic_operator(operator.add),
'-': _make_arithmetic_operator(operator.sub),
'*': _make_arithmetic_operator(operator.mul),
'/': _make_arithmetic_operator(operator.truediv),
'**': _make_arithmetic_operator(operator.pow),
'|': _composition_operator,
'&': _join_operator
}
_ORDER_OF_OPERATORS = [('|',), ('&',), ('+', '-'), ('*', '/'), ('**',)]
OPERATOR_PRECEDENCE = {}
for idx, ops in enumerate(_ORDER_OF_OPERATORS):
for op in ops:
OPERATOR_PRECEDENCE[op] = idx
del idx, op, ops
class _CompoundModelMeta(_ModelMeta):
_tree = None
_submodels = None
_submodel_names = None
_nextid = 0
_param_names = None
# _param_map is a mapping of the compound model's generated param names to
# the parameters of submodels they are associated with. The values in this
    # mapping are (idx, name) tuples where idx is the index of the submodel this
# parameter is associated with, and name is the same parameter's name on
# the submodel
# In principle this will allow compound models to give entirely new names
# to parameters that don't have to be the same as their original names on
# the submodels, but right now that isn't taken advantage of
_param_map = None
_slice_offset = 0
# When taking slices of a compound model, this keeps track of how offset
# the first model in the slice is from the first model in the original
# compound model it was taken from
# This just inverts _param_map, swapping keys with values. This is also
# useful to have.
_param_map_inverse = None
_fittable = None
_evaluate = None
def __getitem__(cls, index):
index = cls._normalize_index(index)
if isinstance(index, (int, np.integer)):
return cls._get_submodels()[index]
else:
return cls._get_slice(index.start, index.stop)
def __getattr__(cls, attr):
# Make sure the _tree attribute is set; otherwise we are not looking up
# an attribute on a concrete compound model class and should just raise
# the AttributeError
if cls._tree is not None and attr in cls.param_names:
cls._init_param_descriptors()
return getattr(cls, attr)
raise AttributeError(attr)
def __repr__(cls):
if cls._tree is None:
# This case is mostly for debugging purposes
return cls._format_cls_repr()
expression = cls._format_expression()
components = cls._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return cls._format_cls_repr(keywords=keywords)
def __dir__(cls):
"""
Returns a list of attributes defined on a compound model, including
all of its parameters.
"""
basedir = super().__dir__()
if cls._tree is not None:
for name in cls.param_names:
basedir.append(name)
basedir.sort()
return basedir
def __reduce__(cls):
rv = super().__reduce__()
if isinstance(rv, tuple):
# Delete _evaluate from the members dict
with suppress(KeyError):
del rv[1][2]['_evaluate']
return rv
@property
def submodel_names(cls):
if cls._submodel_names is None:
seen = {}
names = []
for idx, submodel in enumerate(cls._get_submodels()):
name = str(submodel.name)
if name in seen:
names.append('{0}_{1}'.format(name, idx))
if seen[name] >= 0:
jdx = seen[name]
names[jdx] = '{0}_{1}'.format(names[jdx], jdx)
seen[name] = -1
else:
names.append(name)
seen[name] = idx
cls._submodel_names = tuple(names)
return cls._submodel_names
@property
def param_names(cls):
if cls._param_names is None:
cls._init_param_names()
return cls._param_names
@property
def fittable(cls):
if cls._fittable is None:
cls._fittable = all(m.fittable for m in cls._get_submodels())
return cls._fittable
# TODO: Maybe we could use make_function_with_signature for evaluate, but
# it's probably not worth it (and I'm not sure what the limit is on number
# of function arguments/local variables but we could break that limit for
    # complicated compound models...)
def evaluate(cls, *args):
if cls._evaluate is None:
func = cls._tree.evaluate(BINARY_OPERATORS,
getter=cls._model_evaluate_getter)[0]
cls._evaluate = func
inputs = args[:cls.n_inputs]
params = iter(args[cls.n_inputs:])
result = cls._evaluate(inputs, params)
if cls.n_outputs == 1:
return result[0]
else:
return result
# TODO: This supports creating a new compound model from two existing
# compound models (or normal models) and a single operator. However, it
# ought also to be possible to create a new model from an *entire*
# expression, represented as a sequence of operators and their operands (or
    # an existing ExpressionTree) and build that into a compound model without
# creating an intermediate _CompoundModel class for every single operator
# in the expression. This will prove to be a useful optimization in many
# cases
@classmethod
def _from_operator(mcls, operator, left, right, additional_members={}):
"""
Given a Python operator (represented by a string, such as ``'+'``
or ``'*'``, and two model classes or instances, return a new compound
model that evaluates the given operator on the outputs of the left and
right input models.
If either of the input models are a model *class* (i.e. a subclass of
`~astropy.modeling.Model`) then the returned model is a new subclass of
`~astropy.modeling.Model` that may be instantiated with any parameter
values. If both input models are *instances* of a model, a new class
is still created, but this method returns an *instance* of that class,
taking the parameter values from the parameters of the input model
instances.
If given, the ``additional_members`` `dict` may provide additional
class members that should be added to the generated
`~astropy.modeling.Model` subclass. Some members that are generated by
this method should not be provided by ``additional_members``. These
include ``_tree``, ``inputs``, ``outputs``, ``linear``,
        ``standard_broadcasting``, and ``__module__``. This is currently for
internal use only.
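
        As an illustrative sketch, this is the machinery invoked behind the
        scenes by the model operators themselves::

            >>> from astropy.modeling.models import Gaussian1D, Const1D  # doctest: +SKIP
            >>> SumOfModels = Gaussian1D + Const1D          # classes -> new class  # doctest: +SKIP
            >>> summed = Gaussian1D(1, 0, .2) + Const1D(2)  # instances -> instance  # doctest: +SKIP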
"""
# Note, currently this only supports binary operators, but could be
# easily extended to support unary operators (namely '-') if/when
# needed
children = []
for child in (left, right):
if isinstance(child, (_CompoundModelMeta, _CompoundModel)):
"""
Although the original child models were copied we make another
                copy here to ensure that changes to this child compound model's
                parameters will not propagate to the result, that is
cm1 = Gaussian1D(1, 5, .1) + Gaussian1D()
cm2 = cm1 | Scale()
cm1.amplitude_0 = 100
assert(cm2.amplitude_0 == 1)
"""
children.append(copy.deepcopy(child._tree))
elif isinstance(child, Model):
children.append(ExpressionTree(child.copy(),
inputs=child.inputs,
outputs=child.outputs))
else:
children.append(ExpressionTree(child, inputs=child.inputs, outputs=child.outputs))
inputs, outputs = mcls._check_inputs_and_outputs(operator, left, right)
tree = ExpressionTree(operator, left=children[0], right=children[1],
inputs=inputs, outputs=outputs)
name = str('CompoundModel{0}'.format(_CompoundModelMeta._nextid))
_CompoundModelMeta._nextid += 1
mod = find_current_module(3)
if mod:
modname = mod.__name__
else:
modname = '__main__'
if operator in ('|', '+', '-'):
linear = left.linear and right.linear
else:
# Which is not to say it is *definitely* not linear but it would be
# trickier to determine
linear = False
standard_broadcasting = left.standard_broadcasting and right.standard_broadcasting
# Note: If any other members are added here, make sure to mention them
# in the docstring of this method.
members = additional_members
members.update({
'_tree': tree,
'_is_dynamic': True, # See docs for _ModelMeta._is_dynamic
'inputs': inputs,
'outputs': outputs,
'linear': linear,
'standard_broadcasting': standard_broadcasting,
'__module__': str(modname)})
new_cls = mcls(name, (_CompoundModel,), members)
if isinstance(left, Model) and isinstance(right, Model):
# Both models used in the operator were already instantiated models,
# not model *classes*. As such it's not particularly useful to return
# the class itself, but to instead produce a new instance:
instance = new_cls()
# Workaround for https://github.com/astropy/astropy/issues/3542
# TODO: Any effort to restructure the tree-like data structure for
# compound models should try to obviate this workaround--if
# intermediate compound models are stored in the tree as well then
# we can immediately check for custom inverses on sub-models when
# computing the inverse
instance._user_inverse = mcls._make_user_inverse(
operator, left, right)
if left._n_models == right._n_models:
instance._n_models = left._n_models
else:
raise ValueError('Model sets must have the same number of '
'components.')
return instance
# Otherwise return the new uninstantiated class itself
return new_cls
@classmethod
def _check_inputs_and_outputs(mcls, operator, left, right):
# TODO: These aren't the full rules for handling inputs and outputs, but
# this will handle most basic cases correctly
if operator == '|':
inputs = left.inputs
outputs = right.outputs
if left.n_outputs != right.n_inputs:
raise ModelDefinitionError(
"Unsupported operands for |: {0} (n_inputs={1}, "
"n_outputs={2}) and {3} (n_inputs={4}, n_outputs={5}); "
"n_outputs for the left-hand model must match n_inputs "
"for the right-hand model.".format(
left.name, left.n_inputs, left.n_outputs, right.name,
right.n_inputs, right.n_outputs))
elif operator == '&':
inputs = combine_labels(left.inputs, right.inputs)
outputs = combine_labels(left.outputs, right.outputs)
else:
# Without loss of generality
inputs = left.inputs
outputs = left.outputs
if (left.n_inputs != right.n_inputs or
left.n_outputs != right.n_outputs):
raise ModelDefinitionError(
"Unsupported operands for {0}: {1} (n_inputs={2}, "
"n_outputs={3}) and {4} (n_inputs={5}, n_outputs={6}); "
"models must have the same n_inputs and the same "
"n_outputs for this operator".format(
operator, left.name, left.n_inputs, left.n_outputs,
right.name, right.n_inputs, right.n_outputs))
return inputs, outputs
@classmethod
def _make_user_inverse(mcls, operator, left, right):
"""
Generates an inverse `Model` for this `_CompoundModel` when either
model in the operation has a *custom inverse* that was manually
assigned by the user.
If either model has a custom inverse, and in particular if another
`_CompoundModel` has a custom inverse, then none of that model's
sub-models should be considered at all when computing the inverse.
So in that case we just compute the inverse ahead of time and set
it as the new compound model's custom inverse.
Note, this use case only applies when combining model instances,
since model classes don't currently have a notion of a "custom
inverse" (though it could probably be supported by overriding the
class's inverse property).
TODO: Consider fixing things so the aforementioned class-based case
works as well. However, for the present purposes this is good enough.
"""
if not (operator in ('&', '|') and
(left._user_inverse or right._user_inverse)):
# These are the only operators that support an inverse right now
return None
try:
left_inv = left.inverse
right_inv = right.inverse
except NotImplementedError:
# If either inverse is undefined then just return False; this
# means the normal _CompoundModel.inverse routine will fail
# naturally anyways, since it requires all sub-models to have
# an inverse defined
return None
if operator == '&':
return left_inv & right_inv
else:
return right_inv | left_inv
# TODO: Perhaps, just perhaps, the post-order (or ???-order) ordering of
# leaf nodes is something the ExpressionTree class itself could just know
def _get_submodels(cls):
# Would make this a lazyproperty but those don't currently work with
# type objects
if cls._submodels is not None:
return cls._submodels
submodels = [c.value for c in cls._tree.traverse_postorder()
if c.isleaf]
cls._submodels = submodels
return submodels
def _init_param_descriptors(cls):
"""
This routine sets up the names for all the parameters on a compound
model, including figuring out unique names for those parameters and
also mapping them back to their associated parameters of the underlying
submodels.
Setting this all up is costly, and only necessary for compound models
that a user will directly interact with. For example when building an
expression like::
>>> M = (Model1 + Model2) * Model3 # doctest: +SKIP
the user will generally never interact directly with the temporary
result of the subexpression ``(Model1 + Model2)``. So there's no need
to setup all the parameters for that temporary throwaway. Only once
the full expression is built and the user initializes or introspects
``M`` is it necessary to determine its full parameterization.
"""
# Accessing cls.param_names will implicitly call _init_param_names if
# needed and thus also set up the _param_map; I'm not crazy about that
# design but it stands for now
for param_name in cls.param_names:
submodel_idx, submodel_param = cls._param_map[param_name]
submodel = cls[submodel_idx]
orig_param = getattr(submodel, submodel_param, None)
if isinstance(submodel, Model):
# Take the parameter's default from the model's value for that
# parameter
default = orig_param.value
else:
default = orig_param.default
# Copy constraints
constraints = dict((key, getattr(orig_param, key))
for key in Model.parameter_constraints)
# Note: Parameter.copy() returns a new unbound Parameter, never
# a bound Parameter even if submodel is a Model instance (as
# opposed to a Model subclass)
new_param = orig_param.copy(name=param_name, default=default,
unit=orig_param.unit,
**constraints)
setattr(cls, param_name, new_param)
def _init_param_names(cls):
"""
This subroutine is solely for setting up the ``param_names`` attribute
itself.
See ``_init_param_descriptors`` for the full parameter setup.
"""
# Currently this skips over Model *instances* in the expression tree;
# basically these are treated as constants and do not add
# fittable/tunable parameters to the compound model.
# TODO: I'm not 100% happy with this design, and maybe we need some
# interface for distinguishing fittable/settable parameters with
# *constant* parameters (which would be distinct from parameters with
# fixed constraints since they're permanently locked in place). But I'm
# not sure if this is really the best way to treat the issue.
names = []
param_map = {}
# Start counting the suffix indices to put on parameter names from the
# slice_offset. Usually this will just be zero, but for compound
# models that were sliced from another compound model this may be > 0
param_suffix = cls._slice_offset
for idx, model in enumerate(cls._get_submodels()):
if not model.param_names:
# Skip models that don't have parameters in the numbering
# TODO: Reevaluate this if it turns out to be confusing, though
# parameter-less models are not very common in practice (there
# are a few projections that don't take parameters)
continue
for param_name in model.param_names:
# This is sort of heuristic, but we want to check that
# model.param_name *actually* returns a Parameter descriptor,
# and that the model isn't some inconsistent type that happens
# to have a param_names attribute but does not actually
# implement settable parameters.
# In the future we can probably remove this check, but this is
# here specifically to support the legacy compat
# _CompositeModel which can be considered a pathological case
# in the context of the new framework
# if not isinstance(getattr(model, param_name, None),
# Parameter):
# break
name = '{0}_{1}'.format(param_name, param_suffix + idx)
names.append(name)
param_map[name] = (idx, param_name)
cls._param_names = tuple(names)
cls._param_map = param_map
cls._param_map_inverse = dict((v, k) for k, v in param_map.items())
def _format_expression(cls):
# TODO: At some point might be useful to make a public version of this,
# albeit with more formatting options
return cls._tree.format_expression(OPERATOR_PRECEDENCE)
def _format_components(cls):
return '\n\n'.join('[{0}]: {1!r}'.format(idx, m)
for idx, m in enumerate(cls._get_submodels()))
def _normalize_index(cls, index):
"""
Converts an index given to __getitem__ to either an integer, or
a slice with integer start and stop values.
If the length of the slice is exactly 1 this converts the index to a
simple integer lookup.
Negative integers are converted to positive integers.
"""
def get_index_from_name(name):
try:
return cls.submodel_names.index(name)
except ValueError:
raise IndexError(
'Compound model {0} does not have a component named '
'{1}'.format(cls.name, name))
def check_for_negative_index(index):
if index < 0:
new_index = len(cls.submodel_names) + index
if new_index < 0:
# If still < 0 then this is an invalid index
raise IndexError(
"Model index {0} out of range.".format(index))
else:
index = new_index
return index
if isinstance(index, str):
return get_index_from_name(index)
elif isinstance(index, slice):
if index.step not in (1, None):
# In principle it could be but I can scarcely imagine a case
# where it would be useful. If someone can think of one then
# we can enable it.
raise ValueError(
"Step not supported for compound model slicing.")
start = index.start if index.start is not None else 0
stop = (index.stop
if index.stop is not None else len(cls.submodel_names))
if isinstance(start, (int, np.integer)):
start = check_for_negative_index(start)
if isinstance(stop, (int, np.integer)):
stop = check_for_negative_index(stop)
if isinstance(start, str):
start = get_index_from_name(start)
if isinstance(stop, str):
stop = get_index_from_name(stop) + 1
length = stop - start
if length == 1:
return start
elif length <= 0:
raise ValueError("Empty slice of a compound model.")
return slice(start, stop)
elif isinstance(index, (int, np.integer)):
if index >= len(cls.submodel_names):
raise IndexError(
"Model index {0} out of range.".format(index))
return check_for_negative_index(index)
raise TypeError(
'Submodels can be indexed either by their integer order or '
'their name (got {0!r}).'.format(index))
def _get_slice(cls, start, stop):
"""
        Return a new model built from a sub-expression of the expression
represented by this model.
Right now this is highly inefficient, as it creates a new temporary
model for each operator that appears in the sub-expression. It would
be better if this just built a new expression tree, and the new model
instantiated directly from that tree.
Once tree -> model instantiation is possible this should be fixed to
use that instead.
"""
members = {'_slice_offset': cls._slice_offset + start}
operators = dict((oper, _model_oper(oper, additional_members=members))
for oper in BINARY_OPERATORS)
return cls._tree.evaluate(operators, start=start, stop=stop)
@staticmethod
def _model_evaluate_getter(idx, model):
n_params = len(model.param_names)
n_inputs = model.n_inputs
n_outputs = model.n_outputs
# If model is not an instance, we need to instantiate it to make sure
# that we can call _validate_input_units (since e.g. input_units can
# be an instance property).
def evaluate_wrapper(model, inputs, param_values):
inputs = model._validate_input_units(inputs)
outputs = model.evaluate(*inputs, *param_values)
if n_outputs == 1:
outputs = (outputs,)
return model._process_output_units(inputs, outputs)
if isinstance(model, Model):
def f(inputs, params):
param_values = tuple(islice(params, n_params))
return evaluate_wrapper(model, inputs, param_values)
else:
# Where previously model was a class, now make an instance
def f(inputs, params):
param_values = tuple(islice(params, n_params))
m = model(*param_values)
return evaluate_wrapper(m, inputs, param_values)
return (f, n_inputs, n_outputs)
class _CompoundModel(Model, metaclass=_CompoundModelMeta):
fit_deriv = None
col_fit_deriv = False
_submodels = None
def __str__(self):
expression = self._format_expression()
components = self._format_components()
keywords = [
('Expression', expression),
('Components', '\n' + indent(components))
]
return super()._format_str(keywords=keywords)
def _generate_input_output_units_dict(self, mapping, attr):
"""
This method is used to transform dict or bool settings from
submodels into a single dictionary for the composite model,
taking into account renaming of input parameters.
"""
d = {}
for inp, (model, orig_inp) in mapping.items():
mattr = getattr(model, attr)
if isinstance(mattr, dict):
if orig_inp in mattr:
d[inp] = mattr[orig_inp]
elif isinstance(mattr, bool):
d[inp] = mattr
if d: # Note that if d is empty, we just return None
return d
@property
def input_units_allow_dimensionless(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_allow_dimensionless')
@property
def input_units_strict(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_strict')
@property
def input_units(self):
return self._generate_input_output_units_dict(self._tree.inputs_map, 'input_units')
@property
def input_units_equivalencies(self):
return self._generate_input_output_units_dict(self._tree.inputs_map,
'input_units_equivalencies')
@property
def return_units(self):
return self._generate_input_output_units_dict(self._tree.outputs_map,
'return_units')
def __getattr__(self, attr):
# This __getattr__ is necessary, because _CompoundModelMeta creates
# Parameter descriptors *lazily*--they do not exist in the class
# __dict__ until one of them has been accessed.
# However, this is at odds with how Python looks up descriptors (see
        # https://docs.python.org/3/reference/datamodel.html#invoking-descriptors)
# which is to look directly in the class __dict__
# This workaround allows descriptors to work correctly when they are
# not initially found in the class __dict__
value = getattr(self.__class__, attr)
if hasattr(value, '__get__'):
# Object is a descriptor, so we should really return the result of
# its __get__
value = value.__get__(self, self.__class__)
return value
def __getitem__(self, index):
index = self.__class__._normalize_index(index)
model = self.__class__[index]
if isinstance(index, slice):
param_names = model.param_names
else:
param_map = self.__class__._param_map_inverse
param_names = tuple(param_map[index, name]
for name in model.param_names)
return model._from_existing(self, param_names)
@property
def submodel_names(self):
return self.__class__.submodel_names
@sharedmethod
def n_submodels(self):
return len(self.submodel_names)
@property
def param_names(self):
return self.__class__.param_names
@property
def fittable(self):
return self.__class__.fittable
@sharedmethod
def evaluate(self, *args):
return self.__class__.evaluate(*args)
# TODO: The way this works is highly inefficient--the inverse is created by
# making a new model for each operator in the compound model, which could
# potentially mean creating a large number of temporary throwaway model
# classes. This can definitely be optimized in the future by implementing
# a way to construct a single model class from an existing tree
@property
def inverse(self):
def _not_implemented(oper):
def _raise(x, y):
raise NotImplementedError(
"The inverse is not currently defined for compound "
"models created using the {0} operator.".format(oper))
return _raise
operators = dict((oper, _not_implemented(oper))
for oper in ('+', '-', '*', '/', '**'))
operators['&'] = operator.and_
# Reverse the order of compositions
operators['|'] = lambda x, y: operator.or_(y, x)
def getter(idx, model):
try:
# By indexing on self[] this will return an instance of the
# model, with all the appropriate parameters set, which is
# currently required to return an inverse
return self[idx].inverse
except NotImplementedError:
raise NotImplementedError(
"All models in a composite model must have an inverse "
"defined in order for the composite model to have an "
"inverse. {0!r} does not have an inverse.".format(model))
return self._tree.evaluate(operators, getter=getter)
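    # Illustrative sketch: for ``m = Shift(1) | Scale(2)`` (i.e. y = 2 * (x + 1)),
    # ``m.inverse`` composes the sub-model inverses in reverse order and so
    # behaves like ``Scale(0.5) | Shift(-1)``.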
@sharedmethod
def _get_submodels(self):
return self.__class__._get_submodels()
def _parameter_units_for_data_units(self, input_units, output_units):
units_for_data = {}
for imodel, model in enumerate(self._submodels):
units_for_data_sub = model._parameter_units_for_data_units(input_units, output_units)
for param_sub in units_for_data_sub:
param = self._param_map_inverse[(imodel, param_sub)]
units_for_data[param] = units_for_data_sub[param_sub]
return units_for_data
def deepcopy(self):
"""
Return a deep copy of a compound model.
"""
new_model = self.copy()
new_model._submodels = [model.deepcopy() for model in self._submodels]
return new_model
def custom_model(*args, fit_deriv=None, **kwargs):
"""
Create a model from a user defined function. The inputs and parameters of
the model will be inferred from the arguments of the function.
This can be used either as a function or as a decorator. See below for
examples of both usages.
.. note::
All model parameters have to be defined as keyword arguments with
default values in the model function. Use `None` as a default argument
value if you do not want to have a default value for that parameter.
Parameters
----------
func : function
Function which defines the model. It should take N positional
        arguments, where ``N`` is the number of dimensions of the model (the
        number of independent variables in the model), and any number of keyword arguments
(the parameters). It must return the value of the model (typically as
an array, but can also be a scalar for scalar inputs). This
corresponds to the `~astropy.modeling.Model.evaluate` method.
fit_deriv : function, optional
Function which defines the Jacobian derivative of the model. I.e., the
derivative with respect to the *parameters* of the model. It should
have the same argument signature as ``func``, but should return a
sequence where each element of the sequence is the derivative
with respect to the corresponding argument. This corresponds to the
:meth:`~astropy.modeling.FittableModel.fit_deriv` method.
Examples
--------
Define a sinusoidal model function as a custom 1D model::
>>> from astropy.modeling.models import custom_model
>>> import numpy as np
>>> def sine_model(x, amplitude=1., frequency=1.):
... return amplitude * np.sin(2 * np.pi * frequency * x)
>>> def sine_deriv(x, amplitude=1., frequency=1.):
... return 2 * np.pi * amplitude * np.cos(2 * np.pi * frequency * x)
>>> SineModel = custom_model(sine_model, fit_deriv=sine_deriv)
Create an instance of the custom model and evaluate it::
>>> model = SineModel()
>>> model(0.25)
1.0
This model instance can now be used like a usual astropy model.
The next example demonstrates a 2D Moffat function model, and also
demonstrates the support for docstrings (this example could also include
a derivative, but it has been omitted for simplicity)::
>>> @custom_model
... def Moffat2D(x, y, amplitude=1.0, x_0=0.0, y_0=0.0, gamma=1.0,
... alpha=1.0):
... \"\"\"Two dimensional Moffat function.\"\"\"
... rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
... return amplitude * (1 + rr_gg) ** (-alpha)
...
>>> print(Moffat2D.__doc__)
Two dimensional Moffat function.
>>> model = Moffat2D()
>>> model(1, 1) # doctest: +FLOAT_CMP
0.3333333333333333
"""
if kwargs:
warnings.warn(
"Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
if len(args) == 1 and callable(args[0]):
return _custom_model_wrapper(args[0], fit_deriv=fit_deriv)
elif not args:
return functools.partial(_custom_model_wrapper, fit_deriv=fit_deriv)
else:
raise TypeError(
"{0} takes at most one positional argument (the callable/"
"function to be turned into a model. When used as a decorator "
"it should be passed keyword arguments only (if "
"any).".format(__name__))
def _custom_model_wrapper(func, fit_deriv=None):
"""
    Internal implementation of `custom_model`.
When `custom_model` is called as a function its arguments are passed to
this function, and the result of this function is returned.
When `custom_model` is used as a decorator a partial evaluation of this
function is returned by `custom_model`.
"""
if not callable(func):
raise ModelDefinitionError(
"func is not callable; it must be a function or other callable "
"object")
if fit_deriv is not None and not callable(fit_deriv):
raise ModelDefinitionError(
"fit_deriv not callable; it must be a function or other "
"callable object")
model_name = func.__name__
inputs, params = get_inputs_and_params(func)
if (fit_deriv is not None and
len(fit_deriv.__defaults__) != len(params)):
raise ModelDefinitionError("derivative function should accept "
"same number of parameters as func.")
# TODO: Maybe have a clever scheme for default output name?
if inputs:
output_names = (inputs[0].name,)
else:
output_names = ('x',)
params = dict((param.name, Parameter(param.name, default=param.default))
for param in params)
mod = find_current_module(2)
if mod:
modname = mod.__name__
else:
modname = '__main__'
members = {
'__module__': str(modname),
'__doc__': func.__doc__,
'inputs': tuple(x.name for x in inputs),
'outputs': output_names,
'evaluate': staticmethod(func),
}
if fit_deriv is not None:
members['fit_deriv'] = staticmethod(fit_deriv)
members.update(params)
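    # Dynamically create a new FittableModel subclass: the user function
    # becomes the static ``evaluate`` method and each keyword argument of the
    # function becomes a Parameter descriptor on the class.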
return type(model_name, (FittableModel,), members)
def render_model(model, arr=None, coords=None):
"""
Evaluates a model on an input array. Evaluation is limited to
a bounding box if the `Model.bounding_box` attribute is set.
Parameters
----------
model : `Model`
Model to be evaluated.
arr : `numpy.ndarray`, optional
Array on which the model is evaluated.
coords : array-like, optional
        Coordinate arrays mapping to ``arr``; each coordinate array must have
        the same shape as ``arr``.
Returns
-------
array : `numpy.ndarray`
The model evaluated on the input ``arr`` or a new array from ``coords``.
If ``arr`` and ``coords`` are both `None`, the returned array is
limited to the `Model.bounding_box` limits. If
`Model.bounding_box` is `None`, ``arr`` or ``coords`` must be passed.
Examples
--------
:ref:`bounding-boxes`
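
    A minimal usage sketch (illustrative only; it assumes a
    `~astropy.modeling.functional_models.Gaussian2D` as the input model)::

        >>> import numpy as np
        >>> from astropy.modeling.models import Gaussian2D
        >>> model = Gaussian2D(amplitude=1., x_mean=10., y_mean=10.,
        ...                    x_stddev=1., y_stddev=1.)
        >>> image = render_model(model, arr=np.zeros((20, 20)))
        >>> image.shape
        (20, 20)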
"""
bbox = model.bounding_box
    if coords is None and arr is None and bbox is None:
raise ValueError('If no bounding_box is set, coords or arr must be input.')
# for consistent indexing
if model.n_inputs == 1:
if coords is not None:
coords = [coords]
if bbox is not None:
bbox = [bbox]
if arr is not None:
arr = arr.copy()
# Check dimensions match model
if arr.ndim != model.n_inputs:
raise ValueError('number of array dimensions inconsistent with '
'number of model inputs.')
if coords is not None:
# Check dimensions match arr and model
coords = np.array(coords)
if len(coords) != model.n_inputs:
raise ValueError('coordinate length inconsistent with the number '
'of model inputs.')
if arr is not None:
if coords[0].shape != arr.shape:
raise ValueError('coordinate shape inconsistent with the '
'array shape.')
else:
arr = np.zeros(coords[0].shape)
if bbox is not None:
        # ensure the position is at the center pixel; this is important when
        # using add_array
pd = pos, delta = np.array([(np.mean(bb), np.ceil((bb[1] - bb[0]) / 2))
for bb in bbox]).astype(int).T
if coords is not None:
sub_shape = tuple(delta * 2 + 1)
sub_coords = np.array([extract_array(c, sub_shape, pos) for c in coords])
else:
limits = [slice(p - d, p + d + 1, 1) for p, d in pd.T]
sub_coords = np.mgrid[limits]
sub_coords = sub_coords[::-1]
if arr is None:
arr = model(*sub_coords)
else:
try:
arr = add_array(arr, model(*sub_coords), pos)
except ValueError:
raise ValueError('The `bounding_box` is larger than the input'
' arr in one or more dimensions. Set '
'`model.bounding_box = None`.')
else:
if coords is None:
im_shape = arr.shape
limits = [slice(i) for i in im_shape]
coords = np.mgrid[limits]
arr += model(*coords[::-1])
return arr
def _prepare_inputs_single_model(model, params, inputs, **kwargs):
broadcasts = []
for idx, _input in enumerate(inputs):
input_shape = _input.shape
        # Ensure that array scalars are always upgraded to 1-D arrays for the
# sake of consistency with how parameters work. They will be cast back
# to scalars at the end
if not input_shape:
inputs[idx] = _input.reshape((1,))
if not params:
max_broadcast = input_shape
else:
max_broadcast = ()
for param in params:
try:
if model.standard_broadcasting:
broadcast = check_broadcast(input_shape, param.shape)
else:
broadcast = input_shape
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(broadcast) > len(max_broadcast):
max_broadcast = broadcast
elif len(broadcast) == len(max_broadcast):
max_broadcast = max(max_broadcast, broadcast)
broadcasts.append(max_broadcast)
if model.n_outputs > model.n_inputs:
if len(set(broadcasts)) > 1:
raise ValueError(
"For models with n_outputs > n_inputs, the combination of "
"all inputs and parameters must broadcast to the same shape, "
"which will be used as the shape of all outputs. In this "
"case some of the inputs had different shapes, so it is "
"ambiguous how to format outputs for this model. Try using "
"inputs that are all the same size and shape.")
else:
# Extend the broadcasts list to include shapes for all outputs
extra_outputs = model.n_outputs - model.n_inputs
if not broadcasts:
# If there were no inputs then the broadcasts list is empty
# just add a None since there is no broadcasting of outputs and
# inputs necessary (see _prepare_outputs_single_model)
broadcasts.append(None)
broadcasts.extend([broadcasts[0]] * extra_outputs)
return inputs, (broadcasts,)
def _prepare_outputs_single_model(model, outputs, format_info):
broadcasts = format_info[0]
outputs = list(outputs)
for idx, output in enumerate(outputs):
broadcast_shape = broadcasts[idx]
if broadcast_shape is not None:
if not broadcast_shape:
# Shape is (), i.e. a scalar should be returned
outputs[idx] = output.item()
else:
outputs[idx] = output.reshape(broadcast_shape)
return tuple(outputs)
def _prepare_inputs_model_set(model, params, inputs, n_models, model_set_axis,
**kwargs):
reshaped = []
pivots = []
for idx, _input in enumerate(inputs):
max_param_shape = ()
if n_models > 1 and model_set_axis is not False:
# Use the shape of the input *excluding* the model axis
input_shape = (_input.shape[:model_set_axis] +
_input.shape[model_set_axis + 1:])
else:
input_shape = _input.shape
for param in params:
try:
check_broadcast(input_shape, param.shape)
except IncompatibleShapeError:
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot be "
"broadcast with parameter {2!r} of shape "
"{3!r}.".format(model.inputs[idx], input_shape,
param.name, param.shape))
if len(param.shape) > len(max_param_shape):
max_param_shape = param.shape
# We've now determined that, excluding the model_set_axis, the
# input can broadcast with all the parameters
input_ndim = len(input_shape)
if model_set_axis is False:
if len(max_param_shape) > input_ndim:
# Just needs to prepend new axes to the input
n_new_axes = 1 + len(max_param_shape) - input_ndim
new_axes = (1,) * n_new_axes
new_shape = new_axes + _input.shape
pivot = model.model_set_axis
else:
pivot = input_ndim - len(max_param_shape)
new_shape = (_input.shape[:pivot] + (1,) +
_input.shape[pivot:])
new_input = _input.reshape(new_shape)
else:
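            # The input carries a model-set axis: either insert length-1 axes
            # after it or roll it so that the remaining axes broadcast against
            # the parameter arrays.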
if len(max_param_shape) >= input_ndim:
n_new_axes = len(max_param_shape) - input_ndim
pivot = model.model_set_axis
new_axes = (1,) * n_new_axes
new_shape = (_input.shape[:pivot + 1] + new_axes +
_input.shape[pivot + 1:])
new_input = _input.reshape(new_shape)
else:
pivot = _input.ndim - len(max_param_shape) - 1
new_input = np.rollaxis(_input, model_set_axis,
pivot + 1)
pivots.append(pivot)
reshaped.append(new_input)
if model.n_inputs < model.n_outputs:
pivots.extend([model_set_axis] * (model.n_outputs - model.n_inputs))
return reshaped, (pivots,)
def _prepare_outputs_model_set(model, outputs, format_info, model_set_axis):
pivots = format_info[0]
# If model_set_axis = False was passed then use
# model._model_set_axis to format the output.
if model_set_axis is None or model_set_axis is False:
model_set_axis = model.model_set_axis
outputs = list(outputs)
for idx, output in enumerate(outputs):
pivot = pivots[idx]
if pivot < output.ndim and pivot != model_set_axis:
outputs[idx] = np.rollaxis(output, pivot,
model_set_axis)
return tuple(outputs)
def _validate_input_shapes(inputs, argnames, n_models, model_set_axis,
validate_broadcasting):
"""
Perform basic validation of model inputs--that they are mutually
broadcastable and that they have the minimum dimensions for the given
model_set_axis.
If validation succeeds, returns the total shape that will result from
broadcasting the input arrays with each other.
"""
check_model_set_axis = n_models > 1 and model_set_axis is not False
if not (validate_broadcasting or check_model_set_axis):
# Nothing else needed here
return
all_shapes = []
for idx, _input in enumerate(inputs):
input_shape = np.shape(_input)
# Ensure that the input's model_set_axis matches the model's
# n_models
if input_shape and check_model_set_axis:
# Note: Scalar inputs *only* get a pass on this
if len(input_shape) < model_set_axis + 1:
raise ValueError(
"For model_set_axis={0}, all inputs must be at "
"least {1}-dimensional.".format(
model_set_axis, model_set_axis + 1))
elif input_shape[model_set_axis] != n_models:
try:
argname = argnames[idx]
except IndexError:
# the case of model.inputs = ()
argname = str(idx)
raise ValueError(
"Input argument {0!r} does not have the correct "
"dimensions in model_set_axis={1} for a model set with "
"n_models={2}.".format(argname, model_set_axis,
n_models))
all_shapes.append(input_shape)
if not validate_broadcasting:
return
try:
input_broadcast = check_broadcast(*all_shapes)
except IncompatibleShapeError as exc:
shape_a, shape_a_idx, shape_b, shape_b_idx = exc.args
arg_a = argnames[shape_a_idx]
arg_b = argnames[shape_b_idx]
raise ValueError(
"Model input argument {0!r} of shape {1!r} cannot "
"be broadcast with input {2!r} of shape {3!r}".format(
arg_a, shape_a, arg_b, shape_b))
return input_broadcast
copyreg.pickle(_ModelMeta, _ModelMeta.__reduce__)
copyreg.pickle(_CompoundModelMeta, _CompoundModelMeta.__reduce__)
|
ec98bfd1a995cdc05a7f516fd8b1a68fe2fd4565660caf1f8aa47d5c0d2ebb55 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Mathematical models."""
from collections import OrderedDict
import numpy as np
from astropy import units as u
from astropy.units import Quantity, UnitsError
from .core import (Fittable1DModel, Fittable2DModel,
ModelDefinitionError)
from .parameters import Parameter, InputParameterError
from .utils import ellipse_extent
__all__ = ['AiryDisk2D', 'Moffat1D', 'Moffat2D', 'Box1D', 'Box2D', 'Const1D',
'Const2D', 'Ellipse2D', 'Disk2D', 'Gaussian1D', 'Gaussian2D', 'Linear1D',
'Lorentz1D', 'MexicanHat1D', 'MexicanHat2D', 'RedshiftScaleFactor',
'Multiply', 'Planar2D', 'Scale', 'Sersic1D', 'Sersic2D', 'Shift',
'Sine1D', 'Trapezoid1D', 'TrapezoidDisk2D', 'Ring2D', 'Voigt1D']
TWOPI = 2 * np.pi
FLOAT_EPSILON = float(np.finfo(np.float32).tiny)
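# Smallest positive "normal" float32 value; used as a strictly positive lower
# bound for width-like parameters such as the Gaussian standard deviations.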
# Note that we define this here rather than using the value defined in
# astropy.stats to avoid importing astropy.stats every time astropy.modeling
# is loaded.
GAUSSIAN_SIGMA_TO_FWHM = 2.0 * np.sqrt(2.0 * np.log(2.0))
class Gaussian1D(Fittable1DModel):
"""
One dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
mean : float
Mean of the Gaussian.
stddev : float
Standard deviation of the Gaussian.
Notes
-----
Model formula:
.. math:: f(x) = A e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}
Examples
--------
>>> from astropy.modeling import models
>>> def tie_center(model):
... mean = 50 * model.stddev
... return mean
>>> tied_parameters = {'mean': tie_center}
Specify that 'mean' is a tied parameter in one of two ways:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... tied=tied_parameters)
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.mean.tied
False
>>> g1.mean.tied = tie_center
>>> g1.mean.tied
<function tie_center at 0x...>
Fixed parameters:
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3,
... fixed={'stddev': True})
>>> g1.stddev.fixed
True
or
>>> g1 = models.Gaussian1D(amplitude=10, mean=5, stddev=.3)
>>> g1.stddev.fixed
False
>>> g1.stddev.fixed = True
>>> g1.stddev.fixed
True
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Gaussian1D
plt.figure()
s1 = Gaussian1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
See Also
--------
Gaussian2D, Box1D, Moffat1D, Lorentz1D
"""
amplitude = Parameter(default=1)
mean = Parameter(default=0)
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
stddev = Parameter(default=1, bounds=(FLOAT_EPSILON, None))
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``
Parameters
----------
factor : float
The multiple of `stddev` used to define the limits.
The default is 5.5, corresponding to a relative error < 1e-7.
Examples
--------
>>> from astropy.modeling.models import Gaussian1D
>>> model = Gaussian1D(mean=0, stddev=2)
>>> model.bounding_box
(-11.0, 11.0)
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor,
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
(-4.0, 4.0)
"""
x0 = self.mean
dx = factor * self.stddev
return (x0 - dx, x0 + dx)
@property
def fwhm(self):
"""Gaussian full width at half maximum."""
return self.stddev * GAUSSIAN_SIGMA_TO_FWHM
@staticmethod
def evaluate(x, amplitude, mean, stddev):
"""
Gaussian1D model function.
"""
return amplitude * np.exp(- 0.5 * (x - mean) ** 2 / stddev ** 2)
@staticmethod
def fit_deriv(x, amplitude, mean, stddev):
"""
Gaussian1D model function derivatives.
"""
d_amplitude = np.exp(-0.5 / stddev ** 2 * (x - mean) ** 2)
d_mean = amplitude * d_amplitude * (x - mean) / stddev ** 2
d_stddev = amplitude * d_amplitude * (x - mean) ** 2 / stddev ** 3
return [d_amplitude, d_mean, d_stddev]
@property
def input_units(self):
if self.mean.unit is None:
return None
else:
return {'x': self.mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('mean', inputs_unit['x']),
('stddev', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Gaussian2D(Fittable2DModel):
r"""
Two dimensional Gaussian model.
Parameters
----------
amplitude : float
Amplitude of the Gaussian.
x_mean : float
Mean of the Gaussian in x.
y_mean : float
Mean of the Gaussian in y.
x_stddev : float or None
Standard deviation of the Gaussian in x before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
y_stddev : float or None
Standard deviation of the Gaussian in y before rotating by theta. Must
be None if a covariance matrix (``cov_matrix``) is provided. If no
``cov_matrix`` is given, ``None`` means the default value (1).
theta : float, optional
Rotation angle in radians. The rotation angle increases
counterclockwise. Must be None if a covariance matrix (``cov_matrix``)
is provided. If no ``cov_matrix`` is given, ``None`` means the default
value (0).
cov_matrix : ndarray, optional
A 2x2 covariance matrix. If specified, overrides the ``x_stddev``,
``y_stddev``, and ``theta`` defaults.
Notes
-----
Model formula:
.. math::
f(x, y) = A e^{-a\left(x - x_{0}\right)^{2} -b\left(x - x_{0}\right)
\left(y - y_{0}\right) -c\left(y - y_{0}\right)^{2}}
Using the following definitions:
.. math::
a = \left(\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
b = \left(\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{x}^{2}} -
\frac{\sin{\left (2 \theta \right )}}{2 \sigma_{y}^{2}}\right)
c = \left(\frac{\sin^{2}{\left (\theta \right )}}{2 \sigma_{x}^{2}} +
\frac{\cos^{2}{\left (\theta \right )}}{2 \sigma_{y}^{2}}\right)
If using a ``cov_matrix``, the model is of the form:
.. math::
f(x, y) = A e^{-0.5 \left(\vec{x} - \vec{x}_{0}\right)^{T} \Sigma^{-1} \left(\vec{x} - \vec{x}_{0}\right)}
where :math:`\vec{x} = [x, y]`, :math:`\vec{x}_{0} = [x_{0}, y_{0}]`,
and :math:`\Sigma` is the covariance matrix:
.. math::
\Sigma = \left(\begin{array}{ccc}
\sigma_x^2 & \rho \sigma_x \sigma_y \\
\rho \sigma_x \sigma_y & \sigma_y^2
\end{array}\right)
:math:`\rho` is the correlation between ``x`` and ``y``, which should
be between -1 and +1. Positive correlation corresponds to a
``theta`` in the range 0 to 90 degrees. Negative correlation
corresponds to a ``theta`` in the range of 0 to -90 degrees.
See [1]_ for more details about the 2D Gaussian function.
See Also
--------
Gaussian1D, Box2D, Moffat2D
References
----------
.. [1] https://en.wikipedia.org/wiki/Gaussian_function
"""
amplitude = Parameter(default=1)
x_mean = Parameter(default=0)
y_mean = Parameter(default=0)
x_stddev = Parameter(default=1)
y_stddev = Parameter(default=1)
theta = Parameter(default=0.0)
def __init__(self, amplitude=amplitude.default, x_mean=x_mean.default,
y_mean=y_mean.default, x_stddev=None, y_stddev=None,
theta=None, cov_matrix=None, **kwargs):
if cov_matrix is None:
if x_stddev is None:
x_stddev = self.__class__.x_stddev.default
if y_stddev is None:
y_stddev = self.__class__.y_stddev.default
if theta is None:
theta = self.__class__.theta.default
else:
if x_stddev is not None or y_stddev is not None or theta is not None:
raise InputParameterError("Cannot specify both cov_matrix and "
"x/y_stddev/theta")
else:
                # Compute principal coordinate system transformation
cov_matrix = np.array(cov_matrix)
if cov_matrix.shape != (2, 2):
# TODO: Maybe it should be possible for the covariance matrix
# to be some (x, y, ..., z, 2, 2) array to be broadcast with
# other parameters of shape (x, y, ..., z)
# But that's maybe a special case to work out if/when needed
raise ValueError("Covariance matrix must be 2x2")
eig_vals, eig_vecs = np.linalg.eig(cov_matrix)
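                # The eigenvalues of the covariance matrix are the variances
                # along the principal axes; the rotation angle follows from
                # the orientation of the first eigenvector.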
x_stddev, y_stddev = np.sqrt(eig_vals)
y_vec = eig_vecs[:, 0]
theta = np.arctan2(y_vec[1], y_vec[0])
# Ensure stddev makes sense if its bounds are not explicitly set.
# stddev must be non-zero and positive.
# TODO: Investigate why setting this in Parameter above causes
# convolution tests to hang.
kwargs.setdefault('bounds', {})
kwargs['bounds'].setdefault('x_stddev', (FLOAT_EPSILON, None))
kwargs['bounds'].setdefault('y_stddev', (FLOAT_EPSILON, None))
super().__init__(
amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev, theta=theta, **kwargs)
@property
def x_fwhm(self):
"""Gaussian full width at half maximum in X."""
return self.x_stddev * GAUSSIAN_SIGMA_TO_FWHM
@property
def y_fwhm(self):
"""Gaussian full width at half maximum in Y."""
return self.y_stddev * GAUSSIAN_SIGMA_TO_FWHM
def bounding_box(self, factor=5.5):
"""
Tuple defining the default ``bounding_box`` limits in each dimension,
``((y_low, y_high), (x_low, x_high))``
The default offset from the mean is 5.5-sigma, corresponding
to a relative error < 1e-7. The limits are adjusted for rotation.
Parameters
----------
factor : float, optional
The multiple of `x_stddev` and `y_stddev` used to define the limits.
The default is 5.5.
Examples
--------
>>> from astropy.modeling.models import Gaussian2D
>>> model = Gaussian2D(x_mean=0, y_mean=0, x_stddev=1, y_stddev=2)
>>> model.bounding_box
((-11.0, 11.0), (-5.5, 5.5))
This range can be set directly (see: `Model.bounding_box
<astropy.modeling.Model.bounding_box>`) or by using a different factor
like:
>>> model.bounding_box = model.bounding_box(factor=2)
>>> model.bounding_box
((-4.0, 4.0), (-2.0, 2.0))
"""
a = factor * self.x_stddev
b = factor * self.y_stddev
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_mean - dy, self.y_mean + dy),
(self.x_mean - dx, self.x_mean + dx))
@staticmethod
def evaluate(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function"""
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xdiff = x - x_mean
ydiff = y - y_mean
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
return amplitude * np.exp(-((a * xdiff ** 2) + (b * xdiff * ydiff) +
(c * ydiff ** 2)))
@staticmethod
def fit_deriv(x, y, amplitude, x_mean, y_mean, x_stddev, y_stddev, theta):
"""Two dimensional Gaussian function derivative with respect to parameters"""
cost = np.cos(theta)
sint = np.sin(theta)
cost2 = np.cos(theta) ** 2
sint2 = np.sin(theta) ** 2
cos2t = np.cos(2. * theta)
sin2t = np.sin(2. * theta)
xstd2 = x_stddev ** 2
ystd2 = y_stddev ** 2
xstd3 = x_stddev ** 3
ystd3 = y_stddev ** 3
xdiff = x - x_mean
ydiff = y - y_mean
xdiff2 = xdiff ** 2
ydiff2 = ydiff ** 2
a = 0.5 * ((cost2 / xstd2) + (sint2 / ystd2))
b = 0.5 * ((sin2t / xstd2) - (sin2t / ystd2))
c = 0.5 * ((sint2 / xstd2) + (cost2 / ystd2))
g = amplitude * np.exp(-((a * xdiff2) + (b * xdiff * ydiff) +
(c * ydiff2)))
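        # Partial derivatives of the quadratic-form coefficients a, b and c
        # with respect to theta, x_stddev and y_stddev (combined via the
        # chain rule below).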
da_dtheta = (sint * cost * ((1. / ystd2) - (1. / xstd2)))
da_dx_stddev = -cost2 / xstd3
da_dy_stddev = -sint2 / ystd3
db_dtheta = (cos2t / xstd2) - (cos2t / ystd2)
db_dx_stddev = -sin2t / xstd3
db_dy_stddev = sin2t / ystd3
dc_dtheta = -da_dtheta
dc_dx_stddev = -sint2 / xstd3
dc_dy_stddev = -cost2 / ystd3
dg_dA = g / amplitude
dg_dx_mean = g * ((2. * a * xdiff) + (b * ydiff))
dg_dy_mean = g * ((b * xdiff) + (2. * c * ydiff))
dg_dx_stddev = g * (-(da_dx_stddev * xdiff2 +
db_dx_stddev * xdiff * ydiff +
dc_dx_stddev * ydiff2))
dg_dy_stddev = g * (-(da_dy_stddev * xdiff2 +
db_dy_stddev * xdiff * ydiff +
dc_dy_stddev * ydiff2))
dg_dtheta = g * (-(da_dtheta * xdiff2 +
db_dtheta * xdiff * ydiff +
dc_dtheta * ydiff2))
return [dg_dA, dg_dx_mean, dg_dy_mean, dg_dx_stddev, dg_dy_stddev,
dg_dtheta]
@property
def input_units(self):
if self.x_mean.unit is None and self.y_mean.unit is None:
return None
else:
return {'x': self.x_mean.unit,
'y': self.y_mean.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_mean', inputs_unit['x']),
('y_mean', inputs_unit['x']),
('x_stddev', inputs_unit['x']),
('y_stddev', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
class Shift(Fittable1DModel):
"""
Shift a coordinate.
Parameters
----------
offset : float
Offset to add to a coordinate.
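
    Examples
    --------
    A minimal usage sketch::

        >>> from astropy.modeling.models import Shift
        >>> s = Shift(offset=2.)
        >>> s(1.)
        3.0
        >>> s.inverse(3.)
        1.0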
"""
offset = Parameter(default=0)
linear = True
@property
def input_units(self):
if self.offset.unit is None:
return None
else:
return {'x': self.offset.unit}
@property
def inverse(self):
"""One dimensional inverse Shift model function"""
inv = self.copy()
inv.offset *= -1
return inv
@staticmethod
def evaluate(x, offset):
"""One dimensional Shift model function"""
return x + offset
@staticmethod
def sum_of_implicit_terms(x):
"""Evaluate the implicit term (x) of one dimensional Shift model"""
return x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Shift model derivative with respect to parameter"""
d_offset = np.ones_like(x)
return [d_offset]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('offset', outputs_unit['y'])])
class Scale(Fittable1DModel):
"""
Multiply a model by a dimensionless factor.
Parameters
----------
factor : float
Factor by which to scale a coordinate.
Notes
-----
If ``factor`` is a `~astropy.units.Quantity` then the units will be
stripped before the scaling operation.
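
    Examples
    --------
    A minimal usage sketch::

        >>> from astropy.modeling.models import Scale
        >>> s = Scale(factor=2.)
        >>> s(3.)
        6.0
        >>> s.inverse(6.)
        3.0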
"""
factor = Parameter(default=1)
linear = True
fittable = True
_input_units_strict = True
_input_units_allow_dimensionless = True
@property
def input_units(self):
if self.factor.unit is None:
return None
else:
return {'x': self.factor.unit}
@property
def inverse(self):
"""One dimensional inverse Scale model function"""
inv = self.copy()
inv.factor = 1 / self.factor
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional Scale model function"""
if isinstance(factor, u.Quantity):
factor = factor.value
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional Scale model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
unit = outputs_unit['y'] / inputs_unit['x']
if unit == u.one:
unit = None
return OrderedDict([('factor', unit)])
class Multiply(Fittable1DModel):
"""
Multiply a model by a quantity or number.
Parameters
----------
factor : float
Factor by which to multiply a coordinate.
"""
inputs = ('x',)
outputs = ('y',)
factor = Parameter(default=1)
linear = True
fittable = True
@property
def inverse(self):
"""One dimensional inverse multiply model function"""
inv = self.copy()
inv.factor = 1 / self.factor
return inv
@staticmethod
def evaluate(x, factor):
"""One dimensional multiply model function"""
return factor * x
@staticmethod
def fit_deriv(x, *params):
"""One dimensional multiply model derivative with respect to parameter"""
d_factor = x
return [d_factor]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('factor', outputs_unit['y'])])
class RedshiftScaleFactor(Fittable1DModel):
"""
One dimensional redshift scale factor model.
Parameters
----------
z : float
Redshift value.
Notes
-----
Model formula:
.. math:: f(x) = x (1 + z)
"""
z = Parameter(description='redshift', default=0)
@staticmethod
def evaluate(x, z):
"""One dimensional RedshiftScaleFactor model function"""
return (1 + z) * x
@staticmethod
def fit_deriv(x, z):
"""One dimensional RedshiftScaleFactor model derivative"""
d_z = x
return [d_z]
@property
def inverse(self):
"""Inverse RedshiftScaleFactor model"""
inv = self.copy()
inv.z = 1.0 / (1.0 + self.z) - 1.0
return inv
class Sersic1D(Fittable1DModel):
r"""
One dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
See Also
--------
Gaussian1D, Moffat1D, Lorentz1D
Notes
-----
Model formula:
.. math::
I(r)=I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic1D
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(111, xscale='log', yscale='log')
s1 = Sersic1D(amplitude=1, r_eff=5)
r=np.arange(0, 100, .01)
for n in range(1, 10):
s1.n = n
plt.plot(r, s1(r), color=str(float(n) / 15))
plt.axis([1e-1, 30, 1e-2, 1e3])
plt.xlabel('log Radius')
plt.ylabel('log Surface Brightness')
plt.text(.25, 1.5, 'n=1')
plt.text(.25, 300, 'n=10')
plt.xticks([])
plt.yticks([])
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1)
r_eff = Parameter(default=1)
n = Parameter(default=4)
_gammaincinv = None
@classmethod
def evaluate(cls, r, amplitude, r_eff, n):
"""One dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
            except ImportError:
                raise ImportError('Sersic1D model requires scipy > 0.11.')
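        # gammaincinv(2n, 0.5) is the constant b_n from the Notes section,
        # defined so that r_eff encloses half of the total luminosity.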
return (amplitude * np.exp(
-cls._gammaincinv(2 * n, 0.5) * ((r / r_eff) ** (1 / n) - 1)))
@property
def input_units(self):
if self.r_eff.unit is None:
return None
else:
return {'x': self.r_eff.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('r_eff', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Sine1D(Fittable1DModel):
"""
One dimensional Sine model.
Parameters
----------
amplitude : float
Oscillation amplitude
frequency : float
Oscillation frequency
phase : float
Oscillation phase
See Also
--------
Const1D, Linear1D
Notes
-----
Model formula:
.. math:: f(x) = A \\sin(2 \\pi f x + 2 \\pi p)
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Sine1D
plt.figure()
s1 = Sine1D(amplitude=1, frequency=.25)
r=np.arange(0, 10, .01)
for amplitude in range(1,4):
s1.amplitude = amplitude
plt.plot(r, s1(r), color=str(0.25 * amplitude), lw=2)
plt.axis([0, 10, -5, 5])
plt.show()
"""
amplitude = Parameter(default=1)
frequency = Parameter(default=1)
phase = Parameter(default=0)
@staticmethod
def evaluate(x, amplitude, frequency, phase):
"""One dimensional Sine model function"""
# Note: If frequency and x are quantities, they should normally have
# inverse units, so that argument ends up being dimensionless. However,
# np.sin of a dimensionless quantity will crash, so we remove the
        # quantity-ness from the argument in this case (another option would
        # be to multiply the argument by u.rad, but this would be slower
        # overall).
argument = TWOPI * (frequency * x + phase)
if isinstance(argument, Quantity):
argument = argument.value
return amplitude * np.sin(argument)
@staticmethod
def fit_deriv(x, amplitude, frequency, phase):
"""One dimensional Sine model derivative"""
d_amplitude = np.sin(TWOPI * frequency * x + TWOPI * phase)
d_frequency = (TWOPI * x * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
d_phase = (TWOPI * amplitude *
np.cos(TWOPI * frequency * x + TWOPI * phase))
return [d_amplitude, d_frequency, d_phase]
@property
def input_units(self):
if self.frequency.unit is None:
return None
else:
return {'x': 1. / self.frequency.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('frequency', inputs_unit['x'] ** -1),
('amplitude', outputs_unit['y'])])
class Linear1D(Fittable1DModel):
"""
One dimensional Line model.
Parameters
----------
slope : float
Slope of the straight line
intercept : float
Intercept of the straight line
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x) = a x + b
"""
slope = Parameter(default=1)
intercept = Parameter(default=0)
linear = True
@staticmethod
def evaluate(x, slope, intercept):
"""One dimensional Line model function"""
return slope * x + intercept
@staticmethod
def fit_deriv(x, slope, intercept):
"""One dimensional Line model derivative with respect to parameters"""
d_slope = x
d_intercept = np.ones_like(x)
return [d_slope, d_intercept]
@property
def inverse(self):
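        # Invert y = slope * x + intercept analytically:
        # x = y / slope - intercept / slope.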
new_slope = self.slope ** -1
new_intercept = -self.intercept / self.slope
return self.__class__(slope=new_slope, intercept=new_intercept)
@property
def input_units(self):
if self.intercept.unit is None and self.slope.unit is None:
return None
else:
return {'x': self.intercept.unit / self.slope.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('intercept', outputs_unit['y']),
('slope', outputs_unit['y'] / inputs_unit['x'])])
class Planar2D(Fittable2DModel):
"""
Two dimensional Plane model.
Parameters
----------
slope_x : float
Slope of the straight line in X
slope_y : float
Slope of the straight line in Y
intercept : float
Z-intercept of the straight line
Notes
-----
Model formula:
.. math:: f(x, y) = a x + b y + c
"""
slope_x = Parameter(default=1)
slope_y = Parameter(default=1)
intercept = Parameter(default=0)
linear = True
@staticmethod
def evaluate(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model function"""
return slope_x * x + slope_y * y + intercept
@staticmethod
def fit_deriv(x, y, slope_x, slope_y, intercept):
"""Two dimensional Plane model derivative with respect to parameters"""
d_slope_x = x
d_slope_y = y
d_intercept = np.ones_like(x)
return [d_slope_x, d_slope_y, d_intercept]
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('intercept', outputs_unit['z']),
('slope_x', outputs_unit['z'] / inputs_unit['x']),
('slope_y', outputs_unit['z'] / inputs_unit['y'])])
class Lorentz1D(Fittable1DModel):
"""
One dimensional Lorentzian model.
Parameters
----------
amplitude : float
Peak value
x_0 : float
Position of the peak
fwhm : float
Full width at half maximum
See Also
--------
Gaussian1D, Box1D, MexicanHat1D
Notes
-----
Model formula:
.. math::
f(x) = \\frac{A \\gamma^{2}}{\\gamma^{2} + \\left(x - x_{0}\\right)^{2}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Lorentz1D
plt.figure()
s1 = Lorentz1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
fwhm = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model function"""
return (amplitude * ((fwhm / 2.) ** 2) / ((x - x_0) ** 2 +
(fwhm / 2.) ** 2))
@staticmethod
def fit_deriv(x, amplitude, x_0, fwhm):
"""One dimensional Lorentzian model derivative with respect to parameters"""
d_amplitude = fwhm ** 2 / (fwhm ** 2 + (x - x_0) ** 2)
d_x_0 = (amplitude * d_amplitude * (2 * x - 2 * x_0) /
(fwhm ** 2 + (x - x_0) ** 2))
d_fwhm = 2 * amplitude * d_amplitude / fwhm * (1 - d_amplitude)
return [d_amplitude, d_x_0, d_fwhm]
def bounding_box(self, factor=25):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of FWHM used to define the limits.
Default is chosen to include most (99%) of the
area under the curve, while still showing the
central feature of interest.
"""
x0 = self.x_0
dx = factor * self.fwhm
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('fwhm', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Voigt1D(Fittable1DModel):
"""
One dimensional model for the Voigt profile.
Parameters
----------
x_0 : float
Position of the peak
amplitude_L : float
The Lorentzian amplitude
fwhm_L : float
The Lorentzian full width at half maximum
fwhm_G : float
The Gaussian full width at half maximum
See Also
--------
Gaussian1D, Lorentz1D
Notes
-----
Algorithm for the computation taken from
McLean, A. B., Mitchell, C. E. J. & Swanston, D. M. Implementation of an
efficient analytical approximation to the Voigt function for photoemission
lineshape analysis. Journal of Electron Spectroscopy and Related Phenomena
69, 125-132 (1994)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Voigt1D
import matplotlib.pyplot as plt
plt.figure()
x = np.arange(0, 10, 0.01)
v1 = Voigt1D(x_0=5, amplitude_L=10, fwhm_L=0.5, fwhm_G=0.9)
plt.plot(x, v1(x))
plt.show()
"""
x_0 = Parameter(default=0)
amplitude_L = Parameter(default=1)
fwhm_L = Parameter(default=2/np.pi)
fwhm_G = Parameter(default=np.log(2))
_abcd = np.array([
[-1.2150, -1.3509, -1.2150, -1.3509], # A
[1.2359, 0.3786, -1.2359, -0.3786], # B
[-0.3085, 0.5906, -0.3085, 0.5906], # C
[0.0210, -1.1858, -0.0210, 1.1858]]) # D
@classmethod
def evaluate(cls, x, x_0, amplitude_L, fwhm_L, fwhm_G):
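        # Four-term rational approximation to the Voigt profile; the constants
        # A, B, C, D are taken from McLean et al. (1994), cited in the class
        # Notes.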
A, B, C, D = cls._abcd
sqrt_ln2 = np.sqrt(np.log(2))
X = (x - x_0) * 2 * sqrt_ln2 / fwhm_G
X = np.atleast_1d(X)[..., np.newaxis]
Y = fwhm_L * sqrt_ln2 / fwhm_G
Y = np.atleast_1d(Y)[..., np.newaxis]
V = np.sum((C * (Y - A) + D * (X - B))/(((Y - A) ** 2 + (X - B) ** 2)), axis=-1)
return (fwhm_L * amplitude_L * np.sqrt(np.pi) * sqrt_ln2 / fwhm_G) * V
@classmethod
def fit_deriv(cls, x, x_0, amplitude_L, fwhm_L, fwhm_G):
A, B, C, D = cls._abcd
sqrt_ln2 = np.sqrt(np.log(2))
X = (x - x_0) * 2 * sqrt_ln2 / fwhm_G
X = np.atleast_1d(X)[:, np.newaxis]
Y = fwhm_L * sqrt_ln2 / fwhm_G
Y = np.atleast_1d(Y)[:, np.newaxis]
constant = fwhm_L * amplitude_L * np.sqrt(np.pi) * sqrt_ln2 / fwhm_G
alpha = C * (Y - A) + D * (X - B)
beta = (Y - A) ** 2 + (X - B) ** 2
V = np.sum((alpha / beta), axis=-1)
dVdx = np.sum((D/beta - 2 * (X - B) * alpha / np.square(beta)), axis=-1)
dVdy = np.sum((C/beta - 2 * (Y - A) * alpha / np.square(beta)), axis=-1)
dyda = [-constant * dVdx * 2 * sqrt_ln2 / fwhm_G,
constant * V / amplitude_L,
constant * (V / fwhm_L + dVdy * sqrt_ln2 / fwhm_G),
-constant * (V + (sqrt_ln2 / fwhm_G) * (2 * (x - x_0) * dVdx + fwhm_L * dVdy)) / fwhm_G]
return dyda
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('fwhm_L', inputs_unit['x']),
('fwhm_G', inputs_unit['x']),
('amplitude_L', outputs_unit['y'])])
class Const1D(Fittable1DModel):
"""
One dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const2D
Notes
-----
Model formula:
.. math:: f(x) = A
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Const1D
plt.figure()
s1 = Const1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
linear = True
@staticmethod
def evaluate(x, amplitude):
"""One dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
else:
return x
@staticmethod
def fit_deriv(x, amplitude):
"""One dimensional Constant model derivative with respect to parameters"""
d_amplitude = np.ones_like(x)
return [d_amplitude]
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('amplitude', outputs_unit['y'])])
class Const2D(Fittable2DModel):
"""
Two dimensional Constant model.
Parameters
----------
amplitude : float
Value of the constant function
See Also
--------
Const1D
Notes
-----
Model formula:
.. math:: f(x, y) = A
"""
amplitude = Parameter(default=1)
linear = True
@staticmethod
def evaluate(x, y, amplitude):
"""Two dimensional Constant model function"""
if amplitude.size == 1:
# This is slightly faster than using ones_like and multiplying
x = np.empty_like(x, subok=False)
x.fill(amplitude.item())
else:
# This case is less likely but could occur if the amplitude
# parameter is given an array-like value
x = amplitude * np.ones_like(x, subok=False)
if isinstance(amplitude, Quantity):
return Quantity(x, unit=amplitude.unit, copy=False)
else:
return x
@property
def input_units(self):
return None
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('amplitude', outputs_unit['z'])])
class Ellipse2D(Fittable2DModel):
"""
A 2D Ellipse model.
Parameters
----------
amplitude : float
Value of the ellipse.
x_0 : float
x position of the center of the disk.
y_0 : float
y position of the center of the disk.
a : float
The length of the semimajor axis.
b : float
The length of the semiminor axis.
theta : float
The rotation angle in radians of the semimajor axis. The
rotation angle increases counterclockwise from the positive x
axis.
See Also
--------
Disk2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
\\mathrm{amplitude} & : \\left[\\frac{(x - x_0) \\cos
\\theta + (y - y_0) \\sin \\theta}{a}\\right]^2 +
\\left[\\frac{-(x - x_0) \\sin \\theta + (y - y_0)
\\cos \\theta}{b}\\right]^2 \\leq 1 \\\\
0 & : \\mathrm{otherwise}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Ellipse2D
from astropy.coordinates import Angle
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x0, y0 = 25, 25
a, b = 20, 10
theta = Angle(30, 'deg')
e = Ellipse2D(amplitude=100., x_0=x0, y_0=y0, a=a, b=b,
theta=theta.radian)
y, x = np.mgrid[0:50, 0:50]
fig, ax = plt.subplots(1, 1)
ax.imshow(e(x, y), origin='lower', interpolation='none', cmap='Greys_r')
e2 = mpatches.Ellipse((x0, y0), 2*a, 2*b, theta.degree, edgecolor='red',
facecolor='none')
ax.add_patch(e2)
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
a = Parameter(default=1)
b = Parameter(default=1)
theta = Parameter(default=0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, a, b, theta):
"""Two dimensional Ellipse model function."""
xx = x - x_0
yy = y - y_0
cost = np.cos(theta)
sint = np.sin(theta)
numerator1 = (xx * cost) + (yy * sint)
numerator2 = -(xx * sint) + (yy * cost)
in_ellipse = (((numerator1 / a) ** 2 + (numerator2 / b) ** 2) <= 1.)
result = np.select([in_ellipse], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
a = self.a
b = self.b
theta = self.theta.value
dx, dy = ellipse_extent(a, b, theta)
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('a', inputs_unit['x']),
('b', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
class Disk2D(Fittable2DModel):
"""
Two dimensional radial symmetric Disk model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
R_0 : float
Radius of the disk
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r \\leq R_0 \\\\
0 & : r > R_0
\\end{array}
\\right.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
R_0 = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0):
"""Two dimensional Disk model function"""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
result = np.select([rr <= R_0 ** 2], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``((y_low, y_high), (x_low, x_high))``
"""
return ((self.y_0 - self.R_0, self.y_0 + self.R_0),
(self.x_0 - self.R_0, self.x_0 + self.R_0))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('R_0', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Ring2D(Fittable2DModel):
"""
Two dimensional radial symmetric Ring model.
Parameters
----------
amplitude : float
Value of the disk function
x_0 : float
x position center of the disk
y_0 : float
y position center of the disk
r_in : float
Inner radius of the ring
width : float
Width of the ring.
r_out : float
Outer Radius of the ring. Can be specified instead of width.
See Also
--------
Disk2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(r) = \\left \\{
\\begin{array}{ll}
A & : r_{in} \\leq r \\leq r_{out} \\\\
0 & : \\text{else}
\\end{array}
\\right.
Where :math:`r_{out} = r_{in} + r_{width}`.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
r_in = Parameter(default=1)
width = Parameter(default=1)
def __init__(self, amplitude=amplitude.default, x_0=x_0.default,
y_0=y_0.default, r_in=r_in.default, width=width.default,
r_out=None, **kwargs):
# If outer radius explicitly given, it overrides default width.
if r_out is not None:
if width != self.width.default:
raise InputParameterError(
"Cannot specify both width and outer radius separately.")
width = r_out - r_in
elif width is None:
width = self.width.default
super().__init__(
amplitude=amplitude, x_0=x_0, y_0=y_0, r_in=r_in, width=width,
**kwargs)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, r_in, width):
"""Two dimensional Ring model function."""
rr = (x - x_0) ** 2 + (y - y_0) ** 2
r_range = np.logical_and(rr >= r_in ** 2, rr <= (r_in + width) ** 2)
result = np.select([r_range], [amplitude])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.r_in + self.width
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('r_in', inputs_unit['x']),
('width', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Delta1D(Fittable1DModel):
"""One dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Delta2D(Fittable2DModel):
"""Two dimensional Dirac delta function."""
def __init__(self):
raise ModelDefinitionError("Not implemented")
class Box1D(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
width = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function"""
inside = np.logical_and(x >= x_0 - width / 2., x <= x_0 + width / 2.)
result = np.select([inside], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('width', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class Box2D(Fittable2DModel):
"""
Two dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
x position of the center of the box function
x_width : float
Width in x direction of the box
y_0 : float
y position of the center of the box function
y_width : float
Width in y direction of the box
See Also
--------
Box1D, Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
f(x, y) = \\left \\{
\\begin{array}{ll}
A : & x_0 - w_x/2 \\leq x \\leq x_0 + w_x/2 \\text{ and} \\\\
& y_0 - w_y/2 \\leq y \\leq y_0 + w_y/2 \\\\
0 : & \\text{else}
\\end{array}
\\right.
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
x_width = Parameter(default=1)
y_width = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, x_width, y_width):
"""Two dimensional Box model function"""
x_range = np.logical_and(x >= x_0 - x_width / 2.,
x <= x_0 + x_width / 2.)
y_range = np.logical_and(y >= y_0 - y_width / 2.,
y <= y_0 + y_width / 2.)
result = np.select([np.logical_and(x_range, y_range)], [amplitude], 0)
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dx = self.x_width / 2
dy = self.y_width / 2
return ((self.y_0 - dy, self.y_0 + dy),
(self.x_0 - dx, self.x_0 + dx))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['y']),
('x_width', inputs_unit['x']),
('y_width', inputs_unit['y']),
('amplitude', outputs_unit['z'])])
class Trapezoid1D(Fittable1DModel):
"""
One dimensional Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
Center position of the trapezoid
width : float
Width of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid
See Also
--------
Box1D, Gaussian1D, Moffat1D
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Trapezoid1D
plt.figure()
s1 = Trapezoid1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
width = Parameter(default=1)
slope = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, width, slope):
"""One dimensional Trapezoid model function"""
# Compute the four points where the trapezoid changes slope
# x1 <= x2 <= x3 <= x4
x2 = x_0 - width / 2.
x3 = x_0 + width / 2.
x1 = x2 - amplitude / slope
x4 = x3 + amplitude / slope
# Compute model values in pieces between the change points
range_a = np.logical_and(x >= x1, x < x2)
range_b = np.logical_and(x >= x2, x < x3)
range_c = np.logical_and(x >= x3, x < x4)
val_a = slope * (x - x1)
val_b = amplitude
val_c = slope * (x4 - x)
result = np.select([range_a, range_b, range_c], [val_a, val_b, val_c])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
        ``(x_low, x_high)``
"""
dx = self.width / 2 + self.amplitude / self.slope
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('width', inputs_unit['x']),
('slope', outputs_unit['y'] / inputs_unit['x']),
('amplitude', outputs_unit['y'])])
class TrapezoidDisk2D(Fittable2DModel):
"""
Two dimensional circular Trapezoid model.
Parameters
----------
amplitude : float
Amplitude of the trapezoid
x_0 : float
x position of the center of the trapezoid
y_0 : float
y position of the center of the trapezoid
R_0 : float
Radius of the constant part of the trapezoid.
slope : float
Slope of the tails of the trapezoid in x direction.
See Also
--------
Disk2D, Box2D
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
R_0 = Parameter(default=1)
slope = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, R_0, slope):
"""Two dimensional Trapezoid Disk model function"""
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2)
range_1 = r <= R_0
range_2 = np.logical_and(r > R_0, r <= R_0 + amplitude / slope)
val_1 = amplitude
val_2 = amplitude + slope * (R_0 - r)
result = np.select([range_1, range_2], [val_1, val_2])
if isinstance(amplitude, Quantity):
return Quantity(result, unit=amplitude.unit, copy=False)
else:
return result
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box``.
``((y_low, y_high), (x_low, x_high))``
"""
dr = self.R_0 + self.amplitude / self.slope
return ((self.y_0 - dr, self.y_0 + dr),
(self.x_0 - dr, self.x_0 + dr))
@property
def input_units(self):
if self.x_0.unit is None and self.y_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('R_0', inputs_unit['x']),
('slope', outputs_unit['z'] / inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class MexicanHat1D(Fittable1DModel):
"""
One dimensional Mexican Hat model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
Position of the peak
sigma : float
Width of the Mexican hat
See Also
--------
MexicanHat2D, Box1D, Gaussian1D, Trapezoid1D
Notes
-----
Model formula:
.. math::
f(x) = {A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{- \\frac{\\left(x - x_{0}\\right)^{2}}{2 \\sigma^{2}}}}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import MexicanHat1D
plt.figure()
s1 = MexicanHat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.sigma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -2, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
sigma = Parameter(default=1)
@staticmethod
def evaluate(x, amplitude, x_0, sigma):
"""One dimensional Mexican Hat model function"""
xx_ww = (x - x_0) ** 2 / (2 * sigma ** 2)
return amplitude * (1 - 2 * xx_ww) * np.exp(-xx_ww)
def bounding_box(self, factor=10.0):
"""Tuple defining the default ``bounding_box`` limits,
``(x_low, x_high)``.
Parameters
----------
factor : float
The multiple of sigma used to define the limits.
"""
x0 = self.x_0
dx = factor * self.sigma
return (x0 - dx, x0 + dx)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('sigma', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
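# A minimal illustrative sketch (arbitrary assumed parameter values): compare
# ``MexicanHat1D.evaluate`` against the closed-form expression quoted in the
# Notes section above.
def _mexican_hat_1d_formula_sketch():
    amplitude, x_0, sigma = 3.0, 1.0, 0.5
    model = MexicanHat1D(amplitude=amplitude, x_0=x_0, sigma=sigma)
    x = np.linspace(-2.0, 4.0, 101)
    explicit = (amplitude * (1 - (x - x_0) ** 2 / sigma ** 2) *
                np.exp(-(x - x_0) ** 2 / (2 * sigma ** 2)))
    return bool(np.allclose(model(x), explicit))  # expected: True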
class MexicanHat2D(Fittable2DModel):
"""
Two dimensional symmetric Mexican Hat model.
Parameters
----------
amplitude : float
Amplitude
x_0 : float
x position of the peak
y_0 : float
y position of the peak
sigma : float
Width of the Mexican hat
See Also
--------
MexicanHat1D, Gaussian2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 - \\frac{\\left(x - x_{0}\\right)^{2}
+ \\left(y - y_{0}\\right)^{2}}{\\sigma^{2}}\\right)
e^{\\frac{- \\left(x - x_{0}\\right)^{2}
- \\left(y - y_{0}\\right)^{2}}{2 \\sigma^{2}}}
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
sigma = Parameter(default=1)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, sigma):
"""Two dimensional Mexican Hat model function"""
rr_ww = ((x - x_0) ** 2 + (y - y_0) ** 2) / (2 * sigma ** 2)
return amplitude * (1 - rr_ww) * np.exp(- rr_ww)
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('sigma', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class AiryDisk2D(Fittable2DModel):
"""
Two dimensional Airy disk model.
Parameters
----------
amplitude : float
Amplitude of the Airy function.
x_0 : float
x position of the maximum of the Airy function.
y_0 : float
y position of the maximum of the Airy function.
radius : float
The radius of the Airy disk (radius of the first zero).
See Also
--------
Box2D, TrapezoidDisk2D, Gaussian2D
Notes
-----
Model formula:
.. math:: f(r) = A \\left[\\frac{2 J_1(\\frac{\\pi r}{R/R_z})}{\\frac{\\pi r}{R/R_z}}\\right]^2
Where :math:`J_1` is the first order Bessel function of the first
kind, :math:`r` is radial distance from the maximum of the Airy
function (:math:`r = \\sqrt{(x - x_0)^2 + (y - y_0)^2}`), :math:`R`
is the input ``radius`` parameter, and :math:`R_z =
    1.2196698912665045`.
For an optical system, the radius of the first zero represents the
limiting angular resolution and is approximately 1.22 * lambda / D,
where lambda is the wavelength of the light and D is the diameter of
the aperture.
See [1]_ for more details about the Airy disk.
References
----------
.. [1] https://en.wikipedia.org/wiki/Airy_disk
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
radius = Parameter(default=1)
_rz = None
_j1 = None
@classmethod
def evaluate(cls, x, y, amplitude, x_0, y_0, radius):
"""Two dimensional Airy model function"""
if cls._rz is None:
try:
from scipy.special import j1, jn_zeros
cls._rz = jn_zeros(1, 1)[0] / np.pi
cls._j1 = j1
            except ImportError:
raise ImportError('AiryDisk2D model requires scipy > 0.11.')
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / cls._rz)
if isinstance(r, Quantity):
# scipy function cannot handle Quantity, so turn into array.
r = r.to_value(u.dimensionless_unscaled)
# Since r can be zero, we have to take care to treat that case
# separately so as not to raise a numpy warning
z = np.ones(r.shape)
rt = np.pi * r[r > 0]
z[r > 0] = (2.0 * cls._j1(rt) / rt) ** 2
if isinstance(amplitude, Quantity):
# make z quantity too, otherwise in-place multiplication fails.
z = Quantity(z, u.dimensionless_unscaled, copy=False)
z *= amplitude
return z
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('radius', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
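# A minimal illustrative sketch (assumed wavelength and aperture diameter): the
# ``radius`` parameter is the first zero of the Airy pattern, roughly
# 1.22 * lambda / D for a circular aperture, as noted above. Requires scipy,
# just like ``AiryDisk2D.evaluate``.
def _airy_disk_2d_resolution_sketch():
    wavelength = 550e-9                    # m (assumed value)
    diameter = 2.4                         # m (assumed value)
    radius = 1.22 * wavelength / diameter  # first-zero radius in radians
    model = AiryDisk2D(amplitude=1.0, x_0=0.0, y_0=0.0, radius=radius)
    r = np.array([0.0, 0.5, 1.0]) * radius
    # Peak value at the centre, essentially zero at one ``radius`` out.
    return model(r, np.zeros_like(r))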
class Moffat1D(Fittable1DModel):
"""
One dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian1D, Box1D
Notes
-----
Model formula:
.. math::
f(x) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Moffat1D
plt.figure()
s1 = Moffat1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
            s1.gamma = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
gamma = Parameter(default=1)
alpha = Parameter(default=1)
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <http://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model function"""
return amplitude * (1 + ((x - x_0) / gamma) ** 2) ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, gamma, alpha):
"""One dimensional Moffat model derivative with respect to parameters"""
fac = (1 + (x - x_0) ** 2 / gamma ** 2)
d_A = fac ** (-alpha)
d_x_0 = (2 * amplitude * alpha * (x - x_0) * d_A / (fac * gamma ** 2))
d_gamma = (2 * amplitude * alpha * (x - x_0) ** 2 * d_A /
(fac * gamma ** 3))
d_alpha = -amplitude * d_A * np.log(fac)
return [d_A, d_x_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return OrderedDict([('x_0', inputs_unit['x']),
('gamma', inputs_unit['x']),
('amplitude', outputs_unit['y'])])
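# A minimal illustrative sketch (arbitrary assumed parameter values): half the
# ``fwhm`` away from the peak, the Moffat profile falls to half of
# ``amplitude``, which is exactly what the ``fwhm`` property above encodes.
def _moffat_1d_fwhm_sketch():
    model = Moffat1D(amplitude=4.0, x_0=1.0, gamma=2.0, alpha=3.0)
    half_max_value = model(1.0 + model.fwhm / 2)  # evaluate at x_0 + fwhm / 2
    return bool(np.isclose(half_max_value, 2.0))  # expected: True (amplitude / 2)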
class Moffat2D(Fittable2DModel):
"""
Two dimensional Moffat model.
Parameters
----------
amplitude : float
Amplitude of the model.
x_0 : float
x position of the maximum of the Moffat model.
y_0 : float
y position of the maximum of the Moffat model.
gamma : float
Core width of the Moffat model.
alpha : float
Power index of the Moffat model.
See Also
--------
Gaussian2D, Box2D
Notes
-----
Model formula:
.. math::
f(x, y) = A \\left(1 + \\frac{\\left(x - x_{0}\\right)^{2} +
\\left(y - y_{0}\\right)^{2}}{\\gamma^{2}}\\right)^{- \\alpha}
"""
amplitude = Parameter(default=1)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
gamma = Parameter(default=1)
alpha = Parameter(default=1)
@property
def fwhm(self):
"""
Moffat full width at half maximum.
Derivation of the formula is available in
`this notebook by Yoonsoo Bach <http://nbviewer.jupyter.org/github/ysbach/AO_2017/blob/master/04_Ground_Based_Concept.ipynb#1.2.-Moffat>`_.
"""
return 2.0 * np.abs(self.gamma) * np.sqrt(2.0 ** (1.0 / self.alpha) - 1.0)
@staticmethod
def evaluate(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model function"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
return amplitude * (1 + rr_gg) ** (-alpha)
@staticmethod
def fit_deriv(x, y, amplitude, x_0, y_0, gamma, alpha):
"""Two dimensional Moffat model derivative with respect to parameters"""
rr_gg = ((x - x_0) ** 2 + (y - y_0) ** 2) / gamma ** 2
d_A = (1 + rr_gg) ** (-alpha)
d_x_0 = (2 * amplitude * alpha * d_A * (x - x_0) /
(gamma ** 2 * (1 + rr_gg)))
d_y_0 = (2 * amplitude * alpha * d_A * (y - y_0) /
(gamma ** 2 * (1 + rr_gg)))
d_alpha = -amplitude * d_A * np.log(1 + rr_gg)
d_gamma = (2 * amplitude * alpha * d_A * rr_gg /
(gamma ** 3 * (1 + rr_gg)))
return [d_A, d_x_0, d_y_0, d_gamma, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('gamma', inputs_unit['x']),
('amplitude', outputs_unit['z'])])
class Sersic2D(Fittable2DModel):
r"""
Two dimensional Sersic surface brightness profile.
Parameters
----------
amplitude : float
Surface brightness at r_eff.
r_eff : float
Effective (half-light) radius
n : float
Sersic Index.
x_0 : float, optional
x position of the center.
y_0 : float, optional
y position of the center.
ellip : float, optional
Ellipticity.
theta : float, optional
Rotation angle in radians, counterclockwise from
the positive x-axis.
See Also
--------
Gaussian2D, Moffat2D
Notes
-----
Model formula:
.. math::
I(x,y) = I(r) = I_e\exp\left\{-b_n\left[\left(\frac{r}{r_{e}}\right)^{(1/n)}-1\right]\right\}
The constant :math:`b_n` is defined such that :math:`r_e` contains half the total
luminosity, and can be solved for numerically.
.. math::
\Gamma(2n) = 2\gamma (b_n,2n)
Examples
--------
.. plot::
:include-source:
import numpy as np
from astropy.modeling.models import Sersic2D
import matplotlib.pyplot as plt
x,y = np.meshgrid(np.arange(100), np.arange(100))
mod = Sersic2D(amplitude = 1, r_eff = 25, n=4, x_0=50, y_0=50,
ellip=.5, theta=-1)
img = mod(x, y)
log_img = np.log10(img)
plt.figure()
plt.imshow(log_img, origin='lower', interpolation='nearest',
vmin=-1, vmax=2)
plt.xlabel('x')
plt.ylabel('y')
cbar = plt.colorbar()
cbar.set_label('Log Brightness', rotation=270, labelpad=25)
cbar.set_ticks([-1, 0, 1, 2], update_ticks=True)
plt.show()
References
----------
.. [1] http://ned.ipac.caltech.edu/level5/March05/Graham/Graham2.html
"""
amplitude = Parameter(default=1)
r_eff = Parameter(default=1)
n = Parameter(default=4)
x_0 = Parameter(default=0)
y_0 = Parameter(default=0)
ellip = Parameter(default=0)
theta = Parameter(default=0)
_gammaincinv = None
@classmethod
def evaluate(cls, x, y, amplitude, r_eff, n, x_0, y_0, ellip, theta):
"""Two dimensional Sersic profile function."""
if cls._gammaincinv is None:
try:
from scipy.special import gammaincinv
cls._gammaincinv = gammaincinv
            except ImportError:
raise ImportError('Sersic2D model requires scipy > 0.11.')
bn = cls._gammaincinv(2. * n, 0.5)
a, b = r_eff, (1 - ellip) * r_eff
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
x_maj = (x - x_0) * cos_theta + (y - y_0) * sin_theta
x_min = -(x - x_0) * sin_theta + (y - y_0) * cos_theta
z = np.sqrt((x_maj / a) ** 2 + (x_min / b) ** 2)
return amplitude * np.exp(-bn * (z ** (1 / n) - 1))
@property
def input_units(self):
if self.x_0.unit is None:
return None
else:
return {'x': self.x_0.unit,
'y': self.y_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
# Note that here we need to make sure that x and y are in the same
# units otherwise this can lead to issues since rotation is not well
# defined.
if inputs_unit['x'] != inputs_unit['y']:
raise UnitsError("Units of 'x' and 'y' inputs should match")
return OrderedDict([('x_0', inputs_unit['x']),
('y_0', inputs_unit['x']),
('r_eff', inputs_unit['x']),
('theta', u.rad),
('amplitude', outputs_unit['z'])])
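# A minimal illustrative sketch: the constant b_n from the Notes above is
# obtained by inverting the regularized incomplete gamma function, exactly as
# ``Sersic2D.evaluate`` does. Requires scipy; for n = 4 the classical value is
# approximately 7.669.
def _sersic_bn_sketch(n=4.0):
    from scipy.special import gammaincinv
    return gammaincinv(2 * n, 0.5)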
|
599a6692e65a1b1e9f3498725f03be219cec2b2dbf53972d2be2f0950e8a5dde | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""General purpose timer related functions."""
# STDLIB
import time
import warnings
from collections import OrderedDict
from collections.abc import Iterable
from functools import partial, wraps
# THIRD-PARTY
import numpy as np
# LOCAL
from astropy import units as u
from astropy import log
from astropy import modeling
from .exceptions import AstropyUserWarning
__all__ = ['timefunc', 'RunTimePredictor']
__doctest_skip__ = ['timefunc']
def timefunc(num_tries=1, verbose=True):
"""Decorator to time a function or method.
Parameters
----------
num_tries : int, optional
Number of calls to make. Timer will take the
average run time.
verbose : bool, optional
Extra log information.
Returns
-------
tt : float
Average run time in seconds.
result
Output(s) from the function.
Examples
--------
    To add a timer that times `numpy.log` 100 times with
    verbose output::
import numpy as np
from astropy.utils.timer import timefunc
@timefunc(100)
def timed_log(x):
return np.log(x)
To run the decorated function above:
>>> t, y = timed_log(100)
INFO: timed_log took 9.29832458496e-06 s on AVERAGE for 100 call(s). [...]
>>> t
9.298324584960938e-06
>>> y
4.6051701859880918
"""
def real_decorator(function):
@wraps(function)
def wrapper(*args, **kwargs):
ts = time.time()
for i in range(num_tries):
result = function(*args, **kwargs)
te = time.time()
tt = (te - ts) / num_tries
if verbose: # pragma: no cover
log.info('{0} took {1} s on AVERAGE for {2} call(s).'.format(
function.__name__, tt, num_tries))
return tt, result
return wrapper
return real_decorator
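# A minimal illustrative sketch (made-up target function): time a call quietly
# and keep both the average run time and the function's own return value, as
# described in the docstring above.
def _timefunc_sketch():
    @timefunc(num_tries=5, verbose=False)
    def _square(x):
        return x * x

    avg_seconds, value = _square(12)
    return avg_seconds, value  # value == 144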
class RunTimePredictor:
"""Class to predict run time.
    .. note:: Only predicts run time for a single varying numeric input parameter.
Parameters
----------
func : function
Function to time.
args : tuple
Fixed positional argument(s) for the function.
kwargs : dict
Fixed keyword argument(s) for the function.
Examples
--------
>>> from astropy.utils.timer import RunTimePredictor
Set up a predictor for :math:`10^{x}`:
>>> p = RunTimePredictor(pow, 10)
Give it baseline data to use for prediction and
get the function output values:
>>> p.time_func(range(10, 1000, 200))
>>> for input, result in sorted(p.results.items()):
... print("pow(10, {0})\\n{1}".format(input, result))
pow(10, 10)
10000000000
pow(10, 210)
10000000000...
pow(10, 410)
10000000000...
pow(10, 610)
10000000000...
pow(10, 810)
10000000000...
Fit a straight line assuming :math:`\\text{arg}^{1}` relationship
(coefficients are returned):
>>> p.do_fit() # doctest: +SKIP
array([1.16777420e-05, 1.00135803e-08])
Predict run time for :math:`10^{5000}`:
>>> p.predict_time(5000) # doctest: +SKIP
6.174564361572262e-05
Plot the prediction:
>>> p.plot(xlabeltext='Power of 10') # doctest: +SKIP
.. image:: /_static/timer_prediction_pow10.png
:width: 450px
:alt: Example plot from `astropy.utils.timer.RunTimePredictor`
When the changing argument is not the last, e.g.,
:math:`x^{2}`, something like this might work:
>>> p = RunTimePredictor(lambda x: pow(x, 2))
>>> p.time_func([2, 3, 5])
>>> sorted(p.results.items())
[(2, 4), (3, 9), (5, 25)]
"""
def __init__(self, func, *args, **kwargs):
self._funcname = func.__name__
self._pfunc = partial(func, *args, **kwargs)
self._cache_good = OrderedDict()
self._cache_bad = []
self._cache_est = OrderedDict()
self._cache_out = OrderedDict()
self._fit_func = None
self._power = None
@property
def results(self):
"""Function outputs from `time_func`.
A dictionary mapping input arguments (fixed arguments
are not included) to their respective output values.
"""
return self._cache_out
@timefunc(num_tries=1, verbose=False)
def _timed_pfunc(self, arg):
"""Run partial func once for single arg and time it."""
return self._pfunc(arg)
def _cache_time(self, arg):
"""Cache timing results without repetition."""
if arg not in self._cache_good and arg not in self._cache_bad:
try:
result = self._timed_pfunc(arg)
except Exception as e:
warnings.warn(str(e), AstropyUserWarning)
self._cache_bad.append(arg)
else:
self._cache_good[arg] = result[0] # Run time
self._cache_out[arg] = result[1] # Function output
def time_func(self, arglist):
"""Time the partial function for a list of single args
and store run time in a cache. This forms a baseline for
the prediction.
This also stores function outputs in `results`.
Parameters
----------
arglist : list of numbers
List of input arguments to time.
"""
if not isinstance(arglist, Iterable):
arglist = [arglist]
# Preserve arglist order
for arg in arglist:
self._cache_time(arg)
# FUTURE: Implement N^x * O(log(N)) fancy fitting.
def do_fit(self, model=None, fitter=None, power=1, min_datapoints=3):
"""Fit a function to the lists of arguments and
their respective run time in the cache.
By default, this does a linear least-square fitting
to a straight line on run time w.r.t. argument values
raised to the given power, and returns the optimal
intercept and slope.
Parameters
----------
model : `astropy.modeling.Model`
Model for the expected trend of run time (Y-axis)
w.r.t. :math:`\\text{arg}^{\\text{power}}` (X-axis).
If `None`, will use `~astropy.modeling.polynomial.Polynomial1D`
with ``degree=1``.
fitter : `astropy.modeling.fitting.Fitter`
Fitter for the given model to extract optimal coefficient values.
If `None`, will use `~astropy.modeling.fitting.LinearLSQFitter`.
power : int, optional
Power of values to fit.
min_datapoints : int, optional
Minimum number of data points required for fitting.
They can be built up with `time_func`.
Returns
-------
a : array-like
Fitted `~astropy.modeling.FittableModel` parameters.
Raises
------
ValueError
Insufficient data points for fitting.
ModelsError
Invalid model or fitter.
"""
# Reset related attributes
self._power = power
self._cache_est = OrderedDict()
x_arr = np.array(list(self._cache_good.keys()))
if x_arr.size < min_datapoints:
raise ValueError('requires {0} points but has {1}'.format(
min_datapoints, x_arr.size))
if model is None:
model = modeling.models.Polynomial1D(1)
elif not isinstance(model, modeling.core.Model):
raise modeling.fitting.ModelsError(
'{0} is not a model.'.format(model))
if fitter is None:
fitter = modeling.fitting.LinearLSQFitter()
elif not isinstance(fitter, modeling.fitting.Fitter):
raise modeling.fitting.ModelsError(
'{0} is not a fitter.'.format(fitter))
self._fit_func = fitter(
model, x_arr**power, list(self._cache_good.values()))
return self._fit_func.parameters
def predict_time(self, arg):
"""Predict run time for given argument.
If prediction is already cached, cached value is returned.
Parameters
----------
arg : number
Input argument to predict run time for.
Returns
-------
t_est : float
Estimated run time for given argument.
Raises
------
RuntimeError
No fitted data for prediction.
"""
if arg in self._cache_est:
t_est = self._cache_est[arg]
else:
if self._fit_func is None:
raise RuntimeError('no fitted data for prediction')
t_est = self._fit_func(arg**self._power)
self._cache_est[arg] = t_est
return t_est
def plot(self, xscale='linear', yscale='linear', xlabeltext='args',
save_as=''): # pragma: no cover
"""Plot prediction.
.. note:: Uses `matplotlib <http://matplotlib.org/>`_.
Parameters
----------
xscale, yscale : {'linear', 'log', 'symlog'}
Scaling for `matplotlib.axes.Axes`.
xlabeltext : str, optional
Text for X-label.
save_as : str, optional
Save plot as given filename.
Raises
------
RuntimeError
Insufficient data for plotting.
"""
import matplotlib.pyplot as plt
# Actual data
x_arr = sorted(self._cache_good)
y_arr = np.array([self._cache_good[x] for x in x_arr])
if len(x_arr) <= 1:
raise RuntimeError('insufficient data for plotting')
# Auto-ranging
qmean = y_arr.mean() * u.second
for cur_u in (u.minute, u.second, u.millisecond, u.microsecond,
u.nanosecond):
val = qmean.to_value(cur_u)
if 1000 > val >= 1:
break
y_arr = (y_arr * u.second).to_value(cur_u)
fig, ax = plt.subplots()
ax.plot(x_arr, y_arr, 'kx-', label='Actual')
# Fitted data
if self._fit_func is not None:
x_est = list(self._cache_est.keys())
y_est = (np.array(list(self._cache_est.values())) *
u.second).to_value(cur_u)
ax.scatter(x_est, y_est, marker='o', c='r', label='Predicted')
x_fit = np.array(sorted(x_arr + x_est))
y_fit = (self._fit_func(x_fit**self._power) *
u.second).to_value(cur_u)
ax.plot(x_fit, y_fit, 'b--', label='Fit')
ax.set_xscale(xscale)
ax.set_yscale(yscale)
ax.set_xlabel(xlabeltext)
ax.set_ylabel('Run time ({})'.format(cur_u.to_string()))
ax.set_title(self._funcname)
ax.legend(loc='best', numpoints=1)
plt.draw()
if save_as:
plt.savefig(save_as)
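# A minimal illustrative sketch (made-up workload): build a timing baseline,
# fit the default straight line, then extrapolate to an unseen input size.
def _run_time_predictor_sketch():
    predictor = RunTimePredictor(lambda n: sum(range(n)))
    predictor.time_func([10000, 50000, 100000])  # baseline measurements
    coefficients = predictor.do_fit()            # Polynomial1D(1) + LinearLSQFitter
    return coefficients, predictor.predict_time(500000)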
|
e156ade79f5375020c6e0caac93b968b8f167f9ea4c9bec7b19aa390e727579d | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
__doctest_skip__ = ['quantity_support']
def quantity_support(format='latex_inline'):
"""
Enable support for plotting `astropy.units.Quantity` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.quantity_support():
... plt.figure()
... plt.plot([1, 2, 3] * u.m)
[...]
... plt.plot([101, 125, 150] * u.cm)
[...]
... plt.draw()
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to ``latex_inline``.
"""
from astropy import units as u
# import Angle just so we have a more or less complete list of Quantity
# subclasses loaded - matplotlib needs them all separately!
# NOTE: in matplotlib >=3.2, subclasses will be recognized automatically,
# and once that becomes our minimum version, we can remove this,
# adding just u.Quantity itself to the registry.
from astropy.coordinates import Angle # noqa
from matplotlib import units
from matplotlib import ticker
# Get all subclass for Quantity, since matplotlib checks on class,
# not subclass.
def all_issubclass(cls):
return {cls}.union(
[s for c in cls.__subclasses__() for s in all_issubclass(c)])
def rad_fn(x, pos=None):
n = int((x / np.pi) * 2.0 + 0.25)
if n == 0:
return '0'
elif n == 1:
return 'π/2'
elif n == 2:
return 'π'
elif n % 2 == 0:
return '{0}π'.format(n / 2)
else:
return '{0}π/2'.format(n)
class MplQuantityConverter(units.ConversionInterface):
_all_issubclass_quantity = all_issubclass(u.Quantity)
def __init__(self):
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = {}
for cls in self._all_issubclass_quantity:
self._original_converter[cls] = units.registry.get(cls)
units.registry[cls] = self
@staticmethod
def axisinfo(unit, axis):
if unit == u.radian:
return units.AxisInfo(
majloc=ticker.MultipleLocator(base=np.pi/2),
majfmt=ticker.FuncFormatter(rad_fn),
label=unit.to_string(),
)
elif unit == u.degree:
return units.AxisInfo(
majloc=ticker.AutoLocator(),
majfmt=ticker.FormatStrFormatter('%i°'),
label=unit.to_string(),
)
elif unit is not None:
return units.AxisInfo(label=unit.to_string(format))
return None
@staticmethod
def convert(val, unit, axis):
if isinstance(val, u.Quantity):
return val.to_value(unit)
elif isinstance(val, list) and isinstance(val[0], u.Quantity):
return [v.to_value(unit) for v in val]
else:
return val
@staticmethod
def default_units(x, axis):
if hasattr(x, 'unit'):
return x.unit
return None
def __enter__(self):
return self
def __exit__(self, type, value, tb):
for cls in self._all_issubclass_quantity:
if self._original_converter[cls] is None:
del units.registry[cls]
else:
units.registry[cls] = self._original_converter[cls]
return MplQuantityConverter()
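# A minimal illustrative sketch: the converter can also be enabled globally,
# outside the ``with`` block shown in the docstring; the returned object can
# later restore matplotlib's previous unit registry.
def _quantity_support_sketch():
    import matplotlib.pyplot as plt
    from astropy import units as u

    converter = quantity_support()
    fig, ax = plt.subplots()
    ax.plot([1, 2, 3] * u.m)              # the axis picks up the unit label
    converter.__exit__(None, None, None)  # undo the registry changes
    return fig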
|
83bf66bfd0b512c6f4189c0d872f66483f14db2a029b91a515d9e0c646a11489 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from datetime import datetime
from astropy.time import Time
from astropy import units as u
__all__ = ['time_support']
__doctest_requires__ = {'time_support': ['matplotlib']}
UNSUPPORTED_FORMATS = ('datetime', 'datetime64')
YMDHMS_FORMATS = ('fits', 'iso', 'isot', 'yday')
STR_FORMATS = YMDHMS_FORMATS + ('byear_str', 'jyear_str')
def time_support(*, scale=None, format=None, simplify=True):
"""
Enable support for plotting `astropy.time.Time` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.time_support(): # doctest: +IGNORE_OUTPUT
... plt.figure()
... plt.plot(Time(['2016-03-22T12:30:31', '2016-03-22T12:30:38', '2016-03-22T12:34:40']))
... plt.draw()
Parameters
----------
scale : str, optional
The time scale to use for the times on the axis. If not specified,
the scale of the first Time object passed to Matplotlib is used.
format : str, optional
The time format to use for the times on the axis. If not specified,
the format of the first Time object passed to Matplotlib is used.
simplify : bool, optional
If possible, simplify labels, e.g. by removing 00:00:00.000 times from
ISO strings if all labels fall on that time.
"""
import matplotlib.units as units
from matplotlib.ticker import MaxNLocator, ScalarFormatter
from astropy.visualization.wcsaxes.utils import select_step_hour, select_step_scalar
class AstropyTimeLocator(MaxNLocator):
# Note: we default to AutoLocator since many time formats
# can just use this.
def __init__(self, converter, *args, **kwargs):
kwargs['nbins'] = 4
super().__init__(*args, **kwargs)
self._converter = converter
def tick_values(self, vmin, vmax):
# Where we put the ticks depends on the format we are using
if self._converter.format in YMDHMS_FORMATS:
# If we are here, we need to check what the range of values
# is and decide how to find tick locations accordingly
vrange = vmax - vmin
if (self._converter.format != 'yday' and vrange > 31) or vrange > 366: # greater than a month
# We need to be careful here since not all years and months have
# the same length
# Start off by converting the values from the range to
# datetime objects, so that we can easily extract the year and
# month.
tmin = Time(vmin, scale=self._converter.scale, format='mjd').datetime
tmax = Time(vmax, scale=self._converter.scale, format='mjd').datetime
# Find the range of years
ymin = tmin.year
ymax = tmax.year
if ymax > ymin + 1: # greater than a year
# Find the step we want to use
ystep = int(select_step_scalar(max(1, (ymax - ymin) / 3)))
ymin = ystep * (ymin // ystep)
# Generate the years for these steps
times = []
for year in range(ymin, ymax + 1, ystep):
times.append(datetime(year=year, month=1, day=1))
else: # greater than a month but less than a year
mmin = tmin.month
mmax = tmax.month + 12 * (ymax - ymin)
mstep = int(select_step_scalar(max(1, (mmax - mmin) / 3)))
mmin = mstep * max(1, mmin // mstep)
# Generate the months for these steps
times = []
for month in range(mmin, mmax + 1, mstep):
                            times.append(datetime(year=ymin + (month - 1) // 12,
                                                  month=(month - 1) % 12 + 1,
                                                  day=1))
# Convert back to MJD
values = Time(times, scale=self._converter.scale).mjd
elif vrange > 1: # greater than a day
self.set_params(steps=[1, 2, 5, 10])
values = super().tick_values(vmin, vmax)
else:
# Determine ideal step
dv = (vmax - vmin) / 3 * 24 << u.hourangle
# And round to nearest sensible value
dv = select_step_hour(dv).to_value(u.hourangle) / 24
# Determine tick locations
imin = np.ceil(vmin / dv)
imax = np.floor(vmax / dv)
values = np.arange(imin, imax + 1, dtype=np.int64) * dv
else:
values = super().tick_values(vmin, vmax)
# Get rid of values outside of the input interval
values = values[(values >= vmin) & (values <= vmax)]
return values
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
class AstropyTimeFormatter(ScalarFormatter):
def __init__(self, converter, *args, **kwargs):
super().__init__(*args, **kwargs)
self._converter = converter
self.set_useOffset(False)
self.set_scientific(False)
def __call__(self, value, pos=None):
# Needed for Matplotlib <3.1
if self._converter.format in STR_FORMATS:
return self.format_ticks([value])[0]
else:
return super().__call__(value, pos=pos)
def format_ticks(self, values):
if len(values) == 0:
return []
if self._converter.format in YMDHMS_FORMATS:
times = Time(values, format='mjd', scale=self._converter.scale)
formatted = getattr(times, self._converter.format)
if self._converter.simplify:
if self._converter.format in ('fits', 'iso', 'isot'):
if all([x.endswith('00:00:00.000') for x in formatted]):
split = ' ' if self._converter.format == 'iso' else 'T'
formatted = [x.split(split)[0] for x in formatted]
elif self._converter.format == 'yday':
if all([x.endswith(':001:00:00:00.000') for x in formatted]):
formatted = [x.split(':', 1)[0] for x in formatted]
return formatted
elif self._converter.format == 'byear_str':
return Time(values, format='byear', scale=self._converter.scale).byear_str
elif self._converter.format == 'jyear_str':
return Time(values, format='jyear', scale=self._converter.scale).jyear_str
else:
return super().format_ticks(values)
class MplTimeConverter(units.ConversionInterface):
def __init__(self, scale=None, format=None, simplify=None):
super().__init__()
self.format = format
self.scale = scale
self.simplify = simplify
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = units.registry.get(Time)
units.registry[Time] = self
@property
def format(self):
return self._format
@format.setter
def format(self, value):
if value in UNSUPPORTED_FORMATS:
raise ValueError('time_support does not support format={0}'.format(value))
self._format = value
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._original_converter is None:
del units.registry[Time]
else:
units.registry[Time] = self._original_converter
def default_units(self, x, axis):
if isinstance(x, tuple):
x = x[0]
if self.format is None:
self.format = x.format
if self.scale is None:
self.scale = x.scale
return 'astropy_time'
def convert(self, value, unit, axis):
"""
Convert a Time value to a scalar or array.
"""
# For Matplotlib < 2.2
if not isinstance(value, Time):
return value
scaled = getattr(value, self.scale)
if self.format in YMDHMS_FORMATS:
return scaled.mjd
elif self.format == 'byear_str':
return scaled.byear
elif self.format == 'jyear_str':
return scaled.jyear
else:
return getattr(scaled, self.format)
def axisinfo(self, unit, axis):
"""
Return major and minor tick locators and formatters.
"""
majloc = AstropyTimeLocator(self)
majfmt = AstropyTimeFormatter(self)
return units.AxisInfo(majfmt=majfmt,
majloc=majloc,
label='Time ({0})'.format(self.scale))
return MplTimeConverter(scale=scale, format=format, simplify=simplify)
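# A minimal illustrative sketch (arbitrary example times): fix the axis scale
# and format up front instead of inheriting them from the first Time instance
# that matplotlib converts.
def _time_support_sketch():
    import matplotlib.pyplot as plt

    with time_support(scale='utc', format='iso', simplify=True):
        fig, ax = plt.subplots()
        ax.plot(Time(['2016-03-22T12:30:31', '2016-03-22T12:30:38']), [1, 2])
    return fig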
|
342a79400cbce337515bd2f96e8eed484d519b9995be99b4d0e32c012b2b0937 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Under the hood, there are 3 separate classes that perform different
parts of the transformation:
- `~astropy.wcs.Wcsprm`: Is a direct wrapper of the core WCS
functionality in `wcslib`_. (This includes TPV and TPD
polynomial distortion, but not SIP distortion).
- `~astropy.wcs.Sip`: Handles polynomial distortion as defined in the
`SIP`_ convention.
- `~astropy.wcs.DistortionLookupTable`: Handles `distortion paper`_
lookup tables.
Additionally, the class `WCS` aggregates all of these transformations
together in a pipeline:
- Detector to image plane correction (by a pair of
`~astropy.wcs.DistortionLookupTable` objects).
- `SIP`_ distortion correction (by an underlying `~astropy.wcs.Sip`
object)
- `distortion paper`_ table-lookup correction (by a pair of
`~astropy.wcs.DistortionLookupTable` objects).
- `wcslib`_ WCS transformation (by a `~astropy.wcs.Wcsprm` object)
"""
# STDLIB
import copy
import io
import itertools
import os
import re
import textwrap
import warnings
import builtins
# THIRD-PARTY
import numpy as np
# LOCAL
from astropy import log
from astropy.io import fits
from . import docstrings
from . import _wcs
from astropy.utils.compat import possible_filename
from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning, AstropyDeprecationWarning
# Mix-in class that provides the APE 14 API
from .wcsapi.fitswcs import FITSWCSAPIMixin, SlicedFITSWCS
__all__ = ['FITSFixedWarning', 'WCS', 'find_all_wcs',
'DistortionLookupTable', 'Sip', 'Tabprm', 'Wcsprm',
'WCSBase', 'validate', 'WcsError', 'SingularMatrixError',
'InconsistentAxisTypesError', 'InvalidTransformError',
'InvalidCoordinateError', 'NoSolutionError',
'InvalidSubimageSpecificationError', 'NoConvergence',
'NonseparableSubimageCoordinateSystemError',
'NoWcsKeywordsFoundError', 'InvalidTabularParametersError']
__doctest_skip__ = ['WCS.all_world2pix']
NAXIS_DEPRECATE_MESSAGE = """
Private attributes "_naxis1" and "_naxis2" have been deprecated since v3.1.
Instead use the "pixel_shape" property which returns a list of NAXISj keyword values.
"""
if _wcs is not None:
_parsed_version = _wcs.__version__.split('.')
if int(_parsed_version[0]) == 5 and int(_parsed_version[1]) < 8:
        raise ImportError(
            "astropy.wcs is built with wcslib {0}, but only versions 5.8 and "
            "later on the 5.x series are known to work. The version of wcslib "
            "that ships with astropy may be used.".format(_wcs.__version__))
if not _wcs._sanity_check():
raise RuntimeError(
"astropy.wcs did not pass its sanity check for your build "
"on your platform.")
WCSBase = _wcs._Wcs
DistortionLookupTable = _wcs.DistortionLookupTable
Sip = _wcs.Sip
Wcsprm = _wcs.Wcsprm
Tabprm = _wcs.Tabprm
WcsError = _wcs.WcsError
SingularMatrixError = _wcs.SingularMatrixError
InconsistentAxisTypesError = _wcs.InconsistentAxisTypesError
InvalidTransformError = _wcs.InvalidTransformError
InvalidCoordinateError = _wcs.InvalidCoordinateError
NoSolutionError = _wcs.NoSolutionError
InvalidSubimageSpecificationError = _wcs.InvalidSubimageSpecificationError
NonseparableSubimageCoordinateSystemError = _wcs.NonseparableSubimageCoordinateSystemError
NoWcsKeywordsFoundError = _wcs.NoWcsKeywordsFoundError
InvalidTabularParametersError = _wcs.InvalidTabularParametersError
# Copy all the constants from the C extension into this module's namespace
for key, val in _wcs.__dict__.items():
if key.startswith(('WCSSUB', 'WCSHDR', 'WCSHDO')):
locals()[key] = val
__all__.append(key)
else:
WCSBase = object
Wcsprm = object
DistortionLookupTable = object
Sip = object
Tabprm = object
WcsError = None
SingularMatrixError = None
InconsistentAxisTypesError = None
InvalidTransformError = None
InvalidCoordinateError = None
NoSolutionError = None
InvalidSubimageSpecificationError = None
NonseparableSubimageCoordinateSystemError = None
NoWcsKeywordsFoundError = None
InvalidTabularParametersError = None
# Additional relax bit flags
WCSHDO_SIP = 0x80000
# Regular expression defining SIP keywords. It matches a keyword that starts
# with A or B, optionally followed by P, followed by an underscore and a number
# in the range 0-19, followed by an underscore and another number in the range
# 0-19. The keyword optionally ends with a capital letter.
SIP_KW = re.compile('''^[AB]P?_1?[0-9]_1?[0-9][A-Z]?$''')
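# For example, 'A_0_2', 'BP_1_1' and 'B_10_3' match SIP_KW, while 'PV1_1' and
# 'CTYPE1' do not.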
def _parse_keysel(keysel):
keysel_flags = 0
if keysel is not None:
for element in keysel:
if element.lower() == 'image':
keysel_flags |= _wcs.WCSHDR_IMGHEAD
elif element.lower() == 'binary':
keysel_flags |= _wcs.WCSHDR_BIMGARR
elif element.lower() == 'pixel':
keysel_flags |= _wcs.WCSHDR_PIXLIST
else:
raise ValueError(
"keysel must be a list of 'image', 'binary' " +
"and/or 'pixel'")
else:
keysel_flags = -1
return keysel_flags
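# For example, _parse_keysel(['image', 'binary']) ORs the WCSHDR_IMGHEAD and
# WCSHDR_BIMGARR flag bits together, while _parse_keysel(None) returns -1.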
class NoConvergence(Exception):
"""
An error class used to report non-convergence and/or divergence
of numerical methods. It is used to report errors in the
iterative solution used by
the :py:meth:`~astropy.wcs.WCS.all_world2pix`.
Attributes
----------
best_solution : `numpy.ndarray`
Best solution achieved by the numerical method.
accuracy : `numpy.ndarray`
Accuracy of the ``best_solution``.
niter : `int`
Number of iterations performed by the numerical method
to compute ``best_solution``.
divergent : None, `numpy.ndarray`
Indices of the points in ``best_solution`` array
for which the solution appears to be divergent. If the
solution does not diverge, ``divergent`` will be set to `None`.
slow_conv : None, `numpy.ndarray`
Indices of the solutions in ``best_solution`` array
for which the solution failed to converge within the
specified maximum number of iterations. If there are no
non-converging solutions (i.e., if the required accuracy
has been achieved for all input data points)
then ``slow_conv`` will be set to `None`.
"""
def __init__(self, *args, best_solution=None, accuracy=None, niter=None,
divergent=None, slow_conv=None, **kwargs):
super().__init__(*args)
self.best_solution = best_solution
self.accuracy = accuracy
self.niter = niter
self.divergent = divergent
self.slow_conv = slow_conv
if kwargs:
warnings.warn("Function received unexpected arguments ({}) these "
"are ignored but will raise an Exception in the "
"future.".format(list(kwargs)),
AstropyDeprecationWarning)
class FITSFixedWarning(AstropyWarning):
"""
The warning raised when the contents of the FITS header have been
modified to be standards compliant.
"""
pass
class WCS(FITSWCSAPIMixin, WCSBase):
"""WCS objects perform standard WCS transformations, and correct for
`SIP`_ and `distortion paper`_ table-lookup transformations, based
on the WCS keywords and supplementary data read from a FITS file.
See also: http://docs.astropy.org/en/stable/wcs/
Parameters
----------
header : astropy.io.fits header object, Primary HDU, Image HDU, string, dict-like, or None, optional
If *header* is not provided or None, the object will be
initialized to default values.
fobj : An astropy.io.fits file (hdulist) object, optional
It is needed when header keywords point to a `distortion
paper`_ lookup table stored in a different extension.
key : str, optional
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the
``\"a\"`` part of the ``CTYPEia`` cards. *key* may only be
provided if *header* is also provided.
minerr : float, optional
The minimum value a distortion correction must have in order
to be applied. If the value of ``CQERRja`` is smaller than
*minerr*, the corresponding distortion is not applied.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions
of the WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`relaxread` for details.
naxis : int or sequence, optional
Extracts specific coordinate axes using
:meth:`~astropy.wcs.Wcsprm.sub`. If a header is provided, and
*naxis* is not ``None``, *naxis* will be passed to
:meth:`~astropy.wcs.Wcsprm.sub` in order to select specific
axes from the header. See :meth:`~astropy.wcs.Wcsprm.sub` for
more details about this parameter.
keysel : sequence of flags, optional
A sequence of flags used to select the keyword types
considered by wcslib. When ``None``, only the standard image
header keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following
strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
colsel : sequence of int, optional
A sequence of table column numbers used to restrict the WCS
transformations considered to only those pertaining to the
specified columns. If `None`, there is no restriction.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting object to fix any non-standard uses in the
header. `FITSFixedWarning` Warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid key.
KeyError
Key not found in FITS header.
ValueError
Lookup table distortion present in the header but *fobj* was
not provided.
Notes
-----
1. astropy.wcs supports arbitrary *n* dimensions for the core WCS
(the transformations handled by WCSLIB). However, the
`distortion paper`_ lookup table and `SIP`_ distortions must be
two dimensional. Therefore, if you try to create a WCS object
where the core WCS has a different number of dimensions than 2
and that object also contains a `distortion paper`_ lookup
table or `SIP`_ distortion, a `ValueError`
exception will be raised. To avoid this, consider using the
*naxis* kwarg to select two dimensions from the core WCS.
2. The number of coordinate axes in the transformation is not
determined directly from the ``NAXIS`` keyword but instead from
the highest of:
- ``NAXIS`` keyword
- ``WCSAXESa`` keyword
- The highest axis number in any parameterized WCS keyword.
The keyvalue, as well as the keyword, must be
             syntactically valid; otherwise, it will not be considered.
If none of these keyword types is present, i.e. if the header
only contains auxiliary WCS keywords for a particular
coordinate representation, then no coordinate description is
constructed for it.
The number of axes, which is set as the ``naxis`` member, may
differ for different coordinate representations of the same
image.
3. When the header includes duplicate keywords, in most cases the
last encountered is used.
4. `~astropy.wcs.Wcsprm.set` is called immediately after
construction, so any invalid keywords or transformations will
be raised by the constructor, not when subsequently calling a
transformation method.
"""
def __init__(self, header=None, fobj=None, key=' ', minerr=0.0,
relax=True, naxis=None, keysel=None, colsel=None,
fix=True, translate_units='', _do_set=True):
close_fds = []
if header is None:
if naxis is None:
naxis = 2
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, naxis=naxis)
self.naxis = wcsprm.naxis
# Set some reasonable defaults.
det2im = (None, None)
cpdis = (None, None)
sip = None
else:
keysel_flags = _parse_keysel(keysel)
if isinstance(header, (str, bytes)):
try:
is_path = (possible_filename(header) and
os.path.exists(header))
except (OSError, ValueError):
is_path = False
if is_path:
if fobj is not None:
raise ValueError(
"Can not provide both a FITS filename to "
"argument 1 and a FITS file object to argument 2")
fobj = fits.open(header)
close_fds.append(fobj)
header = fobj[0].header
elif isinstance(header, fits.hdu.image._ImageBaseHDU):
header = header.header
elif not isinstance(header, fits.Header):
try:
# Accept any dict-like object
orig_header = header
header = fits.Header()
for dict_key in orig_header.keys():
header[dict_key] = orig_header[dict_key]
except TypeError:
raise TypeError(
"header must be a string, an astropy.io.fits.Header "
"object, or a dict-like object")
if isinstance(header, fits.Header):
header_string = header.tostring().rstrip()
else:
header_string = header
# Importantly, header is a *copy* of the passed-in header
# because we will be modifying it
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
try:
tmp_header = fits.Header.fromstring(header_string)
self._remove_sip_kw(tmp_header)
tmp_header_bytes = tmp_header.tostring().rstrip()
if isinstance(tmp_header_bytes, str):
tmp_header_bytes = tmp_header_bytes.encode('ascii')
tmp_wcsprm = _wcs.Wcsprm(header=tmp_header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel, warnings=False)
except _wcs.NoWcsKeywordsFoundError:
est_naxis = 0
else:
if naxis is not None:
try:
tmp_wcsprm.sub(naxis)
except ValueError:
pass
est_naxis = tmp_wcsprm.naxis
else:
est_naxis = 2
header = fits.Header.fromstring(header_string)
if est_naxis == 0:
est_naxis = 2
self.naxis = est_naxis
det2im = self._read_det2im_kw(header, fobj, err=minerr)
cpdis = self._read_distortion_kw(
header, fobj, dist='CPDIS', err=minerr)
sip = self._read_sip_kw(header, wcskey=key)
self._remove_sip_kw(header)
header_string = header.tostring()
header_string = header_string.replace('END' + ' ' * 77, '')
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
header_string = header_string.decode('ascii')
try:
wcsprm = _wcs.Wcsprm(header=header_bytes, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel)
except _wcs.NoWcsKeywordsFoundError:
# The header may have SIP or distortions, but no core
# WCS. That isn't an error -- we want a "default"
# (identity) core Wcs transformation in that case.
if colsel is None:
wcsprm = _wcs.Wcsprm(header=None, key=key,
relax=relax, keysel=keysel_flags,
colsel=colsel)
else:
raise
if naxis is not None:
wcsprm = wcsprm.sub(naxis)
self.naxis = wcsprm.naxis
if (wcsprm.naxis != 2 and
(det2im[0] or det2im[1] or cpdis[0] or cpdis[1] or sip)):
raise ValueError(
"""
FITS WCS distortion paper lookup tables and SIP distortions only work
in 2 dimensions. However, WCSLIB has detected {0} dimensions in the
core WCS keywords. To use core WCS in conjunction with FITS WCS
distortion paper lookup tables or SIP distortion, you must select or
reduce these to 2 dimensions using the naxis kwarg.
""".format(wcsprm.naxis))
header_naxis = header.get('NAXIS', None)
if header_naxis is not None and header_naxis < wcsprm.naxis:
warnings.warn(
"The WCS transformation has more axes ({0:d}) than the "
"image it is associated with ({1:d})".format(
wcsprm.naxis, header_naxis), FITSFixedWarning)
self._get_naxis(header)
WCSBase.__init__(self, sip, cpdis, wcsprm, det2im)
if fix:
self.fix(translate_units=translate_units)
if _do_set:
self.wcs.set()
for fd in close_fds:
fd.close()
self._pixel_bounds = None
def __copy__(self):
new_copy = self.__class__()
WCSBase.__init__(new_copy, self.sip,
(self.cpdis1, self.cpdis2),
self.wcs,
(self.det2im1, self.det2im2))
new_copy.__dict__.update(self.__dict__)
return new_copy
def __deepcopy__(self, memo):
from copy import deepcopy
new_copy = self.__class__()
new_copy.naxis = deepcopy(self.naxis, memo)
WCSBase.__init__(new_copy, deepcopy(self.sip, memo),
(deepcopy(self.cpdis1, memo),
deepcopy(self.cpdis2, memo)),
deepcopy(self.wcs, memo),
(deepcopy(self.det2im1, memo),
deepcopy(self.det2im2, memo)))
for key, val in self.__dict__.items():
new_copy.__dict__[key] = deepcopy(val, memo)
return new_copy
def copy(self):
"""
Return a shallow copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
.. warning::
Use `deepcopy` instead of `copy` unless you know why you need a
shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""
Return a deep copy of the object.
Convenience method so user doesn't have to import the
:mod:`copy` stdlib module.
"""
return copy.deepcopy(self)
def sub(self, axes=None):
copy = self.deepcopy()
copy.wcs = self.wcs.sub(axes)
copy.naxis = copy.wcs.naxis
return copy
if _wcs is not None:
sub.__doc__ = _wcs.Wcsprm.sub.__doc__
def _fix_scamp(self):
"""
Remove SCAMP's PVi_m distortion parameters if SIP distortion parameters
are also present. Some projects (e.g., Palomar Transient Factory)
convert SCAMP's distortion parameters (which abuse the PVi_m cards) to
SIP. However, wcslib gets confused by the presence of both SCAMP and
SIP distortion parameters.
See https://github.com/astropy/astropy/issues/299.
"""
# Nothing to be done if no WCS attached
if self.wcs is None:
return
# Nothing to be done if no PV parameters attached
pv = self.wcs.get_pv()
if not pv:
return
# Nothing to be done if axes don't use SIP distortion parameters
if self.sip is None:
return
# Nothing to be done if any radial terms are present...
# Loop over list to find any radial terms.
# Certain values of the `j' index are used for storing
# radial terms; refer to Equation (1) in
# <http://web.ipac.caltech.edu/staff/shupe/reprints/SIP_to_PV_SPIE2012.pdf>.
pv = np.asarray(pv)
# Loop over distinct values of `i' index
for i in set(pv[:, 0]):
# Get all values of `j' index for this value of `i' index
js = set(pv[:, 1][pv[:, 0] == i])
# Find max value of `j' index
max_j = max(js)
for j in (3, 11, 23, 39):
if j < max_j and j in js:
return
self.wcs.set_pv([])
warnings.warn("Removed redundant SCAMP distortion parameters " +
"because SIP parameters are also present", FITSFixedWarning)
def fix(self, translate_units='', naxis=None):
"""
Perform the fix operations from wcslib, and warn about any
changes it has made.
Parameters
----------
translate_units : str, optional
Specify which potentially unsafe translations of
non-standard unit strings to perform. By default,
performs none.
Although ``"S"`` is commonly used to represent seconds,
its translation to ``"s"`` is potentially unsafe since the
standard recognizes ``"S"`` formally as Siemens, however
rarely that may be used. The same applies to ``"H"`` for
hours (Henry), and ``"D"`` for days (Debye).
This string controls what to do in such cases, and is
case-insensitive.
- If the string contains ``"s"``, translate ``"S"`` to
``"s"``.
- If the string contains ``"h"``, translate ``"H"`` to
``"h"``.
- If the string contains ``"d"``, translate ``"D"`` to
``"d"``.
Thus ``''`` doesn't do any unsafe translations, whereas
``'shd'`` does all of them.
naxis : int array[naxis], optional
Image axis lengths. If this array is set to zero or
``None``, then `~astropy.wcs.Wcsprm.cylfix` will not be
invoked.
"""
if self.wcs is not None:
self._fix_scamp()
fixes = self.wcs.fix(translate_units, naxis)
for key, val in fixes.items():
if val != "No change":
warnings.warn(
("'{0}' made the change '{1}'.").
format(key, val),
FITSFixedWarning)
def calc_footprint(self, header=None, undistort=True, axes=None, center=True):
"""
Calculates the footprint of the image on the sky.
A footprint is defined as the positions of the corners of the
image on the sky after all available distortions have been
applied.
Parameters
----------
header : `~astropy.io.fits.Header` object, optional
Used to get ``NAXIS1`` and ``NAXIS2``
header and axes are mutually exclusive, alternative ways
to provide the same information.
undistort : bool, optional
If `True`, take SIP and distortion lookup table into
account
        axes : length-2 sequence of int, optional
If provided, use the given sequence as the shape of the
image. Otherwise, use the ``NAXIS1`` and ``NAXIS2``
keywords from the header that was used to create this
`WCS` object.
center : bool, optional
If `True` use the center of the pixel, otherwise use the corner.
Returns
-------
coord : (4, 2) array of (*x*, *y*) coordinates.
The order is clockwise starting with the bottom left corner.
"""
if axes is not None:
naxis1, naxis2 = axes
else:
if header is None:
try:
# classes that inherit from WCS and define naxis1/2
# do not require a header parameter
naxis1, naxis2 = self.pixel_shape
except (AttributeError, TypeError):
warnings.warn("Need a valid header in order to calculate footprint\n", AstropyUserWarning)
return None
else:
naxis1 = header.get('NAXIS1', None)
naxis2 = header.get('NAXIS2', None)
if naxis1 is None or naxis2 is None:
raise ValueError(
"Image size could not be determined.")
if center:
corners = np.array([[1, 1],
[1, naxis2],
[naxis1, naxis2],
[naxis1, 1]], dtype=np.float64)
else:
corners = np.array([[0.5, 0.5],
[0.5, naxis2 + 0.5],
[naxis1 + 0.5, naxis2 + 0.5],
[naxis1 + 0.5, 0.5]], dtype=np.float64)
if undistort:
return self.all_pix2world(corners, 1)
else:
return self.wcs_pix2world(corners, 1)
def _read_det2im_kw(self, header, fobj, err=0.0):
"""
Create a `distortion paper`_ type lookup table for detector to
image plane correction.
"""
if fobj is None:
return (None, None)
if not isinstance(fobj, fits.HDUList):
return (None, None)
try:
axiscorr = header[str('AXISCORR')]
d2imdis = self._read_d2im_old_format(header, fobj, axiscorr)
return d2imdis
except KeyError:
pass
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
tables = {}
for i in range(1, self.naxis + 1):
d_error = header.get(err_kw + str(i), 0.0)
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
if dis == 'lookup':
del header[distortion]
                    assert isinstance(fobj, fits.HDUList), (
                        'An astropy.io.fits.HDUList is required for '
                        'Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + str('.EXTVER')
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + str('.AXIS.{0:d}').format(i)
if i == header[dp_axis_key]:
d_data = fobj[str('D2IMARR'), d_extver].data
else:
d_data = (fobj[str('D2IMARR'), d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj[str('D2IMARR'), d_extver].header
d_crpix = (d_header.get(str('CRPIX1'), 0.0), d_header.get(str('CRPIX2'), 0.0))
d_crval = (d_header.get(str('CRVAL1'), 0.0), d_header.get(str('CRVAL2'), 0.0))
d_cdelt = (d_header.get(str('CDELT1'), 1.0), d_header.get(str('CDELT2'), 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix,
d_crval, d_cdelt)
tables[i] = d_lookup
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
for key in set(header):
if key.startswith(dp + str('.')):
del header[key]
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _read_d2im_old_format(self, header, fobj, axiscorr):
warnings.warn("The use of ``AXISCORR`` for D2IM correction has been deprecated."
"`~astropy.wcs` will read in files with ``AXISCORR`` but ``to_fits()`` will write "
"out files without it.",
AstropyDeprecationWarning)
cpdis = [None, None]
crpix = [0., 0.]
crval = [0., 0.]
cdelt = [1., 1.]
try:
d2im_data = fobj[(str('D2IMARR'), 1)].data
except KeyError:
return (None, None)
except AttributeError:
return (None, None)
d2im_data = np.array([d2im_data])
d2im_hdr = fobj[(str('D2IMARR'), 1)].header
naxis = d2im_hdr[str('NAXIS')]
for i in range(1, naxis + 1):
crpix[i - 1] = d2im_hdr.get(str('CRPIX') + str(i), 0.0)
crval[i - 1] = d2im_hdr.get(str('CRVAL') + str(i), 0.0)
cdelt[i - 1] = d2im_hdr.get(str('CDELT') + str(i), 1.0)
cpdis = DistortionLookupTable(d2im_data, crpix, crval, cdelt)
if axiscorr == 1:
return (cpdis, None)
elif axiscorr == 2:
return (None, cpdis)
else:
warnings.warn("Expected AXISCORR to be 1 or 2", AstropyUserWarning)
return (None, None)
def _write_det2im(self, hdulist):
"""
Writes a `distortion paper`_ type lookup table to the given
`astropy.io.fits.HDUList`.
"""
if self.det2im1 is None and self.det2im2 is None:
return
dist = 'D2IMDIS'
d_kw = 'D2IM'
err_kw = 'D2IMERR'
def write_d2i(num, det2im):
if det2im is None:
return
hdulist[0].header[str('{0}{1:d}').format(dist, num)] = (
'LOOKUP', 'Detector to image correction type')
hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[str('{0}{1:d}.NAXES').format(d_kw, num)] = (
len(det2im.data.shape), 'Number of independent variables in d2im function')
for i in range(det2im.data.ndim):
hdulist[0].header[str('{0}{1:d}.AXIS.{2:d}').format(d_kw, num, i + 1)] = (
i + 1, 'Axis number of the jth independent variable in a d2im function')
image = fits.ImageHDU(det2im.data, name=str('D2IMARR'))
header = image.header
header[str('CRPIX1')] = (det2im.crpix[0],
'Coordinate system reference pixel')
header[str('CRPIX2')] = (det2im.crpix[1],
'Coordinate system reference pixel')
header[str('CRVAL1')] = (det2im.crval[0],
'Coordinate system value at reference pixel')
header[str('CRVAL2')] = (det2im.crval[1],
'Coordinate system value at reference pixel')
header[str('CDELT1')] = (det2im.cdelt[0],
'Coordinate increment along axis')
header[str('CDELT2')] = (det2im.cdelt[1],
'Coordinate increment along axis')
image.ver = int(hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)])
hdulist.append(image)
write_d2i(1, self.det2im1)
write_d2i(2, self.det2im2)
def _read_distortion_kw(self, header, fobj, dist='CPDIS', err=0.0):
"""
Reads `distortion paper`_ table-lookup keywords and data, and
returns a 2-tuple of `~astropy.wcs.DistortionLookupTable`
objects.
If no `distortion paper`_ keywords are found, ``(None, None)``
is returned.
"""
if isinstance(header, (str, bytes)):
return (None, None)
if dist == 'CPDIS':
d_kw = str('DP')
err_kw = str('CPERR')
else:
d_kw = str('DQ')
err_kw = str('CQERR')
tables = {}
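# For each WCS axis, look for the CPERRi/CQERRi error keyword and the
# CPDISi/CQDISi distortion type; a 'lookup' distortion points (via the
# DPi/DQi record-valued keywords) to a WCSDVARR extension selected by EXTVER.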
for i in range(1, self.naxis + 1):
d_error_key = err_kw + str(i)
if d_error_key in header:
d_error = header[d_error_key]
del header[d_error_key]
else:
d_error = 0.0
if d_error < err:
tables[i] = None
continue
distortion = dist + str(i)
if distortion in header:
dis = header[distortion].lower()
del header[distortion]
if dis == 'lookup':
if not isinstance(fobj, fits.HDUList):
raise ValueError('an astropy.io.fits.HDUList is '
'required for Lookup table distortion.')
dp = (d_kw + str(i)).strip()
dp_extver_key = dp + str('.EXTVER')
if dp_extver_key in header:
d_extver = header[dp_extver_key]
del header[dp_extver_key]
else:
d_extver = 1
dp_axis_key = dp + str('.AXIS.{0:d}'.format(i))
if i == header[dp_axis_key]:
d_data = fobj[str('WCSDVARR'), d_extver].data
else:
d_data = (fobj[str('WCSDVARR'), d_extver].data).transpose()
del header[dp_axis_key]
d_header = fobj[str('WCSDVARR'), d_extver].header
d_crpix = (d_header.get(str('CRPIX1'), 0.0),
d_header.get(str('CRPIX2'), 0.0))
d_crval = (d_header.get(str('CRVAL1'), 0.0),
d_header.get(str('CRVAL2'), 0.0))
d_cdelt = (d_header.get(str('CDELT1'), 1.0),
d_header.get(str('CDELT2'), 1.0))
d_lookup = DistortionLookupTable(d_data, d_crpix, d_crval, d_cdelt)
tables[i] = d_lookup
for key in set(header):
if key.startswith(dp + str('.')):
del header[key]
else:
warnings.warn('Polynomial distortion is not implemented.\n', AstropyUserWarning)
else:
tables[i] = None
if not tables:
return (None, None)
else:
return (tables.get(1), tables.get(2))
def _write_distortion_kw(self, hdulist, dist='CPDIS'):
"""
Write out `distortion paper`_ keywords to the given
`fits.HDUList`.
"""
if self.cpdis1 is None and self.cpdis2 is None:
return
if dist == 'CPDIS':
d_kw = str('DP')
err_kw = str('CPERR')
else:
d_kw = str('DQ')
err_kw = str('CQERR')
def write_dist(num, cpdis):
if cpdis is None:
return
hdulist[0].header[str('{0}{1:d}').format(dist, num)] = (
'LOOKUP', 'Prior distortion function type')
hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)] = (
num, 'Version number of WCSDVARR extension')
hdulist[0].header[str('{0}{1:d}.NAXES').format(d_kw, num)] = (
len(cpdis.data.shape), 'Number of independent variables in distortion function')
for i in range(cpdis.data.ndim):
hdulist[0].header[str('{0}{1:d}.AXIS.{2:d}').format(d_kw, num, i + 1)] = (
i + 1,
'Axis number of the jth independent variable in a distortion function')
image = fits.ImageHDU(cpdis.data, name=str('WCSDVARR'))
header = image.header
header[str('CRPIX1')] = (cpdis.crpix[0], 'Coordinate system reference pixel')
header[str('CRPIX2')] = (cpdis.crpix[1], 'Coordinate system reference pixel')
header[str('CRVAL1')] = (cpdis.crval[0], 'Coordinate system value at reference pixel')
header[str('CRVAL2')] = (cpdis.crval[1], 'Coordinate system value at reference pixel')
header[str('CDELT1')] = (cpdis.cdelt[0], 'Coordinate increment along axis')
header[str('CDELT2')] = (cpdis.cdelt[1], 'Coordinate increment along axis')
image.ver = int(hdulist[0].header[str('{0}{1:d}.EXTVER').format(d_kw, num)])
hdulist.append(image)
write_dist(1, self.cpdis1)
write_dist(2, self.cpdis2)
def _remove_sip_kw(self, header):
"""
Remove SIP information from a header.
"""
# Never pass SIP coefficients to wcslib
# CTYPE must be passed with -SIP to wcslib
for key in (m.group() for m in map(SIP_KW.match, list(header))
if m is not None):
del header[key]
def _read_sip_kw(self, header, wcskey=""):
"""
Reads `SIP`_ header keywords and returns a `~astropy.wcs.Sip`
object.
If no `SIP`_ header keywords are found, ``None`` is returned.
"""
if isinstance(header, (str, bytes)):
# TODO: Parse SIP from a string without pyfits around
return None
if str("A_ORDER") in header and header[str('A_ORDER')] > 1:
if str("B_ORDER") not in header:
raise ValueError(
"A_ORDER provided without corresponding B_ORDER "
"keyword for SIP distortion")
m = int(header[str("A_ORDER")])
a = np.zeros((m + 1, m + 1), np.double)
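# SIP coefficients A_i_j are defined only for i + j <= A_ORDER, hence the
# triangular loops below; coefficients missing from the header stay zero.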
for i in range(m + 1):
for j in range(m - i + 1):
key = str("A_{0}_{1}").format(i, j)
if key in header:
a[i, j] = header[key]
del header[key]
m = int(header[str("B_ORDER")])
if m > 1:
b = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("B_{0}_{1}").format(i, j)
if key in header:
b[i, j] = header[key]
del header[key]
else:
a = None
b = None
del header[str('A_ORDER')]
del header[str('B_ORDER')]
ctype = [header['CTYPE{0}{1}'.format(nax, wcskey)] for nax in range(1, self.naxis + 1)]
if any(not ctyp.endswith('-SIP') for ctyp in ctype):
message = """
Inconsistent SIP distortion information is present in the FITS header and the WCS object:
SIP coefficients were detected, but CTYPE is missing a "-SIP" suffix.
astropy.wcs is using the SIP distortion coefficients,
therefore the coordinates calculated here might be incorrect.
If you do not want to apply the SIP distortion coefficients,
please remove the SIP coefficients from the FITS header or the
WCS object. As an example, if the image is already distortion-corrected
(e.g., drizzled) then distortion components should not apply and the SIP
coefficients should be removed.
While the SIP distortion coefficients are being applied here, if that was indeed the intent,
for consistency please append "-SIP" to the CTYPE in the FITS header or the WCS object.
"""
log.info(message)
elif str("B_ORDER") in header and header[str('B_ORDER')] > 1:
raise ValueError(
"B_ORDER provided without corresponding A_ORDER " +
"keyword for SIP distortion")
else:
a = None
b = None
if str("AP_ORDER") in header and header[str('AP_ORDER')] > 1:
if str("BP_ORDER") not in header:
raise ValueError(
"AP_ORDER provided without corresponding BP_ORDER "
"keyword for SIP distortion")
m = int(header[str("AP_ORDER")])
ap = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("AP_{0}_{1}").format(i, j)
if key in header:
ap[i, j] = header[key]
del header[key]
m = int(header[str("BP_ORDER")])
if m > 1:
bp = np.zeros((m + 1, m + 1), np.double)
for i in range(m + 1):
for j in range(m - i + 1):
key = str("BP_{0}_{1}").format(i, j)
if key in header:
bp[i, j] = header[key]
del header[key]
else:
ap = None
bp = None
del header[str('AP_ORDER')]
del header[str('BP_ORDER')]
elif str("BP_ORDER") in header and header[str('BP_ORDER')] > 1:
raise ValueError(
"BP_ORDER provided without corresponding AP_ORDER "
"keyword for SIP distortion")
else:
ap = None
bp = None
if a is None and b is None and ap is None and bp is None:
return None
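# The SIP polynomials are evaluated relative to CRPIX, so a header that
# carries SIP coefficients must also provide the matching CRPIX1/CRPIX2
# keywords.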
if str("CRPIX1{0}".format(wcskey)) not in header or str("CRPIX2{0}".format(wcskey)) not in header:
raise ValueError(
"Header has SIP keywords without CRPIX keywords")
crpix1 = header.get("CRPIX1{0}".format(wcskey))
crpix2 = header.get("CRPIX2{0}".format(wcskey))
return Sip(a, b, ap, bp, (crpix1, crpix2))
def _write_sip_kw(self):
"""
Write out SIP keywords. Returns a dictionary of key-value
pairs.
"""
if self.sip is None:
return {}
keywords = {}
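# write_array emits {NAME}_ORDER plus the non-zero {NAME}_i_j coefficients
# for a single SIP coefficient array (A, B, AP or BP).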
def write_array(name, a):
if a is None:
return
size = a.shape[0]
keywords[str('{0}_ORDER').format(name)] = size - 1
for i in range(size):
for j in range(size - i):
if a[i, j] != 0.0:
keywords[
str('{0}_{1:d}_{2:d}').format(name, i, j)] = a[i, j]
write_array(str('A'), self.sip.a)
write_array(str('B'), self.sip.b)
write_array(str('AP'), self.sip.ap)
write_array(str('BP'), self.sip.bp)
return keywords
def _denormalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be used as input")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial " +
"axes, therefore (ra, dec) data can not be used as input")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude "
"celestial axes, therefore (ra, dec) data can not be " +
"used as input")
out = np.zeros((sky.shape[0], self.wcs.naxis))
out[:, self.wcs.lng] = sky[:, 0]
out[:, self.wcs.lat] = sky[:, 1]
return out
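# _normalize_sky is the inverse bookkeeping step: it pulls the longitude and
# latitude columns out of an (N, naxis) world array and returns them as an
# (N, 2) array in (ra, dec) order.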
def _normalize_sky(self, sky):
if self.wcs.lngtyp != 'RA':
raise ValueError(
"WCS does not have longitude type of 'RA', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.lattyp != 'DEC':
raise ValueError(
"WCS does not have longitude type of 'DEC', therefore " +
"(ra, dec) data can not be returned")
if self.wcs.naxis == 2:
if self.wcs.lng == 0 and self.wcs.lat == 1:
return sky
elif self.wcs.lng == 1 and self.wcs.lat == 0:
# Reverse the order of the columns
return sky[:, ::-1]
else:
raise ValueError(
"WCS does not have longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
else:
if self.wcs.lng < 0 or self.wcs.lat < 0:
raise ValueError(
"WCS does not have both longitude and latitude celestial "
"axes, therefore (ra, dec) data can not be returned")
out = np.empty((sky.shape[0], 2))
out[:, 0] = sky[:, self.wcs.lng]
out[:, 1] = sky[:, self.wcs.lat]
return out
def _array_converter(self, func, sky, *args, ra_dec_order=False):
"""
A helper function to support reading either a pair of arrays
or a single Nx2 array.
"""
def _return_list_of_arrays(axes, origin):
if any([x.size == 0 for x in axes]):
return axes
try:
axes = np.broadcast_arrays(*axes)
except ValueError:
raise ValueError(
"Coordinate arrays are not broadcastable to each other")
xy = np.hstack([x.reshape((x.size, 1)) for x in axes])
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
output = func(xy, origin)
if ra_dec_order and sky == 'output':
output = self._normalize_sky(output)
return (output[:, 0].reshape(axes[0].shape),
output[:, 1].reshape(axes[0].shape))
return [output[:, i].reshape(axes[0].shape)
for i in range(output.shape[1])]
def _return_single_array(xy, origin):
if xy.shape[-1] != self.naxis:
raise ValueError(
"When providing two arguments, the array must be "
"of shape (N, {0})".format(self.naxis))
if 0 in xy.shape:
return xy
if ra_dec_order and sky == 'input':
xy = self._denormalize_sky(xy)
result = func(xy, origin)
if ra_dec_order and sky == 'output':
result = self._normalize_sky(result)
return result
if len(args) == 2:
try:
xy, origin = args
xy = np.asarray(xy)
origin = int(origin)
except Exception:
raise TypeError(
"When providing two arguments, they must be "
"(coords[N][{0}], origin)".format(self.naxis))
if xy.shape == () or len(xy.shape) == 1:
return _return_list_of_arrays([xy], origin)
return _return_single_array(xy, origin)
elif len(args) == self.naxis + 1:
axes = args[:-1]
origin = args[-1]
try:
axes = [np.asarray(x) for x in axes]
origin = int(origin)
except Exception:
raise TypeError(
"When providing more than two arguments, they must be " +
"a 1-D array for each axis, followed by an origin.")
return _return_list_of_arrays(axes, origin)
raise TypeError(
"WCS projection has {0} dimensions, so expected 2 (an Nx{0} array "
"and the origin argument) or {1} arguments (the position in each "
"dimension, and the origin argument). Instead, {2} arguments were "
"given.".format(
self.naxis, self.naxis + 1, len(args)))
def all_pix2world(self, *args, **kwargs):
return self._array_converter(
self._all_pix2world, 'output', *args, **kwargs)
all_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates.
Performs all of the following in series:
- Detector to image plane correction (if present in the
FITS file)
- `SIP`_ distortion correction (if present in the FITS
file)
- `distortion paper`_ table-lookup correction (if present
in the FITS file)
- `wcslib`_ "core" WCS transformation
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
Returns
-------
{2}
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('sky coordinates, in degrees', 8))
def wcs_pix2world(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.p2s(xy, o)['world'],
'output', *args, **kwargs)
wcs_pix2world.__doc__ = """
Transforms pixel coordinates to world coordinates by doing
only the basic `wcslib`_ transformation.
No `SIP`_ or `distortion paper`_ table lookup correction is
applied. To perform distortion correction, see
`~astropy.wcs.WCS.all_pix2world`,
`~astropy.wcs.WCS.sip_pix2foc`, `~astropy.wcs.WCS.p4_pix2foc`,
or `~astropy.wcs.WCS.pix2foc`.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
Returns
-------
{2}
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Notes
-----
The order of the axes for the result is determined by the
``CTYPEia`` keywords in the FITS header, therefore it may not
always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('world coordinates, in degrees', 8))
def _all_world2pix(self, world, origin, tolerance, maxiter, adaptive,
detect_divergence, quiet):
# ############################################################
# # DESCRIPTION OF THE NUMERICAL METHOD ##
# ############################################################
# In this section I will outline the method of solving
# the inverse problem of converting world coordinates to
# pixel coordinates (*inverse* of the direct transformation
# `all_pix2world`) and I will summarize some of the aspects
# of the method proposed here and some of the issues of the
# original `all_world2pix` (in relation to this method)
# discussed in https://github.com/astropy/astropy/issues/1977
# A more detailed discussion can be found here:
# https://github.com/astropy/astropy/pull/2373
#
#
# ### Background ###
#
#
# I will refer here to the [SIP Paper]
# (http://fits.gsfc.nasa.gov/registry/sip/SIP_distortion_v1_0.pdf).
# According to this paper, the effect of distortions as
# described in *their* equation (1) is:
#
# (1) x = CD*(u+f(u)),
#
# where `x` is a *vector* of "intermediate spherical
# coordinates" (equivalent to (x,y) in the paper) and `u`
# is a *vector* of "pixel coordinates", and `f` is a vector
# function describing geometrical distortions
# (see equations 2 and 3 in the SIP Paper).
# However, I prefer to use `w` for "intermediate world
# coordinates", `x` for pixel coordinates, and assume that
# transformation `W` performs the **linear**
# (CD matrix + projection onto celestial sphere) part of the
# conversion from pixel coordinates to world coordinates.
# Then we can re-write (1) as:
#
# (2) w = W*(x+f(x)) = T(x)
#
# In `astropy.wcs.WCS` transformation `W` is represented by
# the `wcs_pix2world` member, while the combined ("total")
# transformation (linear part + distortions) is performed by
# `all_pix2world`. Below I summarize the notations and their
# equivalents in `astropy.wcs.WCS`:
#
# | Equation term | astropy.WCS/meaning |
# | ------------- | ---------------------------- |
# | `x` | pixel coordinates |
# | `w` | world coordinates |
# | `W` | `wcs_pix2world()` |
# | `W^{-1}` | `wcs_world2pix()` |
# | `T` | `all_pix2world()` |
# | `x+f(x)` | `pix2foc()` |
#
#
# ### Direct Solving of Equation (2) ###
#
#
# In order to find the pixel coordinates that correspond to
# given world coordinates `w`, it is necessary to invert
# equation (2): `x=T^{-1}(w)`, or solve equation `w==T(x)`
# for `x`. However, this approach has the following
# disadvantages:
# 1. It requires unnecessary transformations (see next
# section).
# 2. It is prone to "RA wrapping" issues as described in
# https://github.com/astropy/astropy/issues/1977
# (essentially because `all_pix2world` may return points with
# a different phase than the user's input `w`).
#
#
# ### Description of the Method Used here ###
#
#
# By applying inverse linear WCS transformation (`W^{-1}`)
# to both sides of equation (2) and introducing notation `x'`
# (prime) for the pixel coordinates obtained from the world
# coordinates by applying inverse *linear* WCS transformation
# ("focal plane coordinates"):
#
# (3) x' = W^{-1}(w)
#
# we obtain the following equation:
#
# (4) x' = x+f(x),
#
# or,
#
# (5) x = x'-f(x)
#
# This equation is well suited for solving using the method
# of fixed-point iterations
# (http://en.wikipedia.org/wiki/Fixed-point_iteration):
#
# (6) x_{i+1} = x'-f(x_i)
#
# As an initial value of the pixel coordinate `x_0` we take
# "focal plane coordinate" `x'=W^{-1}(w)=wcs_world2pix(w)`.
# We stop iterations when `|x_{i+1}-x_i|<tolerance`. We also
# consider the process to be diverging if
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|`
# **when** `|x_{i+1}-x_i|>=tolerance` (when current
# approximation is close to the true solution,
# `|x_{i+1}-x_i|>|x_i-x_{i-1}|` may be due to rounding errors
# and we ignore such "divergences" when
# `|x_{i+1}-x_i|<tolerance`). It may appear that checking for
# `|x_{i+1}-x_i|<tolerance` in order to ignore divergence is
# unnecessary since the iterative process should stop anyway,
# however, the proposed implementation of this iterative
# process is completely vectorized and, therefore, we may
# continue iterating over *some* points even though they have
# converged to within a specified tolerance (while iterating
# over other points that have not yet converged to
# a solution).
#
# In order to efficiently implement iterative process (6)
# using available methods in `astropy.wcs.WCS`, we add and
# subtract `x_i` from the right side of equation (6):
#
# (7) x_{i+1} = x'-(x_i+f(x_i))+x_i = x'-pix2foc(x_i)+x_i,
#
# where `x'=wcs_world2pix(w)` and it is computed only *once*
# before the beginning of the iterative process (and we also
# set `x_0=x'`). By using `pix2foc` at each iteration instead
# of `all_pix2world` we get about 25% increase in performance
# (by not performing the linear `W` transformation at each
# step) and we also avoid the "RA wrapping" issue described
# above (by working in focal plane coordinates and avoiding
# pix->world transformations).
#
# As an added benefit, the process converges to the correct
# solution in just one iteration when distortions are not
# present (compare to
# https://github.com/astropy/astropy/issues/1977 and
# https://github.com/astropy/astropy/pull/2294): in this case
# `pix2foc` is the identity transformation
# `x_i=pix2foc(x_i)` and from equation (7) we get:
#
# x' = x_0 = wcs_world2pix(w)
# x_1 = x' - pix2foc(x_0) + x_0 = x' - pix2foc(x') + x' = x'
# = wcs_world2pix(w) = x_0
# =>
# |x_1-x_0| = 0 < tolerance (with tolerance > 0)
#
# However, for performance reasons, it is still better to
# avoid iterations altogether and return the exact linear
# solution (`wcs_world2pix`) right-away when non-linear
# distortions are not present by checking that attributes
# `sip`, `cpdis1`, `cpdis2`, `det2im1`, and `det2im2` are
# *all* `None`.
#
#
# ### Outline of the Algorithm ###
#
#
# While the proposed code is relatively long (considering
# the simplicity of the algorithm), this is due to: 1)
# checking if iterative solution is necessary at all; 2)
# checking for divergence; 3) re-implementation of the
# completely vectorized algorithm as an "adaptive" vectorized
# algorithm (for cases when some points diverge for which we
# want to stop iterations). In my tests, the adaptive version
# of the algorithm is about 50% slower than the non-adaptive
# version for all HST images.
#
# The essential part of the vectorized non-adaptive algorithm
# (without divergence and other checks) can be described
# as follows:
#
# pix0 = self.wcs_world2pix(world, origin)
# pix = pix0.copy() # 0-order solution
#
# for k in range(maxiter):
# # find correction to the previous solution:
# dpix = self.pix2foc(pix, origin) - pix0
#
# # compute norm (L2) of the correction:
# dn = np.linalg.norm(dpix, axis=1)
#
# # apply correction:
# pix -= dpix
#
# # check convergence:
# if np.max(dn) < tolerance:
# break
#
# return pix
#
# Here, the input parameter `world` can be a `MxN` array
# where `M` is the number of coordinate axes in WCS and `N`
# is the number of points to be converted simultaneously to
# image coordinates.
#
#
# ### IMPORTANT NOTE: ###
#
# If, in future releases of `~astropy.wcs`, `pix2foc` no longer
# applies all the required distortion corrections, then in the
# code below, calls to `pix2foc` will
# have to be replaced with
# wcs_world2pix(all_pix2world(pix_list, origin), origin)
#
# ############################################################
# # INITIALIZE ITERATIVE PROCESS: ##
# ############################################################
# initial approximation (linear WCS based only)
pix0 = self.wcs_world2pix(world, origin)
# Check that an iterative solution is required at all
# (when any of the non-CD-matrix-based corrections are
# present). If not required return the initial
# approximation (pix0).
if not self.has_distortion:
# No non-WCS corrections detected so
# simply return initial approximation:
return pix0
pix = pix0.copy() # 0-order solution
# initial correction:
dpix = self.pix2foc(pix, origin) - pix0
# Update initial solution:
pix -= dpix
# Norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
dnprev = dn.copy() # if adaptive else dn
tol2 = tolerance**2
# Prepare for iterative process
k = 1
ind = None
inddiv = None
# Turn off numpy runtime warnings for 'invalid' and 'over':
old_invalid = np.geterr()['invalid']
old_over = np.geterr()['over']
np.seterr(invalid='ignore', over='ignore')
# ############################################################
# # NON-ADAPTIVE ITERATIONS: ##
# ############################################################
if not adaptive:
# Fixed-point iterations:
while (np.nanmax(dn) >= tol2 and k < maxiter):
# Find correction to the previous solution:
dpix = self.pix2foc(pix, origin) - pix0
# Compute norm (L2) squared of the correction:
dn = np.sum(dpix*dpix, axis=1)
# Check for divergence (we do this in two stages
# to optimize performance for the most common
# scenario when successive approximations converge):
if detect_divergence:
divergent = (dn >= dnprev)
if np.any(divergent):
# Find solutions that have not yet converged:
slowconv = (dn >= tol2)
inddiv, = np.where(divergent & slowconv)
if inddiv.shape[0] > 0:
# Update indices of elements that
# still need correction:
conv = (dn < dnprev)
iconv = np.where(conv)
# Apply correction:
dpixgood = dpix[iconv]
pix[iconv] -= dpixgood
dpix[iconv] = dpixgood
# For the next iteration choose
# non-divergent points that have not yet
# converged to the requested accuracy:
ind, = np.where(slowconv & conv)
pix0 = pix0[ind]
dnprev[ind] = dn[ind]
k += 1
# Switch to adaptive iterations:
adaptive = True
break
# Save current correction magnitudes for later:
dnprev = dn
# Apply correction:
pix -= dpix
k += 1
# ############################################################
# # ADAPTIVE ITERATIONS: ##
# ############################################################
if adaptive:
if ind is None:
ind, = np.where(np.isfinite(pix).all(axis=1))
pix0 = pix0[ind]
# "Adaptive" fixed-point iterations:
while (ind.shape[0] > 0 and k < maxiter):
# Find correction to the previous solution:
dpixnew = self.pix2foc(pix[ind], origin) - pix0
# Compute norm (L2) of the correction:
dnnew = np.sum(np.square(dpixnew), axis=1)
# Bookkeeping of corrections:
dnprev[ind] = dn[ind].copy()
dn[ind] = dnnew
if detect_divergence:
# Find indices of pixels that are converging:
conv = (dnnew < dnprev[ind])
iconv = np.where(conv)
iiconv = ind[iconv]
# Apply correction:
dpixgood = dpixnew[iconv]
pix[iiconv] -= dpixgood
dpix[iiconv] = dpixgood
# Find indices of solutions that have not yet
# converged to the requested accuracy
# AND that do not diverge:
subind, = np.where((dnnew >= tol2) & conv)
else:
# Apply correction:
pix[ind] -= dpixnew
dpix[ind] = dpixnew
# Find indices of solutions that have not yet
# converged to the requested accuracy:
subind, = np.where(dnnew >= tol2)
# Choose solutions that need more iterations:
ind = ind[subind]
pix0 = pix0[subind]
k += 1
# ############################################################
# # FINAL DETECTION OF INVALID, DIVERGING, ##
# # AND FAILED-TO-CONVERGE POINTS ##
# ############################################################
# Identify diverging and/or invalid points:
invalid = ((~np.all(np.isfinite(pix), axis=1)) &
(np.all(np.isfinite(world), axis=1)))
# When detect_divergence==False, dnprev is outdated
# (it is the norm of the very first correction).
# Still better than nothing...
inddiv, = np.where(((dn >= tol2) & (dn >= dnprev)) | invalid)
if inddiv.shape[0] == 0:
inddiv = None
# Identify points that did not converge within 'maxiter'
# iterations:
if k >= maxiter:
ind, = np.where((dn >= tol2) & (dn < dnprev) & (~invalid))
if ind.shape[0] == 0:
ind = None
else:
ind = None
# Restore previous numpy error settings:
np.seterr(invalid=old_invalid, over=old_over)
# ############################################################
# # RAISE EXCEPTION IF DIVERGING OR TOO SLOWLY CONVERGING ##
# # DATA POINTS HAVE BEEN DETECTED: ##
# ############################################################
if (ind is not None or inddiv is not None) and not quiet:
if inddiv is None:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy after {:d} "
"iterations.".format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=None)
else:
raise NoConvergence(
"'WCS.all_world2pix' failed to "
"converge to the requested accuracy.\n"
"After {0:d} iterations, the solution is diverging "
"at least for one input point."
.format(k), best_solution=pix,
accuracy=np.abs(dpix), niter=k,
slow_conv=ind, divergent=inddiv)
return pix
def all_world2pix(self, *args, tolerance=1e-4, maxiter=20, adaptive=False,
detect_divergence=True, quiet=False, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda *args, **kwargs:
self._all_world2pix(
*args, tolerance=tolerance, maxiter=maxiter,
adaptive=adaptive, detect_divergence=detect_divergence,
quiet=quiet),
'input', *args, **kwargs
)
all_world2pix.__doc__ = """
all_world2pix(*args, tolerance=1.0e-4, maxiter=20,
adaptive=False, detect_divergence=True, quiet=False)
Transforms world coordinates to pixel coordinates, using
numerical iteration to invert the full forward transformation
`~astropy.wcs.WCS.all_pix2world` with complete
distortion model.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
tolerance : float, optional (Default = 1.0e-4)
Tolerance of solution. Iteration terminates when the
iterative solver estimates that the "true solution" is
within this many pixels of the current estimate; more
specifically, when the correction to the solution found
during the previous iteration is smaller
(in the sense of the L2 norm) than ``tolerance``.
maxiter : int, optional (Default = 20)
Maximum number of iterations allowed to reach a solution.
quiet : bool, optional (Default = False)
Do not throw :py:class:`NoConvergence` exceptions when
the method does not converge to a solution with the
required accuracy within a specified number of maximum
iterations set by the ``maxiter`` parameter. Instead,
simply return the found solution.
Other Parameters
----------------
adaptive : bool, optional (Default = False)
Specifies whether to adaptively select only points that
did not converge to a solution within the required
accuracy for the next iteration. Default is recommended
for HST as well as most other instruments.
.. note::
The :py:meth:`all_world2pix` uses a vectorized
implementation of the method of consecutive
approximations (see ``Notes`` section below) in which it
iterates over *all* input points, regardless of individual convergence, until
the required accuracy has been reached for *all* input
points. In some cases it may be possible that
*almost all* points have reached the required accuracy
but there are only a few input data points left for
which additional iterations may be needed (this
depends mostly on the characteristics of the geometric
distortions for a given instrument). In this situation
it may be advantageous to set ``adaptive`` = `True` in
which case :py:meth:`all_world2pix` will continue
iterating *only* over the points that have not yet
converged to the required accuracy. However, for the
HST's ACS/WFC detector, which has the strongest
distortions of all HST instruments, testing has
shown that enabling this option would lead to about a
50-100% penalty in computational time (depending on
specifics of the image, geometric distortions, and
number of input points to be converted). Therefore,
for HST and possibly other instruments, it is recommended
to set ``adaptive`` = `False`. The only danger in
getting this setting wrong will be a performance
penalty.
.. note::
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will automatically switch
to the adaptive algorithm once divergence has been
detected.
detect_divergence : bool, optional (Default = True)
Specifies whether to perform a more detailed analysis
of the convergence to a solution. Normally
:py:meth:`all_world2pix` may not achieve the required
accuracy if either the ``tolerance`` or ``maxiter`` arguments
are too low. However, it may happen that for some
geometric distortions the conditions of convergence for
the method of consecutive approximations used by
:py:meth:`all_world2pix` may not be satisfied, in which
case consecutive approximations to the solution will
diverge regardless of the ``tolerance`` or ``maxiter``
settings.
When ``detect_divergence`` is `False`, these divergent
points will be detected as not having achieved the
required accuracy (without further details). In addition,
if ``adaptive`` is `False` then the algorithm will not
know that the solution (for specific points) is diverging
and will continue iterating and trying to "improve"
diverging solutions. This may result in ``NaN`` or
``Inf`` values in the returned results (in addition to
performance penalties). Even when ``detect_divergence``
is `False`, :py:meth:`all_world2pix`, at the end of the
iterative process, will identify invalid results
(``NaN`` or ``Inf``) as "diverging" solutions and will
raise :py:class:`NoConvergence` unless the ``quiet``
parameter is set to `True`.
When ``detect_divergence`` is `True`,
:py:meth:`all_world2pix` will detect points for which
current correction to the coordinates is larger than
the correction applied during the previous iteration
**if** the requested accuracy **has not yet been
achieved**. In this case, if ``adaptive`` is `True`,
these points will be excluded from further iterations and
if ``adaptive`` is `False`, :py:meth:`all_world2pix` will
automatically switch to the adaptive algorithm. Thus, the
reported divergent solution will be the latest converging
solution computed immediately *before* divergence
has been detected.
.. note::
When accuracy has been achieved, small increases in
current corrections may be possible due to rounding
errors (when ``adaptive`` is `False`) and such
increases will be ignored.
.. note::
Based on our testing using HST ACS/WFC images, setting
``detect_divergence`` to `True` will incur about 5-20%
performance penalty with the larger penalty
corresponding to ``adaptive`` set to `True`.
Because the benefits of enabling this
feature outweigh the small performance penalty,
especially when ``adaptive`` = `False`, it is
recommended to set ``detect_divergence`` to `True`,
unless extensive testing of the distortion models for
images from specific instruments show a good stability
of the numerical method for a wide range of
coordinates (even outside the image itself).
.. note::
Indices of the diverging inverse solutions will be
reported in the ``divergent`` attribute of the
raised :py:class:`NoConvergence` exception object.
Returns
-------
{2}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp`, and
`~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Using the method of fixed-point iterations, we
iterate starting with the initial approximation, which is
computed using the non-distortion-aware
:py:meth:`wcs_world2pix` (or equivalent).
The :py:meth:`all_world2pix` function uses a vectorized
implementation of the method of consecutive approximations and
therefore it is highly efficient (>30x compared to per-point calls) when *all* data points
that need to be converted from sky coordinates to image
coordinates are passed at *once*. Therefore, it is advisable,
whenever possible, to pass as input a long array of all points
that need to be converted to :py:meth:`all_world2pix` instead
of calling :py:meth:`all_world2pix` for each data point. Also
see the note to the ``adaptive`` parameter.
Raises
------
NoConvergence
The method did not converge to a
solution to the required accuracy within a specified
number of maximum iterations set by the ``maxiter``
parameter. To turn off this exception, set ``quiet`` to
`True`. Indices of the points for which the requested
accuracy was not achieved (if any) will be listed in the
``slow_conv`` attribute of the
raised :py:class:`NoConvergence` exception object.
See :py:class:`NoConvergence` documentation for
more details.
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
Examples
--------
>>> import astropy.io.fits as fits
>>> import astropy.wcs as wcs
>>> import numpy as np
>>> import os
>>> filename = os.path.join(wcs.__path__[0], 'tests/data/j94f05bgq_flt.fits')
>>> hdulist = fits.open(filename)
>>> w = wcs.WCS(hdulist[('sci',1)].header, hdulist)
>>> hdulist.close()
>>> ra, dec = w.all_pix2world([1,2,3], [1,1,1], 1)
>>> print(ra) # doctest: +FLOAT_CMP
[ 5.52645627 5.52649663 5.52653698]
>>> print(dec) # doctest: +FLOAT_CMP
[-72.05171757 -72.05171276 -72.05170795]
>>> radec = w.all_pix2world([[1,1], [2,1], [3,1]], 1)
>>> print(radec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 5.52649663 -72.05171276]
[ 5.52653698 -72.05170795]]
>>> x, y = w.all_world2pix(ra, dec, 1)
>>> print(x) # doctest: +FLOAT_CMP
[ 1.00000238 2.00000237 3.00000236]
>>> print(y) # doctest: +FLOAT_CMP
[ 0.99999996 0.99999997 0.99999997]
>>> xy = w.all_world2pix(radec, 1)
>>> print(xy) # doctest: +FLOAT_CMP
[[ 1.00000238 0.99999996]
[ 2.00000237 0.99999997]
[ 3.00000236 0.99999997]]
>>> xy = w.all_world2pix(radec, 1, maxiter=3,
... tolerance=1.0e-10, quiet=False)
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 3 iterations, the solution is
diverging at least for one input point.
>>> # Now try to use some diverging data:
>>> divradec = w.all_pix2world([[1.0, 1.0],
... [10000.0, 50000.0],
... [3.0, 1.0]], 1)
>>> print(divradec) # doctest: +FLOAT_CMP
[[ 5.52645627 -72.05171757]
[ 7.15976932 -70.8140779 ]
[ 5.52653698 -72.05170795]]
>>> # First, turn detect_divergence on:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=True,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000238e+00 9.99999965e-01]
[ -1.99441636e+06 1.44309097e+06]
[ 3.00000236e+00 9.99999966e-01]]
Achieved accuracy:
[[ 6.13968380e-05 8.59638593e-07]
[ 8.59526812e+11 6.61713548e+11]
[ 6.09398446e-05 8.38759724e-07]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 5 iterations, the solution is
diverging at least for one input point.
>>> # This time turn detect_divergence off:
>>> try: # doctest: +FLOAT_CMP
... xy = w.all_world2pix(divradec, 1, maxiter=20,
... tolerance=1.0e-4, adaptive=False,
... detect_divergence=False,
... quiet=False)
... except wcs.wcs.NoConvergence as e:
... print("Indices of diverging points: {{0}}"
... .format(e.divergent))
... print("Indices of poorly converging points: {{0}}"
... .format(e.slow_conv))
... print("Best solution:\\n{{0}}".format(e.best_solution))
... print("Achieved accuracy:\\n{{0}}".format(e.accuracy))
Indices of diverging points: [1]
Indices of poorly converging points: None
Best solution:
[[ 1.00000009 1. ]
[ nan nan]
[ 3.00000009 1. ]]
Achieved accuracy:
[[ 2.29417358e-06 3.21222995e-08]
[ nan nan]
[ 2.27407877e-06 3.13005639e-08]]
>>> raise e
Traceback (most recent call last):
...
NoConvergence: 'WCS.all_world2pix' failed to converge to the
requested accuracy. After 6 iterations, the solution is
diverging at least for one input point.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
def wcs_world2pix(self, *args, **kwargs):
if self.wcs is None:
raise ValueError("No basic WCS settings were created.")
return self._array_converter(
lambda xy, o: self.wcs.s2p(xy, o)['pixcrd'],
'input', *args, **kwargs)
wcs_world2pix.__doc__ = """
Transforms world coordinates to pixel coordinates, using only
the basic `wcslib`_ WCS transformation. No `SIP`_ or
`distortion paper`_ table lookup transformation is applied.
Parameters
----------
{0}
For a transformation that is not two-dimensional, the
two-argument form must be used.
{1}
Returns
-------
{2}
Notes
-----
The order of the axes for the input world array is determined by
the ``CTYPEia`` keywords in the FITS header, therefore it may
not always be of the form (*ra*, *dec*). The
`~astropy.wcs.Wcsprm.lat`, `~astropy.wcs.Wcsprm.lng`,
`~astropy.wcs.Wcsprm.lattyp` and `~astropy.wcs.Wcsprm.lngtyp`
members can be used to determine the order of the axes.
Raises
------
MemoryError
Memory allocation failed.
SingularMatrixError
Linear transformation matrix is singular.
InconsistentAxisTypesError
Inconsistent or unrecognized coordinate axis types.
ValueError
Invalid parameter value.
ValueError
Invalid coordinate transformation parameters.
ValueError
x- and y-coordinate arrays are not the same size.
InvalidTransformError
Invalid coordinate transformation parameters.
InvalidTransformError
Ill-conditioned coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('naxis', 8),
docstrings.RA_DEC_ORDER(8),
docstrings.RETURNS('pixel coordinates', 8))
def pix2foc(self, *args):
return self._array_converter(self._pix2foc, None, *args)
pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention and `distortion
paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def p4_pix2foc(self, *args):
return self._array_converter(self._p4_pix2foc, None, *args)
p4_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def det2im(self, *args):
return self._array_converter(self._det2im, None, *args)
det2im.__doc__ = """
Convert detector coordinates to image plane coordinates using
`distortion paper`_ table-lookup correction.
The output is in absolute pixel coordinates, not relative to
``CRPIX``.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def sip_pix2foc(self, *args):
if self.sip is None:
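# With no SIP distortion the focal-plane coordinates equal the pixel
# coordinates, so simply echo the input back in whichever form it was
# given (a single Nx2 array, or separate axis arrays plus origin).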
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.pix2foc, None, *args)
sip_pix2foc.__doc__ = """
Convert pixel coordinates to focal plane coordinates using the
`SIP`_ polynomial distortion convention.
The output is in pixel coordinates, relative to ``CRPIX``.
FITS WCS `distortion paper`_ table lookup correction is not
applied, even if that information existed in the FITS file
that initialized this :class:`~astropy.wcs.WCS` object. To
correct for that, use `~astropy.wcs.WCS.pix2foc` or
`~astropy.wcs.WCS.p4_pix2foc`.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('focal coordinates', 8))
def sip_foc2pix(self, *args):
if self.sip is None:
if len(args) == 2:
return args[0]
elif len(args) == 3:
return args[:2]
else:
raise TypeError("Wrong number of arguments")
return self._array_converter(self.sip.foc2pix, None, *args)
sip_foc2pix.__doc__ = """
Convert focal plane coordinates to pixel coordinates using the
`SIP`_ polynomial distortion convention.
FITS WCS `distortion paper`_ table lookup distortion
correction is not applied, even if that information existed in
the FITS file that initialized this `~astropy.wcs.WCS` object.
Parameters
----------
{0}
Returns
-------
{1}
Raises
------
MemoryError
Memory allocation failed.
ValueError
Invalid coordinate transformation parameters.
""".format(docstrings.TWO_OR_MORE_ARGS('2', 8),
docstrings.RETURNS('pixel coordinates', 8))
def to_fits(self, relax=False, key=None):
"""
Generate an `astropy.io.fits.HDUList` object with all of the
information stored in this object. This should be logically identical
to the input FITS file, but it will be normalized in a number of ways.
See `to_header` for some warnings about the output produced.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
hdulist : `astropy.io.fits.HDUList`
"""
header = self.to_header(relax=relax, key=key)
hdu = fits.PrimaryHDU(header=header)
hdulist = fits.HDUList(hdu)
self._write_det2im(hdulist)
self._write_distortion_kw(hdulist)
return hdulist
def to_header(self, relax=None, key=None):
"""Generate an `astropy.io.fits.Header` object with the basic WCS
and SIP information stored in this object. This should be
logically identical to the input FITS file, but it will be
normalized in a number of ways.
.. warning::
This function does not write out FITS WCS `distortion
paper`_ information, since that requires multiple FITS
header data units. To get a full representation of
everything in this object, use `to_fits`.
Parameters
----------
relax : bool or int, optional
Degree of permissiveness:
- `False` (default): Write all extensions that are
considered to be safe and recommended.
- `True`: Write all recognized informal extensions of the
WCS standard.
- `int`: a bit field selecting specific extensions to
write. See :ref:`relaxwrite` for details.
If the ``relax`` keyword argument is not given and any
keywords were omitted from the output, an
`~astropy.utils.exceptions.AstropyWarning` is displayed.
To override this, explicitly pass a value to ``relax``.
key : str
The name of a particular WCS transform to use. This may be
either ``' '`` or ``'A'``-``'Z'`` and corresponds to the ``"a"``
part of the ``CTYPEia`` cards.
Returns
-------
header : `astropy.io.fits.Header`
Notes
-----
The output header will almost certainly differ from the input in a
number of respects:
1. The output header only contains WCS-related keywords. In
particular, it does not contain syntactically-required
keywords such as ``SIMPLE``, ``NAXIS``, ``BITPIX``, or
``END``.
2. Deprecated (e.g. ``CROTAn``) or non-standard usage will
be translated to standard (this is partially dependent on
whether ``fix`` was applied).
3. Quantities will be converted to the units used internally,
basically SI with the addition of degrees.
4. Floating-point quantities may be given to a different decimal
precision.
5. Elements of the ``PCi_j`` matrix will be written if and
only if they differ from the unit matrix. Thus, if the
matrix is unity then no elements will be written.
6. Additional keywords such as ``WCSAXES``, ``CUNITia``,
``LONPOLEa`` and ``LATPOLEa`` may appear.
7. The original keycomments will be lost, although
`to_header` tries hard to write meaningful comments.
8. Keyword order may be changed.
"""
# default precision for numerical WCS keywords
precision = WCSHDO_P14
display_warning = False
if relax is None:
display_warning = True
relax = False
if relax not in (True, False):
do_sip = relax & WCSHDO_SIP
relax &= ~WCSHDO_SIP
else:
do_sip = relax
relax = WCSHDO_all if relax is True else WCSHDO_safe
relax = precision | relax
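# At this point ``relax`` is a wcslib WCSHDO_* bit mask: boolean input maps
# to WCSHDO_all/WCSHDO_safe, integer input has its SIP bit split off into
# ``do_sip``, and the numeric-precision bits are OR-ed in above.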
if self.wcs is not None:
if key is not None:
orig_key = self.wcs.alt
self.wcs.alt = key
header_string = self.wcs.to_header(relax)
header = fits.Header.fromstring(header_string)
keys_to_remove = ["", " ", "COMMENT"]
for kw in keys_to_remove:
if kw in header:
del header[kw]
else:
header = fits.Header()
if do_sip and self.sip is not None:
if self.wcs is not None and any(not ctyp.endswith('-SIP') for ctyp in self.wcs.ctype):
self._fix_ctype(header, add_sip=True)
for kw, val in self._write_sip_kw().items():
header[kw] = val
if not do_sip and self.wcs is not None and any(self.wcs.ctype) and self.sip is not None:
# This is called when relax is not False or WCSHDO_SIP
# The default case of ``relax=None`` is handled further in the code.
header = self._fix_ctype(header, add_sip=False)
if display_warning:
full_header = self.to_header(relax=True, key=key)
missing_keys = []
for kw, val in full_header.items():
if kw not in header:
missing_keys.append(kw)
if len(missing_keys):
warnings.warn(
"Some non-standard WCS keywords were excluded: {0} "
"Use the ``relax`` kwarg to control this.".format(
', '.join(missing_keys)),
AstropyWarning)
# called when ``relax=None``
# This is different from the case of ``relax=False``.
if any(self.wcs.ctype) and self.sip is not None:
header = self._fix_ctype(header, add_sip=False, log_message=False)
# Finally reset the key. This must be called after ``_fix_ctype``.
if key is not None:
self.wcs.alt = orig_key
return header
def _fix_ctype(self, header, add_sip=True, log_message=True):
"""
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header.
add_sip : bool
Flag indicating whether "-SIP" should be added or removed from CTYPE keywords.
Remove "-SIP" from CTYPE when writing out a header with relax=False.
This needs to be done outside ``to_header`` because ``to_header`` runs
twice when ``relax=False`` and the second time ``relax`` is set to ``True``
to display the missing keywords.
If the user requested SIP distortion to be written out add "-SIP" to
CTYPE if it is missing.
"""
_add_sip_to_ctype = """
Inconsistent SIP distortion information is present in the current WCS:
SIP coefficients were detected, but CTYPE is missing "-SIP" suffix,
therefore the current WCS is internally inconsistent.
Because relax has been set to True, the resulting output WCS will have
"-SIP" appended to CTYPE in order to make the header internally consistent.
However, this may produce incorrect astrometry in the output WCS, if
in fact the current WCS is already distortion-corrected.
Therefore, if the current WCS is already distortion-corrected (e.g., drizzled)
then SIP distortion components should not apply. In that case, for a WCS
that is already distortion-corrected, please remove the SIP coefficients
from the header.
"""
if log_message:
if add_sip:
log.info(_add_sip_to_ctype)
for i in range(1, self.naxis+1):
# strip() must be called here to cover the case of alt key= " "
kw = 'CTYPE{0}{1}'.format(i, self.wcs.alt).strip()
if kw in header:
# Use an explicit suffix check: str.strip("-SIP") would remove any of the
# characters '-', 'S', 'I', 'P' from both ends, not the literal "-SIP" suffix.
if add_sip:
val = header[kw] if header[kw].endswith("-SIP") else header[kw] + "-SIP"
else:
val = header[kw][:-4] if header[kw].endswith("-SIP") else header[kw]
header[kw] = val
else:
continue
return header
def to_header_string(self, relax=None):
"""
Identical to `to_header`, but returns a string containing the
header cards.
"""
return str(self.to_header(relax))
def footprint_to_file(self, filename='footprint.reg', color='green',
width=2, coordsys=None):
"""
Writes out a `ds9`_ style regions file. It can be loaded
directly by `ds9`_.
Parameters
----------
filename : str, optional
Output file name - default is ``'footprint.reg'``
color : str, optional
Color to use when plotting the line.
width : int, optional
Width of the region line.
coordsys : str, optional
Coordinate system. If not specified (default), the ``radesys``
value is used. For all possible values, see
http://ds9.si.edu/doc/ref/region.html#RegionFileFormat
"""
comments = ('# Region file format: DS9 version 4.0 \n'
'# global color=green font="helvetica 12 bold '
'select=1 highlite=1 edit=1 move=1 delete=1 '
'include=1 fixed=0 source\n')
coordsys = coordsys or self.wcs.radesys
if coordsys not in ('PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5',
'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS', 'LINEAR',
'AMPLIFIER', 'DETECTOR'):
raise ValueError("Coordinate system '{}' is not supported. A valid"
" one can be given with the 'coordsys' argument."
.format(coordsys))
with open(filename, mode='w') as f:
f.write(comments)
f.write('{}\n'.format(coordsys))
f.write('polygon(')
ftpr = self.calc_footprint()
if ftpr is not None:
ftpr.tofile(f, sep=',')
f.write(') # color={0}, width={1:d} \n'.format(color, width))
@property
def _naxis1(self):
warnings.warn(NAXIS_DEPRECATE_MESSAGE, AstropyDeprecationWarning)
return self._naxis[0]
@_naxis1.setter
def _naxis1(self, value):
warnings.warn(NAXIS_DEPRECATE_MESSAGE, AstropyDeprecationWarning)
self._naxis[0] = value
@property
def _naxis2(self):
warnings.warn(NAXIS_DEPRECATE_MESSAGE, AstropyDeprecationWarning)
return self._naxis[1]
@_naxis2.setter
def _naxis2(self, value):
warnings.warn(NAXIS_DEPRECATE_MESSAGE, AstropyDeprecationWarning)
self._naxis[1] = value
def _get_naxis(self, header=None):
_naxis = []
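# Collect NAXIS1, NAXIS2, ... from the header until the first missing
# keyword, then pad with zeros so ``self._naxis`` always has at least
# two entries.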
if (header is not None and
not isinstance(header, (str, bytes))):
for naxis in itertools.count(1):
try:
_naxis.append(header['NAXIS{}'.format(naxis)])
except KeyError:
break
if len(_naxis) == 0:
_naxis = [0, 0]
elif len(_naxis) == 1:
_naxis.append(0)
self._naxis = _naxis
def printwcs(self):
print(repr(self))
def __repr__(self):
'''
Return a short description. Simply porting the behavior from
the `printwcs()` method.
'''
description = ["WCS Keywords\n",
"Number of WCS axes: {0!r}".format(self.naxis)]
sfmt = ' : ' + "".join(["{"+"{0}".format(i)+"!r} " for i in range(self.naxis)])
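# ``sfmt`` renders one repr-formatted value per WCS axis, e.g.
# ' : {0!r} {1!r} ' for a two-axis WCS.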
keywords = ['CTYPE', 'CRVAL', 'CRPIX']
values = [self.wcs.ctype, self.wcs.crval, self.wcs.crpix]
for keyword, value in zip(keywords, values):
description.append(keyword+sfmt.format(*value))
if hasattr(self.wcs, 'pc'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += ''.join(['PC', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.pc[i]))
s = 'CDELT' + sfmt
description.append(s.format(*self.wcs.cdelt))
elif hasattr(self.wcs, 'cd'):
for i in range(self.naxis):
s = ''
for j in range(self.naxis):
s += "".join(['CD', str(i+1), '_', str(j+1), ' '])
s += sfmt
description.append(s.format(*self.wcs.cd[i]))
description.append('NAXIS : {}'.format(' '.join(map(str, self._naxis))))
return '\n'.join(description)
def get_axis_types(self):
"""
Similar to `self.wcsprm.axis_types <astropy.wcs.Wcsprm.axis_types>`
but provides the information in a more Python-friendly format.
Returns
-------
result : list of dicts
Returns a list of dictionaries, one for each axis, each
containing attributes about the type of that axis.
Each dictionary has the following keys:
- 'coordinate_type':
- None: Non-specific coordinate type.
- 'stokes': Stokes coordinate.
- 'celestial': Celestial coordinate (including ``CUBEFACE``).
- 'spectral': Spectral coordinate.
- 'scale':
- 'linear': Linear axis.
- 'quantized': Quantized axis (``STOKES``, ``CUBEFACE``).
- 'non-linear celestial': Non-linear celestial axis.
- 'non-linear spectral': Non-linear spectral axis.
- 'logarithmic': Logarithmic axis.
- 'tabular': Tabular axis.
- 'group'
- Group number, e.g. lookup table number
- 'number'
- For celestial axes:
- 0: Longitude coordinate.
- 1: Latitude coordinate.
- 2: ``CUBEFACE`` number.
- For lookup tables:
- the axis number in a multidimensional table.
``CTYPEia`` in ``"4-3"`` form with unrecognized algorithm code will
generate an error.
"""
if self.wcs is None:
raise AttributeError(
"This WCS object does not have a wcsprm object.")
coordinate_type_map = {
0: None,
1: 'stokes',
2: 'celestial',
3: 'spectral'}
scale_map = {
0: 'linear',
1: 'quantized',
2: 'non-linear celestial',
3: 'non-linear spectral',
4: 'logarithmic',
5: 'tabular'}
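# Each wcslib ``axis_types`` entry packs four decimal digits: the thousands
# digit is the coordinate type, hundreds the scale, tens the group, and
# units the axis number; they are decoded digit by digit below.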
result = []
for axis_type in self.wcs.axis_types:
subresult = {}
coordinate_type = (axis_type // 1000) % 10
subresult['coordinate_type'] = coordinate_type_map[coordinate_type]
scale = (axis_type // 100) % 10
subresult['scale'] = scale_map[scale]
group = (axis_type // 10) % 10
subresult['group'] = group
number = axis_type % 10
subresult['number'] = number
result.append(subresult)
return result
def __reduce__(self):
"""
Support pickling of WCS objects. This is done by serializing
to an in-memory FITS file and dumping that as a string.
"""
hdulist = self.to_fits(relax=True)
buffer = io.BytesIO()
hdulist.writeto(buffer)
return (__WCS_unpickle__,
(self.__class__, self.__dict__, buffer.getvalue(),))
def dropaxis(self, dropax):
"""
Remove an axis from the WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS with naxis to be chopped to naxis-1
dropax : int
The index of the WCS to drop, counting from 0 (i.e., python convention,
not FITS convention)
Returns
-------
A new `~astropy.wcs.WCS` instance with one axis fewer
"""
inds = list(range(self.wcs.naxis))
inds.pop(dropax)
# axis 0 has special meaning to sub
# if wcs.wcs.ctype == ['RA','DEC','VLSR'], you want
# wcs.sub([1,2]) to get 'RA','DEC' back
return self.sub([i+1 for i in inds])
def swapaxes(self, ax0, ax1):
"""
Swap axes in a WCS.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The WCS to have its axes swapped
ax0 : int
ax1 : int
The indices of the WCS to be swapped, counting from 0 (i.e., python
convention, not FITS convention)
Returns
-------
A new `~astropy.wcs.WCS` instance with the same number of axes, but two
swapped
"""
inds = list(range(self.wcs.naxis))
inds[ax0], inds[ax1] = inds[ax1], inds[ax0]
return self.sub([i+1 for i in inds])
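    # Illustrative sketch (same hypothetical 3-d ``w`` as above; not executed
    # here): swapping the first two axes reorders CTYPE/CRVAL/CRPIX accordingly.
    #
    # >>> w_swapped = w.swapaxes(0, 1)
    # >>> list(w_swapped.wcs.ctype)
    # ['DEC--TAN', 'RA---TAN', 'VELO-LSR']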
def reorient_celestial_first(self):
"""
Reorient the WCS such that the celestial axes are first, followed by
the spectral axis, followed by any others.
Assumes at least celestial axes are present.
"""
return self.sub([WCSSUB_CELESTIAL, WCSSUB_SPECTRAL, WCSSUB_STOKES])
def slice(self, view, numpy_order=True):
"""
Slice a WCS instance using a Numpy slice. The order of the slice should
be reversed (as for the data) compared to the natural WCS order.
Parameters
----------
view : tuple
A tuple containing the same number of slices as the WCS system.
The ``step`` method, the third argument to a slice, is not
presently supported.
numpy_order : bool
Use numpy order, i.e. slice the WCS so that an identical slice
applied to a numpy array will slice the array and WCS in the same
way. If set to `False`, the WCS will be sliced in FITS order,
meaning the first slice will be applied to the *last* numpy index
but the *first* WCS axis.
Returns
-------
wcs_new : `~astropy.wcs.WCS`
            A new WCS instance corresponding to the sliced (resampled) axes.
"""
if hasattr(view, '__len__') and len(view) > self.wcs.naxis:
raise ValueError("Must have # of slices <= # of WCS axes")
elif not hasattr(view, '__len__'): # view MUST be an iterable
view = [view]
if not all(isinstance(x, slice) for x in view):
# We need to drop some dimensions, but this may not always be
# possible with .sub due to correlated axes, so instead we use the
# generalized slicing infrastructure from astropy.wcs.wcsapi.
return SlicedFITSWCS(self, view)
# NOTE: we could in principle use SlicedFITSWCS as above for all slicing,
# but in the simple case where there are no axes dropped, we can just
# create a full WCS object with updated WCS parameters which is faster
# for this specific case and also backward-compatible.
wcs_new = self.deepcopy()
if wcs_new.sip is not None:
sip_crpix = wcs_new.sip.crpix.tolist()
for i, iview in enumerate(view):
if iview.step is not None and iview.step < 0:
raise NotImplementedError("Reversing an axis is not "
"implemented.")
if numpy_order:
wcs_index = self.wcs.naxis - 1 - i
else:
wcs_index = i
if iview.step is not None and iview.start is None:
# Slice from "None" is equivalent to slice from 0 (but one
# might want to downsample, so allow slices with
# None,None,step or None,stop,step)
iview = slice(0, iview.stop, iview.step)
if iview.start is not None:
if iview.step not in (None, 1):
crpix = self.wcs.crpix[wcs_index]
cdelt = self.wcs.cdelt[wcs_index]
# equivalently (keep this comment so you can compare eqns):
# wcs_new.wcs.crpix[wcs_index] =
# (crpix - iview.start)*iview.step + 0.5 - iview.step/2.
crp = ((crpix - iview.start - 1.)/iview.step
+ 0.5 + 1./iview.step/2.)
wcs_new.wcs.crpix[wcs_index] = crp
if wcs_new.sip is not None:
sip_crpix[wcs_index] = crp
wcs_new.wcs.cdelt[wcs_index] = cdelt * iview.step
else:
wcs_new.wcs.crpix[wcs_index] -= iview.start
if wcs_new.sip is not None:
sip_crpix[wcs_index] -= iview.start
try:
# range requires integers but the other attributes can also
# handle arbitrary values, so this needs to be in a try/except.
nitems = len(builtins.range(self._naxis[wcs_index])[iview])
except TypeError as exc:
if 'indices must be integers' not in str(exc):
raise
warnings.warn("NAXIS{0} attribute is not updated because at "
"least one index ('{1}') is no integer."
"".format(wcs_index, iview), AstropyUserWarning)
else:
wcs_new._naxis[wcs_index] = nitems
if wcs_new.sip is not None:
wcs_new.sip = Sip(self.sip.a, self.sip.b, self.sip.ap, self.sip.bp,
sip_crpix)
return wcs_new
def __getitem__(self, item):
# "getitem" is a shortcut for self.slice; it is very limited
# there is no obvious and unambiguous interpretation of wcs[1,2,3]
# We COULD allow wcs[1] to link to wcs.sub([2])
        # (wcs[i] -> wcs.sub([i+1]))
return self.slice(item)
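    # Illustrative sketch (hypothetical 2-d image WCS ``w``; not executed here):
    # numpy-style slicing updates CRPIX (and CDELT when a step is given), so the
    # sliced WCS stays aligned with the correspondingly sliced data array.
    #
    # >>> w_cut = w[100:200, 50:150]    # same as w.slice((slice(100, 200), slice(50, 150)))
    # >>> w_down = w[::2, ::2]          # downsample by a factor of 2 on both axes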
def __iter__(self):
# Having __getitem__ makes Python think WCS is iterable. However,
# Python first checks whether __iter__ is present, so we can raise an
# exception here.
raise TypeError("'{0}' object is not iterable".format(self.__class__.__name__))
@property
def axis_type_names(self):
"""
World names for each coordinate axis
Returns
-------
A list of names along each axis
"""
names = list(self.wcs.cname)
types = self.wcs.ctype
for i in range(len(names)):
if len(names[i]) > 0:
continue
names[i] = types[i].split('-')[0]
return names
@property
def celestial(self):
"""
A copy of the current WCS with only the celestial axes included
"""
return self.sub([WCSSUB_CELESTIAL])
@property
def is_celestial(self):
return self.has_celestial and self.naxis == 2
@property
def has_celestial(self):
try:
return self.wcs.lng >= 0 and self.wcs.lat >= 0
except InconsistentAxisTypesError:
return False
@property
def has_distortion(self):
"""
Returns `True` if any distortion terms are present.
"""
        return (self.sip is not None or
                self.cpdis1 is not None or self.cpdis2 is not None or
                self.det2im1 is not None or self.det2im2 is not None)
@property
def pixel_scale_matrix(self):
try:
cdelt = np.diag(self.wcs.get_cdelt())
pc = self.wcs.get_pc()
except InconsistentAxisTypesError:
try:
# for non-celestial axes, get_cdelt doesn't work
cdelt = np.dot(self.wcs.cd, np.diag(self.wcs.cdelt))
except AttributeError:
cdelt = np.diag(self.wcs.cdelt)
try:
pc = self.wcs.pc
except AttributeError:
pc = 1
pccd = np.array(np.dot(cdelt, pc))
return pccd
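    # Illustrative sketch (hypothetical celestial WCS ``w``; not executed here):
    # the diagonal of the matrix gives an approximate per-axis pixel scale in
    # the CUNIT units, provided the axes are not strongly rotated or skewed.
    #
    # >>> import numpy as np
    # >>> np.abs(np.diag(w.pixel_scale_matrix))    # e.g. degrees per pixel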
def _as_mpl_axes(self):
"""
Compatibility hook for Matplotlib and WCSAxes.
With this method, one can do:
from astropy.wcs import WCS
import matplotlib.pyplot as plt
wcs = WCS('filename.fits')
fig = plt.figure()
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
...
and this will generate a plot with the correct WCS coordinates on the
axes.
"""
from astropy.visualization.wcsaxes import WCSAxes
return WCSAxes, {'wcs': self}
def footprint_contains(self, coord, **kwargs):
"""
Determines if a given SkyCoord is contained in the wcs footprint.
Parameters
----------
coord : `~astropy.coordinates.SkyCoord`
            The coordinate to check for containment within the WCS footprint.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
return coord.contained_by(self, **kwargs)
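    # Illustrative sketch (hypothetical image WCS ``w`` and target; not executed here):
    #
    # >>> import astropy.units as u
    # >>> from astropy.coordinates import SkyCoord
    # >>> c = SkyCoord(ra=150.1 * u.deg, dec=2.2 * u.deg)
    # >>> w.footprint_contains(c)
    # True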
def __WCS_unpickle__(cls, dct, fits_data):
"""
Unpickles a WCS object from a serialized FITS string.
"""
self = cls.__new__(cls)
self.__dict__.update(dct)
buffer = io.BytesIO(fits_data)
hdulist = fits.open(buffer)
WCS.__init__(self, hdulist[0].header, hdulist)
return self
def find_all_wcs(header, relax=True, keysel=None, fix=True,
translate_units='',
_do_set=True):
"""
Find all the WCS transformations in the given header.
Parameters
----------
header : str or astropy.io.fits header object.
relax : bool or int, optional
Degree of permissiveness:
- `True` (default): Admit all recognized informal extensions of the
WCS standard.
- `False`: Recognize only FITS keywords defined by the
published WCS standard.
- `int`: a bit field selecting specific extensions to accept.
See :ref:`relaxread` for details.
keysel : sequence of flags, optional
A list of flags used to select the keyword types considered by
wcslib. When ``None``, only the standard image header
keywords are considered (and the underlying wcspih() C
function is called). To use binary table image array or pixel
list keywords, *keysel* must be set.
Each element in the list should be one of the following strings:
- 'image': Image header keywords
- 'binary': Binary table image array keywords
- 'pixel': Pixel list keywords
Keywords such as ``EQUIna`` or ``RFRQna`` that are common to
binary table image arrays and pixel lists (including
``WCSNna`` and ``TWCSna``) are selected by both 'binary' and
'pixel'.
fix : bool, optional
When `True` (default), call `~astropy.wcs.Wcsprm.fix` on
the resulting objects to fix any non-standard uses in the
header. `FITSFixedWarning` warnings will be emitted if any
changes were made.
translate_units : str, optional
Specify which potentially unsafe translations of non-standard
unit strings to perform. By default, performs none. See
`WCS.fix` for more information about this parameter. Only
effective when ``fix`` is `True`.
Returns
-------
wcses : list of `WCS` objects
"""
if isinstance(header, (str, bytes)):
header_string = header
elif isinstance(header, fits.Header):
header_string = header.tostring()
else:
raise TypeError(
"header must be a string or astropy.io.fits.Header object")
keysel_flags = _parse_keysel(keysel)
if isinstance(header_string, str):
header_bytes = header_string.encode('ascii')
else:
header_bytes = header_string
wcsprms = _wcs.find_all_wcs(header_bytes, relax, keysel_flags)
result = []
for wcsprm in wcsprms:
subresult = WCS(fix=False, _do_set=False)
subresult.wcs = wcsprm
result.append(subresult)
if fix:
subresult.fix(translate_units)
if _do_set:
subresult.wcs.set()
return result
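# Illustrative sketch (hypothetical file name; not executed here): a header may
# carry several alternate WCS solutions (keys ' ', 'A', 'B', ...), and this
# helper returns one WCS object per solution found.
#
# >>> from astropy.io import fits
# >>> hdr = fits.getheader('image.fits')
# >>> wcses = find_all_wcs(hdr)
# >>> [w.wcs.alt for w in wcses]
# [' ', 'A']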
def validate(source):
"""
Prints a WCS validation report for the given FITS file.
Parameters
----------
source : str path, readable file-like object or `astropy.io.fits.HDUList` object
The FITS file to validate.
Returns
-------
results : WcsValidateResults instance
The result is returned as nested lists. The first level
corresponds to the HDUs in the given file. The next level has
an entry for each WCS found in that header. The special
subclass of list will pretty-print the results as a table when
printed.
"""
class _WcsValidateWcsResult(list):
def __init__(self, key):
self._key = key
def __repr__(self):
result = [" WCS key '{0}':".format(self._key or ' ')]
if len(self):
for entry in self:
for i, line in enumerate(entry.splitlines()):
if i == 0:
initial_indent = ' - '
else:
initial_indent = ' '
result.extend(
textwrap.wrap(
line,
initial_indent=initial_indent,
subsequent_indent=' '))
else:
result.append(" No issues.")
return '\n'.join(result)
class _WcsValidateHduResult(list):
def __init__(self, hdu_index, hdu_name):
self._hdu_index = hdu_index
self._hdu_name = hdu_name
list.__init__(self)
def __repr__(self):
if len(self):
if self._hdu_name:
hdu_name = ' ({0})'.format(self._hdu_name)
else:
hdu_name = ''
result = ['HDU {0}{1}:'.format(self._hdu_index, hdu_name)]
for wcs in self:
result.append(repr(wcs))
return '\n'.join(result)
return ''
class _WcsValidateResults(list):
def __repr__(self):
result = []
for hdu in self:
content = repr(hdu)
if len(content):
result.append(content)
return '\n\n'.join(result)
global __warningregistry__
if isinstance(source, fits.HDUList):
hdulist = source
else:
hdulist = fits.open(source)
results = _WcsValidateResults()
for i, hdu in enumerate(hdulist):
hdu_results = _WcsValidateHduResult(i, hdu.name)
results.append(hdu_results)
with warnings.catch_warnings(record=True) as warning_lines:
wcses = find_all_wcs(
hdu.header, relax=_wcs.WCSHDR_reject,
fix=False, _do_set=False)
for wcs in wcses:
wcs_results = _WcsValidateWcsResult(wcs.wcs.alt)
hdu_results.append(wcs_results)
try:
del __warningregistry__
except NameError:
pass
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter(
"always", FITSFixedWarning, append=True)
try:
WCS(hdu.header,
key=wcs.wcs.alt or ' ',
relax=_wcs.WCSHDR_reject,
fix=True, _do_set=False)
except WcsError as e:
wcs_results.append(str(e))
wcs_results.extend([str(x.message) for x in warning_lines])
return results
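# Illustrative sketch (hypothetical file name; not executed here): the returned
# results object pretty-prints as a per-HDU, per-WCS-key report.
#
# >>> from astropy.wcs import validate
# >>> print(validate('image.fits'))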
cc65ce455a56a669623c806e2759a2be07e26934d73728f3fbcf60652ce05d34 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
import copy
import operator
from datetime import datetime, date, timedelta
from time import strftime, strptime
import numpy as np
from astropy import units as u, constants as const
from astropy import _erfa as erfa
from astropy.units import UnitConversionError
from astropy.utils import ShapedLikeNDArray
from astropy.utils.compat.misc import override__dir__
from astropy.utils.data_info import MixinInfo, data_info_factory
from .utils import day_frac
from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS,
TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime)
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # pylint: disable=W0611
from astropy.extern import _strptime
__all__ = ['Time', 'TimeDelta', 'TIME_SCALES', 'STANDARD_TIME_SCALES', 'TIME_DELTA_SCALES',
'ScaleValueError', 'OperandTypeError', 'TimeInfo']
STANDARD_TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
LOCAL_SCALES = ('local',)
TIME_TYPES = dict((scale, scales) for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales)
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'),
('tai', 'tcg'): ('tt',),
('tai', 'ut1'): ('utc',),
('tai', 'tdb'): ('tt',),
('tcb', 'tcg'): ('tdb', 'tt'),
('tcb', 'tt'): ('tdb',),
('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'),
('tcb', 'utc'): ('tdb', 'tt', 'tai'),
('tcg', 'tdb'): ('tt',),
('tcg', 'ut1'): ('tt', 'tai', 'utc'),
('tcg', 'utc'): ('tt', 'tai'),
('tdb', 'ut1'): ('tt', 'tai', 'utc'),
('tdb', 'utc'): ('tt', 'tai'),
('tt', 'ut1'): ('tai', 'utc'),
('tt', 'utc'): ('tai',),
}
GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg')
BARYCENTRIC_SCALES = ('tcb', 'tdb')
ROTATIONAL_SCALES = ('ut1',)
TIME_DELTA_TYPES = dict((scale, scales)
for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES,
ROTATIONAL_SCALES, LOCAL_SCALES) for scale in scales)
TIME_DELTA_SCALES = GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {('tt', 'tai'): None,
('tai', 'tt'): None,
('tcg', 'tt'): -erfa.ELG,
('tt', 'tcg'): erfa.ELG / (1. - erfa.ELG),
('tcg', 'tai'): -erfa.ELG,
('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG),
('tcb', 'tdb'): -erfa.ELB,
('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)}
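# Worked example of the offsets above (illustrative; not executed here): a
# TimeDelta on the 'tt' scale is stretched by 1 + L_G/(1 - L_G) when expressed
# on the 'tcg' scale, i.e. dt_tcg = dt_tt * (1 + SCALE_OFFSETS[('tt', 'tcg')]),
# which amounts to roughly 6e-5 s per day.
#
# >>> from astropy.time import TimeDelta
# >>> dt = TimeDelta(86400.0, format='sec', scale='tt')
# >>> dt.tcg.sec - dt.sec       # ~6e-5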
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
'mean': {
'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')},
'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')},
'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',)}},
'apparent': {
'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')},
'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')},
'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)},
'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',)}}}
class TimeInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = set(['unit']) # unit is read-only and None
attr_names = MixinInfo.attr_names | {'serialize_method'}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
# by a property, since Time can be serialized different ways.
_represent_as_dict_extra_attrs = ('format', 'scale', 'precision',
'in_subfmt', 'out_subfmt', 'location',
'_delta_ut1_utc', '_delta_tdb_tt')
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = 'value'
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == 'formatted_value':
out = ('value',)
elif method == 'jd1_jd2':
out = ('jd1', 'jd2')
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {'fits': 'jd1_jd2',
'ecsv': 'formatted_value',
'hdf5': 'jd1_jd2',
'yaml': 'jd1_jd2',
None: 'jd1_jd2'}
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats]))
# When Time has mean, std, min, max methods:
# funcs = [lambda x: getattr(x, stat)() for stat_name in MixinInfo._stats])
def _construct_from_dict_base(self, map):
if 'jd1' in map and 'jd2' in map:
format = map.pop('format')
map['format'] = 'jd'
map['val'] = map.pop('jd1')
map['val2'] = map.pop('jd2')
else:
format = map['format']
map['val'] = map.pop('value')
out = self._parent_cls(**map)
out.format = format
return out
def _construct_from_dict(self, map):
delta_ut1_utc = map.pop('_delta_ut1_utc', None)
delta_tdb_tt = map.pop('_delta_tdb_tt', None)
out = self._construct_from_dict_base(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'description'))
attrs.pop('dtype') # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError('input columns have inconsistent locations')
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop('shape')
jd2000 = 2451544.5 # Arbitrary JD value J2000.0 that will work with ERFA
jd1 = np.full(shape, jd2000, dtype='f8')
jd2 = np.zeros(shape, dtype='f8')
tm_attrs = {attr: getattr(col0, attr)
for attr in ('scale', 'location',
'precision', 'in_subfmt', 'out_subfmt')}
out = self._parent_cls(jd1, jd2, format='jd', **tm_attrs)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class TimeDeltaInfo(TimeInfo):
_represent_as_dict_extra_attrs = ('format', 'scale')
def _construct_from_dict(self, map):
return self._construct_from_dict_base(map)
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new TimeDelta instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'description'))
attrs.pop('dtype') # Not relevant for Time
col0 = cols[0]
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop('shape')
jd1 = np.zeros(shape, dtype='f8')
jd2 = np.zeros(shape, dtype='f8')
out = self._parent_cls(jd1, jd2, format='jd', scale=col0.scale)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class Time(ShapedLikeNDArray):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date',
'datetime', 'iso', 'isot', 'yday', 'datetime64', 'fits', 'byear',
'jyear', 'byear_str', 'jyear_str']
See also: http://docs.astropy.org/en/stable/time/
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Subformat for inputting string times
out_subfmt : str, optional
Subformat for outputting string times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
        If given as a tuple, it should be able to initialize an
        EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __new__(cls, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if isinstance(val, cls):
self = val.replicate(format=format, copy=copy)
else:
self = super().__new__(cls)
return self
def __getnewargs__(self):
return (self._time,)
def __init__(self, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if location is not None:
from astropy.coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
self.location = None
if isinstance(val, self.__class__):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(val, val2, format, scale, copy,
precision, in_subfmt, out_subfmt)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (self.location.size > 1 and
self.location.shape != self.shape):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape,
subok=True)
except Exception:
raise ValueError('The location with shape {0} cannot be '
'broadcast against time with shape {1}. '
'Typically, either give a single location or '
'one for each time.'
.format(self.location.shape, self.shape))
def _init_from_vals(self, val, val2, format, scale, copy,
precision=None, in_subfmt=None, out_subfmt=None):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = '*'
if out_subfmt is None:
out_subfmt = '*'
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError('Input val and val2 have inconsistent shape; '
'they cannot be broadcast together.')
if scale is not None:
if not (isinstance(scale, str) and
scale.lower() in self.SCALES):
raise ScaleValueError("Scale {0!r} is not in the allowed scales "
"{1}".format(scale,
sorted(self.SCALES)))
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(val, val2, format, scale,
precision, in_subfmt, out_subfmt)
self._format = self._time.name
        # If any inputs were masked then mask jd2 accordingly. From the above
        # routine ``mask`` must be either Python bool False or a bool ndarray
        # with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd2[mask] = np.nan
def _get_time_fmt(self, val, val2, format, scale,
precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None and val.dtype.kind in ('S', 'U', 'O', 'M'):
formats = [(name, cls) for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)]
err_msg = ('any of the formats where the format keyword is '
'optional {0}'.format([name for name, cls in formats]))
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(('astropy_time', TimeAstropyTime))
elif not (isinstance(format, str) and
format.lower() in self.FORMATS):
if format is None:
raise ValueError("No time format was given, and the input is "
"not unique")
else:
raise ValueError("Format {0!r} is not one of the allowed "
"formats {1}".format(format,
sorted(self.FORMATS)))
else:
formats = [(format, self.FORMATS[format])]
err_msg = 'the format class {0}'.format(format)
for format, FormatClass in formats:
try:
return FormatClass(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError):
pass
else:
raise ValueError('Input values did not match {0}'.format(err_msg))
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
            function, so its accuracy and precision are determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
nowtime
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format='datetime', scale='utc')
info = TimeInfo()
@classmethod
def strptime(cls, time_string, format_string, **kwargs):
"""
Parse a string to a Time according to a format specification.
See `time.strptime` documentation for format specification.
>>> Time.strptime('2012-Jun-30 23:59:60', '%Y-%b-%d %H:%M:%S')
<Time object: scale='utc' format='isot' value=2012-06-30T23:59:60.000>
Parameters
----------
time_string : string, sequence, ndarray
Objects containing time data of type string
format_string : string
String specifying format of time_string.
kwargs : dict
Any keyword arguments for ``Time``. If the ``format`` keyword
argument is present, this will be used as the Time format.
Returns
-------
time_obj : `~astropy.time.Time`
A new `~astropy.time.Time` object corresponding to the input
``time_string``.
"""
time_array = np.asarray(time_string)
if time_array.dtype.kind not in ('U', 'S'):
err = "Expected type is string, a bytes-like object or a sequence"\
" of these. Got dtype '{}'".format(time_array.dtype.kind)
raise TypeError(err)
to_string = (str if time_array.dtype.kind == 'U' else
lambda x: str(x.item(), encoding='ascii'))
iterator = np.nditer([time_array, None],
op_dtypes=[time_array.dtype, 'U30'])
for time, formatted in iterator:
tt, fraction = _strptime._strptime(to_string(time), format_string)
time_tuple = tt[:6] + (fraction,)
formatted[...] = '{:04}-{:02}-{:02}T{:02}:{:02}:{:02}.{:06}'\
.format(*time_tuple)
format = kwargs.pop('format', None)
out = cls(*iterator.operands[1:], format='isot', **kwargs)
if format is not None:
out.format = format
return out
@property
def writeable(self):
return self._time.jd1.flags.writeable & self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date',
'datetime', 'iso', 'isot', 'yday', 'datetime64', 'fits', 'byear',
'jyear', 'byear_str', 'jyear_str']
"""
return self._format
@format.setter
def format(self, format):
"""Set time format"""
if format not in self.FORMATS:
raise ValueError('format must be one of {0}'
.format(list(self.FORMATS)))
format_cls = self.FORMATS[format]
# If current output subformat is not in the new format then replace
# with default '*'
if hasattr(format_cls, 'subfmts'):
subfmt_names = [subfmt[0] for subfmt in format_cls.subfmts]
if self.out_subfmt not in subfmt_names:
self.out_subfmt = '*'
self._time = format_cls(self._time.jd1, self._time.jd2,
self._time._scale, self.precision,
in_subfmt=self.in_subfmt,
out_subfmt=self.out_subfmt,
from_jd=True)
self._format = format
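    # Illustrative sketch (not executed here): changing ``format`` only changes
    # how ``value`` is presented; the underlying jd1/jd2 pair is untouched.
    #
    # >>> t = Time('2010-01-01 00:00:00', scale='utc')
    # >>> t.format = 'mjd'
    # >>> t.value
    # 55197.0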
def __repr__(self):
return ("<{0} object: scale='{1}' format='{2}' value={3}>"
.format(self.__class__.__name__, self.scale, self.format,
getattr(self, self.format)))
def __str__(self):
return str(getattr(self, self.format))
def strftime(self, format_spec):
"""
Convert Time to a string or a numpy.array of strings according to a
format specification.
See `time.strftime` documentation for format specification.
Parameters
----------
format_spec : string
Format definition of return string.
Returns
-------
formatted : string, numpy.array
String or numpy.array of strings formatted according to the given
format string.
"""
formatted_strings = []
for sk in self.replicate('iso')._time.str_kwargs():
date_tuple = date(sk['year'], sk['mon'], sk['day']).timetuple()
datetime_tuple = (sk['year'], sk['mon'], sk['day'],
sk['hour'], sk['min'], sk['sec'],
date_tuple[6], date_tuple[7], -1)
fmtd_str = format_spec
if '%f' in fmtd_str:
fmtd_str = fmtd_str.replace('%f', '{frac:0{precision}}'.format(frac=sk['fracsec'], precision=self.precision))
fmtd_str = strftime(fmtd_str, datetime_tuple)
formatted_strings.append(fmtd_str)
if self.isscalar:
return formatted_strings[0]
else:
return np.array(formatted_strings).reshape(self.shape)
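    # Illustrative sketch (not executed here), mirroring the ``strptime``
    # classmethod above:
    #
    # >>> t = Time('2010-01-01 12:34:56', scale='utc')
    # >>> t.strftime('%Y-%m-%d %H:%M:%S')
    # '2010-01-01 12:34:56'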
@property
def scale(self):
"""Time scale"""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(self.SCALES)))
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = '_get_delta_{0}_{1}'.format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision,
self.in_subfmt, self.out_subfmt,
from_jd=True)
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
if not isinstance(val, int) or val < 0 or val > 9:
raise ValueError('precision attribute must be an int between '
'0 and 9')
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
del self.cache
if not isinstance(val, str):
raise ValueError('in_subfmt attribute must be a string')
self._time.in_subfmt = val
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
del self.cache
if not isinstance(val, str):
raise ValueError('out_subfmt attribute must be a string')
self._time.out_subfmt = val
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in ((self._time, 'jd1'),
(self._time, 'jd2'),
(self, '_delta_ut1_utc'),
(self, '_delta_tdb_tt'),
(self, 'location')):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except AttributeError:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
out = value
if value.dtype.kind == 'M':
return value[()]
if not self._time.jd1.shape and not np.ma.is_masked(value):
out = value.item()
return out
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
@property
def value(self):
"""Time value(s) in current format"""
# The underlying way to get the time values for the current format is:
# self._shaped_like_input(self._time.to_value(parent=self))
# This is done in __getattr__. By calling getattr(self, self.format)
# the ``value`` attribute is cached.
return getattr(self, self.format)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def insert(self, obj, values, axis=0):
"""
Insert values before the given indices in the column and return
a new `~astropy.time.Time` or `~astropy.time.TimeDelta` object.
The values to be inserted must conform to the rules for in-place setting
of ``Time`` objects (see ``Get and set values`` in the ``Time``
documentation).
The API signature matches the ``np.insert`` API, but is more limited.
The specification of insert index ``obj`` must be a single integer,
and the ``axis`` must be ``0`` for simple row insertion before the
index.
Parameters
----------
obj : int
Integer index before which ``values`` is inserted.
values : array_like
            Value(s) to insert. If the type of ``values`` is different
            from that of this object, ``values`` is converted to the matching type.
axis : int, optional
Axis along which to insert ``values``. Default is 0, which is the
only allowed value and will insert a row.
Returns
-------
out : `~astropy.time.Time` subclass
New time object with inserted value(s)
"""
# Validate inputs: obj arg is integer, axis=0, self is not a scalar, and
# input index is in bounds.
try:
idx0 = operator.index(obj)
except TypeError:
raise TypeError('obj arg must be an integer')
if axis != 0:
raise ValueError('axis must be 0')
if not self.shape:
raise TypeError('cannot insert into scalar {} object'
.format(self.__class__.__name__))
if abs(idx0) > len(self):
raise IndexError('index {} is out of bounds for axis 0 with size {}'
.format(idx0, len(self)))
# Turn negative index into positive
if idx0 < 0:
idx0 = len(self) + idx0
# For non-Time object, use numpy to help figure out the length. (Note annoying
# case of a string input that has a length which is not the length we want).
if not isinstance(values, Time):
values = np.asarray(values)
n_values = len(values) if values.shape else 1
# Finally make the new object with the correct length and set values for the
# three sections, before insert, the insert, and after the insert.
out = self.__class__.info.new_like([self], len(self) + n_values, name=self.info.name)
out._time.jd1[:idx0] = self._time.jd1[:idx0]
out._time.jd2[:idx0] = self._time.jd2[:idx0]
# This uses the Time setting machinery to coerce and validate as necessary.
out[idx0:idx0 + n_values] = values
out._time.jd1[idx0 + n_values:] = self._time.jd1[idx0:]
out._time.jd2[idx0 + n_values:] = self._time.jd2[idx0:]
return out
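    # Illustrative sketch (not executed here): inserting a single row before
    # index 1 of a length-2 Time array.
    #
    # >>> t = Time(['2020-01-01', '2020-01-03'])
    # >>> t2 = t.insert(1, Time('2020-01-02'))
    # >>> len(t2)
    # 3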
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object"""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif ((self_location is None and value.location is not None) or
(self_location is not None and value.location is None)):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError('cannot set to Time with different location: '
'expected location={} and '
'got location={}'
.format(self_location, value.location))
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(value, scale=self.scale, format=self.format,
location=self_location)
except Exception as err:
raise ValueError('cannot convert value to a compatible Time object: {}'
.format(err))
return value
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError('{} object is read-only. Make a '
'copy() or set "writeable" attribute to True.'
.format(self.__class__.__name__))
else:
raise ValueError('scalar {} object is read-only.'
.format(self.__class__.__name__))
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ('_delta_tdb_tt', '_delta_ut1_utc'):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def light_travel_time(self, skycoord, kind='barycentric', location=None, ephemeris=None):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
            The time offset between the barycentre or heliocentre and Earth,
            in TDB seconds. Should be added to the original time to get the
            time in the Solar system barycentre or the heliocentre.
            The resulting conversion to BJD then also includes the relativistic correction.
"""
if kind.lower() not in ('barycentric', 'heliocentric'):
raise ValueError("'kind' parameter must be one of 'heliocentric' "
"or 'barycentric'")
if location is None:
if self.location is None:
raise ValueError('An EarthLocation needs to be set or passed '
'in to calculate bary- or heliocentric '
'corrections')
location = self.location
from astropy.coordinates import (UnitSphericalRepresentation, CartesianRepresentation,
HCRS, ICRS, GCRS, solar_system_ephemeris)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError("Supplied location does not have a valid `get_itrs` method")
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == 'heliocentric':
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation).
represent_as(CartesianRepresentation).xyz)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale='tdb')
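    # Illustrative sketch (hypothetical target and site; not executed here):
    # the returned TimeDelta is added to the TDB time to form a barycentric
    # arrival time (e.g. a BJD_TDB).
    #
    # >>> import astropy.units as u
    # >>> from astropy.coordinates import SkyCoord, EarthLocation
    # >>> loc = EarthLocation(lat=28.76 * u.deg, lon=-17.88 * u.deg, height=2300 * u.m)
    # >>> t = Time('2017-01-01T00:00:00', scale='utc', location=loc)
    # >>> star = SkyCoord(ra=10.68 * u.deg, dec=41.27 * u.deg)
    # >>> ltt = t.light_travel_time(star)     # barycentric by default
    # >>> bjd_tdb = t.tdb + ltt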
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
        ----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `str`, or `None`; optional
The longitude on the Earth at which to compute the sidereal time.
Can be given as a `~astropy.units.Quantity` with angular units
(or an `~astropy.coordinates.Angle` or
`~astropy.coordinates.Longitude`), or as a name of an
observatory (currently, only ``'greenwich'`` is supported,
equivalent to 0 deg). If `None` (default), the ``lon`` attribute of
the Time object is used.
model : str or `None`; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
sidereal time : `~astropy.coordinates.Longitude`
Sidereal time as a quantity with units of hourangle
""" # docstring is formatted below
from astropy.coordinates import Longitude
if kind.lower() not in SIDEREAL_TIME_MODELS.keys():
raise ValueError('The kind of sidereal time has to be {0}'.format(
' or '.join(sorted(SIDEREAL_TIME_MODELS.keys()))))
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models.keys())[-1]
else:
if model.upper() not in available_models:
raise ValueError(
'Model {0} not implemented for {1} sidereal time; '
'available models are {2}'
.format(model, kind, sorted(available_models.keys())))
if longitude is None:
if self.location is None:
raise ValueError('No longitude is given but the location for '
'the Time object is not set.')
longitude = self.location.lon
elif longitude == 'greenwich':
longitude = Longitude(0., u.degree,
wrap_angle=180.*u.degree)
else:
# sanity check on input
longitude = Longitude(longitude, u.degree,
wrap_angle=180.*u.degree)
gst = self._erfa_sidereal_time(available_models[model.upper()])
return Longitude(gst + longitude, u.hourangle)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()),
'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys()))
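    # Illustrative sketch (not executed here): apparent sidereal time at
    # Greenwich for a given UTC instant, returned as a Longitude in hourangle
    # units (requires up-to-date IERS data for the UT1 conversion).
    #
    # >>> t = Time('2010-01-01 00:00:00', scale='utc')
    # >>> t.sidereal_time('apparent', longitude='greenwich')  # doctest: +SKIP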
def _erfa_sidereal_time(self, model):
"""Calculate a sidereal time using a IAU precession/nutation model."""
from astropy.coordinates import Longitude
erfa_function = model['function']
erfa_parameters = [getattr(getattr(self, scale)._time, jd_part)
for scale in model['scales']
for jd_part in ('jd1', 'jd2_filled')]
sidereal_time = erfa_function(*erfa_parameters)
if self.masked:
sidereal_time[self.mask] = np.nan
return Longitude(sidereal_time, u.radian).to(u.hourangle)
def copy(self, format=None):
"""
Return a fully independent copy the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply('copy', format=format)
def replicate(self, format=None, copy=False):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
"""
return self._apply('copy' if copy else 'replicate', format=format)
def _apply(self, method, *args, format=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
broadcast : ``_apply(np.broadcast, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == 'replicate':
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(self.__class__)
tm._time = TimeJD(jd1, jd2, self.scale, self.precision,
self.in_subfmt, self.out_subfmt, from_jd=True)
# Optional ndarray attributes.
for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location',
'precision', 'in_subfmt', 'out_subfmt'):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only a single element and the method would return a view,
# since in that case nothing would change).
if getattr(val, 'size', 1) > 1:
val = apply_method(val)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError('format must be one of {0}'
.format(list(tm.FORMATS)))
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(tm._time.jd1, tm._time.jd2,
tm._time._scale, tm.precision,
tm.in_subfmt, tm.out_subfmt,
from_jd=True)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
indexed sequentially. For ``keepdims`` is ``True``, the net result is
the same as constructing an index grid with ``np.ogrid`` and then
replacing the ``axis`` item with ``indices`` with its shaped expanded
at ``axis``. For ``keepdims`` is ``False``, the result is the same but
with the ``axis`` dimension removed from all list entries.
For ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
return tuple([(indices if i == axis else np.arange(s).reshape(
(1,)*(i if keepdims or i < axis else i-1) + (s,) +
(1,)*(ndim-i-(1 if keepdims or i > axis else 2))))
for i, s in enumerate(self.shape)])
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# first get the minimum at normal precision.
jd = self.jd1 + self.jd2
approx = np.min(jd, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (self.jd1 - approx) + self.jd2
return dt.argmin(axis, out)
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd = self.jd1 + self.jd2
approx = np.max(jd, axis, keepdims=True)
dt = (self.jd1 - approx) + self.jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied. Internally,
it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
"""
jd_approx = self.jd
jd_remainder = (self - self.__class__(jd_approx, format='jd')).jd
if axis is None:
return np.lexsort((jd_remainder.ravel(), jd_approx.ravel()))
else:
return np.lexsort(keys=(jd_remainder, jd_approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return (self.max(axis, keepdims=keepdims) -
self.min(axis, keepdims=keepdims))
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis,
keepdims=True)]
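# Illustrative usage sketch (not part of the astropy sources; the exact
# string output assumes the default ISO format and precision of 3):
#
#     >>> from astropy.time import Time
#     >>> t = Time(['2010-01-02', '2010-01-01', '2010-01-03'], scale='utc')
#     >>> int(t.argmin())
#     1
#     >>> t.min().iso
#     '2010-01-01 00:00:00.000'
#     >>> t.sort().iso[0]
#     '2010-01-01 00:00:00.000'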
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache['scale']
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
cache = self.cache['format']
if attr not in cache:
if attr == self.format:
tm = self
else:
tm = self.replicate(format=attr)
value = tm._shaped_like_input(tm._time.to_value(parent=tm))
cache[attr] = value
return cache[attr]
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError("Cannot convert TimeDelta with "
"undefined scale to any defined scale.")
else:
raise ScaleValueError("Cannot convert {0} with scale "
"'{1}' to scale '{2}'"
.format(self.__class__.__name__,
self.scale, attr))
else:
# Should raise AttributeError
return self.__getattribute__(attr)
@override__dir__
def __dir__(self):
result = set(self.SCALES)
result.update(self.FORMATS)
return result
def _match_shape(self, val):
"""
Ensure that ``val`` is matched to the length of ``self``. If ``val`` has length 1
then broadcast, otherwise cast to double and make sure the shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError('Attribute shape must match or be '
'broadcastable to that of Time object. '
'Typically, give either a single value or '
'one for each time.')
return val
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : ``astropy.utils.iers.IERS`` table, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. If `None`, use default version (see
``astropy.utils.iers``)
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
Status values (if ``return_status`` is `True`)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
on the first instance ut1 is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True)
>>> status == TIME_BEFORE_IERS_RANGE
array([ True, False]...)
"""
if iers_table is None:
from astropy.utils.iers import IERS
iers_table = IERS.open()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
If delta_ut1_utc is not yet set, this will interpolate it from the
IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, '_delta_ut1_utc'):
from astropy.utils.iers import IERS_Auto
iers_table = IERS_Auto.open()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
# is accessed directly; ensure we behave as expected for that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = 'utc'
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == 'ut1':
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, '_delta_tdb_tt'):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ('tt', 'tdb'):
raise ValueError('Accessing the delta_tdb_tt attribute '
'is only possible for TT or TDB time '
'scales')
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
from astropy.coordinates import EarthLocation
location = EarthLocation.from_geodetic(0., 0., 0.)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1, jd2, ut, lon.to_value(u.radian),
rxy.to_value(u.km), z.to_value(u.km))
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
if not isinstance(other, Time):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# Tdelta - something is dealt with in TimeDelta, so we have
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = isinstance(other, TimeDelta)
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
if other_is_delta: # T - Tdelta
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale('tai')
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError("Cannot subtract Time and TimeDelta instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError("Cannot subtract Time instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
self_time = (self._time if self.scale in TIME_DELTA_SCALES
else self.tai._time)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(self_time.jd1, self_time.jd2, format='jd',
scale=self_time.scale)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
if not isinstance(other, Time):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# Tdelta + something is dealt with in TimeDelta, so we have
# T + Tdelta = T
# T + T = error
if not isinstance(other, TimeDelta):
raise OperandTypeError(self, other, '+')
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale('tai')
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError("Cannot add Time and TimeDelta instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
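# Illustrative sketch of the arithmetic above (not part of the astropy
# sources; 2010-01-01/02 are assumed example dates with no leap second in
# between):
#
#     >>> from astropy.time import Time, TimeDelta
#     >>> t1 = Time('2010-01-01T00:00:00', scale='utc')
#     >>> t2 = Time('2010-01-02T00:00:00', scale='utc')
#     >>> (t2 - t1).sec                              # Time - Time -> TimeDelta
#     86400.0
#     >>> (t1 + TimeDelta(1.0, format='jd')) == t2   # Time + TimeDelta -> Time
#     True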
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
if(self.scale is not None and self.scale not in other.SCALES or
other.scale is not None and other.scale not in self.SCALES):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError("Cannot compare {0} instances with scales "
"'{1}' and '{2}'".format(self.__class__.__name__,
self.scale, other.scale))
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
def to_datetime(self, timezone=None):
tm = self.replicate(format='datetime')
return tm._shaped_like_input(tm._time.to_value(timezone))
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDelta(Time):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
See also:
- http://docs.astropy.org/en/stable/time/
- http://docs.astropy.org/en/stable/time/index.html#time-deltas
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
if format is None:
format = 'datetime' if isinstance(val, timedelta) else 'jd'
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
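# Illustrative construction sketch (not part of the astropy sources;
# assumes Quantity input is accepted here, as in current astropy):
#
#     >>> import astropy.units as u
#     >>> from astropy.time import TimeDelta
#     >>> TimeDelta(3 * u.hr).jd        # Quantity input, converted to days
#     0.125
#     >>> TimeDelta(86400.0, format='sec').jd
#     1.0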
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
"""
tm = self.replicate(format='datetime')
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(self.SCALES)))
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1, jd2 + offset2, scale,
self.precision, self.in_subfmt,
self.out_subfmt, from_jd=True)
def __add__(self, other):
# only deal with TimeDelta + TimeDelta
if isinstance(other, Time):
if not isinstance(other, TimeDelta):
return other.__add__(self)
else:
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if(self.scale is not None and self.scale not in other.SCALES or
other.scale is not None and other.scale not in self.SCALES):
raise TypeError("Cannot add TimeDelta instances with scales "
"'{0}' and '{1}'".format(self.scale, other.scale))
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = self._time.jd1 + other._time.jd1
jd2 = self._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __sub__(self, other):
# only deal with TimeDelta - TimeDelta
if isinstance(other, Time):
if not isinstance(other, TimeDelta):
raise OperandTypeError(self, other, '-')
else:
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
if(self.scale is not None and self.scale not in other.SCALES or
other.scale is not None and other.scale not in self.SCALES):
raise TypeError("Cannot subtract TimeDelta instances with scales "
"'{0}' and '{1}'".format(self.scale, other.scale))
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = self._time.jd1 - other._time.jd1
jd2 = self._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# Check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time) and not isinstance(other, TimeDelta):
raise OperandTypeError(self, other, '*')
elif ((isinstance(other, u.UnitBase) and
other == u.dimensionless_unscaled) or
(isinstance(other, str) and other == '')):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can simply multiply it in.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) * other
except Exception:
# The various ways we could multiply all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other.value)
out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
if self.format != 'jd':
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
# Cannot do __mul__(1./other) as that loses precision
if ((isinstance(other, u.UnitBase) and
other == u.dimensionless_unscaled) or
(isinstance(other, str) and other == '')):
return self.copy()
# If other is something consistent with a dimensionless quantity
# (could just be a float or an array), then we can simply divide by it.
try:
other = u.Quantity(other, u.dimensionless_unscaled, copy=False)
except Exception:
# If not consistent with a dimensionless quantity, try downgrading
# self to a quantity and see if things work.
try:
return self.to(u.day) / other
except Exception:
# The various ways we could divide all failed;
# returning NotImplemented to give other a final chance.
return NotImplemented
jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other.value)
out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
if self.format != 'jd':
out = out.replicate(format=self.format)
return out
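# Illustrative sketch of scaling a TimeDelta (not part of the astropy
# sources):
#
#     >>> from astropy.time import TimeDelta
#     >>> dt = TimeDelta(2.0, format='jd')
#     >>> (dt * 3).jd
#     6.0
#     >>> (dt / 4).jd
#     0.5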
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
# Here, we do not have to worry about returning NotImplemented,
# since other has already had a chance to look at us.
return other / self.to(u.day)
def to(self, unit, equivalencies=[]):
"""
Convert to a quantity in the specified unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance, str
The unit to convert to.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally
or within a context.
Returns
-------
quantity : `~astropy.units.Quantity`
The quantity in the units specified.
See also
--------
to_value : get the numerical value in a given unit.
"""
return u.Quantity(self._time.jd1 + self._time.jd2,
u.day).to(unit, equivalencies=equivalencies)
def to_value(self, unit, equivalencies=[]):
"""
The numerical value in the specified unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`unit_equivalencies`). If `None`, no
equivalencies will be applied at all, not even any set globally
or within a context.
Returns
-------
value : `~numpy.ndarray` or scalar
The value in the units specified.
See also
--------
to : Convert to a `~astropy.units.Quantity` instance in a given unit.
value : The time value in the current format.
"""
return u.Quantity(self._time.jd1 + self._time.jd2,
u.day).to_value(unit, equivalencies=equivalencies)
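# Illustrative sketch (not part of the astropy sources):
#
#     >>> import astropy.units as u
#     >>> from astropy.time import TimeDelta
#     >>> TimeDelta(1.0, format='jd').to_value(u.hr)
#     24.0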
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object"""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError('cannot convert value to a compatible TimeDelta '
'object: {}'.format(err))
return value
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
val = np.array(val, copy=copy, subok=True)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if not (val.dtype == np.float64 or val.dtype.kind in 'OSUMa'):
val = np.asanyarray(val, dtype=np.float64)
return val
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
in ``val`` and ``val2``. If neither is masked then the return ``mask``
is ``None``.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
mask, val, val2: ndarray or None
Mask: (None or bool ndarray), val, val2: ndarray
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = '' if op is None else ' for {0}'.format(op)
super().__init__(
"Unsupported operand type(s){0}: "
"'{1}' and '{2}'".format(op_string,
left.__class__.__name__,
right.__class__.__name__))
|
26e6cd745611364011d2b529afc6752f526e856f489d23c05e0682d581988aa7 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Time utilities.
In particular, routines to do basic arithmetic on numbers represented by two
doubles, using the procedure of Shewchuk, 1997, Discrete & Computational
Geometry 18(3):305-363 -- http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
"""
import numpy as np
from astropy import units as u
def day_frac(val1, val2, factor=None, divisor=None):
"""Return the sum of ``val1`` and ``val2`` as two float64s.
The returned floats are an integer part and the fractional remainder,
with the latter guaranteed to be within -0.5 and 0.5 (inclusive on
either side, as the integer is rounded to even).
The arithmetic is all done with exact floating point operations so no
precision is lost to rounding error. It is assumed the sum is less
than about 1e16, otherwise the remainder will be greater than 1.0.
Parameters
----------
val1, val2 : array of float
Values to be summed.
factor : float, optional
If given, multiply the sum by it.
divisor : float, optional
If given, divide the sum by it.
Returns
-------
day, frac : float64
Integer and fractional part of val1 + val2.
"""
# Add val1 and val2 exactly, returning the result as two float64s.
# The first is the approximate sum (with some floating point error)
# and the second is the error of the float64 sum.
sum12, err12 = two_sum(val1, val2)
if factor is not None:
sum12, carry = two_product(sum12, factor)
carry += err12 * factor
sum12, err12 = two_sum(sum12, carry)
if divisor is not None:
q1 = sum12 / divisor
p1, p2 = two_product(q1, divisor)
d1, d2 = two_sum(sum12, -p1)
d2 += err12
d2 -= p2
q2 = (d1 + d2) / divisor # 3-part float fine here; nothing can be lost
sum12, err12 = two_sum(q1, q2)
# get integer fraction
day = np.round(sum12)
extra, frac = two_sum(sum12, -day)
frac += extra + err12
# Our fraction can now have gotten >0.5 or <-0.5, which means we would
# lose one bit of precision. So, correct for that.
excess = np.round(frac)
day += excess
extra, frac = two_sum(sum12, -day)
frac += extra + err12
return day, frac
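# Illustrative worked example (not part of the astropy sources; the inputs
# are chosen to be exactly representable in float64):
#
#     >>> from astropy.time.utils import day_frac
#     >>> day, frac = day_frac(2450000.0, 0.125)
#     >>> float(day), float(frac)
#     (2450000.0, 0.125)
#     >>> day, frac = day_frac(2450000.5, 1.75)   # remainder stays in [-0.5, 0.5]
#     >>> float(day), float(frac)
#     (2450002.0, 0.25)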
def quantity_day_frac(val1, val2=None):
"""Like ``day_frac``, but for quantities with units of time.
The quantities are separately converted to days. Here, we need to take
care with the conversion since while the routines here can do accurate
multiplication, the conversion factor itself may not be accurate. For
instance, if the quantity is in seconds, the conversion factor is
1./86400., which is not exactly representable as a float.
To work around this, for conversion factors less than unity, rather than
multiply by that possibly inaccurate factor, the value is divided by the
conversion factor of a day to that unit (i.e., by 86400. for seconds). For
conversion factors larger than 1, such as 365.25 for years, we do just
multiply. With this scheme, one has precise conversion factors for all
regular time units that astropy defines. Note, however, that it does not
necessarily work for all custom time units, and cannot work when conversion
to time is via an equivalency. For those cases, one remains limited by the
fact that Quantity calculations are done in double precision, not in
quadruple precision as for time.
"""
if val2 is not None:
res11, res12 = quantity_day_frac(val1)
res21, res22 = quantity_day_frac(val2)
# This summation can at most lose 1 ULP in the second number.
return res11 + res21, res12 + res22
try:
factor = val1.unit.to(u.day)
except Exception:
# Not a simple scaling, so cannot do the full-precision one.
# But at least try normal conversion, since equivalencies may be set.
return val1.to_value(u.day), 0.
if factor >= 1.:
return day_frac(val1.value, 0., factor=factor)
else:
divisor = u.day.to(val1.unit)
return day_frac(val1.value, 0., divisor=divisor)
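# Illustrative sketch (not part of the astropy sources): 30 hours is 1.25
# days, obtained here by dividing by the exact factor 24 rather than
# multiplying by the inexact 1/24.
#
#     >>> import astropy.units as u
#     >>> from astropy.time.utils import quantity_day_frac
#     >>> day, frac = quantity_day_frac(30 * u.hr)
#     >>> float(day), float(frac)
#     (1.0, 0.25)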
def two_sum(a, b):
"""
Add ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate sum (with some floating point error)
and the second is the error of the float64 sum.
Using the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
sum, err : float64
Approximate sum of a + b and the exact floating point error
"""
x = a + b
eb = x - a
eb = b - eb
ea = x - b
ea = a - ea
return x, ea + eb
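# Illustrative sketch (not part of the astropy sources): 1.0 is lost when
# added to 1e16 in ordinary float64 arithmetic, but is recovered exactly in
# the error term.
#
#     >>> from astropy.time.utils import two_sum
#     >>> two_sum(1e16, 1.0)
#     (1e+16, 1.0)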
def two_product(a, b):
"""
Multiply ``a`` and ``b`` exactly, returning the result as two float64s.
The first is the approximate product (with some floating point error)
and the second is the error of the float64 product.
Uses the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
Returns
-------
prod, err : float64
Approximate product a * b and the exact floating point error
"""
x = a * b
ah, al = split(a)
bh, bl = split(b)
y1 = ah * bh
y = x - y1
y2 = al * bh
y -= y2
y3 = ah * bl
y -= y3
y4 = al * bl
y = y4 - y
return x, y
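# Illustrative sketch (not part of the astropy sources): the rounding error
# of (2**27 + 1)**2, which is not representable in float64, is recovered
# exactly in the error term.
#
#     >>> from astropy.time.utils import two_product
#     >>> two_product(134217729.0, 134217729.0)
#     (1.801439877791744e+16, 1.0)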
def split(a):
"""
Split float64 in two aligned parts.
Uses the procedure of Shewchuk, 1997,
Discrete & Computational Geometry 18(3):305-363
http://www.cs.berkeley.edu/~jrs/papers/robustr.pdf
"""
c = 134217729. * a # 2**27+1.
abig = c - a
ah = c - abig
al = a - ah
return ah, al
|
569de7c23d180c29b3687e75e5e3545f7a4164763bbffa858fd513baf9785fb5 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# Standard library
import re
import numbers
from fractions import Fraction
import warnings
import numpy as np
# AstroPy
from .core import (Unit, dimensionless_unscaled, get_current_unit_registry,
UnitBase, UnitsError, UnitConversionError, UnitTypeError)
from .utils import is_effectively_unity
from .format.latex import Latex
from astropy.utils.compat import NUMPY_LT_1_14, NUMPY_LT_1_16, NUMPY_LT_1_17
from astropy.utils.compat.misc import override__dir__
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy.utils.misc import isiterable, InheritDocstrings
from astropy.utils.data_info import ParentDtypeInfo
from astropy import config as _config
from .quantity_helper import (converters_and_unit, can_have_arbitrary_unit,
check_output)
__all__ = ["Quantity", "SpecificTypeQuantity",
"QuantityInfoBase", "QuantityInfo", "allclose", "isclose"]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ['Quantity.*']
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity
"""
latex_array_threshold = _config.ConfigItem(100,
'The maximum size an array Quantity can be before its LaTeX '
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
'negative number means that the value will instead be whatever numpy '
'gets from get_printoptions.')
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {'dtype', 'unit'} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return '{0.value:}'.format(val)
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ('value', 'unit')
_construct_from_dict_args = ['value']
_represent_as_dict_primary_data = 'value'
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Quantity (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'format', 'description'))
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop('shape')
dtype = attrs.pop('dtype')
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {key: (data if key == 'value' else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs}
map['copy'] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class Quantity(np.ndarray, metaclass=InheritDocstrings):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
See also: http://docs.astropy.org/en/stable/units/quantity.html
Parameters
----------
value : number, `~numpy.ndarray`, `Quantity` object (sequence), str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See http://docs.astropy.org/en/latest/units/
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, '_quantity_class', cls)
if issubclass(qcls, cls):
cls = qcls
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and
isinstance(value, cls)):
value = value.view(cls)
if dtype is None:
if not copy:
return value
if not (np.can_cast(np.float32, value.dtype) or
value.dtype.fields):
dtype = float
return np.array(value, dtype=dtype, copy=copy, order=order,
subok=True, ndmin=ndmin)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
# the second parts adds possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (r'\s*[+-]?'
r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|'
r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))'
r'([eE][+-]?\d+)?'
r'[.+-]?')
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError('Cannot parse "{0}" as a {1}. It does not '
'start with a number.'
.format(value, cls.__name__))
unit_string = v.string[v.end():].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif (isiterable(value) and len(value) > 0 and
all(isinstance(v, Quantity) for v in value)):
# Convert all quantities to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError("The unit attribute {0!r} of the input could "
"not be parsed as an astropy Unit, raising "
"the following exception:\n{1}"
.format(value.unit, exc))
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(value, dtype=dtype, copy=copy, order=order,
subok=False, ndmin=ndmin)
# check that array contains numbers or long int objects
if (value.dtype.kind in 'OSU' and
not (value.dtype.kind == 'O' and
isinstance(value.item(() if value.ndim == 0 else 0),
numbers.Number))):
raise TypeError("The value must be a valid Python or "
"Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if dtype is None and (not (np.can_cast(np.float32, value.dtype)
or value.dtype.fields)
or value.dtype.kind == 'O'):
value = value.astype(float)
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
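# Illustrative construction sketch (not part of the astropy sources):
#
#     >>> from astropy.units import Quantity
#     >>> q = Quantity('1.5 km')       # string input: number then unit
#     >>> q.to_value('m')
#     1500.0
#     >>> Quantity([1., 2.], unit='m').unit
#     Unit("m")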
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, '_unit', None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if 'info' in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
# Methods like .squeeze() create a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError('__array_wrap__ should not be used '
'with a context any more, since we require '
'numpy >=1.13. Please raise an issue on '
'https://github.com/astropy/astropy')
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity`
Results of the ufunc, with the unit set properly.
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get('out', None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs['out'] = (out_array,) if function.nout == 1 else out_array
# Same for inputs, but here also convert if necessary.
arrays = []
for input_, converter in zip(inputs, converters):
input_ = getattr(input_, 'value', input_)
arrays.append(converter(input_) if converter else input_)
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
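# Illustrative sketch (not part of the astropy sources): the converters
# bring the operands to consistent units before the underlying ufunc runs,
# and the result is re-wrapped with the proper unit.
#
#     >>> import numpy as np
#     >>> import astropy.units as u
#     >>> np.add(1 * u.km, 500 * u.m).to_value(u.m)
#     1500.0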
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : `~numpy.ndarray` or tuple of `~numpy.ndarray`
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit`
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
return tuple(self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in
zip(result, unit, out))
if out is None:
# View the result array as a Quantity with the proper unit.
return result if unit is None else self._new_view(result, unit)
# For given output, just set the unit. We know the unit is not None and
# the output is of the correct Quantity subclass, as it was passed
# through check_output.
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None):
"""
Create a Quantity view of some array-like input, and set the unit
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : `UnitBase`, or anything convertible to a :class:`~astropy.units.Unit`, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
Returns
-------
view : Quantity subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
elif unit is self.unit and self.__class__ is Quantity:
# The second part is because we should not presume what other
# classes want to do for the same unit. E.g., Constant will
# always want to fall back to Quantity, and relies on going
# through `__quantity_subclass__`.
quantity_subclass = Quantity
else:
unit = Unit(unit)
quantity_subclass = getattr(unit, '_quantity_class', Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalar in `.value`)
# Note that for an ndarray input, the np.array call takes only about
# double the time of the check ``obj.__class__ is np.ndarray``, so it is
# not worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict='silent')
if not isinstance(unit, UnitBase):
raise UnitTypeError(
"{0} instances require {1} units, not {2} instances."
.format(type(self).__name__, UnitBase, type(unit)))
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
return self.unit.to(unit, self.view(np.ndarray),
equivalencies=equivalencies)
def to(self, unit, equivalencies=[]):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses)
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
See also
--------
to_value : get the numerical value in a given unit.
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
return self._new_view(self._to_value(unit, equivalencies), unit)
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`unit_equivalencies`). If not provided or
``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : `~numpy.ndarray` or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See also
--------
to : Get a new instance in a different unit.
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
else:
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
return value if self.shape else (value[()] if self.dtype.fields
else value.item())
value = property(to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""")
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale,
si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale,
cgs_unit / cgs_unit.scale)
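    # Illustrative usage (editor's sketch, not part of the astropy source),
    # assuming ``import astropy.units as u``:
    #
    #     >>> (1. * u.km).si     # -> <Quantity 1000. m>
    #     >>> (1. * u.m).cgs     # -> <Quantity 100. cm>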
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
# not turned on on Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
@override__dir__
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return []
extra_members = set()
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(
equivalencies):
extra_members.update(equivalent.names)
return extra_members
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
"'{0}' object has no '{1}' member".format(
self.__class__.__name__,
attr))
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
"{0} instance has no attribute '{1}'".format(
self.__class__.__name__, attr))
else:
return value
# Equality needs to be handled explicitly as ndarray.__eq__ gives
# DeprecationWarnings on any error, which is distracting. On the other
# hand, for structured arrays, the ufunc does not work, so we do use
# __eq__ and live with the warnings.
def __eq__(self, other):
try:
if self.dtype.kind == 'V':
return super().__eq__(other)
else:
return np.equal(self, other)
except UnitsError:
return False
except TypeError:
return NotImplemented
def __ne__(self, other):
try:
if self.dtype.kind == 'V':
return super().__ne__(other)
else:
return np.not_equal(self, other)
except UnitsError:
return True
except TypeError:
return NotImplemented
# Unit conversion operator (<<).
def __lshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
def __ilshift__(self, other):
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
try:
factor = self.unit._to(other)
except UnitConversionError:
# Maybe via equivalencies? Now we do make a temporary copy.
try:
value = self._to_value(other)
except UnitConversionError:
return NotImplemented
self.view(np.ndarray)[...] = value
else:
self.view(np.ndarray)[...] *= factor
self._set_unit(other)
return self
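    # Illustrative usage of the ``<<`` operator (editor's sketch, not part of
    # the astropy source), assuming ``import astropy.units as u``:
    #
    #     >>> q = 1. * u.km
    #     >>> q << u.m           # converted Quantity: <Quantity 1000. m>
    #     >>> q <<= u.m          # in-place conversion; q is now <Quantity 1000. m>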
def __rlshift__(self, other):
if not self.isscalar:
return NotImplemented
return Unit(self).__rlshift__(other)
# Give warning for other >> self, since probably other << self was meant.
def __rrshift__(self, other):
warnings.warn(">> is not implemented. Did you mean to convert "
"something to this quantity as a unit using '<<'?",
AstropyWarning)
return NotImplemented
# Also define __rshift__ and __irshift__ so we override default ndarray
# behaviour, but instead of emitting a warning here, let it be done by
# other (which likely is a unit if this was a mistake).
def __rshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
""" Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), other * self.unit)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
""" Right Multiplication between `Quantity` objects and other
objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
""" Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), self.unit / other)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
""" Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1. / self.value, other / self.unit)
return super().__rtruediv__(other)
def __div__(self, other):
""" Division between `Quantity` objects. """
return self.__truediv__(other)
def __idiv__(self, other):
""" Division between `Quantity` objects. """
return self.__itruediv__(other)
def __rdiv__(self, other):
""" Division between `Quantity` objects. """
return self.__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(self.value ** float(other),
self.unit ** other)
return super().__pow__(other)
# For Py>=3.5
if NUMPY_LT_1_16:
def __matmul__(self, other):
result_unit = self.unit * getattr(other, 'unit',
dimensionless_unscaled)
result_array = np.matmul(self.value,
getattr(other, 'value', other))
return self._new_view(result_array, result_unit)
def __rmatmul__(self, other):
result_unit = self.unit * getattr(other, 'unit',
dimensionless_unscaled)
result_array = np.matmul(getattr(other, 'value', other),
self.value)
return self._new_view(result_array, result_unit)
# In numpy 1.13, 1.14, a np.positive ufunc exists, but ndarray.__pos__
# does not go through it, so we define it, to allow subclasses to override
# it inside __array_ufunc__. This can be removed if a solution to
# https://github.com/numpy/numpy/issues/9081 is merged.
def __pos__(self):
"""Plus the quantity."""
return np.positive(self)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value is not iterable"
.format(cls=self.__class__.__name__))
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value does not support "
"indexing".format(cls=self.__class__.__name__))
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and 'info' in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""Quantities should always be treated as non-False; there is too much
potential for ambiguity otherwise.
"""
warnings.warn('The truth value of a Quantity is ambiguous. '
'In the future this will raise a ValueError.',
AstropyDeprecationWarning)
return True
def __len__(self):
if self.isscalar:
raise TypeError("'{cls}' object with a scalar value has no "
"len()".format(cls=self.__class__.__name__))
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError('only integer dimensionless scalar quantities '
'can be converted to a Python index')
# TODO: we may want to add a hook for dimensionless quantities?
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = ' ' + unitstr
return unitstr
def to_string(self, unit=None, precision=None, format=None, subfmt=None):
"""
Generate a string representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Parameters
----------
unit : `~astropy.units.UnitBase`, optional
Specifies the unit. If not provided,
the unit used to initialize the quantity will be used.
precision : numeric, optional
The level of decimal precision. If `None`, or not provided,
it will be determined from NumPy print options.
format : str, optional
The format of the result. If not provided, an unadorned
string is returned. Supported values are:
- 'latex': Return a LaTeX-formatted string
subfmt : str, optional
Subformat of the result. For the moment,
only used for format="latex". Supported values are:
- 'inline': Use ``$ ... $`` as delimiters.
- 'display': Use ``$\\displaystyle ... $`` as delimiters.
Returns
-------
lstr
A string with the contents of this Quantity
"""
if unit is not None and unit != self.unit:
return self.to(unit).to_string(
unit=None, precision=precision, format=format, subfmt=subfmt)
formats = {
None: None,
"latex": {
None: ("$", "$"),
"inline": ("$", "$"),
"display": (r"$\displaystyle ", r"$"),
},
}
if format not in formats:
raise ValueError("Unknown format '{0}'".format(format))
elif format is None:
return '{0}{1:s}'.format(self.value, self._unitstr)
# else, for the moment we assume format="latex"
# need to do try/finally because "threshold" cannot be overridden
# with array2string
pops = np.get_printoptions()
format_spec = '.{}g'.format(
precision if precision is not None else pops['precision'])
def float_formatter(value):
return Latex.format_exponential_notation(value,
format_spec=format_spec)
def complex_formatter(value):
return '({0}{1}i)'.format(
Latex.format_exponential_notation(value.real,
format_spec=format_spec),
Latex.format_exponential_notation(value.imag,
format_spec='+' + format_spec))
try:
formatter = {'float_kind': float_formatter,
'complex_kind': complex_formatter}
if conf.latex_array_threshold > -1:
np.set_printoptions(threshold=conf.latex_array_threshold,
formatter=formatter)
# the view is needed for the scalar case - value might be float
if NUMPY_LT_1_14: # style deprecated in 1.14
latex_value = np.array2string(
self.view(np.ndarray),
style=(float_formatter if self.dtype.kind == 'f'
else complex_formatter if self.dtype.kind == 'c'
else repr),
max_line_width=np.inf, separator=',~')
else:
latex_value = np.array2string(
self.view(np.ndarray),
max_line_width=np.inf, separator=',~')
latex_value = latex_value.replace('...', r'\dots')
finally:
np.set_printoptions(**pops)
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
latex_unit = (self.unit._repr_latex_()[1:-1] # note this is unicode
if self.unit is not None
else _UNIT_NOT_INITIALISED)
delimiter_left, delimiter_right = formats[format][subfmt]
return r'{left}{0} \; {1}{right}'.format(latex_value, latex_unit,
left=delimiter_left,
right=delimiter_right)
def __str__(self):
return self.to_string()
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
sep = ',' if NUMPY_LT_1_14 else ', '
arrstr = np.array2string(self.view(np.ndarray), separator=sep,
prefix=prefixstr)
return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr)
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# NOTE: This should change to display format in a future release
return self.to_string(format='latex', subfmt='inline')
def __format__(self, format_spec):
"""
Format quantities using the new-style python formatting codes
as specifiers for the number.
If the format specifier correctly applies itself to the value,
then it is used to format only the value. If it cannot be
applied to the value, then it is applied to the whole string.
"""
try:
value = format(self.value, format_spec)
full_format_spec = "s"
except ValueError:
value = self.value
full_format_spec = format_spec
return format("{0}{1:s}".format(value, self._unitstr),
full_format_spec)
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, 'scale'):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
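    # Illustrative usage (editor's sketch, not part of the astropy source),
    # assuming ``import astropy.units as u``:
    #
    #     >>> (1. * u.km / u.s).decompose()   # -> <Quantity 1000. m / s>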
# These functions need to be overridden to take into account the units
# Array conversion
# http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
return self._new_view(super().item(*args))
def tolist(self):
        raise NotImplementedError("cannot make a list of Quantities. Get "
                                  "list of values with q.value.tolist()")
def _to_own_unit(self, value, check_precision=True):
try:
_value = value.to_value(self.unit)
except AttributeError:
# We're not a Quantity, so let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
as_quantity = Quantity(value)
try:
_value = as_quantity.to_value(self.unit)
except UnitsError:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if (not hasattr(value, 'unit') and
can_have_arbitrary_unit(as_quantity.value)):
_value = as_quantity.value
else:
raise
if check_precision:
# If, e.g., we are casting double to float, we want to fail if
# precision is lost, but let things pass if it works.
_value = np.array(_value, copy=False)
if not np.can_cast(_value.dtype, self.dtype):
self_dtype_array = np.array(_value, self.dtype)
if not np.all(np.logical_or(self_dtype_array == _value,
np.isnan(_value))):
raise TypeError("cannot convert value type to array type "
"without precision loss")
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] +
(self._to_own_unit(args[-1]),)))
def tostring(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tostring(...).")
def tofile(self, fid, sep="", format="%s"):
raise NotImplementedError("cannot write Quantities to file. Write "
"array with q.value.tofile(...)")
def dump(self, file):
raise NotImplementedError("cannot dump Quantities to file. Write "
"array with q.value.dump()")
def dumps(self):
raise NotImplementedError("cannot dump Quantities to string. Write "
"array with q.value.dumps()")
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# repeat, sort, compress, diagonal OK
def take(self, indices, axis=None, out=None, mode='raise'):
out = super().take(indices, axis=axis, out=out, mode=mode)
# For single elements, ndarray.take returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def put(self, indices, values, mode='raise'):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode='raise'):
raise NotImplementedError("cannot choose based on quantity. Choose "
"using array with q.value.choose(...)")
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind='quicksort', order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(np.array(self),
self._to_own_unit(v, check_precision=False),
*args, **kwargs) # avoid numpy 1.6 problem
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity)
else arg) for arg in args)
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs['out'] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
if NUMPY_LT_1_17:
def clip(self, a_min, a_max, out=None):
return self._wrap_function(np.clip, self._to_own_unit(a_min),
self._to_own_unit(a_max), out=out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype,
out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.var, axis, dtype,
out=out, ddof=ddof, unit=self.unit**2)
def std(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof)
def mean(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.mean, axis, dtype, out=out)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise NotImplementedError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)")
def any(self, axis=None, out=None):
raise NotImplementedError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)")
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis,
out=out, keepdims=keepdims)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
Values to insert. If the type of ``values`` is different
from that of quantity, ``values`` is converted to the matching type.
            ``values`` should be shaped so that it can be broadcast appropriately.
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
:class:`~astropy.coordinates.Distance`
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
    # without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{0} instances require units equivalent to '{1}'"
.format(type(self).__name__, self._equivalent_unit) +
(", but no unit was given." if unit is None else
", so cannot set it to '{0}'.".format(unit)))
super()._set_unit(unit)
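    # Illustrative sketch (editor's addition, not part of the astropy source):
    # a minimal length-only subclass could look like
    #
    #     class Length(SpecificTypeQuantity):
    #         _equivalent_unit = _default_unit = _unit = u.m
    #
    # so that ``Length(5., u.cm)`` is accepted while ``Length(5., u.s)``
    # raises `UnitTypeError` (assuming ``import astropy.units as u``).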
def isclose(a, b, rtol=1.e-5, atol=None, **kwargs):
"""
Notes
-----
Returns True if two arrays are element-wise equal within a tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`.
"""
return np.isclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
**kwargs)
def allclose(a, b, rtol=1.e-5, atol=None, **kwargs):
"""
Notes
-----
Returns True if two arrays are element-wise equal within a tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`.
"""
return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
**kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError("Units for 'desired' ({0}) and 'actual' ({1}) "
"are not convertible"
.format(desired.unit, actual.unit))
if atol is None:
# by default, we assume an absolute tolerance of 0
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError("Units for 'atol' ({0}) and 'actual' ({1}) "
"are not convertible"
.format(atol.unit, actual.unit))
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("`rtol` should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
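# Illustrative usage (editor's sketch, not part of the astropy source),
# assuming ``import astropy.units as u``:
#
#     >>> allclose([1., 2.] * u.m, [100., 200.] * u.cm)    # -> True
#     >>> isclose(1001. * u.m, 1. * u.km, atol=2. * u.m)   # -> True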
|
134f67e704c4d7fc977652535a88ba8100a92964406e9c3a7a076e77d4e39844 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains convenience functions for retrieving solar system
ephemerides from jplephem.
"""
from urllib.parse import urlparse
from collections import OrderedDict
import numpy as np
import os.path
from .sky_coordinate import SkyCoord
from astropy.utils.data import download_file
from astropy.utils.decorators import classproperty
from astropy.utils.state import ScienceState
from astropy.utils import indent
from astropy import units as u
from astropy import _erfa as erfa
from astropy.constants import c as speed_of_light
from .representation import CartesianRepresentation
from .orbital_elements import calc_moon
from .builtin_frames import GCRS, ICRS
from .builtin_frames.utils import get_jd12
__all__ = ["get_body", "get_moon", "get_body_barycentric",
"get_body_barycentric_posvel", "solar_system_ephemeris"]
DEFAULT_JPL_EPHEMERIS = 'de430'
"""List of kernel pairs needed to calculate positions of a given object."""
BODY_NAME_TO_KERNEL_SPEC = OrderedDict(
(('sun', [(0, 10)]),
('mercury', [(0, 1), (1, 199)]),
('venus', [(0, 2), (2, 299)]),
('earth-moon-barycenter', [(0, 3)]),
('earth', [(0, 3), (3, 399)]),
('moon', [(0, 3), (3, 301)]),
('mars', [(0, 4)]),
('jupiter', [(0, 5)]),
('saturn', [(0, 6)]),
('uranus', [(0, 7)]),
('neptune', [(0, 8)]),
('pluto', [(0, 9)]))
)
"""Indices to the plan94 routine for the given object."""
PLAN94_BODY_NAME_TO_PLANET_INDEX = OrderedDict(
(('mercury', 1),
('venus', 2),
('earth-moon-barycenter', 3),
('mars', 4),
('jupiter', 5),
('saturn', 6),
('uranus', 7),
('neptune', 8)))
_EPHEMERIS_NOTE = """
You can either give an explicit ephemeris or use a default, which is normally
a built-in ephemeris that does not require ephemeris files. To change
the default to be the JPL ephemeris::
>>> from astropy.coordinates import solar_system_ephemeris
>>> solar_system_ephemeris.set('jpl') # doctest: +SKIP
Use of any JPL ephemeris requires the jplephem package
(https://pypi.python.org/pypi/jplephem).
If needed, the ephemeris file will be downloaded (and cached).
One can check which bodies are covered by a given ephemeris using::
>>> solar_system_ephemeris.bodies
('earth', 'sun', 'moon', 'mercury', 'venus', 'earth-moon-barycenter', 'mars', 'jupiter', 'saturn', 'uranus', 'neptune')
"""[1:-1]
class solar_system_ephemeris(ScienceState):
"""Default ephemerides for calculating positions of Solar-System bodies.
This can be one of the following::
- 'builtin': polynomial approximations to the orbital elements.
- 'de430' or 'de432s': short-cuts for recent JPL dynamical models.
- 'jpl': Alias for the default JPL ephemeris (currently, 'de430').
- URL: (str) The url to a SPK ephemeris in SPICE binary (.bsp) format.
- PATH: (str) File path to a SPK ephemeris in SPICE binary (.bsp) format.
- `None`: Ensure an Exception is raised without an explicit ephemeris.
The default is 'builtin', which uses the ``epv00`` and ``plan94``
routines from the ``erfa`` implementation of the Standards Of Fundamental
Astronomy library.
Notes
-----
Any file required will be downloaded (and cached) when the state is set.
The default Satellite Planet Kernel (SPK) file from NASA JPL (de430) is
~120MB, and covers years ~1550-2650 CE [1]_. The smaller de432s file is
~10MB, and covers years 1950-2050 [2]_. Older versions of the JPL
ephemerides (such as the widely used de200) can be used via their URL [3]_.
.. [1] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de430-de431.txt
.. [2] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/aareadme_de432s.txt
.. [3] http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/a_old_versions/
"""
_value = 'builtin'
_kernel = None
@classmethod
def validate(cls, value):
# make no changes if value is None
if value is None:
return cls._value
# Set up Kernel; if the file is not in cache, this will download it.
cls.get_kernel(value)
return value
@classmethod
def get_kernel(cls, value):
# ScienceState only ensures the `_value` attribute is up to date,
# so we need to be sure any kernel returned is consistent.
if cls._kernel is None or cls._kernel.origin != value:
if cls._kernel is not None:
cls._kernel.daf.file.close()
cls._kernel = None
kernel = _get_kernel(value)
if kernel is not None:
kernel.origin = value
cls._kernel = kernel
return cls._kernel
@classproperty
def kernel(cls):
return cls.get_kernel(cls._value)
@classproperty
def bodies(cls):
if cls._value is None:
return None
if cls._value.lower() == 'builtin':
return (('earth', 'sun', 'moon') +
tuple(PLAN94_BODY_NAME_TO_PLANET_INDEX.keys()))
else:
return tuple(BODY_NAME_TO_KERNEL_SPEC.keys())
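# Illustrative usage (editor's sketch, not part of the astropy source):
#
#     >>> from astropy.coordinates import solar_system_ephemeris
#     >>> solar_system_ephemeris.set('builtin')       # set the default globally
#     >>> with solar_system_ephemeris.set('de432s'):  # doctest: +SKIP
#     ...     pass  # code in this block uses the JPL de432s kernel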
def _get_kernel(value):
"""
Try importing jplephem, download/retrieve from cache the Satellite Planet
Kernel corresponding to the given ephemeris.
"""
if value is None or value.lower() == 'builtin':
return None
try:
from jplephem.spk import SPK
except ImportError:
raise ImportError("Solar system JPL ephemeris calculations require "
"the jplephem package "
"(https://pypi.python.org/pypi/jplephem)")
if value.lower() == 'jpl':
value = DEFAULT_JPL_EPHEMERIS
if value.lower() in ('de430', 'de432s'):
value = ('http://naif.jpl.nasa.gov/pub/naif/generic_kernels'
'/spk/planets/{:s}.bsp'.format(value.lower()))
elif os.path.isfile(value):
return SPK.open(value)
else:
try:
urlparse(value)
except Exception:
raise ValueError('{} was not one of the standard strings and '
'could not be parsed as a file path or URL'.format(value))
return SPK.open(download_file(value, cache=True))
def _get_body_barycentric_posvel(body, time, ephemeris=None,
get_velocity=True):
"""Calculate the barycentric position (and velocity) of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
get_velocity : bool, optional
Whether or not to calculate the velocity as well as the position.
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation` or tuple
Barycentric (ICRS) position or tuple of position and velocity.
Notes
-----
No velocity can be calculated with the built-in ephemeris for the Moon.
Whether or not velocities are calculated makes little difference for the
built-in ephemerides, but for most JPL ephemeris files, the execution time
roughly doubles.
"""
if ephemeris is None:
ephemeris = solar_system_ephemeris.get()
if ephemeris is None:
raise ValueError(_EPHEMERIS_NOTE)
kernel = solar_system_ephemeris.kernel
else:
kernel = _get_kernel(ephemeris)
jd1, jd2 = get_jd12(time, 'tdb')
if kernel is None:
body = body.lower()
earth_pv_helio, earth_pv_bary = erfa.epv00(jd1, jd2)
if body == 'earth':
body_pv_bary = earth_pv_bary
elif body == 'moon':
if get_velocity:
raise KeyError("the Moon's velocity cannot be calculated with "
"the '{0}' ephemeris.".format(ephemeris))
return calc_moon(time).cartesian
else:
sun_pv_bary = erfa.pvmpv(earth_pv_bary, earth_pv_helio)
if body == 'sun':
body_pv_bary = sun_pv_bary
else:
try:
body_index = PLAN94_BODY_NAME_TO_PLANET_INDEX[body]
except KeyError:
raise KeyError("{0}'s position and velocity cannot be "
"calculated with the '{1}' ephemeris."
.format(body, ephemeris))
body_pv_helio = erfa.plan94(jd1, jd2, body_index)
body_pv_bary = erfa.pvppv(body_pv_helio, sun_pv_bary)
body_pos_bary = CartesianRepresentation(
body_pv_bary['p'], unit=u.au, xyz_axis=-1, copy=False)
if get_velocity:
body_vel_bary = CartesianRepresentation(
body_pv_bary['v'], unit=u.au/u.day, xyz_axis=-1,
copy=False)
else:
if isinstance(body, str):
# Look up kernel chain for JPL ephemeris, based on name
try:
kernel_spec = BODY_NAME_TO_KERNEL_SPEC[body.lower()]
except KeyError:
raise KeyError("{0}'s position cannot be calculated with "
"the {1} ephemeris.".format(body, ephemeris))
else:
            # otherwise, assume the user knows what they're doing and intentionally
# passed in a kernel chain
kernel_spec = body
# jplephem cannot handle multi-D arrays, so convert to 1D here.
jd1_shape = getattr(jd1, 'shape', ())
if len(jd1_shape) > 1:
jd1, jd2 = jd1.ravel(), jd2.ravel()
# Note that we use the new jd1.shape here to create a 1D result array.
# It is reshaped below.
body_posvel_bary = np.zeros((2 if get_velocity else 1, 3) +
getattr(jd1, 'shape', ()))
for pair in kernel_spec:
spk = kernel[pair]
if spk.data_type == 3:
# Type 3 kernels contain both position and velocity.
posvel = spk.compute(jd1, jd2)
if get_velocity:
body_posvel_bary += posvel.reshape(body_posvel_bary.shape)
else:
body_posvel_bary[0] += posvel[:4]
else:
# spk.generate first yields the position and then the
# derivative. If no velocities are desired, body_posvel_bary
# has only one element and thus the loop ends after a single
# iteration, avoiding the velocity calculation.
for body_p_or_v, p_or_v in zip(body_posvel_bary,
spk.generate(jd1, jd2)):
body_p_or_v += p_or_v
body_posvel_bary.shape = body_posvel_bary.shape[:2] + jd1_shape
body_pos_bary = CartesianRepresentation(body_posvel_bary[0],
unit=u.km, copy=False)
if get_velocity:
body_vel_bary = CartesianRepresentation(body_posvel_bary[1],
unit=u.km/u.day, copy=False)
return (body_pos_bary, body_vel_bary) if get_velocity else body_pos_bary
def get_body_barycentric_posvel(body, time, ephemeris=None):
"""Calculate the barycentric position and velocity of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position, velocity : tuple of `~astropy.coordinates.CartesianRepresentation`
Tuple of barycentric (ICRS) position and velocity.
See also
--------
get_body_barycentric : to calculate position only.
This is faster by about a factor two for JPL kernels, but has no
speed advantage for the built-in ephemeris.
Notes
-----
The velocity cannot be calculated for the Moon. To just get the position,
use :func:`~astropy.coordinates.get_body_barycentric`.
"""
return _get_body_barycentric_posvel(body, time, ephemeris)
get_body_barycentric_posvel.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def get_body_barycentric(body, time, ephemeris=None):
"""Calculate the barycentric position of a solar system body.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) position of the body in cartesian coordinates
See also
--------
get_body_barycentric_posvel : to calculate both position and velocity.
Notes
-----
"""
return _get_body_barycentric_posvel(body, time, ephemeris,
get_velocity=False)
get_body_barycentric.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
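# Illustrative usage (editor's sketch, not part of the astropy source):
#
#     >>> from astropy.time import Time
#     >>> t = Time('2014-09-22 23:22')
#     >>> get_body_barycentric('moon', t)   # CartesianRepresentation, built-in ephemeris
#     >>> get_body_barycentric('moon', t, ephemeris='de432s')  # doctest: +SKIP (downloads a kernel)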
def _get_apparent_body_position(body, time, ephemeris):
"""Calculate the apparent position of body ``body`` relative to Earth.
This corrects for the light-travel time to the object.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
ephemeris : str, optional
Ephemeris to use. By default, use the one set with
``~astropy.coordinates.solar_system_ephemeris.set``
Returns
-------
cartesian_position : `~astropy.coordinates.CartesianRepresentation`
Barycentric (ICRS) apparent position of the body in cartesian coordinates
"""
if ephemeris is None:
ephemeris = solar_system_ephemeris.get()
    # The built-in ephemeris is a special case for the Moon: there is no need
    # to account for light travel time, since this is already included in the
    # Meeus algorithm used.
if ephemeris == 'builtin' and body.lower() == 'moon':
return get_body_barycentric(body, time, ephemeris)
# Calculate position given approximate light travel time.
delta_light_travel_time = 20. * u.s
emitted_time = time
light_travel_time = 0. * u.s
earth_loc = get_body_barycentric('earth', time, ephemeris)
while np.any(np.fabs(delta_light_travel_time) > 1.0e-8*u.s):
body_loc = get_body_barycentric(body, emitted_time, ephemeris)
earth_distance = (body_loc - earth_loc).norm()
delta_light_travel_time = (light_travel_time -
earth_distance/speed_of_light)
light_travel_time = earth_distance/speed_of_light
emitted_time = time - light_travel_time
return get_body_barycentric(body, emitted_time, ephemeris)
_get_apparent_body_position.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def get_body(body, time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for a solar system body as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
body : str or other
The solar system body for which to calculate positions. Can also be a
kernel specifier (list of 2-tuples) if the ``ephemeris`` is a JPL
kernel.
time : `~astropy.time.Time`
Time of observation.
location : `~astropy.coordinates.EarthLocation`, optional
Location of observer on the Earth. If not given, will be taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the body
Notes
-----
"""
if location is None:
location = time.location
cartrep = _get_apparent_body_position(body, time, ephemeris)
icrs = ICRS(cartrep)
if location is not None:
obsgeoloc, obsgeovel = location.get_gcrs_posvel(time)
gcrs = icrs.transform_to(GCRS(obstime=time,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel))
else:
gcrs = icrs.transform_to(GCRS(obstime=time))
return SkyCoord(gcrs)
get_body.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
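# Illustrative usage (editor's sketch, not part of the astropy source):
#
#     >>> from astropy.time import Time
#     >>> from astropy.coordinates import EarthLocation
#     >>> t = Time('2014-09-22 23:22')
#     >>> loc = EarthLocation.of_site('greenwich')   # doctest: +REMOTE_DATA
#     >>> get_body('jupiter', t, loc)                # doctest: +SKIP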
def get_moon(time, location=None, ephemeris=None):
"""
Get a `~astropy.coordinates.SkyCoord` for the Earth's Moon as observed
from a location on Earth in the `~astropy.coordinates.GCRS` reference
system.
Parameters
----------
time : `~astropy.time.Time`
Time of observation
location : `~astropy.coordinates.EarthLocation`
Location of observer on the Earth. If none is supplied, taken from
``time`` (if not present, a geocentric observer will be assumed).
ephemeris : str, optional
Ephemeris to use. If not given, use the one set with
``astropy.coordinates.solar_system_ephemeris.set`` (which is
set to 'builtin' by default).
Returns
-------
skycoord : `~astropy.coordinates.SkyCoord`
GCRS Coordinate for the Moon
Notes
-----
"""
return get_body('moon', time, location=location, ephemeris=ephemeris)
get_moon.__doc__ += indent(_EPHEMERIS_NOTE)[4:]
def _apparent_position_in_true_coordinates(skycoord):
"""
Convert Skycoord in GCRS frame into one in which RA and Dec
    are defined w.r.t. the true equinox and poles of the Earth
"""
jd1, jd2 = get_jd12(skycoord.obstime, 'tt')
_, _, _, _, _, _, _, rbpn = erfa.pn00a(jd1, jd2)
return SkyCoord(skycoord.frame.realize_frame(
skycoord.cartesian.transform(rbpn)))
|
2e4edd1dda79546c4e9247ccbff96c731b89e382043a9e564c66ea28aa975e62 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
This module contains utility functions that are for internal use in
astropy.coordinates.angles. Mainly they are conversions from one format
of data to another.
"""
import os
from warnings import warn
import numpy as np
from .errors import (IllegalHourWarning, IllegalHourError,
IllegalMinuteWarning, IllegalMinuteError,
IllegalSecondWarning, IllegalSecondError)
from astropy.utils import format_exception
from astropy import units as u
TAB_HEADER = """# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file was automatically generated from ply. To re-generate this file,
# remove it from this folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/coordinates
#
# You can then commit the changes to this file.
"""
class _AngleParser:
"""
Parses the various angle formats including:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
This class should not be used directly. Use `parse_angle`
instead.
"""
def __init__(self):
# TODO: in principle, the parser should be invalidated if we change unit
# system (from CDS to FITS, say). Might want to keep a link to the
# unit_registry used, and regenerate the parser/lexer if it changes.
# Alternatively, perhaps one should not worry at all and just pre-
# generate the parser for each release (as done for unit formats).
# For some discussion of this problem, see
# https://github.com/astropy/astropy/issues/5350#issuecomment-248770151
if '_parser' not in _AngleParser.__dict__:
_AngleParser._parser, _AngleParser._lexer = self._make_parser()
@classmethod
def _get_simple_unit_names(cls):
simple_units = set(
u.radian.find_equivalent_units(include_prefix_units=True))
simple_unit_names = set()
# We filter out degree and hourangle, since those are treated
# separately.
for unit in simple_units:
if unit != u.deg and unit != u.hourangle:
simple_unit_names.update(unit.names)
return sorted(simple_unit_names)
@classmethod
def _make_parser(cls):
from astropy.extern.ply import lex, yacc
# List of token names.
tokens = (
'SIGN',
'UINT',
'UFLOAT',
'COLON',
'DEGREE',
'HOUR',
'MINUTE',
'SECOND',
'SIMPLE_UNIT'
)
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.\d*)|(\.\d+))([eE][+-−]?\d+)?'
# The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
t.value = float(t.value.replace('−', '-'))
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+−-]'
            # The above includes Unicode "MINUS SIGN" \u2212. It is
# important to include the hyphen last, or the regex will
# treat this as a range.
if t.value == '+':
t.value = 1.0
else:
t.value = -1.0
return t
def t_SIMPLE_UNIT(t):
t.value = u.Unit(t.value)
return t
t_SIMPLE_UNIT.__doc__ = '|'.join(
'(?:{0})'.format(x) for x in cls._get_simple_unit_names())
t_COLON = ':'
t_DEGREE = r'd(eg(ree(s)?)?)?|°'
t_HOUR = r'hour(s)?|h(r)?|ʰ'
t_MINUTE = r'm(in(ute(s)?)?)?|′|\'|ᵐ'
t_SECOND = r's(ec(ond(s)?)?)?|″|\"|ˢ'
# A string containing ignored characters (spaces)
t_ignore = ' '
# Error handling rule
def t_error(t):
raise ValueError(
"Invalid character at col {0}".format(t.lexpos))
lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'angle_lextab.py'))
# Build the lexer
lexer = lex.lex(optimize=True, lextab='angle_lextab',
outputdir=os.path.dirname(__file__))
if not lexer_exists:
cls._add_tab_header('angle_lextab')
def p_angle(p):
'''
angle : hms
| dms
| arcsecond
| arcminute
| simple
'''
p[0] = p[1]
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1.0
def p_ufloat(p):
'''
ufloat : UFLOAT
| UINT
'''
p[0] = float(p[1])
def p_colon(p):
'''
colon : sign UINT COLON ufloat
| sign UINT COLON UINT COLON ufloat
'''
if len(p) == 5:
p[0] = (p[1] * p[2], p[4])
elif len(p) == 7:
p[0] = (p[1] * p[2], p[4], p[6])
def p_spaced(p):
'''
spaced : sign UINT ufloat
| sign UINT UINT ufloat
'''
if len(p) == 4:
p[0] = (p[1] * p[2], p[3])
elif len(p) == 5:
p[0] = (p[1] * p[2], p[3], p[4])
def p_generic(p):
'''
generic : colon
| spaced
| sign UFLOAT
| sign UINT
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[1] * p[2]
def p_hms(p):
'''
hms : sign UINT HOUR
| sign UINT HOUR ufloat
| sign UINT HOUR UINT MINUTE
| sign UINT HOUR UFLOAT MINUTE
| sign UINT HOUR UINT MINUTE ufloat
| sign UINT HOUR UINT MINUTE ufloat SECOND
| generic HOUR
'''
if len(p) == 3:
p[0] = (p[1], u.hourangle)
elif len(p) == 4:
p[0] = (p[1] * p[2], u.hourangle)
elif len(p) in (5, 6):
p[0] = ((p[1] * p[2], p[4]), u.hourangle)
elif len(p) in (7, 8):
p[0] = ((p[1] * p[2], p[4], p[6]), u.hourangle)
def p_dms(p):
'''
dms : sign UINT DEGREE
| sign UINT DEGREE ufloat
| sign UINT DEGREE UINT MINUTE
| sign UINT DEGREE UFLOAT MINUTE
| sign UINT DEGREE UINT MINUTE ufloat
| sign UINT DEGREE UINT MINUTE ufloat SECOND
| generic DEGREE
'''
if len(p) == 3:
p[0] = (p[1], u.degree)
elif len(p) == 4:
p[0] = (p[1] * p[2], u.degree)
elif len(p) in (5, 6):
p[0] = ((p[1] * p[2], p[4]), u.degree)
elif len(p) in (7, 8):
p[0] = ((p[1] * p[2], p[4], p[6]), u.degree)
def p_simple(p):
'''
simple : generic
| generic SIMPLE_UNIT
'''
if len(p) == 2:
p[0] = (p[1], None)
else:
p[0] = (p[1], p[2])
def p_arcsecond(p):
'''
arcsecond : generic SECOND
'''
p[0] = (p[1], u.arcsecond)
def p_arcminute(p):
'''
arcminute : generic MINUTE
'''
p[0] = (p[1], u.arcminute)
def p_error(p):
raise ValueError
parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'angle_parsetab.py'))
parser = yacc.yacc(debug=False, tabmodule='angle_parsetab',
outputdir=os.path.dirname(__file__),
write_tables=True)
if not parser_exists:
cls._add_tab_header('angle_parsetab')
return parser, lexer
@classmethod
def _add_tab_header(cls, name):
lextab_file = os.path.join(os.path.dirname(__file__), name + '.py')
with open(lextab_file, 'r') as f:
contents = f.read()
with open(lextab_file, 'w') as f:
f.write(TAB_HEADER)
f.write(contents)
def parse(self, angle, unit, debug=False):
try:
found_angle, found_unit = self._parser.parse(
angle, lexer=self._lexer, debug=debug)
except ValueError as e:
if str(e):
raise ValueError("{0} in angle {1!r}".format(
str(e), angle))
else:
raise ValueError(
"Syntax error parsing angle {0!r}".format(angle))
if unit is None and found_unit is None:
raise u.UnitsError("No unit specified")
return found_angle, found_unit
def _check_hour_range(hrs):
"""
    Checks that the given value is in the range [-24, 24]. If the value
    is exactly 24 (or -24), a warning is raised.
"""
if np.any(np.abs(hrs) == 24.):
warn(IllegalHourWarning(hrs, 'Treating as 24 hr'))
elif np.any(hrs < -24.) or np.any(hrs > 24.):
raise IllegalHourError(hrs)
def _check_minute_range(m):
"""
    Checks that the given value is in the range [-60, 60]. If the value
    is equal to 60, then a warning is raised.
"""
if np.any(m == 60.):
warn(IllegalMinuteWarning(m, 'Treating as 0 min, +1 hr/deg'))
elif np.any(m < -60.) or np.any(m > 60.):
# "Error: minutes not in range [-60,60) ({0}).".format(min))
raise IllegalMinuteError(m)
def _check_second_range(sec):
"""
    Checks that the given value is in the range [-60, 60]. If the value
    is equal to 60, then a warning is raised.
"""
if np.any(sec == 60.):
warn(IllegalSecondWarning(sec, 'Treating as 0 sec, +1 min'))
elif sec is None:
pass
elif np.any(sec < -60.) or np.any(sec > 60.):
# "Error: seconds not in range [-60,60) ({0}).".format(sec))
raise IllegalSecondError(sec)
def check_hms_ranges(h, m, s):
"""
Checks that the given hour, minute and second are all within
reasonable range.
"""
_check_hour_range(h)
_check_minute_range(m)
_check_second_range(s)
return None
def parse_angle(angle, unit=None, debug=False):
"""
Parses an input string value into an angle value.
Parameters
----------
angle : str
A string representing the angle. May be in one of the following forms:
* 01:02:30.43 degrees
* 1 2 0 hours
* 1°2′3″
* 1d2m3s
* -1h2m3s
unit : `~astropy.units.UnitBase` instance, optional
The unit used to interpret the string. If ``unit`` is not
provided, the unit must be explicitly represented in the
string, either at the end or as number separators.
debug : bool, optional
If `True`, print debugging information from the parser.
Returns
-------
value, unit : tuple
``value`` is the value as a floating point number or three-part
tuple, and ``unit`` is a `Unit` instance which is either the
unit passed in or the one explicitly mentioned in the input
string.
"""
return _AngleParser().parse(angle, unit, debug=debug)
def degrees_to_dms(d):
"""
Convert a floating-point degree value into a ``(degree, arcminute,
arcsecond)`` tuple.
"""
sign = np.copysign(1.0, d)
(df, d) = np.modf(np.abs(d)) # (degree fraction, degree)
(mf, m) = np.modf(df * 60.) # (minute fraction, minute)
s = mf * 60.
return np.floor(sign * d), sign * np.floor(m), sign * s
def dms_to_degrees(d, m, s=None):
"""
Convert degrees, arcminute, arcsecond to a float degrees value.
"""
_check_minute_range(m)
_check_second_range(s)
# determine sign
sign = np.copysign(1.0, d)
try:
d = np.floor(np.abs(d))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError:
raise ValueError(format_exception(
"{func}: dms values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.", d, m, s))
return sign * (d + m / 60. + s / 3600.)
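# Illustrative values (editor's sketch, not part of the astropy source):
#
#     >>> degrees_to_dms(10.2625)        # -> (10.0, 15.0, 45.0), up to float rounding
#     >>> dms_to_degrees(10., 15., 45.)  # -> 10.2625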
def hms_to_hours(h, m, s=None):
"""
Convert hour, minute, second to a float hour value.
"""
check_hms_ranges(h, m, s)
# determine sign
sign = np.copysign(1.0, h)
try:
h = np.floor(np.abs(h))
if s is None:
m = np.abs(m)
s = 0
else:
m = np.floor(np.abs(m))
s = np.abs(s)
except ValueError:
raise ValueError(format_exception(
"{func}: HMS values ({1[0]},{2[1]},{3[2]}) could not be "
"converted to numbers.", h, m, s))
return sign * (h + m / 60. + s / 3600.)
def hms_to_degrees(h, m, s):
"""
Convert hour, minute, second to a float degrees value.
"""
return hms_to_hours(h, m, s) * 15.
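# Illustrative values (editor's sketch, not part of the astropy source):
#
#     >>> hms_to_hours(1., 30., 0.)    # -> 1.5
#     >>> hms_to_degrees(1., 30., 0.)  # -> 22.5  (1.5 hours * 15 deg per hour)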
def hms_to_radians(h, m, s):
"""
Convert hour, minute, second to a float radians value.
"""
return u.degree.to(u.radian, hms_to_degrees(h, m, s))
def hms_to_dms(h, m, s):
"""
    Convert hours, minutes, seconds to a ``(degree, arcminute, arcsecond)``
    tuple.
"""
return degrees_to_dms(hms_to_degrees(h, m, s))
def hours_to_decimal(h):
"""
Convert any parseable hour value into a float value.
"""
from . import angles
return angles.Angle(h, unit=u.hourangle).hour
def hours_to_radians(h):
"""
Convert an angle in Hours to Radians.
"""
return u.hourangle.to(u.radian, h)
def hours_to_hms(h):
"""
    Convert a floating-point hour value into an ``(hour, minute,
second)`` tuple.
"""
sign = np.copysign(1.0, h)
    (hf, h) = np.modf(np.abs(h)) # (hour fraction, hour)
(mf, m) = np.modf(hf * 60.0) # (minute fraction, minute)
s = mf * 60.0
return (np.floor(sign * h), sign * np.floor(m), sign * s)
def radians_to_degrees(r):
"""
Convert an angle in Radians to Degrees.
"""
return u.radian.to(u.degree, r)
def radians_to_hours(r):
"""
Convert an angle in Radians to Hours.
"""
return u.radian.to(u.hourangle, r)
def radians_to_hms(r):
"""
Convert an angle in Radians to an ``(hour, minute, second)`` tuple.
"""
hours = radians_to_hours(r)
return hours_to_hms(hours)
def radians_to_dms(r):
"""
Convert an angle in Radians to an ``(degree, arcminute,
arcsecond)`` tuple.
"""
degrees = u.radian.to(u.degree, r)
return degrees_to_dms(degrees)
def sexagesimal_to_string(values, precision=None, pad=False, sep=(':',),
fields=3):
"""
Given an already separated tuple of sexagesimal values, returns
a string.
See `hours_to_string` and `degrees_to_string` for a higher-level
interface to this functionality.
"""
# Check to see if values[0] is negative, using np.copysign to handle -0
sign = np.copysign(1.0, values[0])
# If the coordinates are negative, we need to take the absolute values.
# We use np.abs because abs(-0) is -0
# TODO: Is this true? (MHvK, 2018-02-01: not on my system)
values = [np.abs(value) for value in values]
if pad:
if sign == -1:
pad = 3
else:
pad = 2
else:
pad = 0
if not isinstance(sep, tuple):
sep = tuple(sep)
if fields < 1 or fields > 3:
raise ValueError(
"fields must be 1, 2, or 3")
if not sep: # empty string, False, or None, etc.
sep = ('', '', '')
elif len(sep) == 1:
if fields == 3:
sep = sep + (sep[0], '')
elif fields == 2:
sep = sep + ('', '')
else:
sep = ('', '', '')
elif len(sep) == 2:
sep = sep + ('',)
elif len(sep) != 3:
raise ValueError(
"Invalid separator specification for converting angle to string.")
# Simplify the expression based on the requested precision. For
# example, if the seconds will round up to 60, we should convert
# it to 0 and carry upwards. If the field is hidden (by the
# fields kwarg) we round up around the middle, 30.0.
if precision is None:
rounding_thresh = 60.0 - (10.0 ** -4)
else:
rounding_thresh = 60.0 - (10.0 ** -precision)
if fields == 3 and values[2] >= rounding_thresh:
values[2] = 0.0
values[1] += 1.0
elif fields < 3 and values[2] >= 30.0:
values[1] += 1.0
if fields >= 2 and values[1] >= 60.0:
values[1] = 0.0
values[0] += 1.0
elif fields < 2 and values[1] >= 30.0:
values[0] += 1.0
literal = []
last_value = ''
literal.append('{0:0{pad}.0f}{sep[0]}')
if fields >= 2:
literal.append('{1:02d}{sep[1]}')
if fields == 3:
if precision is None:
last_value = '{0:.4f}'.format(abs(values[2]))
last_value = last_value.rstrip('0').rstrip('.')
else:
last_value = '{0:.{precision}f}'.format(
abs(values[2]), precision=precision)
if len(last_value) == 1 or last_value[1] == '.':
last_value = '0' + last_value
literal.append('{last_value}{sep[2]}')
literal = ''.join(literal)
return literal.format(np.copysign(values[0], sign),
int(values[1]), values[2],
sep=sep, pad=pad,
last_value=last_value)
def hours_to_string(h, precision=5, pad=False, sep=('h', 'm', 's'),
fields=3):
"""
Takes a decimal hour value and returns a string formatted as hms with
separator specified by the 'sep' parameter.
``h`` must be a scalar.
"""
h, m, s = hours_to_hms(h)
return sexagesimal_to_string((h, m, s), precision=precision, pad=pad,
sep=sep, fields=fields)
def degrees_to_string(d, precision=5, pad=False, sep=':', fields=3):
"""
Takes a decimal degree value and returns a string formatted as dms with
separator specified by the 'sep' parameter.
``d`` must be a scalar.
"""
d, m, s = degrees_to_dms(d)
return sexagesimal_to_string((d, m, s), precision=precision, pad=pad,
sep=sep, fields=fields)
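# Examples (illustrative; assuming the default precision of 5):
#
#     hours_to_string(11.25)   # -> '11h15m00.00000s'
#     degrees_to_string(22.5)  # -> '22:30:00.00000'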
def angular_separation(lon1, lat1, lon2, lat2):
"""
Angular separation between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
angular separation : `~astropy.units.Quantity` or float
Type depends on input; `Quantity` in angular units, or float in
radians.
Notes
-----
The angular separation is calculated using the Vincenty formula [1]_,
which is slightly more complex and computationally expensive than
some alternatives, but is stable at all distances, including the
poles and antipodes.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
sdlon = np.sin(lon2 - lon1)
cdlon = np.cos(lon2 - lon1)
slat1 = np.sin(lat1)
slat2 = np.sin(lat2)
clat1 = np.cos(lat1)
clat2 = np.cos(lat2)
num1 = clat2 * sdlon
num2 = clat1 * slat2 - slat1 * clat2 * cdlon
denominator = slat1 * slat2 + clat1 * clat2 * cdlon
return np.arctan2(np.hypot(num1, num2), denominator)
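# Example (illustrative): with plain floats the inputs and output are in
# radians, so two points a quarter circle apart on the equator give pi/2:
#
#     angular_separation(0.0, 0.0, np.pi / 2, 0.0)  # -> 1.5707963... (pi/2)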
def position_angle(lon1, lat1, lon2, lat2):
"""
Position Angle (East of North) between two points on a sphere.
Parameters
----------
lon1, lat1, lon2, lat2 : `Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the two points. Quantities should be in
angular units; floats in radians.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from position 1 to
position 2. If any of the angles are arrays, this will contain an array
following the appropriate `numpy` broadcasting rules.
"""
from .angles import Angle
deltalon = lon2 - lon1
colat = np.cos(lat2)
x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(deltalon)
y = np.sin(deltalon) * colat
return Angle(np.arctan2(y, x), u.radian).wrap_at(360*u.deg)
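# Example (illustrative): the position angle is measured East of North, so a
# point due East of the reference point is at 90 degrees:
#
#     position_angle(0.0, 0.0, np.pi / 2, 0.0).to(u.deg)  # -> 90 deg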
def offset_by(lon, lat, posang, distance):
"""
Point with the given offset from the given point.
Parameters
----------
lon, lat, posang, distance : `Angle`, `~astropy.units.Quantity` or float
Longitude and latitude of the starting point,
position angle and distance to the final point.
Quantities should be in angular units; floats in radians.
Polar points at lat = +/-90 are treated as the limit of +/-(90-epsilon) with the same lon.
Returns
-------
lon, lat : `~astropy.coordinates.Angle`
The position of the final point. If any of the angles are arrays,
these will contain arrays following the appropriate `numpy` broadcasting rules.
0 <= lon < 2pi.
"""
from .angles import Angle
# Calculations are done using the spherical trigonometry sine and cosine rules
# of the triangle A at North Pole, B at starting point, C at final point
# with angles A (change in lon), B (posang), C (not used, but negative reciprocal posang)
# with sides a (distance), b (final co-latitude), c (starting colatitude)
# B, a, c are knowns; A and b are unknowns
# https://en.wikipedia.org/wiki/Spherical_trigonometry
cos_a = np.cos(distance)
sin_a = np.sin(distance)
cos_c = np.sin(lat)
sin_c = np.cos(lat)
cos_B = np.cos(posang)
sin_B = np.sin(posang)
# cosine rule: Know two sides: a,c and included angle: B; get unknown side b
cos_b = cos_c * cos_a + sin_c * sin_a * cos_B
# sin_b = np.sqrt(1 - cos_b**2)
# sine rule and cosine rule for A (using both lets arctan2 pick quadrant).
# multiplying both sin_A and cos_A by x=sin_b * sin_c prevents /0 errors
# at poles. Correct for the x=0 multiplication a few lines down.
# sin_A/sin_a == sin_B/sin_b # Sine rule
xsin_A = sin_a * sin_B * sin_c
# cos_a == cos_b * cos_c + sin_b * sin_c * cos_A # cosine rule
xcos_A = cos_a - cos_b * cos_c
A = Angle(np.arctan2(xsin_A, xcos_A), u.radian)
# Treat the poles as if they are infinitesimally far from pole but at given lon
small_sin_c = sin_c < 1e-12
if small_sin_c.any():
# For south pole (cos_c = -1), A = posang; for North pole, A=180 deg - posang
A_pole = (90*u.deg + cos_c*(90*u.deg-Angle(posang, u.radian))).to(u.rad)
if A.shape:
# broadcast to ensure the shape is like that of A, which is also
# affected by the (possible) shapes of lat, posang, and distance.
small_sin_c = np.broadcast_to(small_sin_c, A.shape)
A[small_sin_c] = A_pole[small_sin_c]
else:
A = A_pole
outlon = (Angle(lon, u.radian) + A).wrap_at(360.0*u.deg).to(u.deg)
outlat = Angle(np.arcsin(cos_b), u.radian).to(u.deg)
return outlon, outlat
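# Example (illustrative): starting on the equator and moving 10 degrees due
# North (posang = 0) simply increases the latitude:
#
#     offset_by(0 * u.deg, 0 * u.deg, posang=0 * u.deg, distance=10 * u.deg)
#     # -> (0 deg, 10 deg), returned as Angle objects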
|
46a0e4302697618c642e52cc1357797e43f3b70df697ae6f9246ddbe76d0575c | """Implements the Astropy TestRunner which is a thin wrapper around py.test."""
import inspect
import os
import glob
import copy
import shlex
import sys
import tempfile
import warnings
import importlib
from collections import OrderedDict
from importlib.util import find_spec
from functools import wraps
from astropy.config.paths import set_temp_config, set_temp_cache
from astropy.utils import find_current_module
from astropy.utils.exceptions import AstropyWarning, AstropyDeprecationWarning
__all__ = ['TestRunner', 'TestRunnerBase', 'keyword']
class keyword:
"""
A decorator to mark a method as a keyword argument for the ``TestRunner``.
Parameters
----------
default_value : `object`
The default value for the keyword argument. (Default: `None`)
priority : `int`
keyword argument methods are executed in order of descending priority.
"""
def __init__(self, default_value=None, priority=0):
self.default_value = default_value
self.priority = priority
def __call__(self, f):
def keyword(*args, **kwargs):
return f(*args, **kwargs)
keyword._default_value = self.default_value
keyword._priority = self.priority
# Set __doc__ explicitly here rather than using wraps because we want
# to keep the function name as keyword so we can inspect it later.
keyword.__doc__ = f.__doc__
return keyword
class TestRunnerBase:
"""
The base class for the TestRunner.
A test runner can be constructed by creating a subclass of this class and
defining 'keyword' methods. These are methods that have the
`~astropy.tests.runner.keyword` decorator; these methods are used to
construct allowed keyword arguments to the
`~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow
customization of individual keyword arguments (and associated logic)
without having to re-implement the whole
`~astropy.tests.runner.TestRunnerBase.run_tests` method.
Examples
--------
A simple keyword method::
class MyRunner(TestRunnerBase):
@keyword('default_value')
def spam(self, spam, kwargs):
\"\"\"
spam : `str`
The parameter description for the run_tests docstring.
\"\"\"
# Return value must be a list with a CLI parameter for pytest.
return ['--spam={}'.format(spam)]
"""
def __init__(self, base_path):
self.base_path = os.path.abspath(base_path)
def __new__(cls, *args, **kwargs):
# Before constructing the class parse all the methods that have been
# decorated with ``keyword``.
# The objective of this method is to construct a default set of keyword
# arguments to the ``run_tests`` method. It does this by inspecting the
# methods of the class for functions with the name ``keyword`` which is
# the name of the decorator wrapping function. Once it has created this
# dictionary, it also formats the docstring of ``run_tests`` to be
# comprised of the docstrings for the ``keyword`` methods.
# To add a keyword argument to the ``run_tests`` method, define a new
# method decorated with ``@keyword`` and with the ``self, name, kwargs``
# signature.
# Get all 'function' members as the wrapped methods are functions
functions = inspect.getmembers(cls, predicate=inspect.isfunction)
# Filter out anything that's not got the name 'keyword'
keywords = filter(lambda func: func[1].__name__ == 'keyword', functions)
# Sort all keywords based on the priority flag.
sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True)
cls.keywords = OrderedDict()
doc_keywords = ""
for name, func in sorted_keywords:
# Here we test if the function has been overloaded to return
# NotImplemented which is the way to disable arguments on
# subclasses. If it has been disabled we need to remove it from the
# default keywords dict. We do it in the try except block because
# we do not have access to an instance of the class, so this is
# going to error unless the method is just doing `return
# NotImplemented`.
try:
# Second argument is False, as it is normally a bool.
# The other two are placeholders for objects.
if func(None, False, None) is NotImplemented:
continue
except Exception:
pass
# Construct the default kwargs dict and docstring
cls.keywords[name] = func._default_value
if func.__doc__:
doc_keywords += ' '*8
doc_keywords += func.__doc__.strip()
doc_keywords += '\n\n'
cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
return super().__new__(cls)
def _generate_args(self, **kwargs):
# Update default values with passed kwargs
# but don't modify the defaults
keywords = copy.deepcopy(self.keywords)
keywords.update(kwargs)
# Iterate through the keywords (in order of priority)
args = []
for keyword in keywords.keys():
func = getattr(self, keyword)
result = func(keywords[keyword], keywords)
# Allow disabling of options in a subclass
if result is NotImplemented:
raise TypeError("run_tests() got an unexpected keyword argument {}".format(keyword))
# keyword methods must return a list
if not isinstance(result, list):
raise TypeError("{} keyword method must return a list".format(keyword))
args += result
return args
RUN_TESTS_DOCSTRING = \
"""
Run the tests for the package.
This method builds arguments for and then calls ``pytest.main``.
Parameters
----------
{keywords}
"""
_required_dependencies = ['pytest', 'pytest_remotedata', 'pytest_doctestplus']
_missing_dependency_error = "Test dependencies are missing. You should install the 'pytest-astropy' package."
@classmethod
def _has_test_dependencies(cls): # pragma: no cover
# Using the test runner will not work without these dependencies, but
# pytest-openfiles is optional, so it's not listed here.
for module in cls._required_dependencies:
spec = find_spec(module)
# Checking loader accounts for packages that were uninstalled
if spec is None or spec.loader is None:
raise RuntimeError(cls._missing_dependency_error)
def run_tests(self, **kwargs):
# The following option will include eggs inside a .eggs folder in
# sys.path when running the tests. This is possible so that when
# running python setup.py test, test dependencies installed via e.g.
# tests_require are available here. This is not an advertised option
# since it is only for internal use
if kwargs.pop('add_local_eggs_to_path', False):
# Add each egg to sys.path individually
for egg in glob.glob(os.path.join('.eggs', '*.egg')):
sys.path.insert(0, egg)
# We now need to force reload pkg_resources in case any pytest
# plugins were added above, so that their entry points are picked up
import pkg_resources
importlib.reload(pkg_resources)
self._has_test_dependencies() # pragma: no cover
# The docstring for this method is defined as a class variable.
# This allows it to be built for each subclass in __new__.
# Don't import pytest until it's actually needed to run the tests
import pytest
# Raise error for undefined kwargs
allowed_kwargs = set(self.keywords.keys())
passed_kwargs = set(kwargs.keys())
if not passed_kwargs.issubset(allowed_kwargs):
wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
raise TypeError("run_tests() got an unexpected keyword argument {}".format(wrong_kwargs[0]))
args = self._generate_args(**kwargs)
if kwargs.get('plugins', None) is not None:
plugins = kwargs.pop('plugins')
elif self.keywords.get('plugins', None) is not None:
plugins = self.keywords['plugins']
else:
plugins = []
# Override the config locations to not make a new directory nor use
# existing cache or config. Note that we need to do this here in
# addition to in conftest.py - for users running tests interactively
# in e.g. IPython, conftest.py would get read in too late, so we need
# to do it here - but at the same time the code here doesn't work when
# running tests in parallel mode because this uses subprocesses which
# don't know about the temporary config/cache.
astropy_config = tempfile.mkdtemp('astropy_config')
astropy_cache = tempfile.mkdtemp('astropy_cache')
# Have to use nested with statements for cross-Python support
# Note, using these context managers here is superfluous if the
# config_dir or cache_dir options to py.test are in use, but it's
# also harmless to nest the contexts
with set_temp_config(astropy_config, delete=True):
with set_temp_cache(astropy_cache, delete=True):
return pytest.main(args=args, plugins=plugins)
@classmethod
def make_test_runner_in(cls, path):
"""
Constructs a `TestRunner` to run in the given path, and returns a
``test()`` function which takes the same arguments as
`TestRunner.run_tests`.
The returned ``test()`` function will be defined in the module this
was called from. This is used to implement the ``astropy.test()``
function (or the equivalent for affiliated packages).
"""
runner = cls(path)
@wraps(runner.run_tests, ('__doc__',))
def test(**kwargs):
return runner.run_tests(**kwargs)
module = find_current_module(2)
if module is not None:
test.__module__ = module.__name__
# A somewhat unusual hack, but delete the attached __wrapped__
# attribute--although this is normally used to tell if the function
# was wrapped with wraps; on some versions of Python this is also
# used to determine the signature to display in help(), which is
# not useful in this case. We don't really care here whether the
# function was wrapped either.
if hasattr(test, '__wrapped__'):
del test.__wrapped__
test.__test__ = False
return test
class TestRunner(TestRunnerBase):
"""
A test runner for astropy tests
"""
def packages_path(self, packages, base_path, error=None, warning=None):
"""
Generates the path for multiple packages.
Parameters
----------
packages : str
Comma separated string of packages.
base_path : str
Base path to the source code or documentation.
error : str
Error message to be raised as ``ValueError``. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
respectively. No error is raised if `None`. (Default: `None`)
warning : str
Warning message to be issued. Individual package
name and path can be accessed by ``{name}`` and ``{path}``
respectively. No warning is issued if `None`. (Default: `None`)
Returns
-------
paths : list of str
List of strings of existing package paths.
"""
packages = packages.split(",")
paths = []
for package in packages:
path = os.path.join(
base_path, package.replace('.', os.path.sep))
if not os.path.isdir(path):
info = {'name': package, 'path': path}
if error is not None:
raise ValueError(error.format(**info))
if warning is not None:
warnings.warn(warning.format(**info))
else:
paths.append(path)
return paths
# Increase priority so this warning is displayed first.
@keyword(priority=1000)
def coverage(self, coverage, kwargs):
if coverage:
warnings.warn(
"The coverage option is ignored on run_tests, since it "
"can not be made to work in that context. Use "
"'python setup.py test --coverage' instead.",
AstropyWarning)
return []
# test_path depends on self.package_path so make sure this runs before
# test_path.
@keyword(priority=1)
def package(self, package, kwargs):
"""
package : str, optional
The name of a specific package to test, e.g. 'io.fits' or
'utils'. Accepts comma separated string to specify multiple
packages. If nothing is specified all default tests are run.
"""
if package is None:
self.package_path = [self.base_path]
else:
error_message = ('package to test is not found: {name} '
'(at path {path}).')
self.package_path = self.packages_path(package, self.base_path,
error=error_message)
if not kwargs['test_path']:
return self.package_path
return []
@keyword()
def test_path(self, test_path, kwargs):
"""
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
"""
all_args = []
# Ensure that the package kwarg has been run.
self.package(kwargs['package'], kwargs)
if test_path:
base, ext = os.path.splitext(test_path)
if ext in ('.rst', ''):
if kwargs['docs_path'] is None:
# This shouldn't happen from "python setup.py test"
raise ValueError(
"Can not test .rst files without a docs_path "
"specified.")
abs_docs_path = os.path.abspath(kwargs['docs_path'])
abs_test_path = os.path.abspath(
os.path.join(abs_docs_path, os.pardir, test_path))
common = os.path.commonprefix((abs_docs_path, abs_test_path))
if os.path.exists(abs_test_path) and common == abs_docs_path:
# Turn on the doctest_rst plugin
all_args.append('--doctest-rst')
test_path = abs_test_path
# Check that the extensions are in the path and not at the end to
# support specifying the name of the test, i.e.
# test_quantity.py::test_unit
if not (os.path.isdir(test_path) or ('.py' in test_path or '.rst' in test_path)):
raise ValueError("Test path must be a directory or a path to "
"a .py or .rst file")
return all_args + [test_path]
return []
@keyword()
def args(self, args, kwargs):
"""
args : str, optional
Additional arguments to be passed to ``pytest.main`` in the ``args``
keyword argument.
"""
if args:
return shlex.split(args, posix=not sys.platform.startswith('win'))
return []
@keyword(default_value=['astropy.tests.plugins.display'])
def plugins(self, plugins, kwargs):
"""
plugins : list, optional
Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
argument.
"""
# Plugins are handled independently by `run_tests` so we define this
# keyword just for the docstring
return []
@keyword()
def verbose(self, verbose, kwargs):
"""
verbose : bool, optional
Convenience option to turn on verbose output from py.test. Passing
True is the same as specifying ``-v`` in ``args``.
"""
if verbose:
return ['-v']
return []
@keyword()
def pastebin(self, pastebin, kwargs):
"""
pastebin : ('failed', 'all', None), optional
Convenience option for turning on py.test pastebin output. Set to
'failed' to upload info for failed tests, or 'all' to upload info
for all tests.
"""
if pastebin is not None:
if pastebin in ['failed', 'all']:
return ['--pastebin={0}'.format(pastebin)]
else:
raise ValueError("pastebin should be 'failed' or 'all'")
return []
@keyword(default_value='none')
def remote_data(self, remote_data, kwargs):
"""
remote_data : {'none', 'astropy', 'any'}, optional
Controls whether to run tests marked with @pytest.mark.remote_data. This can be
set to run no tests with remote data (``none``), only ones that use
data from http://data.astropy.org (``astropy``), or all tests that
use remote data (``any``). The default is ``none``.
"""
if remote_data is True:
remote_data = 'any'
elif remote_data is False:
remote_data = 'none'
elif remote_data not in ('none', 'astropy', 'any'):
warnings.warn("The remote_data option should be one of "
"none/astropy/any (found {0}). For backward-compatibility, "
"assuming 'any', but you should change the option to be "
"one of the supported ones to avoid issues in "
"future.".format(remote_data),
AstropyDeprecationWarning)
remote_data = 'any'
return ['--remote-data={0}'.format(remote_data)]
@keyword()
def pep8(self, pep8, kwargs):
"""
pep8 : bool, optional
Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
"""
if pep8:
try:
import pytest_pep8 # pylint: disable=W0611
except ImportError:
raise ImportError('PEP8 checking requires pytest-pep8 plugin: '
'http://pypi.python.org/pypi/pytest-pep8')
else:
return ['--pep8', '-k', 'pep8']
return []
@keyword()
def pdb(self, pdb, kwargs):
"""
pdb : bool, optional
Turn on PDB post-mortem analysis for failing tests. Same as
specifying ``--pdb`` in ``args``.
"""
if pdb:
return ['--pdb']
return []
@keyword()
def open_files(self, open_files, kwargs):
"""
open_files : bool, optional
Fail when any tests leave files open. Off by default, because
this adds extra run time to the test suite. Requires the
``psutil`` package.
"""
if open_files:
if kwargs['parallel'] != 0:
raise SystemError(
"open file detection may not be used in conjunction with "
"parallel testing.")
try:
import psutil # pylint: disable=W0611
except ImportError:
raise SystemError(
"open file detection requested, but psutil package "
"is not installed.")
return ['--open-files']
print("Checking for unclosed files")
return []
@keyword(0)
def parallel(self, parallel, kwargs):
"""
parallel : int or 'auto', optional
When provided, run the tests in parallel on the specified
number of CPUs. If parallel is ``'auto'``, it will use all
the cores on the machine. Requires the ``pytest-xdist`` plugin.
"""
if parallel != 0:
try:
from xdist import plugin # noqa
except ImportError:
raise SystemError(
"running tests in parallel requires the pytest-xdist package")
return ['-n', str(parallel)]
return []
@keyword()
def docs_path(self, docs_path, kwargs):
"""
docs_path : str, optional
The path to the documentation .rst files.
"""
paths = []
if docs_path is not None and not kwargs['skip_docs']:
if kwargs['package'] is not None:
warning_message = ("Can not test .rst docs for {name}, since "
"docs path ({path}) does not exist.")
paths = self.packages_path(kwargs['package'], docs_path,
warning=warning_message)
elif not kwargs['test_path']:
paths = [docs_path, ]
if len(paths) and not kwargs['test_path']:
paths.append('--doctest-rst')
return paths
@keyword()
def skip_docs(self, skip_docs, kwargs):
"""
skip_docs : `bool`, optional
When `True`, skips running the doctests in the .rst files.
"""
# Skip docs is a bool used by docs_path only.
return []
@keyword()
def repeat(self, repeat, kwargs):
"""
repeat : `int`, optional
If set, specifies how many times each test should be run. This is
useful for diagnosing sporadic failures.
"""
if repeat:
return ['--repeat={0}'.format(repeat)]
return []
# Override run_tests for astropy-specific fixes
def run_tests(self, **kwargs):
# This prevents cyclical import problems that make it
# impossible to test packages that define Table types on their
# own.
from astropy.table import Table # pylint: disable=W0611
return super().run_tests(**kwargs)
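# Usage sketch (illustrative): a package typically exposes a ``test()``
# function built from this runner in its top-level ``__init__``, roughly:
#
#     import os
#     from astropy.tests.runner import TestRunner
#     test = TestRunner.make_test_runner_in(os.path.dirname(__file__))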
|
1a2467b69cda9c55c3e1b10242f7939eb5f6dccbb4a519f585d612c18df08857 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import numpy as np
from .constant import Constant, EMConstant
# PHYSICAL CONSTANTS
class CODATA2014(Constant):
default_reference = 'CODATA 2014'
_registry = {}
_has_incompatible_units = set()
class EMCODATA2014(CODATA2014, EMConstant):
_registry = CODATA2014._registry
h = CODATA2014('h', "Planck constant", 6.626070040e-34,
'J s', 0.000000081e-34, system='si')
hbar = CODATA2014('hbar', "Reduced Planck constant", 1.054571800e-34,
'J s', 0.000000013e-34, system='si')
k_B = CODATA2014('k_B', "Boltzmann constant", 1.38064852e-23,
'J / (K)', 0.00000079e-23, system='si')
c = CODATA2014('c', "Speed of light in vacuum", 299792458.,
'm / (s)', 0.0, system='si')
G = CODATA2014('G', "Gravitational constant", 6.67408e-11,
'm3 / (kg s2)', 0.00031e-11, system='si')
g0 = CODATA2014('g0', "Standard acceleration of gravity", 9.80665,
'm / s2', 0.0, system='si')
m_p = CODATA2014('m_p', "Proton mass", 1.672621898e-27,
'kg', 0.000000021e-27, system='si')
m_n = CODATA2014('m_n', "Neutron mass", 1.674927471e-27,
'kg', 0.000000021e-27, system='si')
m_e = CODATA2014('m_e', "Electron mass", 9.10938356e-31,
'kg', 0.00000011e-31, system='si')
u = CODATA2014('u', "Atomic mass", 1.660539040e-27,
'kg', 0.000000020e-27, system='si')
sigma_sb = CODATA2014('sigma_sb', "Stefan-Boltzmann constant", 5.670367e-8,
'W / (K4 m2)', 0.000013e-8, system='si')
e = EMCODATA2014('e', 'Electron charge', 1.6021766208e-19,
'C', 0.0000000098e-19, system='si')
eps0 = EMCODATA2014('eps0', 'Electric constant', 8.854187817e-12,
'F/m', 0.0, system='si')
N_A = CODATA2014('N_A', "Avogadro's number", 6.022140857e23,
'1 / (mol)', 0.000000074e23, system='si')
R = CODATA2014('R', "Gas constant", 8.3144598,
'J / (K mol)', 0.0000048, system='si')
Ryd = CODATA2014('Ryd', 'Rydberg constant', 10973731.568508,
'1 / (m)', 0.000065, system='si')
a0 = CODATA2014('a0', "Bohr radius", 0.52917721067e-10,
'm', 0.00000000012e-10, system='si')
muB = CODATA2014('muB', "Bohr magneton", 927.4009994e-26,
'J/T', 0.00002e-26, system='si')
alpha = CODATA2014('alpha', "Fine-structure constant", 7.2973525664e-3,
'', 0.0000000017e-3, system='si')
atm = CODATA2014('atm', "Standard atmosphere", 101325,
'Pa', 0.0, system='si')
mu0 = CODATA2014('mu0', "Magnetic constant", 4.0e-7 * np.pi, 'N/A2', 0.0,
system='si')
sigma_T = CODATA2014('sigma_T', "Thomson scattering cross-section",
0.66524587158e-28, 'm2', 0.00000000091e-28,
system='si')
b_wien = CODATA2014('b_wien', 'Wien wavelength displacement law constant',
2.8977729e-3, 'm K', 0.0000017e-3, system='si')
# cgs constants
# Only constants that cannot be converted directly from S.I. are defined here.
e_esu = EMCODATA2014(e.abbrev, e.name, e.value * c.value * 10.0,
'statC', e.uncertainty * c.value * 10.0, system='esu')
e_emu = EMCODATA2014(e.abbrev, e.name, e.value / 10, 'abC',
e.uncertainty / 10, system='emu')
e_gauss = EMCODATA2014(e.abbrev, e.name, e.value * c.value * 10.0,
'Fr', e.uncertainty * c.value * 10.0, system='gauss')
|
b204b3f802f764be09a477ce14d248e36047e03d22e460f03a0403c7f5571641 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains astronomical and physical constants for use in Astropy or other
places.
A typical use case might be::
>>> from astropy.constants import c, m_e
>>> # ... define the mass of something you want the rest energy of as m ...
>>> m = m_e
>>> E = m * c**2
>>> E.to('MeV') # doctest: +FLOAT_CMP
<Quantity 0.510998927603161 MeV>
"""
import warnings
from contextlib import contextmanager
from astropy.utils import find_current_module
# Hack to make circular imports with units work
from astropy import units
del units
# These lines import some namespaces into the top level
from .constant import Constant, EMConstant # noqa
from . import si # noqa
from . import cgs # noqa
from .config import codata, iaudata
from . import utils as _utils
# for updating the constants module docstring
_lines = [
'The following constants are available:\n',
'========== ============== ================ =========================',
' Name Value Unit Description',
'========== ============== ================ =========================',
]
# Catch warnings about "already has a definition in the None system"
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'Constant .*already has a definition')
_utils._set_c(codata, iaudata, find_current_module(),
not_in_module_only=False, doclines=_lines, set_class=True)
_lines.append(_lines[1])
if __doc__ is not None:
__doc__ += '\n'.join(_lines)
# TODO: Re-implement in a way that is more consistent with astropy.units.
# See https://github.com/astropy/astropy/pull/7008 discussions.
@contextmanager
def set_enabled_constants(modname):
"""
Context manager to temporarily set values in the ``constants``
namespace to an older version.
See :ref:`astropy-constants-prior` for usage.
Parameters
----------
modname : {'astropyconst13'}
Name of the module containing an older version.
"""
# Re-import here because these were deleted from namespace on init.
import importlib
import warnings
from astropy.utils import find_current_module
from . import utils as _utils
try:
modmodule = importlib.import_module('.constants.' + modname, 'astropy')
codata_context = modmodule.codata
iaudata_context = modmodule.iaudata
except ImportError as exc:
exc.args += ('Context manager does not currently handle {}'
.format(modname),)
raise
module = find_current_module()
# Ignore warnings about "Constant xxx already has a definition..."
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Constant .*already has a definition')
_utils._set_c(codata_context, iaudata_context, module,
not_in_module_only=False, set_class=True)
try:
yield
finally:
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
'Constant .*already has a definition')
_utils._set_c(codata, iaudata, module,
not_in_module_only=False, set_class=True)
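# Usage sketch (illustrative):
#
#     from astropy import constants
#     with constants.set_enabled_constants('astropyconst13'):
#         ...  # constants.G etc. refer to the Astropy v1.3 (CODATA 2010) values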
# Clean up namespace
del find_current_module
del warnings
del contextmanager
del _utils
del _lines
|
0d2497df52f4a6edcb2eaa63849922bcd5affd535d9f7421022a879a66a8f87f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants for Astropy v1.3 and earlier.
See :mod:`astropy.constants` for a complete listing of constants
defined in Astropy.
"""
from astropy.utils import find_current_module
from . import utils as _utils
from . import codata2010, iau2012
codata = codata2010
iaudata = iau2012
_utils._set_c(codata, iaudata, find_current_module())
# Clean up namespace
del find_current_module
del _utils
|
d69a5b75c154b28f5e845b688eea92e264c3f911f566e5c1e24f247de81b4812 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Configures the codata and iaudata used, possibly using user configuration.
"""
# Note: doing this in __init__ causes import problems with units,
# as si.py and cgs.py have to import the result.
import importlib
import astropy
phys_version = astropy.physical_constants.get()
astro_version = astropy.astronomical_constants.get()
codata = importlib.import_module('.constants.' + phys_version, 'astropy')
iaudata = importlib.import_module('.constants.' + astro_version, 'astropy')
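# Note (illustrative): phys_version and astro_version are names of sibling
# modules in astropy.constants (for example 'codata2018' and 'iau2015',
# depending on the configured science state), so ``codata`` and ``iaudata``
# end up bound to those modules.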
|
e1bdc518550c178fe7ec38835af77de8a223211b8b78d01747bee6bd1b34fab7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants for Astropy v2.0. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
from astropy.utils import find_current_module
from . import utils as _utils
from . import codata2014, iau2015
codata = codata2014
iaudata = iau2015
_utils._set_c(codata, iaudata, find_current_module())
# Clean up namespace
del find_current_module
del _utils
|
1e46b78e34e0db15ac1cafd09df15a552f68893eddd6af075ba6597c9275d1fc | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in cgs units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import itertools
from .constant import Constant
from .config import codata, iaudata
for _nm, _c in itertools.chain(sorted(vars(codata).items()),
sorted(vars(iaudata).items())):
if (isinstance(_c, Constant) and _c.abbrev not in locals()
and _c.system in ['esu', 'gauss', 'emu']):
locals()[_c.abbrev] = _c
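# Note: the loop above injects each Constant defined with a CGS-style system
# ('esu', 'gauss' or 'emu') into this module's namespace under its
# abbreviation, skipping abbreviations that are already bound.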
|
d659d750be5df7f8966cbf5df92a9ce364d83957182e42f2cc6397a59db4f264 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import itertools
from .constant import Constant
from .config import codata, iaudata
for _nm, _c in itertools.chain(sorted(vars(codata).items()),
sorted(vars(iaudata).items())):
if (isinstance(_c, Constant) and _c.abbrev not in locals()
and _c.system == 'si'):
locals()[_c.abbrev] = _c
|
453e41a26e7540d60b74f43c14fc3ff9aff57ea717775d1649dd541933ed4357 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants in SI units. See :mod:`astropy.constants`
for a complete listing of constants defined in Astropy.
"""
import math
from .constant import Constant, EMConstant
# PHYSICAL CONSTANTS
# https://en.wikipedia.org/wiki/2019_redefinition_of_SI_base_units
class CODATA2018(Constant):
default_reference = 'CODATA 2018'
_registry = {}
_has_incompatible_units = set()
class EMCODATA2018(CODATA2018, EMConstant):
_registry = CODATA2018._registry
h = CODATA2018('h', "Planck constant", 6.62607015e-34,
'J s', 0.0, system='si')
hbar = CODATA2018('hbar', "Reduced Planck constant", h.value / (2 * math.pi),
'J s', 0.0, system='si')
k_B = CODATA2018('k_B', "Boltzmann constant", 1.380649e-23,
'J / (K)', 0.0, system='si')
c = CODATA2018('c', "Speed of light in vacuum", 299792458.,
'm / (s)', 0.0, system='si')
G = CODATA2018('G', "Gravitational constant", 6.67430e-11,
'm3 / (kg s2)', 0.00015e-11, system='si')
g0 = CODATA2018('g0', "Standard acceleration of gravity", 9.80665,
'm / s2', 0.0, system='si')
m_p = CODATA2018('m_p', "Proton mass", 1.67262192369e-27,
'kg', 0.00000000051e-27, system='si')
m_n = CODATA2018('m_n', "Neutron mass", 1.67492749804e-27,
'kg', 0.00000000095e-27, system='si')
m_e = CODATA2018('m_e', "Electron mass", 9.1093837015e-31,
'kg', 0.0000000028e-31, system='si')
u = CODATA2018('u', "Atomic mass", 1.66053906660e-27,
'kg', 0.00000000050e-27, system='si')
sigma_sb = CODATA2018(
'sigma_sb', "Stefan-Boltzmann constant",
2 * math.pi ** 5 * k_B.value ** 4 / (15 * h.value ** 3 * c.value ** 2),
'W / (K4 m2)', 0.0, system='si')
e = EMCODATA2018('e', 'Electron charge', 1.602176634e-19,
'C', 0.0, system='si')
eps0 = EMCODATA2018('eps0', 'Vacuum electric permittivity', 8.8541878128e-12,
'F/m', 0.0000000013e-12, system='si')
N_A = CODATA2018('N_A', "Avogadro's number", 6.02214076e23,
'1 / (mol)', 0.0, system='si')
R = CODATA2018('R', "Gas constant", k_B.value * N_A.value,
'J / (K mol)', 0.0, system='si')
Ryd = CODATA2018('Ryd', 'Rydberg constant', 10973731.568160,
'1 / (m)', 0.000021, system='si')
a0 = CODATA2018('a0', "Bohr radius", 5.29177210903e-11,
'm', 0.00000000080e-11, system='si')
muB = CODATA2018('muB', "Bohr magneton", 9.2740100783e-24,
'J/T', 0.0000000028e-24, system='si')
alpha = CODATA2018('alpha', "Fine-structure constant", 7.2973525693e-3,
'', 0.0000000011e-3, system='si')
atm = CODATA2018('atm', "Standard atmosphere", 101325,
'Pa', 0.0, system='si')
mu0 = CODATA2018('mu0', "Vacuum magnetic permeability", 1.25663706212e-6,
'N/A2', 0.00000000019e-6, system='si')
sigma_T = CODATA2018('sigma_T', "Thomson scattering cross-section",
6.6524587321e-29, 'm2', 0.0000000060e-29,
system='si')
# Formula taken from NIST wall chart.
# The numerical factor is from a numerical solution to the equation for the
# maximum. See https://en.wikipedia.org/wiki/Wien%27s_displacement_law
b_wien = CODATA2018('b_wien', 'Wien wavelength displacement law constant',
h.value * c.value / (k_B.value * 4.965114231744276), 'm K',
0.0, system='si')
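# Sketch (illustrative): the 4.965114... factor above is the root of
# x == 5 * (1 - exp(-x)); a simple fixed-point iteration reproduces it:
#
#     x = 5.0
#     for _ in range(100):
#         x = 5.0 * (1.0 - math.exp(-x))
#     # x -> 4.965114231744276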
# CGS constants.
# Only constants that cannot be converted directly from S.I. are defined here.
# Because both e and c are exact, these are also exact by definition.
e_esu = EMCODATA2018(e.abbrev, e.name, e.value * c.value * 10.0,
'statC', 0.0, system='esu')
e_emu = EMCODATA2018(e.abbrev, e.name, e.value / 10, 'abC',
0.0, system='emu')
e_gauss = EMCODATA2018(e.abbrev, e.name, e.value * c.value * 10.0,
'Fr', 0.0, system='gauss')
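# Note (illustrative): each object above is a Constant carrying the fields used
# in this module (``abbrev``, ``name``, ``value``, ``uncertainty``, ``system``).
# The CODATA 2018 defining constants are exact, e.g.
#
#     h.value        # 6.62607015e-34
#     h.uncertainty  # 0.0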
|
48107bd491f70eb4083b75ed475ae6437a63dd3166ee9c4b473402d88fff3a5b | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import sys
import copy
from io import StringIO
from collections import OrderedDict
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.io import fits
from astropy.tests.helper import (assert_follows_unicode_guidelines,
ignore_warnings, catch_warnings)
from astropy.utils.data import get_pkg_data_filename
from astropy import table
from astropy import units as u
from astropy.time import Time, TimeDelta
from .conftest import MaskedTable, MIXIN_COLS
try:
with ignore_warnings(DeprecationWarning):
# Ignore DeprecationWarning on pandas import in Python 3.5--see
# https://github.com/astropy/astropy/issues/4380
import pandas # pylint: disable=W0611
except ImportError:
HAS_PANDAS = False
else:
HAS_PANDAS = True
class SetupData:
def _setup(self, table_types):
self._table_type = table_types.Table
self._column_type = table_types.Column
@property
def a(self):
if self._column_type is not None:
if not hasattr(self, '_a'):
self._a = self._column_type(
[1, 2, 3], name='a', format='%d',
meta={'aa': [0, 1, 2, 3, 4]})
return self._a
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(
[4, 5, 6], name='b', format='%d', meta={'aa': 1})
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type([7, 8, 9], 'c')
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type([7, 8, 7], 'd')
return self._d
@property
def obj(self):
if self._column_type is not None:
if not hasattr(self, '_obj'):
self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O')
return self._obj
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b])
return self._t
@pytest.mark.usefixtures('table_types')
class TestSetTableColumn(SetupData):
def test_set_row(self, table_types):
"""Set a row from a tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[1] = (20, 21)
assert t['a'][0] == 1
assert t['a'][1] == 20
assert t['a'][2] == 3
assert t['b'][0] == 4
assert t['b'][1] == 21
assert t['b'][2] == 6
def test_set_row_existing(self, table_types):
"""Set a row from another existing row"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t[0] = t[1]
assert t[0][0] == 2
assert t[0][1] == 5
def test_set_row_fail_1(self, table_types):
"""Set a row from an incorrectly-sized or typed set of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = (20, 21, 22)
with pytest.raises(ValueError):
t[1] = 0
def test_set_row_fail_2(self, table_types):
"""Set a row from an incorrectly-typed tuple of values"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t[1] = ('abc', 'def')
def test_set_new_col_new_table(self, table_types):
"""Create a new column in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = self.a
# Test that the new column name is 'aa' and that the values match
assert np.all(t['aa'] == self.a)
assert t.colnames == ['aa']
def test_set_new_col_new_table_quantity(self, table_types):
"""Create a new column (from a quantity) in empty table using the item access syntax"""
self._setup(table_types)
t = table_types.Table()
t['aa'] = np.array([1, 2, 3]) * u.m
assert np.all(t['aa'] == np.array([1, 2, 3]))
assert t['aa'].unit == u.m
t['bb'] = 3 * u.m
assert np.all(t['bb'] == 3)
assert t['bb'].unit == u.m
def test_set_new_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Add a column
t['bb'] = self.b
assert np.all(t['bb'] == self.b)
assert t.colnames == ['a', 'bb']
assert t['bb'].meta == self.b.meta
assert t['bb'].format == self.b.format
# Add another column
t['c'] = t['a']
assert np.all(t['c'] == t['a'])
assert t.colnames == ['a', 'bb', 'c']
assert t['c'].meta == t['a'].meta
assert t['c'].format == t['a'].format
# Add a multi-dimensional column
t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2))
assert t['d'].shape == (3, 2, 2)
assert t['d'][0, 0, 1] == 1
# Add column from a list
t['e'] = ['hello', 'the', 'world']
assert np.all(t['e'] == np.array(['hello', 'the', 'world']))
# Make sure setting existing column still works
t['e'] = ['world', 'hello', 'the']
assert np.all(t['e'] == np.array(['world', 'hello', 'the']))
# Add a column via broadcasting
t['f'] = 10
assert np.all(t['f'] == 10)
# Add a column from a Quantity
t['g'] = np.array([1, 2, 3]) * u.m
assert np.all(t['g'].data == np.array([1, 2, 3]))
assert t['g'].unit == u.m
# Add a column from a (scalar) Quantity
t['g'] = 3 * u.m
assert np.all(t['g'].data == 3)
assert t['g'].unit == u.m
def test_set_new_unmasked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.Column(name='b', data=[1, 2, 3]) # unmasked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_masked_col_existing_table(self, table_types):
"""Create a new column in an existing table using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a]) # masked or unmasked
b = table.MaskedColumn(name='b', data=[1, 2, 3]) # masked
t['b'] = b
assert np.all(t['b'] == b)
def test_set_new_col_existing_table_fail(self, table_types):
"""Generate failure when creating a new column using the item access syntax"""
self._setup(table_types)
t = table_types.Table([self.a])
# Wrong size
with pytest.raises(ValueError):
t['b'] = [1, 2]
@pytest.mark.usefixtures('table_types')
class TestEmptyData():
def test_1(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, length=100))
assert len(t['a']) == 100
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100))
assert len(t['a']) == 100
def test_3(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int))
assert len(t['a']) == 0
def test_4(self, table_types):
t = table_types.Table() # length is not given
t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4)))
assert len(t['a']) == 0
def test_5(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a')) # dtype is not specified
assert len(t['a']) == 0
def test_add_via_setitem_and_slice(self, table_types):
"""Test related to #3023 where a MaskedColumn is created with name=None
and then gets changed to name='a'. After PR #2790 this test fails
without the #3023 fix."""
t = table_types.Table()
t['a'] = table_types.Column([1, 2, 3])
t2 = t[:]
assert t2.colnames == t.colnames
@pytest.mark.usefixtures('table_types')
class TestNewFromColumns():
def test_simple(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)]
t = table_types.Table(cols)
assert np.all(t['a'].data == np.array([1, 2, 3]))
assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32))
assert type(t['b'][1]) is np.float32
def test_from_np_array(self, table_types):
cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64),
dtype=np.float64),
table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))]
t = table_types.Table(cols)
assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64))
assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32))
assert type(t['a'][1]) is np.float64
assert type(t['b'][1]) is np.float32
def test_size_mismatch(self, table_types):
cols = [table_types.Column(name='a', data=[1, 2, 3]),
table_types.Column(name='b', data=[4, 5, 6, 7])]
with pytest.raises(ValueError):
table_types.Table(cols)
def test_name_none(self, table_types):
"""Column with name=None can init a table whether or not names are supplied"""
c = table_types.Column(data=[1, 2], name='c')
d = table_types.Column(data=[3, 4])
t = table_types.Table([c, d], names=(None, 'd'))
assert t.colnames == ['c', 'd']
t = table_types.Table([c, d])
assert t.colnames == ['c', 'col1']
@pytest.mark.usefixtures('table_types')
class TestReverse():
def test_reverse(self, table_types):
t = table_types.Table([[1, 2, 3],
['a', 'b', 'cc']])
t.reverse()
assert np.all(t['col0'] == np.array([3, 2, 1]))
assert np.all(t['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=False)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2 = table_types.Table(t, copy=True)
assert np.all(t2['col0'] == np.array([3, 2, 1]))
assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
t2.sort('col0')
assert np.all(t2['col0'] == np.array([1, 2, 3]))
assert np.all(t2['col1'] == np.array(['a', 'b', 'cc']))
def test_reverse_big(self, table_types):
x = np.arange(10000)
y = x + 1
t = table_types.Table([x, y], names=('x', 'y'))
t.reverse()
assert np.all(t['x'] == x[::-1])
assert np.all(t['y'] == y[::-1])
@pytest.mark.usefixtures('table_types')
class TestColumnAccess():
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t['a']
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[1, 2, 3]))
assert np.all(t['a'] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t['b'] # column does not exist
def test_itercols(self, table_types):
names = ['a', 'b', 'c']
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures('table_types')
class TestAddLength(SetupData):
def test_right_length(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b)
def test_too_long(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7])) # data too long
def test_too_short(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='b', data=[4, 5])) # data too short
@pytest.mark.usefixtures('table_types')
class TestAddPosition(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 0)
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, 1)
def test_3(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a, -1)
def test_5(self, table_types):
self._setup(table_types)
t = table_types.Table()
with pytest.raises(ValueError):
t.index_column('b')
def test_6(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b)
assert t.columns.keys() == ['a', 'b']
def test_7(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a'))
assert t.columns.keys() == ['b', 'a']
def test_8(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.add_column(self.b, t.index_column('a') + 1)
assert t.columns.keys() == ['a', 'b']
def test_9(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
t.add_column(self.b, t.index_column('a') + 1)
t.add_column(self.c, t.index_column('b'))
assert t.columns.keys() == ['a', 'c', 'b']
def test_10(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
ia = t.index_column('a')
t.add_column(self.b, ia + 1)
t.add_column(self.c, ia)
assert t.columns.keys() == ['c', 'a', 'b']
@pytest.mark.usefixtures('table_types')
class TestAddName(SetupData):
def test_override_name(self, table_types):
self._setup(table_types)
t = table_types.Table()
# Check that we can override the name of the input column in the Table
t.add_column(self.a, name='b')
t.add_column(self.b, name='a')
assert t.columns.keys() == ['b', 'a']
# Check that we did not change the name of the input column
assert self.a.info.name == 'a'
assert self.b.info.name == 'b'
# Now test with an input column from another table
t2 = table_types.Table()
t2.add_column(t['a'], name='c')
assert t2.columns.keys() == ['c']
# Check that we did not change the name of the input column
assert t.columns.keys() == ['b', 'a']
# Check that we can give a name if none was present
col = table_types.Column([1, 2, 3])
t.add_column(col, name='c')
assert t.columns.keys() == ['b', 'a', 'c']
def test_default_name(self, table_types):
t = table_types.Table()
col = table_types.Column([1, 2, 3])
t.add_column(col)
assert t.columns.keys() == ['col0']
@pytest.mark.usefixtures('table_types')
class TestInitFromTable(SetupData):
def test_from_table_cols(self, table_types):
"""Ensure that using cols from an existing table gives
a clean copy.
"""
self._setup(table_types)
t = self.t
cols = t.columns
# Construct Table with cols via Table._new_from_cols
t2a = table_types.Table([cols['a'], cols['b'], self.c])
# Construct with add_column
t2b = table_types.Table()
t2b.add_column(cols['a'])
t2b.add_column(cols['b'])
t2b.add_column(self.c)
t['a'][1] = 20
t['b'][1] = 21
for t2 in [t2a, t2b]:
t2['a'][2] = 10
t2['b'][2] = 11
t2['c'][2] = 12
t2.columns['a'].meta['aa'][3] = 10
assert np.all(t['a'] == np.array([1, 20, 3]))
assert np.all(t['b'] == np.array([4, 21, 6]))
assert np.all(t2['a'] == np.array([1, 2, 10]))
assert np.all(t2['b'] == np.array([4, 5, 11]))
assert np.all(t2['c'] == np.array([7, 8, 12]))
assert t2['a'].name == 'a'
assert t2.columns['a'].meta['aa'][3] == 10
assert t.columns['a'].meta['aa'][3] == 3
@pytest.mark.usefixtures('table_types')
class TestAddColumns(SetupData):
def test_add_columns1(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c])
assert t.colnames == ['a', 'b', 'c']
def test_add_columns2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d])
assert t.colnames == ['a', 'b', 'c', 'd']
assert np.all(t['c'] == np.array([7, 8, 9]))
def test_add_columns3(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[1, 0])
assert t.colnames == ['d', 'a', 'c', 'b']
def test_add_columns4(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[0, 0])
assert t.colnames == ['c', 'd', 'a', 'b']
def test_add_columns5(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_columns([self.c, self.d], indexes=[2, 2])
assert t.colnames == ['a', 'b', 'c', 'd']
def test_add_columns6(self, table_types):
"""Check that we can override column names."""
self._setup(table_types)
t = table_types.Table()
t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a'])
assert t.colnames == ['b', 'c', 'a']
def test_add_columns7(self, table_types):
"""Check that default names are used when appropriate."""
t = table_types.Table()
col0 = table_types.Column([1, 2, 3])
col1 = table_types.Column([4, 5, 3])
t.add_columns([col0, col1])
assert t.colnames == ['col0', 'col1']
def test_add_duplicate_column(self, table_types):
self._setup(table_types)
t = table_types.Table()
t.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(table_types.Column(name='a', data=[0, 1, 2]))
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
t.add_column(self.b)
t.add_column(self.c)
assert t.colnames == ['a', 'a_1', 'b', 'c']
t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2']
# test adding column from a separate Table
t1 = table_types.Table()
t1.add_column(self.a)
with pytest.raises(ValueError):
t.add_column(t1['a'])
t.add_column(t1['a'], rename_duplicate=True)
t1['a'][0] = 100 # Change original column
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3']
assert t1.colnames == ['a']
# Check new column didn't change (since name conflict forced a copy)
assert t['a_3'][0] == self.a[0]
# Check that rename_duplicate=True is ok if there are no duplicates
t.add_column(table_types.Column(name='q', data=[0, 1, 2]),
rename_duplicate=True)
assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3', 'q']
def test_add_duplicate_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
with pytest.raises(ValueError):
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])])
t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
table_types.Column(name='b', data=[0, 1, 2])],
rename_duplicate=True)
t.add_column(self.d)
assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd']
@pytest.mark.usefixtures('table_types')
class TestAddRow(SetupData):
@property
def b(self):
if self._column_type is not None:
if not hasattr(self, '_b'):
self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2])
return self._b
@property
def c(self):
if self._column_type is not None:
if not hasattr(self, '_c'):
self._c = self._column_type(name='c', data=['7', '8', '9'])
return self._c
@property
def d(self):
if self._column_type is not None:
if not hasattr(self, '_d'):
self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]])
return self._d
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a, self.b, self.c])
return self._t
def test_add_none_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O'))
t.add_row()
assert np.all(t['a'][0] == [0, 0])
assert t['b'][0] == ''
assert t['c'][0] == 0
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['c'][1] == 0
def test_add_stuff_to_empty_table(self, table_types):
self._setup(table_types)
t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O'))
t.add_row([[1, 2], 'hello', 'world'])
assert np.all(t['a'][0] == [1, 2])
assert t['b'][0] == 'hello'
assert t['obj'][0] == 'world'
# Make sure it is not repeating last row but instead
# adding zeros (as documented)
t.add_row()
assert np.all(t['a'][1] == [0, 0])
assert t['b'][1] == ''
assert t['obj'][1] == 0
def test_add_table_row(self, table_types):
self._setup(table_types)
t = self.t
t['d'] = self.d
t2 = table_types.Table([self.a, self.b, self.c, self.d])
t.add_row(t2[0])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
def test_add_table_row_obj(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.obj])
t.add_row([1, 4.0, [10]])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 1]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O'))
def test_add_qtable_row_multidimensional(self):
q = [[1, 2], [3, 4]] * u.m
qt = table.QTable([q])
qt.add_row(([5, 6] * u.km,))
assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
def test_add_with_tuple(self, table_types):
self._setup(table_types)
t = self.t
t.add_row((4, 7.2, '1'))
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
def test_add_with_list(self, table_types):
self._setup(table_types)
t = self.t
t.add_row([4, 7.2, '10'])
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
assert np.all(t['c'] == np.array(['7', '8', '9', '1']))  # '10' is truncated to '1' by the 1-character string dtype of column 'c'
def test_add_with_dict(self, table_types):
self._setup(table_types)
t = self.t
t.add_row({'a': 4, 'b': 7.2})
assert len(t) == 4
assert np.all(t['a'] == np.array([1, 2, 3, 4]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
if t.masked:
assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
else:
assert np.all(t['c'] == np.array(['7', '8', '9', '']))
def test_add_with_none(self, table_types):
self._setup(table_types)
t = self.t
t.add_row()
assert len(t) == 4
assert np.all(t['a'].data == np.array([1, 2, 3, 0]))
assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0]))
assert np.all(t['c'].data == np.array(['7', '8', '9', '']))
def test_add_missing_column(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row({'bad_column': 1})
def test_wrong_size_tuple(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(ValueError):
t.add_row((1, 2))
def test_wrong_vals_type(self, table_types):
self._setup(table_types)
t = self.t
with pytest.raises(TypeError):
t.add_row(1)
def test_add_row_failures(self, table_types):
self._setup(table_types)
t = self.t
t_copy = table_types.Table(t, copy=True)
# Wrong number of columns
try:
t.add_row([1, 2, 3, 4])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
# Wrong data type
try:
t.add_row(['one', 2, 3])
except ValueError:
pass
assert len(t) == 3
assert np.all(t.as_array() == t_copy.as_array())
def test_insert_table_row(self, table_types):
"""
Light testing of Table.insert_row() method. The deep testing is done via
the add_row() tests which calls insert_row(index=len(self), ...), so
here just test that the added index parameter is handled correctly.
"""
self._setup(table_types)
row = (10, 40.0, 'x', [10, 20])
for index in range(-3, 4):
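            # Expected row ordering: the new row (labelled 3) inserted at
            # position `index` among the original rows 0, 1, 2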
indices = np.insert(np.arange(3), index, 3)
t = table_types.Table([self.a, self.b, self.c, self.d])
t2 = t.copy()
t.add_row(row) # By now we know this works
t2.insert_row(index, row)
for name in t.colnames:
if t[name].dtype.kind == 'f':
assert np.allclose(t[name][indices], t2[name])
else:
assert np.all(t[name][indices] == t2[name])
for index in (-4, 4):
t = table_types.Table([self.a, self.b, self.c, self.d])
with pytest.raises(IndexError):
t.insert_row(index, row)
@pytest.mark.usefixtures('table_types')
class TestTableColumn(SetupData):
def test_column_view(self, table_types):
self._setup(table_types)
t = self.t
a = t.columns['a']
a[2] = 10
assert t['a'][2] == 10
@pytest.mark.usefixtures('table_types')
class TestArrayColumns(SetupData):
def test_1d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, ), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2)
assert t['b'][0].shape == (2, )
def test_2d(self, table_types):
self._setup(table_types)
b = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3)
t = table_types.Table([self.a])
t.add_column(b)
assert t['b'].shape == (3, 2, 4)
assert t['b'][0].shape == (2, 4)
def test_3d(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
b = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3)
t.add_column(b)
assert t['b'].shape == (3, 2, 4, 6)
assert t['b'][0].shape == (2, 4, 6)
@pytest.mark.usefixtures('table_types')
class TestRemove(SetupData):
@property
def t(self):
if self._table_type is not None:
if not hasattr(self, '_t'):
self._t = self._table_type([self.a])
return self._t
@property
def t2(self):
if self._table_type is not None:
if not hasattr(self, '_t2'):
self._t2 = self._table_type([self.a, self.b, self.c])
return self._t2
def test_1(self, table_types):
self._setup(table_types)
self.t.remove_columns('a')
assert self.t.columns.keys() == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray)
assert (self.t == None).size == 0
def test_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_columns('a')
assert self.t.columns.keys() == ['b']
assert self.t.dtype.names == ('b',)
assert np.all(self.t['b'] == np.array([4, 5, 6]))
def test_3(self, table_types):
"""Check remove_columns works for a single column with a name of
more than one character. Regression test against #2699"""
self._setup(table_types)
self.t['new_column'] = self.t['a']
assert 'new_column' in self.t.columns.keys()
self.t.remove_columns('new_column')
assert 'new_column' not in self.t.columns.keys()
def test_remove_nonexistent_row(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
self.t.remove_row(4)
def test_remove_row_0(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(0)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['b'] == np.array([5, 6]))
def test_remove_row_1(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(1)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
def test_remove_row_2(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_row(2)
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([7, 8]))
def test_remove_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows(slice(0, 2, 1))
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_remove_row_list(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
self.t.remove_rows([0, 2])
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_remove_row_preserves_meta(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.remove_rows([0, 2])
assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]}
        assert self.t.dtype == np.dtype([('a', 'int'),
                                         ('b', 'int')])
def test_delitem_row(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[1]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['a'] == np.array([1, 3]))
@pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
def test_delitem_row_list(self, table_types, idx):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[idx]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([8]))
def test_delitem_row_slice(self, table_types):
self._setup(table_types)
self.t.add_column(self.b)
self.t.add_column(self.c)
del self.t[0:2]
assert self.t.colnames == ['a', 'b', 'c']
assert np.all(self.t['c'] == np.array([9]))
def test_delitem_row_fail(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[4]
def test_delitem_row_float(self, table_types):
self._setup(table_types)
with pytest.raises(IndexError):
del self.t[1.]
def test_delitem1(self, table_types):
self._setup(table_types)
del self.t['a']
assert self.t.columns.keys() == []
assert self.t.as_array().size == 0
# Regression test for gh-8640
assert not self.t
assert isinstance(self.t == None, np.ndarray)
assert (self.t == None).size == 0
def test_delitem2(self, table_types):
self._setup(table_types)
del self.t2['b']
assert self.t2.colnames == ['a', 'c']
def test_delitems(self, table_types):
self._setup(table_types)
del self.t2['a', 'b']
assert self.t2.colnames == ['c']
def test_delitem_fail(self, table_types):
self._setup(table_types)
with pytest.raises(KeyError):
del self.t['d']
@pytest.mark.usefixtures('table_types')
class TestKeep(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns([])
assert t.columns.keys() == []
assert t.as_array().size == 0
# Regression test for gh-8640
assert not t
assert isinstance(t == None, np.ndarray)
assert (t == None).size == 0
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.keep_columns('b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestRename(SetupData):
def test_1(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a])
t.rename_column('a', 'b')
assert t.columns.keys() == ['b']
assert t.dtype.names == ('b',)
assert np.all(t['b'] == np.array([1, 2, 3]))
def test_2(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.rename_column('a', 'c')
t.rename_column('b', 'a')
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
if t.masked:
assert t.mask.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_by_attr(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t['a'].name = 'c'
t['b'].name = 'a'
assert t.columns.keys() == ['c', 'a']
assert t.dtype.names == ('c', 'a')
assert np.all(t['c'] == np.array([1, 2, 3]))
assert np.all(t['a'] == np.array([4, 5, 6]))
def test_rename_columns(self, table_types):
self._setup(table_types)
t = table_types.Table([self.a, self.b, self.c])
t.rename_columns(('a', 'b', 'c'), ('aa', 'bb', 'cc'))
assert t.colnames == ['aa', 'bb', 'cc']
t.rename_columns(['bb', 'cc'], ['b', 'c'])
assert t.colnames == ['aa', 'b', 'c']
with pytest.raises(TypeError):
t.rename_columns(('aa'), ['a'])
with pytest.raises(ValueError):
t.rename_columns(['a'], ['b', 'c'])
@pytest.mark.usefixtures('table_types')
class TestSort():
def test_single(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a')
assert np.all(t['a'] == np.array([1, 2, 3]))
assert np.all(t['b'] == np.array([5, 6, 4]))
assert np.all(t['c'] == np.array([[3, 4],
[1, 2],
[4, 5]]))
t.sort('b')
assert np.all(t['a'] == np.array([3, 1, 2]))
assert np.all(t['b'] == np.array([4, 5, 6]))
assert np.all(t['c'] == np.array([[4, 5],
[3, 4],
[1, 2]]))
def test_single_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
t.sort('a', reverse=True)
assert np.all(t['a'] == np.array([3, 2, 1]))
assert np.all(t['b'] == np.array([4, 6, 5]))
assert np.all(t['c'] == np.array([[4, 5],
[1, 2],
[3, 4]]))
t.sort('b', reverse=True)
assert np.all(t['a'] == np.array([2, 1, 3]))
assert np.all(t['b'] == np.array([6, 5, 4]))
assert np.all(t['c'] == np.array([[1, 2],
[3, 4],
[4, 5]]))
def test_single_big(self, table_types):
"""Sort a big-ish table with a non-trivial sort order"""
x = np.arange(10000)
y = np.sin(x)
t = table_types.Table([x, y], names=('x', 'y'))
t.sort('y')
idx = np.argsort(y)
assert np.all(t['x'] == x[idx])
assert np.all(t['y'] == y[idx])
@pytest.mark.parametrize('reverse', [True, False])
def test_empty_reverse(self, table_types, reverse):
t = table_types.Table([[], []], dtype=['f4', 'U1'])
t.sort('col1', reverse=reverse)
def test_multiple(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'])
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
t.sort(['b', 'a'])
assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2]))
assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6]))
t.sort(('a', 'b'))
assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
def test_multiple_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
t.sort(['a', 'b'], reverse=True)
assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
t.sort(['b', 'a'], reverse=True)
assert np.all(t['a'] == np.array([2, 3, 1, 3, 1, 2]))
assert np.all(t['b'] == np.array([6, 5, 5, 4, 4, 3]))
t.sort(('a', 'b'), reverse=True)
assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
def test_multiple_with_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])])
assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_multiple_with_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
t.sort(['name', 'firstname'])
assert np.all([t['firstname'] == np.array(
[str(x) for x in ["John", "Jo", "Max"]])])
assert np.all([t['name'] == np.array(
[str(x) for x in ["Jackson", "Miller", "Miller"]])])
assert np.all([t['tel'] == np.array([19, 15, 12])])
def test_argsort(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort() == t.as_array().argsort())
i0 = t.argsort('a')
i1 = t.as_array().argsort(order=['a'])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'])
i1 = t.as_array().argsort(order=['a', 'b'])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
def test_argsort_reverse(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
assert np.all(t.argsort(reverse=True) == np.array([4, 2, 0, 3, 1, 5]))
i0 = t.argsort('a', reverse=True)
i1 = np.array([4, 2, 3, 0, 5, 1])
assert np.all(t['a'][i0] == t['a'][i1])
i0 = t.argsort(['a', 'b'], reverse=True)
i1 = np.array([4, 2, 0, 3, 1, 5])
assert np.all(t['a'][i0] == t['a'][i1])
assert np.all(t['b'][i0] == t['b'][i1])
def test_argsort_bytes(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_argsort_unicode(self, table_types):
# Before Numpy 1.6.2, sorting with multiple column names
# failed when a unicode column was present.
t = table_types.Table()
t.add_column(table_types.Column(
name='firstname',
data=[str(x) for x in ["Max", "Jo", "John"]]))
t.add_column(table_types.Column(
name='name',
data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
def test_rebuild_column_view_then_rename(self, table_types):
"""
Issue #2039 where renaming fails after any method that calls
_rebuild_table_column_view (this includes sort and add_row).
"""
t = table_types.Table([[1]], names=('a',))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.add_row((2,))
assert t.colnames == ['a']
assert t.dtype.names == ('a',)
t.rename_column('a', 'b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.sort('b')
assert t.colnames == ['b']
assert t.dtype.names == ('b',)
t.rename_column('b', 'c')
assert t.colnames == ['c']
assert t.dtype.names == ('c',)
@pytest.mark.usefixtures('table_types')
class TestIterator():
def test_iterator(self, table_types):
        d = np.array([(2, 1),
                      (3, 6),
                      (4, 5)], dtype=[('a', 'i4'), ('b', 'i4')])
t = table_types.Table(d)
if t.masked:
with pytest.raises(ValueError):
t[0] == d[0]
else:
for row, np_row in zip(t, d):
assert np.all(row == np_row)
@pytest.mark.usefixtures('table_types')
class TestSetMeta():
def test_set_meta(self, table_types):
d = table_types.Table(names=('a', 'b'))
d.meta['a'] = 1
d.meta['b'] = 1
d.meta['c'] = 1
d.meta['d'] = 1
assert list(d.meta.keys()) == ['a', 'b', 'c', 'd']
@pytest.mark.usefixtures('table_types')
class TestConvertNumpyArray():
def test_convert_numpy_array(self, table_types):
d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b'))
np_data = np.array(d)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert np_data is not d.as_array()
assert d.colnames == list(np_data.dtype.names)
np_data = np.array(d, copy=False)
if table_types.Table is not MaskedTable:
assert np.all(np_data == d.as_array())
assert d.colnames == list(np_data.dtype.names)
with pytest.raises(ValueError):
            np_data = np.array(d, dtype=[('c', 'i8'), ('d', 'i8')])
def test_as_array_byteswap(self, table_types):
"""Test for https://github.com/astropy/astropy/pull/4080"""
byte_orders = ('>', '<')
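        # Boolean indexing selects '<' on little-endian machines and '>' on big-endian ones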
native_order = byte_orders[sys.byteorder == 'little']
for order in byte_orders:
col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8')
t = table_types.Table([col])
arr = t.as_array()
assert arr['a'].dtype.byteorder in (native_order, '=')
arr = t.as_array(keep_byteorder=True)
if order == native_order:
assert arr['a'].dtype.byteorder in (order, '=')
else:
assert arr['a'].dtype.byteorder == order
def test_byteswap_fits_array(self, table_types):
"""
Test for https://github.com/astropy/astropy/pull/4080, demonstrating
that FITS tables are converted to native byte order.
"""
non_native_order = ('>', '<')[sys.byteorder != 'little']
filename = get_pkg_data_filename('data/tb.fits',
'astropy.io.fits.tests')
t = table_types.Table.read(filename)
arr = t.as_array()
for idx in range(len(arr.dtype)):
assert arr.dtype[idx].byteorder != non_native_order
with fits.open(filename, character_as_bytes=True) as hdul:
data = hdul[1].data
for colname in data.columns.names:
assert np.all(data[colname] == arr[colname])
arr2 = t.as_array(keep_byteorder=True)
for colname in data.columns.names:
assert (data[colname].dtype.byteorder ==
arr2[colname].dtype.byteorder)
def _assert_copies(t, t2, deep=True):
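    """Assert t2 is a copy of t: same colnames, equal data and meta, and
    column memory shared only when deep is False."""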
assert t.colnames == t2.colnames
np.testing.assert_array_equal(t.as_array(), t2.as_array())
assert t.meta == t2.meta
for col, col2 in zip(t.columns.values(), t2.columns.values()):
if deep:
assert not np.may_share_memory(col, col2)
else:
assert np.may_share_memory(col, col2)
def test_copy():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_masked():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True,
meta={'name': 'test'})
    t['x'].mask = [True, False, True]
t2 = t.copy()
_assert_copies(t, t2)
def test_copy_protocol():
t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
_assert_copies(t, t2, deep=False)
_assert_copies(t, t3)
def test_disallow_inequality_comparisons():
"""
Regression test for #828 - disallow comparison operators on whole Table
"""
t = table.Table()
with pytest.raises(TypeError):
t > 2
with pytest.raises(TypeError):
t < 1.1
with pytest.raises(TypeError):
t >= 5.5
with pytest.raises(TypeError):
t <= -1.1
def test_equality():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
def test_equality_masked():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# Make into masked table
t = table.Table(t, masked=True)
# All rows are equal
assert np.all(t == t)
# Assert no rows are different
assert not np.any(t != t)
# Check equality result for a given row
assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
# Check inequality result for a given row
assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
# In the above cases, Row.__eq__ gets called, but now need to make sure
# Table.__eq__ also gets called.
assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that masking a value causes the row to differ
t.mask['a'][0] = True
assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
# Check that comparing to a structured array works
assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
@pytest.mark.xfail
def test_equality_masked_bug():
"""
This highlights a Numpy bug. Once it works, it can be moved into the
test_equality_masked test. Related Numpy bug report:
https://github.com/numpy/numpy/issues/3840
"""
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
t = table.Table(t, masked=True)
t2 = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 3 b 6.0 2',
' 2 a 4.0 3',
' 0 a 1.0 4',
' 1 b 3.0 5',
' 1 c 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaTable(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_content():
# If we don't have unicode literals then return
if isinstance('', bytes):
return
# Define unicode literals
string_a = 'астрономическая питона'
string_b = 'миллиарды световых лет'
a = table.Table(
[[string_a, 2],
[string_b, 3]],
names=('a', 'b'))
assert string_a in str(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode('utf-8') in bytes(a)
def test_unicode_policy():
t = table.Table.read([' a b c d',
' 2 c 7.0 0',
' 2 b 5.0 1',
' 2 b 6.0 2',
' 2 a 4.0 3',
' 0 a 0.0 4',
' 1 b 3.0 5',
' 1 a 2.0 6',
' 1 a 1.0 7',
], format='ascii')
assert_follows_unicode_guidelines(t)
@pytest.mark.parametrize('uni', ['питона', 'ascii'])
def test_unicode_bytestring_conversion(table_types, uni):
"""
    Test converting columns to all unicode or all bytestring. This
makes two columns, one which is unicode (str in Py3) and one which
is bytes (UTF-8 encoded). There are two code paths in the conversions,
a faster one where the data are actually ASCII and a slower one where
UTF-8 conversion is required. This tests both via the ``uni`` param.
"""
byt = uni.encode('utf-8')
t = table_types.Table([[byt], [uni], [1]], dtype=('S', 'U', 'i'))
assert t['col0'].dtype.kind == 'S'
assert t['col1'].dtype.kind == 'U'
assert t['col2'].dtype.kind == 'i'
t['col0'].description = 'col0'
t['col1'].description = 'col1'
t['col0'].meta['val'] = 'val0'
t['col1'].meta['val'] = 'val1'
# Unicode to bytestring
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1['col0'].dtype.kind == 'S'
assert t1['col1'].dtype.kind == 'S'
assert t1['col2'].dtype.kind == 'i'
# Meta made it through
assert t1['col0'].description == 'col0'
assert t1['col1'].description == 'col1'
assert t1['col0'].meta['val'] == 'val0'
assert t1['col1'].meta['val'] == 'val1'
# Need to de-fang the automatic unicode sandwiching of Table
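    # (np.array(col) exposes the raw bytes, bypassing the str conversion done on item access)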
assert np.array(t1['col0'])[0] == byt
assert np.array(t1['col1'])[0] == byt
assert np.array(t1['col2'])[0] == 1
# Bytestring to unicode
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1['col0'].dtype.kind == 'U'
assert t1['col1'].dtype.kind == 'U'
assert t1['col2'].dtype.kind == 'i'
# Meta made it through
assert t1['col0'].description == 'col0'
assert t1['col1'].description == 'col1'
assert t1['col0'].meta['val'] == 'val0'
assert t1['col1'].meta['val'] == 'val1'
# No need to de-fang the automatic unicode sandwiching of Table here, but
# do just for consistency to prove things are working.
assert np.array(t1['col0'])[0] == uni
assert np.array(t1['col1'])[0] == uni
assert np.array(t1['col2'])[0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({'a': [1, 2, 3]})
the_id = id(t)
assert t['a'].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=['a'])
out = []
for r1 in t:
for r2 in t:
out.append((r1['a'], r2['a']))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.skipif('not HAS_PANDAS')
class TestPandas:
def test_simple(self):
t = table.Table()
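        # One column per combination of byte order ('<', '>'), kind ('f', 'i') and item size, e.g. '<i4'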
for endian in ['<', '>']:
for kind in ['f', 'i']:
for byte in ['2', '4', '8']:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x
t['u'] = ['a', 'b', 'c']
t['s'] = ['a', 'b', 'c']
d = t.to_pandas()
for column in t.columns:
if column == 'u':
assert np.all(t['u'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == 's':
assert np.all(t['s'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.byteorder in ('=', '|'):
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
d[['<i4', '>i4']]
d[['<f4', '>f4']]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ('u', 's'):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.byteorder in ('=', '|'):
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
def test_2d(self):
t = table.Table()
t['a'] = [1, 2, 3]
t['b'] = np.ones((3, 2))
with pytest.raises(ValueError) as exc:
t.to_pandas()
assert (exc.value.args[0] ==
"Cannot convert a table with multi-dimensional columns "
"to a pandas DataFrame. Offending columns are: ['b']")
def test_mixin_pandas(self):
t = table.QTable()
for name in sorted(MIXIN_COLS):
if name != 'ndarray':
t[name] = MIXIN_COLS[name]
t['dt'] = TimeDelta([0, 2, 4, 6], format='sec')
tp = t.to_pandas()
t2 = table.Table.from_pandas(tp)
assert np.allclose(t2['quantity'], [0, 1, 2, 3])
assert np.allclose(t2['longitude'], [0., 1., 5., 6.])
assert np.allclose(t2['latitude'], [5., 6., 10., 11.])
assert np.allclose(t2['skycoord.ra'], [0, 1, 2, 3])
assert np.allclose(t2['skycoord.dec'], [0, 1, 2, 3])
assert np.allclose(t2['arraywrap'], [0, 1, 2, 3])
assert np.allclose(t2['earthlocation.y'], [0, 110708, 547501, 654527], rtol=0, atol=1)
# For pandas, Time, TimeDelta are the mixins that round-trip the class
assert isinstance(t2['time'], Time)
assert np.allclose(t2['time'].jyear, [2000, 2001, 2002, 2003])
assert np.all(t2['time'].isot == ['2000-01-01T12:00:00.000',
'2000-12-31T18:00:00.000',
'2002-01-01T00:00:00.000',
'2003-01-01T06:00:00.000'])
assert t2['time'].format == 'isot'
# TimeDelta
assert isinstance(t2['dt'], TimeDelta)
assert np.allclose(t2['dt'].value, [0, 2, 4, 6])
assert t2['dt'].format == 'sec'
def test_to_pandas_index(self):
import pandas as pd
row_index = pd.RangeIndex(0, 2, 1)
tm_index = pd.DatetimeIndex(['1998-01-01', '2002-01-01'],
dtype='datetime64[ns]',
name='tm', freq=None)
tm = Time([1998, 2002], format='jyear')
x = [1, 2]
t = table.QTable([tm, x], names=['tm', 'x'])
tp = t.to_pandas()
assert np.all(tp.index == row_index)
tp = t.to_pandas(index='tm')
assert np.all(tp.index == tm_index)
t.add_index('tm')
tp = t.to_pandas()
assert np.all(tp.index == tm_index)
# Make sure writing to pandas didn't hack the original table
assert t['tm'].info.indices
tp = t.to_pandas(index=True)
assert np.all(tp.index == tm_index)
tp = t.to_pandas(index=False)
assert np.all(tp.index == row_index)
with pytest.raises(ValueError) as err:
t.to_pandas(index='not a column')
assert 'index must be None, False' in str(err)
def test_mixin_pandas_masked(self):
tm = Time([1, 2, 3], format='cxcsec')
dt = TimeDelta([1, 2, 3], format='sec')
tm[1] = np.ma.masked
dt[1] = np.ma.masked
t = table.QTable([tm, dt], names=['tm', 'dt'])
tp = t.to_pandas()
assert np.all(tp['tm'].isnull() == [False, True, False])
assert np.all(tp['dt'].isnull() == [False, True, False])
t2 = table.Table.from_pandas(tp)
assert np.all(t2['tm'].mask == tm.mask)
assert np.ma.allclose(t2['tm'].jd, tm.jd, rtol=1e-14, atol=1e-14)
assert np.all(t2['dt'].mask == dt.mask)
assert np.ma.allclose(t2['dt'].jd, dt.jd, rtol=1e-14, atol=1e-14)
def test_from_pandas_index(self):
tm = Time([1998, 2002], format='jyear')
x = [1, 2]
t = table.Table([tm, x], names=['tm', 'x'])
tp = t.to_pandas(index='tm')
t2 = table.Table.from_pandas(tp)
assert t2.colnames == ['x']
t2 = table.Table.from_pandas(tp, index=True)
assert t2.colnames == ['tm', 'x']
assert np.allclose(t2['tm'].jyear, tm.jyear)
def test_masking(self):
t = table.Table(masked=True)
t['a'] = [1, 2, 3]
t['a'].mask = [True, False, True]
t['b'] = [1., 2., 3.]
t['b'].mask = [False, False, True]
t['u'] = ['a', 'b', 'c']
t['u'].mask = [False, True, False]
t['s'] = ['a', 'b', 'c']
t['s'].mask = [False, True, False]
# https://github.com/astropy/astropy/issues/7741
t['Source'] = [2584290278794471936, 2584290038276303744,
2584288728310999296]
t['Source'].mask = [False, False, False]
d = t.to_pandas()
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
assert np.all(column.mask == t2[name].mask)
# Masked integer type comes back as float. Nothing we can do about this.
if column.dtype.kind == 'i':
if np.any(column.mask):
assert t2[name].dtype.kind == 'f'
else:
assert t2[name].dtype.kind == 'i'
assert_array_equal(column.data,
t2[name].data.astype(column.dtype))
else:
if column.dtype.byteorder in ('=', '|'):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
@pytest.mark.usefixtures('table_types')
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError):
t.columns['a'] = [1, 2, 3]
with pytest.raises(ValueError):
t.replace_column('not there', [1, 2, 3])
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t['a']
tb = t['b']
vals = [1.2, 3.4, 5.6]
for col in (vals,
table_types.Column(vals),
table_types.Column(vals, name='a'),
table_types.Column(vals, name='b')):
t.replace_column('a', col)
assert np.all(t['a'] == vals)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].meta == {}
assert t['a'].format is None
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index('a')
with pytest.raises(ValueError) as err:
t.replace_column('a', [1, 2, 3])
assert err.value.args[0] == 'cannot replace a table index column'
class Test__Astropy_Table__():
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable:
def __init__(self):
self.columns = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9] * u.m]
self.names = ['a', 'b', 'c']
self.meta = OrderedDict([('a', 1), ('b', 2)])
def __astropy_table__(self, cls, copy, **kwargs):
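            """Return an instance of ``cls`` built from this object's columns;
            extra kwargs passed to the Table initializer become the meta
            (falling back to self.meta when there are none)."""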
a, b, c = self.columns
c.info.name = 'c'
cols = [table.Column(a, name='a'),
table.MaskedColumn(b, name='b'),
c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.MaskedColumn
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta='extra!')
assert t.colnames == ['a', 'b', 'c']
assert t.meta == {'extra_meta': 'extra!'}
assert np.all(t['a'] == st.columns[0])
assert np.all(t['b'] == st.columns[1])
vals = t['c'].value if table_cls is table.QTable else t['c']
assert np.all(st.columns[2].value == vals)
assert isinstance(t['a'], table.MaskedColumn)
assert isinstance(t['b'], table.MaskedColumn)
assert isinstance(t['c'], col_c_class)
assert t['c'].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t['a'][0] = 10
assert st.columns[0][0] == 1 if cpy else 10
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ['a', 'b', 'c']
meta = OrderedDict([('c', 3)])
t = table.Table(st, dtype=dtypes, names=names, meta=meta)
assert t.colnames == names
assert all(col.dtype.type is dtype
for col, dtype in zip(t.columns.values(), dtypes))
        # The supplied meta overrides the existing meta. Changed in astropy 3.2.
assert t.meta != st.meta
assert t.meta == meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta='extra!')
assert '__init__() got unexpected keyword argument' in str(err)
def test_table_meta_copy():
"""
Test no copy vs light (key) copy vs deep copy of table meta for different
situations. #8404.
"""
t = table.Table([[1]])
meta = {1: [1, 2]}
# Assigning meta directly implies using direct object reference
t.meta = meta
assert t.meta is meta
# Table slice implies key copy, so values are unchanged
t2 = t[:]
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the list same object
# Table init with copy=False implies key copy
t2 = table.Table(t, copy=False)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the same list object
# Table init with copy=True implies deep copy
t2 = table.Table(t, copy=True)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is not t.meta[1] # Value is NOT the same list object
def test_table_meta_copy_with_meta_arg():
"""
Test no copy vs light (key) copy vs deep copy of table meta when meta is
supplied as a table init argument. #8404.
"""
meta = {1: [1, 2]}
meta2 = {2: [3, 4]}
t = table.Table([[1]], meta=meta, copy=False)
assert t.meta is meta
t = table.Table([[1]], meta=meta) # default copy=True
assert t.meta is not meta
assert t.meta == meta
# Test initializing from existing table with meta with copy=False
t2 = table.Table(t, meta=meta2, copy=False)
assert t2.meta is meta2
assert t2.meta != t.meta # Change behavior in #8404
# Test initializing from existing table with meta with default copy=True
t2 = table.Table(t, meta=meta2)
assert t2.meta is not meta2
assert t2.meta != t.meta # Change behavior in #8404
# Table init with copy=True and empty dict meta gets that empty dict
t2 = table.Table(t, copy=True, meta={})
assert t2.meta == {}
# Table init with copy=True and kwarg meta=None gets the original table dict.
# This is a somewhat ambiguous case because it could be interpreted as the
# user wanting NO meta set on the output. This could be implemented by inspecting
# call args.
t2 = table.Table(t, copy=True, meta=None)
assert t2.meta == t.meta
# Test initializing empty table with meta with copy=False
t = table.Table(meta=meta, copy=False)
assert t.meta is meta
assert t.meta[1] is meta[1]
# Test initializing empty table with meta with default copy=True (deepcopy meta)
t = table.Table(meta=meta)
assert t.meta is not meta
assert t.meta == meta
assert t.meta[1] is not meta[1]
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=['a', 'b'])
ta = t['a']
tb = t['b']
ta.info.meta = {'aa': [0, 1, 2, 3, 4]}
ta.info.format = '%f'
t.replace_column('a', a.to('cm'))
assert np.all(t['a'] == ta)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].info.meta is None
assert t['a'].info.format is None
def test_replace_update_column_via_setitem():
"""
Test table update like ``t['a'] = value``. This leverages off the
already well-tested ``replace_column`` and in-place update
``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=['a', 'b'])
assert isinstance(t['a'], u.Quantity)
# Inplace update
ta = t['a']
t['a'] = 5 * u.m
assert np.all(t['a'] == [5, 5] * u.m)
assert t['a'] is ta
# Replace
t['a'] = [5, 6]
assert np.all(t['a'] == [5, 6])
assert isinstance(t['a'], table.Column)
assert t['a'] is not ta
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
t['a'] = [10, 20, 30] # replace column
assert len(w) == 0
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t2 = t[:2]
t2['a'] = 0 # in-place slice update
assert np.all(t['a'] == [0, 0, 3])
assert len(w) == 0
t2['a'] = [10, 20] # replace slice
assert len(w) == 1
assert "replaced column 'a' which looks like an array slice" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and column attributes ['unit']" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a'] # Generate an extra reference to original column
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and the number of references" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings', ['always']):
t['a'] = 0 # in-place slice update
assert len(w) == 0
from inspect import currentframe, getframeinfo
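            # Record the current source line; the warning is expected to point
            # at the line that follows (frameinfo.lineno + 1, checked below)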
frameinfo = getframeinfo(currentframe())
t['a'] = [10, 20, 30] # replace column
assert len(w) == 1
assert "replaced column 'a'" == str(w[0].message)
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert w[0].category is table.TableReplaceWarning
assert 'test_table' in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a']
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_inplace', True):
with table.conf.set_temp('replace_warnings',
['always', 'refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
assert ta is t['a']
t['a'] = [10, 20, 30] # normally replaces column, but not now
assert len(w) == 0
assert ta is t['a']
assert np.all(t['a'] == [10, 20, 30])
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b'))
t.add_index('a')
original_key = t.primary_key
# can't test if tuples are equal, so just check content
    assert original_key[0] == 'a'
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
# test whether the reference is the same in the following
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
'''Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails.'''
t1 = table.QTable([["A"]], names="B")
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
def test_create_table_from_final_row():
"""Regression test for issue #8422: passing the last row of a table into
Table should return a new table containing that row."""
t1 = table.Table([(1, 2)], names=['col'])
row = t1[-1]
t2 = table.Table(row)['col']
assert t2[0] == 2
def test_key_values_in_as_array():
    # Test column slicing using the ``names`` argument of Table.as_array()
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
# Creating a table with three columns
t1 = table.Table(rows=data_rows, names=('a', 'b', 'c'),
meta={'name': 'first table'},
dtype=('i4', 'f8', 'S1'))
    # Values of the sliced columns a and b are stored in a numpy structured array
a = np.array([(1, 2.), (4, 5.), (5, 8.2)],
dtype=[('a', '<i4'), ('b', '<f8')])
    # Values of the sliced column c are stored in a numpy structured array
b = np.array([(b'x',), (b'y',), (b'z',)], dtype=[('c', 'S1')])
# Comparing initialised array with sliced array using Table.as_array()
assert np.array_equal(a, t1.as_array(names=['a', 'b']))
assert np.array_equal(b, t1.as_array(names=['c']))
def test_tolist():
t = table.Table([[1, 2, 3], [1.1, 2.2, 3.3], [b'foo', b'bar', b'hello']],
names=('a', 'b', 'c'))
assert t['a'].tolist() == [1, 2, 3]
assert_array_equal(t['b'].tolist(), [1.1, 2.2, 3.3])
assert t['c'].tolist() == ['foo', 'bar', 'hello']
assert isinstance(t['a'].tolist()[0], int)
assert isinstance(t['b'].tolist()[0], float)
assert isinstance(t['c'].tolist()[0], str)
t = table.Table([[[1, 2], [3, 4]],
[[b'foo', b'bar'], [b'hello', b'world']]],
names=('a', 'c'))
assert t['a'].tolist() == [[1, 2], [3, 4]]
assert t['c'].tolist() == [['foo', 'bar'], ['hello', 'world']]
assert isinstance(t['a'].tolist()[0][0], int)
assert isinstance(t['c'].tolist()[0][0], str)
|
9234fa223cc995b2322dd72eb71c395752fb5b615262c7f1fa6879e6afadb643 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import operator
import pytest
import numpy as np
from astropy.tests.helper import assert_follows_unicode_guidelines, catch_warnings
from astropy import table
from astropy import units as u
class TestColumn():
def test_subclass(self, Column):
c = Column(name='a')
assert isinstance(c, np.ndarray)
c2 = c * 2
assert isinstance(c2, Column)
assert isinstance(c2, np.ndarray)
def test_numpy_ops(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name='a')
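        # With identical data, eq/ge/le are True everywhere and ne/gt/lt are
        # False everywhere, regardless of operand order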
for op, test_equal in ((operator.eq, True),
(operator.ne, False),
(operator.ge, True),
(operator.gt, False),
(operator.le, True),
(operator.lt, False)):
for eq in (op(c, arr), op(arr, c)):
assert np.all(eq) if test_equal else not np.any(eq)
assert len(eq) == 3
if Column is table.Column:
assert type(eq) == np.ndarray
else:
assert type(eq) == np.ma.core.MaskedArray
assert eq.dtype.str == '|b1'
lt = c - 1 < arr
assert np.all(lt)
def test_numpy_boolean_ufuncs(self, Column):
"""Show that basic numpy operations with Column behave sensibly"""
arr = np.array([1, 2, 3])
c = Column(arr, name='a')
for ufunc, test_true in ((np.isfinite, True),
(np.isinf, False),
(np.isnan, False),
(np.sign, True),
(np.signbit, False)):
result = ufunc(c)
assert len(result) == len(c)
assert np.all(result) if test_true else not np.any(result)
if Column is table.Column:
assert type(result) == np.ndarray
else:
assert type(result) == np.ma.core.MaskedArray
if ufunc is not np.sign:
assert result.dtype.str == '|b1'
def test_view(self, Column):
c = np.array([1, 2, 3], dtype=np.int64).view(Column)
assert repr(c) == "<{0} dtype='int64' length=3>\n1\n2\n3".format(Column.__name__)
def test_format(self, Column):
"""Show that the formatted output from str() works"""
from astropy import conf
with conf.set_temp('max_lines', 8):
c1 = Column(np.arange(2000), name='a', dtype=float,
format='%6.2f')
assert str(c1).splitlines() == [' a ',
'-------',
' 0.00',
' 1.00',
' ...',
'1998.00',
'1999.00',
'Length = 2000 rows']
def test_convert_numpy_array(self, Column):
d = Column([1, 2, 3], name='a', dtype='i8')
np_data = np.array(d)
assert np.all(np_data == d)
np_data = np.array(d, copy=False)
assert np.all(np_data == d)
np_data = np.array(d, dtype='i4')
assert np.all(np_data == d)
def test_convert_unit(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
d.convert_unit_to("km")
assert np.all(d.data == [0.001, 0.002, 0.003])
def test_array_wrap(self):
"""Test that the __array_wrap__ method converts a reduction ufunc
output that has a different shape into an ndarray view. Without this a
method call like c.mean() returns a Column array object with length=1."""
# Mean and sum for a 1-d float column
c = table.Column(name='a', data=[1., 2., 3.])
assert np.allclose(c.mean(), 2.0)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 6.)
assert isinstance(c.sum(), (np.floating, float))
# Non-reduction ufunc preserves Column class
assert isinstance(np.cos(c), table.Column)
# Sum for a 1-d int column
c = table.Column(name='a', data=[1, 2, 3])
assert np.allclose(c.sum(), 6)
assert isinstance(c.sum(), (np.integer, int))
# Sum for a 2-d int column
c = table.Column(name='a', data=[[1, 2, 3],
[4, 5, 6]])
assert c.sum() == 21
assert isinstance(c.sum(), (np.integer, int))
assert np.all(c.sum(axis=0) == [5, 7, 9])
assert c.sum(axis=0).shape == (3,)
assert isinstance(c.sum(axis=0), np.ndarray)
# Sum and mean for a 1-d masked column
c = table.MaskedColumn(name='a', data=[1., 2., 3.], mask=[0, 0, 1])
assert np.allclose(c.mean(), 1.5)
assert isinstance(c.mean(), (np.floating, float))
assert np.allclose(c.sum(), 3.)
assert isinstance(c.sum(), (np.floating, float))
def test_name_none(self, Column):
"""Can create a column without supplying name, which defaults to None"""
c = Column([1, 2])
assert c.name is None
assert np.all(c == np.array([1, 2]))
def test_quantity_init(self, Column):
c = Column(data=np.array([1, 2, 3]) * u.m)
assert np.all(c.data == np.array([1, 2, 3]))
assert np.all(c.unit == u.m)
c = Column(data=np.array([1, 2, 3]) * u.m, unit=u.cm)
assert np.all(c.data == np.array([100, 200, 300]))
assert np.all(c.unit == u.cm)
def test_attrs_survive_getitem_after_change(self, Column):
"""
        Regression test for issue #3023: calling __getitem__ on a MaskedArray
        subclass used to drop the original object's attributes.
"""
c1 = Column([1, 2, 3], name='a', unit='m', format='%i',
description='aa', meta={'a': 1})
c1.name = 'b'
c1.unit = 'km'
c1.format = '%d'
c1.description = 'bb'
c1.meta = {'bbb': 2}
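        # Slices, fancy indexing and boolean masks should all carry over the
        # updated attributes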
for item in (slice(None, None), slice(None, 1), np.array([0, 2]),
np.array([False, True, False])):
c2 = c1[item]
assert c2.name == 'b'
assert c2.unit is u.km
assert c2.format == '%d'
assert c2.description == 'bb'
assert c2.meta == {'bbb': 2}
# Make sure that calling getitem resulting in a scalar does
# not copy attributes.
val = c1[1]
for attr in ('name', 'unit', 'format', 'description', 'meta'):
assert not hasattr(val, attr)
def test_to_quantity(self, Column):
d = Column([1, 2, 3], name='a', dtype="f8", unit="m")
assert np.all(d.quantity == ([1, 2, 3.] * u.m))
assert np.all(d.quantity.value == ([1, 2, 3.] * u.m).value)
assert np.all(d.quantity == d.to('m'))
assert np.all(d.quantity.value == d.to('m').value)
np.testing.assert_allclose(d.to(u.km).value, ([.001, .002, .003] * u.km).value)
np.testing.assert_allclose(d.to('km').value, ([.001, .002, .003] * u.km).value)
np.testing.assert_allclose(d.to(u.MHz, u.equivalencies.spectral()).value,
[299.792458, 149.896229, 99.93081933])
d_nounit = Column([1, 2, 3], name='a', dtype="f8", unit=None)
with pytest.raises(u.UnitsError):
d_nounit.to(u.km)
assert np.all(d_nounit.to(u.dimensionless_unscaled) == np.array([1, 2, 3]))
# make sure the correct copy/no copy behavior is happening
q = [1, 3, 5]*u.km
# to should always make a copy
d.to(u.km)[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
        # explicit copying of the quantity should not change the column
d.quantity.copy()[:] = q
np.testing.assert_allclose(d, [1, 2, 3])
# but quantity directly is a "view", accessing the underlying column
d.quantity[:] = q
np.testing.assert_allclose(d, [1000, 3000, 5000])
# view should also work for integers
d2 = Column([1, 2, 3], name='a', dtype=int, unit="m")
d2.quantity[:] = q
np.testing.assert_allclose(d2, [1000, 3000, 5000])
# but it should fail for strings or other non-numeric tables
d3 = Column(['arg', 'name', 'stuff'], name='a', unit="m")
with pytest.raises(TypeError):
d3.quantity
def test_to_funcunit_quantity(self, Column):
"""
Tests for #8424, check if function-unit can be retrieved from column.
"""
d = Column([1, 2, 3], name='a', dtype="f8", unit="dex(AA)")
assert np.all(d.quantity == ([1, 2, 3] * u.dex(u.AA)))
assert np.all(d.quantity.value == ([1, 2, 3] * u.dex(u.AA)).value)
assert np.all(d.quantity == d.to("dex(AA)"))
assert np.all(d.quantity.value == d.to("dex(AA)").value)
        # make sure casting to a linear unit works
q = [10, 100, 1000] * u.AA
np.testing.assert_allclose(d.to(u.AA), q)
def test_item_access_type(self, Column):
"""
Tests for #3095, which forces integer item access to always return a plain
ndarray or MaskedArray, even in the case of a multi-dim column.
"""
integer_types = (int, np.int_)
for int_type in integer_types:
c = Column([[1, 2], [3, 4]])
i0 = int_type(0)
i1 = int_type(1)
assert np.all(c[i0] == [1, 2])
assert type(c[i0]) == (np.ma.MaskedArray if hasattr(Column, 'mask') else np.ndarray)
assert c[i0].shape == (2,)
c01 = c[i0:i1]
assert np.all(c01 == [[1, 2]])
assert isinstance(c01, Column)
assert c01.shape == (1, 2)
c = Column([1, 2])
assert np.all(c[i0] == 1)
assert isinstance(c[i0], np.integer)
assert c[i0].shape == ()
c01 = c[i0:i1]
assert np.all(c01 == [1])
assert isinstance(c01, Column)
assert c01.shape == (1,)
def test_insert_basic(self, Column):
c = Column([0, 1, 2], name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1 == [0, 100, 1, 2])
assert c1.attrs_equal(c)
assert type(c) is type(c1)
if hasattr(c1, 'mask'):
assert c1.data.shape == c1.mask.shape
c1 = c.insert(-1, 100)
assert np.all(c1 == [0, 1, 100, 2])
c1 = c.insert(3, 100)
assert np.all(c1 == [0, 1, 2, 100])
c1 = c.insert(-3, 100)
assert np.all(c1 == [100, 0, 1, 2])
c1 = c.insert(1, [100, 200, 300])
if hasattr(c1, 'mask'):
assert c1.data.shape == c1.mask.shape
# Out of bounds index
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(-4, 100)
with pytest.raises((ValueError, IndexError)):
c1 = c.insert(4, 100)
def test_insert_axis(self, Column):
"""Insert with non-default axis kwarg"""
c = Column([[1, 2], [3, 4]])
c1 = c.insert(1, [5, 6], axis=None)
assert np.all(c1 == [1, 5, 6, 2, 3, 4])
c1 = c.insert(1, [5, 6], axis=1)
assert np.all(c1 == [[1, 5, 2], [3, 6, 4]])
def test_insert_multidim(self, Column):
c = Column([[1, 2],
[3, 4]], name='a', dtype=int)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == [[1, 2], [100, 200], [3, 4]])
# Broadcast
c1 = c.insert(1, 100)
assert np.all(c1 == [[1, 2], [100, 100], [3, 4]])
# Wrong shape
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200, 300])
def test_insert_object(self, Column):
c = Column(['a', 1, None], name='a', dtype=object)
# Basic insert
c1 = c.insert(1, [100, 200])
assert np.all(c1 == ['a', [100, 200], 1, None])
def test_insert_masked(self):
c = table.MaskedColumn([0, 1, 2], name='a', fill_value=9999,
mask=[False, True, False])
# Basic insert
c1 = c.insert(1, 100)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert c1.fill_value == 9999
assert np.all(c1.data.mask == [False, False, True, False])
assert type(c) is type(c1)
for mask in (False, True):
c1 = c.insert(1, 100, mask=mask)
assert np.all(c1.data.data == [0, 100, 1, 2])
assert np.all(c1.data.mask == [False, mask, True, False])
def test_masked_multidim_as_list(self):
data = np.ma.MaskedArray([1, 2], mask=[True, False])
c = table.MaskedColumn([data])
assert c.shape == (1, 2)
assert np.all(c[0].mask == [True, False])
def test_insert_masked_multidim(self):
c = table.MaskedColumn([[1, 2],
[3, 4]], name='a', dtype=int)
c1 = c.insert(1, [100, 200], mask=True)
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, True], [False, False]])
c1 = c.insert(1, [100, 200], mask=[True, False])
assert np.all(c1.data.data == [[1, 2], [100, 200], [3, 4]])
assert np.all(c1.data.mask == [[False, False], [True, False], [False, False]])
with pytest.raises(ValueError):
c1 = c.insert(1, [100, 200], mask=[True, False, True])
def test_mask_on_non_masked_table(self):
"""
        When the table is not masked, trying to set a mask on a column
        raises AttributeError.
"""
t = table.Table([[1, 2], [3, 4]], names=('a', 'b'), dtype=('i4', 'f8'))
with pytest.raises(AttributeError):
t['a'].mask = [True, False]
class TestAttrEqual():
"""Bunch of tests originally from ATpy that test the attrs_equal method."""
def test_5(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy')
c2 = Column(name='a', dtype=int, unit='mJy')
assert c1.attrs_equal(c2)
def test_6(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert c1.attrs_equal(c2)
def test_7(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='b', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_8(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=float, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_9(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='erg.cm-2.s-1.Hz-1', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_10(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%g',
description='test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_11(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='another test column', meta={'c': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_12(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'e': 8, 'd': 12})
assert not c1.attrs_equal(c2)
def test_13(self, Column):
c1 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 9, 'd': 12})
assert not c1.attrs_equal(c2)
def test_col_and_masked_col(self):
c1 = table.Column(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
c2 = table.MaskedColumn(name='a', dtype=int, unit='mJy', format='%i',
description='test column', meta={'c': 8, 'd': 12})
assert c1.attrs_equal(c2)
assert c2.attrs_equal(c1)
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaColumn(MetaBaseTest):
test_class = table.Column
args = ()
class TestMetaMaskedColumn(MetaBaseTest):
test_class = table.MaskedColumn
args = ()
def test_getitem_metadata_regression():
"""
Regression test for #1471: MaskedArray does not call __array_finalize__ so
the meta-data was not getting copied over. By overloading _update_from we
are able to work around this bug.
"""
# Make sure that meta-data gets propagated with __getitem__
c = table.Column(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8})
assert c[1:2].name == 'a'
assert c[1:2].description == 'b'
assert c[1:2].unit == 'm'
assert c[1:2].format == '%i'
assert c[1:2].meta['c'] == 8
c = table.MaskedColumn(data=[1, 2], name='a', description='b', unit='m', format="%i", meta={'c': 8})
assert c[1:2].name == 'a'
assert c[1:2].description == 'b'
assert c[1:2].unit == 'm'
assert c[1:2].format == '%i'
assert c[1:2].meta['c'] == 8
# As above, but with take() - check the method and the function
c = table.Column(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8})
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == 'a'
assert subset.description == 'b'
assert subset.unit == 'm'
assert subset.format == '%i'
assert subset.meta['c'] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.Column)
c = table.MaskedColumn(data=[1, 2, 3], name='a', description='b', unit='m', format="%i", meta={'c': 8})
for subset in [c.take([0, 1]), np.take(c, [0, 1])]:
assert subset.name == 'a'
assert subset.description == 'b'
assert subset.unit == 'm'
assert subset.format == '%i'
assert subset.meta['c'] == 8
# Metadata isn't copied for scalar values
for subset in [c.take(0), np.take(c, 0)]:
assert subset == 1
assert subset.shape == ()
assert not isinstance(subset, table.MaskedColumn)
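# --- Illustrative sketch (not the actual astropy implementation) -------------
# The workaround mentioned in the docstring above follows the usual numpy
# subclassing pattern: because np.ma.MaskedArray bypasses __array_finalize__
# in some code paths, attribute propagation is hooked into _update_from
# instead.  A minimal, hypothetical version of that pattern looks like:
#
#     class _MetaMaskedArray(np.ma.MaskedArray):
#         def _update_from(self, obj):
#             super()._update_from(obj)
#             # Re-copy custom attributes that __array_finalize__ would miss.
#             self.meta = getattr(obj, 'meta', {})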
def test_unicode_guidelines():
arr = np.array([1, 2, 3])
c = table.Column(arr, name='a')
assert_follows_unicode_guidelines(c)
def test_scalar_column():
"""
Column is not designed to hold scalars, but for numpy 1.6 this can happen:
>>> type(np.std(table.Column([1, 2])))
astropy.table.column.Column
"""
c = table.Column(1.5)
assert repr(c) == '1.5'
assert str(c) == '1.5'
def test_qtable_column_conversion():
"""
Ensures that a QTable that gets assigned a unit switches to be Quantity-y
"""
qtab = table.QTable([[1, 2], [3, 4.2]], names=['i', 'f'])
assert isinstance(qtab['i'], table.column.Column)
assert isinstance(qtab['f'], table.column.Column)
qtab['i'].unit = 'km/s'
assert isinstance(qtab['i'], u.Quantity)
assert isinstance(qtab['f'], table.column.Column)
# should follow from the above, but good to make sure as a #4497 regression test
assert isinstance(qtab['i'][0], u.Quantity)
assert isinstance(qtab[0]['i'], u.Quantity)
assert not isinstance(qtab['f'][0], u.Quantity)
assert not isinstance(qtab[0]['f'], u.Quantity)
# Regression test for #5342: if a function unit is assigned, the column
# should become the appropriate FunctionQuantity subclass.
qtab['f'].unit = u.dex(u.cm/u.s**2)
assert isinstance(qtab['f'], u.Dex)
@pytest.mark.parametrize('masked', [True, False])
def test_string_truncation_warning(masked):
"""
Test warnings associated with in-place assignment to a string
column that results in truncation of the right hand side.
"""
t = table.Table([['aa', 'bb']], names=['a'], masked=masked)
with catch_warnings() as w:
from inspect import currentframe, getframeinfo
t['a'][1] = 'cc'
assert len(w) == 0
t['a'][:] = 'dd'
assert len(w) == 0
with catch_warnings() as w:
frameinfo = getframeinfo(currentframe())
t['a'][0] = 'eee' # replace item with string that gets truncated
assert t['a'][0] == 'ee'
assert len(w) == 1
assert ('truncated right side string(s) longer than 2 character(s)'
in str(w[0].message))
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert w[0].category is table.StringTruncateWarning
assert 'test_column' in w[0].filename
with catch_warnings() as w:
t['a'][:] = ['ff', 'ggg'] # replace item with string that gets truncated
assert np.all(t['a'] == ['ff', 'gg'])
assert len(w) == 1
assert ('truncated right side string(s) longer than 2 character(s)'
in str(w[0].message))
with catch_warnings() as w:
# Test the obscure case of assigning from an array that was originally
# wider than any of the current elements (i.e. dtype is U4 but actual
# elements are U1 at the time of assignment).
val = np.array(['ffff', 'gggg'])
val[:] = ['f', 'g']
t['a'][:] = val
assert np.all(t['a'] == ['f', 'g'])
assert len(w) == 0
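# --- Illustrative note (assumption: not part of the original test module) ----
# The ``w[0].lineno == frameinfo.lineno + 1`` check above relies on the
# truncation warning being raised with a ``stacklevel`` that attributes it to
# the user's assignment statement rather than to astropy internals, e.g.
# conceptually:
#
#     import warnings
#     warnings.warn('truncated right side string(s) ...',
#                   table.StringTruncateWarning, stacklevel=3)
#
# The exact stacklevel value is an assumption here; the point is that the
# warning is reported against the caller's frame.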
def test_string_truncation_warning_masked():
"""
Test warnings associated with in-place assignment of a string to a
masked column, specifically where the right hand side contains
np.ma.masked.
"""
# Test for strings, but also cover assignment of np.ma.masked to
# int and float masked column setting. This was previously only
# covered in an unrelated io.ascii test (test_line_endings) which
# showed an unexpected difference between handling of str and numeric
# masked arrays.
for values in (['a', 'b'], [1, 2], [1.0, 2.0]):
mc = table.MaskedColumn(values)
with catch_warnings() as w:
mc[1] = np.ma.masked
assert len(w) == 0
assert np.all(mc.mask == [False, True])
mc[:] = np.ma.masked
assert len(w) == 0
assert np.all(mc.mask == [True, True])
mc = table.MaskedColumn(['aa', 'bb'])
with catch_warnings() as w:
mc[:] = [np.ma.masked, 'ggg'] # replace item with string that gets truncated
assert mc[1] == 'gg'
assert np.all(mc.mask == [True, False])
assert len(w) == 1
assert ('truncated right side string(s) longer than 2 character(s)'
in str(w[0].message))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_create_from_str(Column):
"""
Create a bytestring Column from strings (including unicode) in Py3.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = u'bä'
c = Column([uba, 'def'], dtype='S')
assert c.dtype.char == 'S'
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, 'def']))
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_col_unicode_sandwich_bytes(Column):
"""
Create a bytestring Column from bytes and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
# a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding.
# Stress the system by injecting non-ASCII characters.
uba = u'bä'
uba8 = uba.encode('utf-8')
c = Column([uba8, b'def'])
assert c.dtype.char == 'S'
assert c[0] == uba
assert isinstance(c[0], str)
assert isinstance(c[:0], table.Column)
assert np.all(c[:2] == np.array([uba, 'def']))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == 'S'
# Array / list comparisons
assert np.all(c == [uba, 'def'])
ok = c == [uba8, b'def']
assert type(ok) is type(c.data)
assert ok.dtype.char == '?'
assert np.all(ok)
assert np.all(c == np.array([uba, u'def']))
assert np.all(c == np.array([uba8, b'def']))
# Scalar compare
cmps = (uba, uba8)
for cmp in cmps:
ok = c == cmp
assert type(ok) is type(c.data)
assert np.all(ok == [True, False])
def test_col_unicode_sandwich_unicode():
"""
Sanity check that Unicode Column behaves normally.
"""
# On Py2 the unicode must be ASCII-compatible, else the final test fails.
uba = u'bä'
uba8 = uba.encode('utf-8')
c = table.Column([uba, 'def'], dtype='U')
assert c[0] == uba
assert isinstance(c[:0], table.Column)
assert isinstance(c[0], str)
assert np.all(c[:2] == np.array([uba, 'def']))
assert isinstance(c[:], table.Column)
assert c[:].dtype.char == 'U'
ok = c == [uba, 'def']
assert type(ok) == np.ndarray
assert ok.dtype.char == '?'
assert np.all(ok)
assert np.all(c != [uba8, b'def'])
def test_masked_col_unicode_sandwich():
"""
Create a bytestring MaskedColumn and ensure that it works in Python 3 in
a convenient way like in Python 2.
"""
c = table.MaskedColumn([b'abc', b'def'])
c[1] = np.ma.masked
assert isinstance(c[:0], table.MaskedColumn)
assert isinstance(c[0], str)
assert c[0] == 'abc'
assert c[1] is np.ma.masked
assert isinstance(c[:], table.MaskedColumn)
assert c[:].dtype.char == 'S'
ok = c == ['abc', 'def']
assert ok[0] == True
assert ok[1] is np.ma.masked
assert np.all(c == [b'abc', b'def'])
assert np.all(c == np.array([u'abc', u'def']))
assert np.all(c == np.array([b'abc', b'def']))
for cmp in (u'abc', b'abc'):
ok = c == cmp
assert type(ok) is np.ma.MaskedArray
assert ok[0] == True
assert ok[1] is np.ma.masked
@pytest.mark.parametrize('Column', (table.Column, table.MaskedColumn))
def test_unicode_sandwich_set(Column):
"""
Test item and slice assignment on a bytestring Column with bytes and str values.
"""
uba = u'bä'
c = Column([b'abc', b'def'])
c[0] = b'aa'
assert np.all(c == [u'aa', u'def'])
c[0] = uba # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding
assert np.all(c == [uba, u'def'])
assert c.pformat() == [u'None', u'----', ' ' + uba, u' def']
c[:] = b'cc'
assert np.all(c == [u'cc', u'cc'])
c[:] = uba
assert np.all(c == [uba, uba])
c[:] = ''
c[:] = [uba, b'def']
assert np.all(c == [uba, b'def'])
@pytest.mark.parametrize('class1', [table.MaskedColumn, table.Column])
@pytest.mark.parametrize('class2', [table.MaskedColumn, table.Column, str, list])
def test_unicode_sandwich_compare(class1, class2):
"""Test that comparing a bytestring Column/MaskedColumn with various
str (unicode) object types gives the expected result. Tests #6838.
"""
obj1 = class1([b'a', b'c'])
if class2 is str:
obj2 = 'a'
elif class2 is list:
obj2 = ['a', 'b']
else:
obj2 = class2(['a', 'b'])
assert np.all((obj1 == obj2) == [True, False])
assert np.all((obj2 == obj1) == [True, False])
assert np.all((obj1 != obj2) == [False, True])
assert np.all((obj2 != obj1) == [False, True])
assert np.all((obj1 > obj2) == [False, True])
assert np.all((obj2 > obj1) == [False, False])
assert np.all((obj1 <= obj2) == [True, False])
assert np.all((obj2 <= obj1) == [True, True])
assert np.all((obj1 < obj2) == [False, False])
assert np.all((obj2 < obj1) == [False, True])
assert np.all((obj1 >= obj2) == [True, True])
assert np.all((obj2 >= obj1) == [True, False])
def test_unicode_sandwich_masked_compare():
"""Test the fix for #6839 from #6899."""
c1 = table.MaskedColumn(['a', 'b', 'c', 'd'],
mask=[True, False, True, False])
c2 = table.MaskedColumn([b'a', b'b', b'c', b'd'],
mask=[True, True, False, False])
for cmp in ((c1 == c2), (c2 == c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert cmp[3]
for cmp in ((c1 != c2), (c2 != c1)):
assert cmp[0] is np.ma.masked
assert cmp[1] is np.ma.masked
assert cmp[2] is np.ma.masked
assert not cmp[3]
# Note: comparisons <, >, >=, <= fail to return a masked array entirely,
# see https://github.com/numpy/numpy/issues/10092.
|
b1cc7679202b138df739c9c5eeb3b9ea99d57aead79af4e1961f79b01c8d3585 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal, assert_array_less
from astropy.modeling import models, InputParameterError
from astropy.coordinates import Angle
from astropy.modeling import fitting
from astropy.tests.helper import catch_warnings
from astropy.utils.exceptions import AstropyDeprecationWarning
try:
from scipy import optimize # pylint: disable=W0611
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
def test_sigma_constant():
"""
Test that the GAUSSIAN_SIGMA_TO_FWHM constant matches the
gaussian_sigma_to_fwhm constant in astropy.stats. We define
it manually in astropy.modeling to avoid importing from
astropy.stats.
"""
from astropy.stats.funcs import gaussian_sigma_to_fwhm
from astropy.modeling.functional_models import GAUSSIAN_SIGMA_TO_FWHM
assert gaussian_sigma_to_fwhm == GAUSSIAN_SIGMA_TO_FWHM
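# --- Illustrative note (not part of the original test) -----------------------
# The constant under test is the standard Gaussian relation
#     FWHM = 2 * sqrt(2 * ln 2) * sigma  ~=  2.3548 * sigma,
# so an equivalent numerical check would be (hypothetical):
#     assert np.isclose(GAUSSIAN_SIGMA_TO_FWHM, 2 * np.sqrt(2 * np.log(2)))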
def test_Trapezoid1D():
"""Regression test for https://github.com/astropy/astropy/issues/1721"""
model = models.Trapezoid1D(amplitude=4.2, x_0=2.0, width=1.0, slope=3)
xx = np.linspace(0, 4, 8)
yy = model(xx)
yy_ref = [0., 1.41428571, 3.12857143, 4.2, 4.2, 3.12857143, 1.41428571, 0.]
assert_allclose(yy, yy_ref, rtol=0, atol=1e-6)
def test_Gaussian2D():
"""
Test rotated elliptical Gaussian2D model.
https://github.com/astropy/astropy/pull/2038
"""
model = models.Gaussian2D(4.2, 1.7, 3.1, x_stddev=5.1, y_stddev=3.3,
theta=np.pi/6.)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[3.01907812, 2.99051889, 2.81271552, 2.5119566, 2.13012709],
[3.55982239, 3.6086023, 3.4734158, 3.17454575, 2.75494838],
[3.88059142, 4.0257528, 3.96554926, 3.70908389, 3.29410187],
[3.91095768, 4.15212857, 4.18567526, 4.00652015, 3.64146544],
[3.6440466, 3.95922417, 4.08454159, 4.00113878, 3.72161094]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
assert_allclose([model.x_fwhm, model.y_fwhm],
[12.009582229657841, 7.7709061486021325])
def test_Gaussian2DCovariance():
"""
Test rotated elliptical Gaussian2D model when cov_matrix is input.
https://github.com/astropy/astropy/pull/2199
"""
cov_matrix = [[49., -16.], [-16., 9.]]
model = models.Gaussian2D(17., 2.0, 2.5, cov_matrix=cov_matrix)
y, x = np.mgrid[0:5, 0:5]
g = model(x, y)
g_ref = [[4.3744505, 5.8413977, 7.42988694, 9.00160175, 10.38794269],
[8.83290201, 10.81772851, 12.61946384, 14.02225593, 14.84113227],
[13.68528889, 15.37184621, 16.44637743, 16.76048705, 16.26953638],
[16.26953638, 16.76048705, 16.44637743, 15.37184621, 13.68528889],
[14.84113227, 14.02225593, 12.61946384, 10.81772851, 8.83290201]]
assert_allclose(g, g_ref, rtol=0, atol=1e-6)
def test_Gaussian2DRotation():
amplitude = 42
x_mean, y_mean = 0, 0
x_stddev, y_stddev = 2, 3
theta = Angle(10, 'deg')
pars = dict(amplitude=amplitude, x_mean=x_mean, y_mean=y_mean,
x_stddev=x_stddev, y_stddev=y_stddev)
rotation = models.Rotation2D(angle=theta.degree)
point1 = (x_mean + 2 * x_stddev, y_mean + 2 * y_stddev)
point2 = rotation(*point1)
g1 = models.Gaussian2D(theta=0, **pars)
g2 = models.Gaussian2D(theta=theta.radian, **pars)
value1 = g1(*point1)
value2 = g2(*point2)
assert_allclose(value1, value2)
def test_Gaussian2D_invalid_inputs():
x_stddev = 5.1
y_stddev = 3.3
theta = 10
cov_matrix = [[49., -16.], [-16., 9.]]
# first make sure the valid ones are OK
models.Gaussian2D()
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=None, y_stddev=y_stddev, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=None, theta=theta)
models.Gaussian2D(x_stddev=x_stddev, y_stddev=y_stddev, theta=None)
models.Gaussian2D(cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(x_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(y_stddev=0, cov_matrix=cov_matrix)
with pytest.raises(InputParameterError):
models.Gaussian2D(theta=0, cov_matrix=cov_matrix)
@pytest.mark.parametrize('gamma', (10, -10))
def test_moffat_fwhm(gamma):
ans = 34.641016151377542
kwargs = {'gamma': gamma, 'alpha': 0.5}
m1 = models.Moffat1D(**kwargs)
m2 = models.Moffat2D(**kwargs)
assert_allclose([m1.fwhm, m2.fwhm], ans)
assert_array_less(0, [m1.fwhm, m2.fwhm])
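# --- Illustrative note (not part of the original test) -----------------------
# The expected value above follows from the Moffat profile
#     f(r) = A * (1 + (r / gamma)**2) ** (-alpha),
# whose full width at half maximum is
#     FWHM = 2 * |gamma| * sqrt(2**(1 / alpha) - 1).
# With |gamma| = 10 and alpha = 0.5 this gives 20 * sqrt(3) ~= 34.641.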
def test_RedshiftScaleFactor():
"""Like ``test_ScaleModel()``."""
# Scale by a scalar
m = models.RedshiftScaleFactor(0.4)
assert m(0) == 0
assert_array_equal(m([1, 2]), [1.4, 2.8])
assert_allclose(m.inverse(m([1, 2])), [1, 2])
# Scale by a list
m = models.RedshiftScaleFactor([-0.5, 0, 0.5], n_models=3)
assert_array_equal(m(0), 0)
assert_array_equal(m([1, 2], model_set_axis=False),
[[0.5, 1], [1, 2], [1.5, 3]])
assert_allclose(m.inverse(m([1, 2], model_set_axis=False)),
[[1, 2], [1, 2], [1, 2]])
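# --- Illustrative note (not part of the original test) -----------------------
# RedshiftScaleFactor(z) multiplies its input by (1 + z), which is why z = 0.4
# maps 1 -> 1.4 above, and the inverse divides by (1 + z).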
def test_Ellipse2D():
"""Test Ellipse2D model."""
amplitude = 7.5
x0, y0 = 15, 15
theta = Angle(45, 'deg')
em = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta.radian)
y, x = np.mgrid[0:30, 0:30]
e = em(x, y)
assert np.all(e[e > 0] == amplitude)
assert e[y0, x0] == amplitude
rotation = models.Rotation2D(angle=theta.degree)
point1 = [2, 0] # Rotation2D center is (0, 0)
point2 = rotation(*point1)
point1 = np.array(point1) + [x0, y0]
point2 = np.array(point2) + [x0, y0]
e1 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=0.)
e2 = models.Ellipse2D(amplitude, x0, y0, 7, 3, theta=theta.radian)
assert e1(*point1) == e2(*point2)
def test_Ellipse2D_circular():
"""Test that circular Ellipse2D agrees with Disk2D [3736]."""
amplitude = 7.5
radius = 10
size = (radius * 2) + 1
y, x = np.mgrid[0:size, 0:size]
ellipse = models.Ellipse2D(amplitude, radius, radius, radius, radius,
theta=0)(x, y)
disk = models.Disk2D(amplitude, radius, radius, radius)(x, y)
assert np.all(ellipse == disk)
def test_Scale_inverse():
m = models.Scale(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Multiply_inverse():
m = models.Multiply(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
def test_Shift_inverse():
m = models.Shift(1.2345)
assert_allclose(m.inverse(m(6.789)), 6.789)
@pytest.mark.skipif('not HAS_SCIPY')
def test_Shift_model_levmar_fit():
"""Test fitting Shift model with LevMarLSQFitter (issue #6103)."""
init_model = models.Shift()
x = np.arange(10)
y = x+0.1
fitter = fitting.LevMarLSQFitter()
fitted_model = fitter(init_model, x, y)
assert_allclose(fitted_model.parameters, [0.1], atol=1e-15)
def test_Shift_model_set_linear_fit():
"""Test linear fitting of Shift model (issue #6103)."""
init_model = models.Shift(offset=[0, 0], n_models=2)
x = np.arange(10)
yy = np.array([x+0.1, x-0.2])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [0.1, -0.2], atol=1e-15)
@pytest.mark.parametrize('Model', (models.Scale, models.Multiply))
def test_Scale_model_set_linear_fit(Model):
"""Test linear fitting of Scale model (#6103)."""
init_model = Model(factor=[0, 0], n_models=2)
x = np.arange(-3, 7)
yy = np.array([1.15*x, 0.96*x])
fitter = fitting.LinearLSQFitter()
fitted_model = fitter(init_model, x, yy)
assert_allclose(fitted_model.parameters, [1.15, 0.96], atol=1e-15)
# https://github.com/astropy/astropy/issues/6178
def test_Ring2D_rout():
m = models.Ring2D(amplitude=1, x_0=1, y_0=1, r_in=2, r_out=5)
assert m.width.value == 3
@pytest.mark.skipif("not HAS_SCIPY")
def test_Voigt1D():
voi = models.Voigt1D(amplitude_L=-0.5, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
xarr = np.linspace(-5.0, 5.0, num=40)
yarr = voi(xarr)
voi_init = models.Voigt1D(amplitude_L=-1.0, x_0=1.0, fwhm_L=5.0, fwhm_G=5.0)
fitter = fitting.LevMarLSQFitter()
voi_fit = fitter(voi_init, xarr, yarr)
assert_allclose(voi_fit.param_sets, voi.param_sets)
@pytest.mark.skipif("not HAS_SCIPY")
def test_compound_models_with_class_variables():
models_2d = [models.AiryDisk2D, models.Sersic2D]
models_1d = [models.Sersic1D]
for model_2d in models_2d:
class CompoundModel2D(models.Const2D + model_2d):
pass
x, y = np.mgrid[:10, :10]
f = CompoundModel2D()(x, y)
assert f.shape == (10, 10)
for model_1d in models_1d:
class CompoundModel1D(models.Const1D + model_1d):
pass
x = np.arange(10)
f = CompoundModel1D()(x)
assert f.shape == (10,)
|
dbc7c1a47b4ebb3f46e33a4f2cde149c0559338bec5af040e0a89f688da98bc7 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import hashlib
import os
import pathlib
import sys
import tempfile
import urllib.request
import urllib.error
import pytest
from astropy.utils.data import (_get_download_cache_locs, CacheMissingWarning,
get_pkg_data_filename, get_readable_fileobj, conf)
from astropy.tests.helper import raises, catch_warnings
TESTURL = 'http://www.astropy.org'
TESTLOCAL = get_pkg_data_filename(os.path.join('data', 'local.dat'))
# General file object function
try:
import bz2 # noqa
except ImportError:
HAS_BZ2 = False
else:
HAS_BZ2 = True
try:
import lzma # noqa
except ImportError:
HAS_XZ = False
else:
HAS_XZ = True
@pytest.mark.remote_data(source='astropy')
def test_download_nocache():
from astropy.utils.data import download_file
fnout = download_file(TESTURL)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source='astropy')
def test_download_parallel():
from astropy.utils.data import download_files_in_parallel
main_url = conf.dataurl
mirror_url = conf.dataurl_mirror
fileloc = 'intersphinx/README'
try:
fnout = download_files_in_parallel([main_url, main_url + fileloc])
except urllib.error.URLError: # Use mirror if timed out
fnout = download_files_in_parallel([mirror_url, mirror_url + fileloc])
assert all([os.path.isfile(f) for f in fnout]), fnout
# NOTE: Does not need remote data.
def test_download_mirror_cache():
import pathlib
import shelve
from astropy.utils.data import _find_pkg_data_path, download_file, get_cached_urls
main_url = pathlib.Path(
_find_pkg_data_path(os.path.join('data', 'dataurl'))).as_uri() + '/'
mirror_url = pathlib.Path(
_find_pkg_data_path(os.path.join('data', 'dataurl_mirror'))).as_uri() + '/' # noqa
main_file = main_url + 'index.html'
mirror_file = mirror_url + 'index.html'
# Temporarily change data.conf.
# This also tests https://github.com/astropy/astropy/pull/8163 because
# urlopen() on a local dir URI also gives URLError.
with conf.set_temp('dataurl', main_url):
with conf.set_temp('dataurl_mirror', mirror_url):
# "Download" files by rerouting URLs to local URIs.
download_file(main_file, cache=True)
download_file(mirror_file, cache=True)
# Now test that download_file looks in mirror's cache before
# download.
# https://github.com/astropy/astropy/issues/6982
dldir, urlmapfn = _get_download_cache_locs()
with shelve.open(urlmapfn) as url2hash:
del url2hash[main_file]
# Comparing hash makes sure they download the same file
# but does not guarantee they were downloaded from the same URL.
assert (download_file(main_file, cache=True) ==
download_file(mirror_file, cache=True))
# get_cached_urls() has to be called after the last download to obtain
# an accurate view of the cached URLs; it is used here to check that
# main_file was not re-downloaded unnecessarily, and this also exercises
# get_cached_urls() itself.
c_urls = get_cached_urls()
assert (mirror_file in c_urls) and (main_file not in c_urls)
@pytest.mark.remote_data(source='astropy')
def test_download_noprogress():
from astropy.utils.data import download_file
fnout = download_file(TESTURL, show_progress=False)
assert os.path.isfile(fnout)
@pytest.mark.remote_data(source='astropy')
def test_download_cache():
from astropy.utils.data import download_file, clear_download_cache
download_dir = _get_download_cache_locs()[0]
# Download the test URL and make sure it exists, then clear just that
# URL and make sure it got deleted.
fnout = download_file(TESTURL, cache=True)
assert os.path.isdir(download_dir)
assert os.path.isfile(fnout)
clear_download_cache(TESTURL)
assert not os.path.exists(fnout)
# Test issues raised in #4427 with clear_download_cache() without a URL,
# followed by subsequent download.
fnout = download_file(TESTURL, cache=True)
assert os.path.isfile(fnout)
clear_download_cache()
assert not os.path.exists(fnout)
assert not os.path.exists(download_dir)
fnout = download_file(TESTURL, cache=True)
assert os.path.isfile(fnout)
# Clearing download cache succeeds even if the URL does not exist.
clear_download_cache('http://this_was_never_downloaded_before.com')
# Make sure lockdir was released
lockdir = os.path.join(download_dir, 'lock')
assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
@pytest.mark.remote_data(source='astropy')
def test_url_nocache():
with get_readable_fileobj(TESTURL, cache=False, encoding='utf-8') as page:
assert page.read().find('Astropy') > -1
@pytest.mark.remote_data(source='astropy')
def test_find_by_hash():
from astropy.utils.data import clear_download_cache
with get_readable_fileobj(TESTURL, encoding="binary", cache=True) as page:
hash = hashlib.md5(page.read())
hashstr = 'hash/' + hash.hexdigest()
fnout = get_pkg_data_filename(hashstr)
assert os.path.isfile(fnout)
clear_download_cache(hashstr[5:])
assert not os.path.isfile(fnout)
lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
@pytest.mark.remote_data(source='astropy')
def test_find_invalid():
# this is of course not a real data file and not on any remote server, but
# it should *try* to go to the remote server
with pytest.raises(urllib.error.URLError):
get_pkg_data_filename('kjfrhgjklahgiulrhgiuraehgiurhgiuhreglhurieghruelighiuerahiulruli')
# Package data functions
@pytest.mark.parametrize(('filename'), ['local.dat', 'local.dat.gz',
'local.dat.bz2', 'local.dat.xz'])
def test_local_data_obj(filename):
from astropy.utils.data import get_pkg_data_fileobj
if (not HAS_BZ2 and 'bz2' in filename) or (not HAS_XZ and 'xz' in filename):
with pytest.raises(ValueError) as e:
with get_pkg_data_fileobj(os.path.join('data', filename), encoding='binary') as f:
f.readline()
# assert f.read().rstrip() == b'CONTENT'
assert ' format files are not supported' in str(e)
else:
with get_pkg_data_fileobj(os.path.join('data', filename), encoding='binary') as f:
f.readline()
assert f.read().rstrip() == b'CONTENT'
@pytest.fixture(params=['invalid.dat.bz2', 'invalid.dat.gz'])
def bad_compressed(request, tmpdir):
# These contents have valid headers for their respective file formats, but
# are otherwise malformed and invalid.
bz_content = b'BZhinvalid'
gz_content = b'\x1f\x8b\x08invalid'
datafile = tmpdir.join(request.param)
filename = datafile.strpath
if filename.endswith('.bz2'):
contents = bz_content
elif filename.endswith('.gz'):
contents = gz_content
else:
contents = 'invalid'
datafile.write(contents, mode='wb')
return filename
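# --- Illustrative note (not part of the original fixture) --------------------
# The prefixes used above are the standard magic numbers for each format:
#     b'BZh'          -> bzip2 stream header
#     b'\x1f\x8b\x08' -> gzip magic bytes plus the DEFLATE method byte
# so file-type detection succeeds while decompression of the remaining
# (invalid) bytes fails, which is exactly what the tests below exercise.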
def test_local_data_obj_invalid(bad_compressed):
is_bz2 = bad_compressed.endswith('.bz2')
is_xz = bad_compressed.endswith('.xz')
# Note, since these invalid files are created on the fly in order to avoid
# problems with detection by antivirus software
# (see https://github.com/astropy/astropy/issues/6520), it is no longer
# possible to use ``get_pkg_data_fileobj`` to read the files. Technically,
# they're not local anymore: they just live in a temporary directory
# created by pytest. However, we can still use get_readable_fileobj for the
# test.
if (not HAS_BZ2 and is_bz2) or (not HAS_XZ and is_xz):
with pytest.raises(ValueError) as e:
with get_readable_fileobj(bad_compressed, encoding='binary') as f:
f.read()
assert ' format files are not supported' in str(e)
else:
with get_readable_fileobj(bad_compressed, encoding='binary') as f:
assert f.read().rstrip().endswith(b'invalid')
def test_local_data_name():
assert os.path.isfile(TESTLOCAL) and TESTLOCAL.endswith('local.dat')
# TODO: if the root data/ directory is added back in the future, the test
# below should be uncommented and README.rst replaced with whatever file
# is actually there
# get something in the astropy root
# fnout2 = get_pkg_data_filename('../../data/README.rst')
# assert os.path.isfile(fnout2) and fnout2.endswith('README.rst')
def test_data_name_third_party_package():
"""Regression test for issue #1256
Tests that `get_pkg_data_filename` works in a third-party package that
doesn't make any relative imports from the module it's used from.
Uses a test package under ``data/test_package``.
"""
# Get the actual data dir:
data_dir = os.path.join(os.path.dirname(__file__), 'data')
sys.path.insert(0, data_dir)
try:
import test_package
filename = test_package.get_data_filename()
assert filename == os.path.join(data_dir, 'test_package', 'data',
'foo.txt')
finally:
sys.path.pop(0)
@raises(RuntimeError)
def test_local_data_nonlocalfail():
# this would go *outside* the astropy tree
get_pkg_data_filename('../../../data/README.rst')
def test_compute_hash(tmpdir):
from astropy.utils.data import compute_hash
rands = b'1234567890abcdefghijklmnopqrstuvwxyz'
filename = tmpdir.join('tmp.dat').strpath
with open(filename, 'wb') as ntf:
ntf.write(rands)
ntf.flush()
chhash = compute_hash(filename)
shash = hashlib.md5(rands).hexdigest()
assert chhash == shash
def test_get_pkg_data_contents():
from astropy.utils.data import get_pkg_data_fileobj, get_pkg_data_contents
with get_pkg_data_fileobj('data/local.dat') as f:
contents1 = f.read()
contents2 = get_pkg_data_contents('data/local.dat')
assert contents1 == contents2
@pytest.mark.remote_data(source='astropy')
def test_data_noastropy_fallback(monkeypatch):
"""
Tests to make sure the default behavior when the cache directory can't
be located is correct
"""
from astropy.utils import data
from astropy.config import paths
# needed for testing the *real* lock at the end
lockdir = os.path.join(_get_download_cache_locs()[0], 'lock')
# better yet, set the configuration to make sure the temp files are deleted
conf.delete_temporary_downloads_at_exit = True
# make sure the config and cache directories are not searched
monkeypatch.setenv(str('XDG_CONFIG_HOME'), 'foo')
monkeypatch.delenv(str('XDG_CONFIG_HOME'))
monkeypatch.setenv(str('XDG_CACHE_HOME'), 'bar')
monkeypatch.delenv(str('XDG_CACHE_HOME'))
monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)
monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None)
# make sure the _find_or_create_astropy_dir function fails as though the
# astropy dir could not be accessed
def osraiser(dirnm, linkto):
raise OSError
monkeypatch.setattr(paths, '_find_or_create_astropy_dir', osraiser)
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_cache_dir()
# first try with cache
with catch_warnings(CacheMissingWarning) as w:
fnout = data.download_file(TESTURL, cache=True)
assert os.path.isfile(fnout)
assert len(w) > 1
w1 = w.pop(0)
w2 = w.pop(0)
assert w1.category == CacheMissingWarning
assert 'Remote data cache could not be accessed' in w1.message.args[0]
assert w2.category == CacheMissingWarning
assert 'File downloaded to temporary location' in w2.message.args[0]
assert fnout == w2.message.args[1]
# clearing the cache should be a no-op that doesn't affect fnout
with catch_warnings(CacheMissingWarning) as w:
data.clear_download_cache(TESTURL)
assert os.path.isfile(fnout)
# Now remove it so tests don't clutter up the temp dir. This should get
# called at exit anyway, but we do it here just to make sure it's working
# correctly.
data._deltemps()
assert not os.path.isfile(fnout)
assert len(w) > 0
w3 = w.pop()
assert w3.category == data.CacheMissingWarning
assert 'Not clearing data cache - cache inacessable' in str(w3.message)  # (sic: matches the emitted warning text)
# now try with no cache
with catch_warnings(CacheMissingWarning) as w:
fnnocache = data.download_file(TESTURL, cache=False)
with open(fnnocache, 'rb') as page:
assert page.read().decode('utf-8').find('Astropy') > -1
# no warnings should be raised here because the cache is not being used
assert len(w) == 0
# lockdir determined above as the *real* lockdir, not the temp one
assert not os.path.isdir(lockdir), 'Cache dir lock was not released!'
@pytest.mark.parametrize(('filename'), [
'unicode.txt',
'unicode.txt.gz',
pytest.param('unicode.txt.bz2', marks=pytest.mark.xfail(not HAS_BZ2, reason='no bz2 support')),
pytest.param('unicode.txt.xz', marks=pytest.mark.xfail(not HAS_XZ, reason='no lzma support'))])
def test_read_unicode(filename):
from astropy.utils.data import get_pkg_data_contents
contents = get_pkg_data_contents(os.path.join('data', filename), encoding='utf-8')
assert isinstance(contents, str)
contents = contents.splitlines()[1]
assert contents == "האסטרונומי פייתון"
contents = get_pkg_data_contents(os.path.join('data', filename), encoding='binary')
assert isinstance(contents, bytes)
x = contents.splitlines()[1]
assert x == (b"\xff\xd7\x94\xd7\x90\xd7\xa1\xd7\x98\xd7\xa8\xd7\x95\xd7\xa0"
b"\xd7\x95\xd7\x9e\xd7\x99 \xd7\xa4\xd7\x99\xd7\x99\xd7\xaa\xd7\x95\xd7\x9f"[1:])
def test_compressed_stream():
import base64
gzipped_data = (b"H4sICIxwG1AAA2xvY2FsLmRhdAALycgsVkjLzElVANKlxakpCpl5CiUZqQ"
b"olqcUl8Tn5yYk58SmJJYnxWmCRzLx0hbTSvOSSzPy8Yi5nf78QV78QLgAlLytnRQAAAA==")
gzipped_data = base64.b64decode(gzipped_data)
assert isinstance(gzipped_data, bytes)
class FakeStream:
"""
A fake stream that has `read`, but no `seek`.
"""
def __init__(self, data):
self.data = data
def read(self, nbytes=None):
if nbytes is None:
result = self.data
self.data = b''
else:
result = self.data[:nbytes]
self.data = self.data[nbytes:]
return result
stream = FakeStream(gzipped_data)
with get_readable_fileobj(stream, encoding='binary') as f:
f.readline()
assert f.read().rstrip() == b'CONTENT'
@pytest.mark.remote_data(source='astropy')
def test_invalid_location_download():
"""
Check that download_file raises URLError rather than AttributeError,
since its error-handling path re-wraps the original exception.
"""
from astropy.utils.data import download_file
with pytest.raises(urllib.error.URLError):
download_file('http://www.astropy.org/nonexistentfile')
def test_invalid_location_download_noconnect():
"""
Check that download_file raises OSError when network access is blocked
"""
from astropy.utils.data import download_file
# This should invoke socket's monkeypatched failure
with pytest.raises(OSError):
download_file('http://astropy.org/nonexistentfile')
@pytest.mark.remote_data(source='astropy')
def test_is_url_in_cache():
from astropy.utils.data import download_file, is_url_in_cache
assert not is_url_in_cache('http://astropy.org/nonexistentfile')
download_file(TESTURL, cache=True, show_progress=False)
assert is_url_in_cache(TESTURL)
def test_get_readable_fileobj_cleans_up_temporary_files(tmpdir, monkeypatch):
"""checks that get_readable_fileobj leaves no temporary files behind"""
# Create a 'file://' URL pointing to a path on the local filesystem
url = 'file://' + urllib.request.pathname2url(TESTLOCAL)
# Save temporary files to a known location
monkeypatch.setattr(tempfile, 'tempdir', str(tmpdir))
# Call get_readable_fileobj() as a context manager
with get_readable_fileobj(url):
pass
# Get listing of files in temporary directory
tempdir_listing = tmpdir.listdir()
# Assert that no temporary files were left behind after the
# get_readable_fileobj() context manager finished running
assert len(tempdir_listing) == 0
def test_path_objects_get_readable_fileobj():
fpath = pathlib.Path(TESTLOCAL)
with get_readable_fileobj(fpath) as f:
assert f.read().rstrip() == ('This file is used in the test_local_data_* '
'testing functions\nCONTENT')
def test_nested_get_readable_fileobj():
"""Ensure fileobj state is as expected when get_readable_fileobj()
is called inside another get_readable_fileobj().
"""
with get_readable_fileobj(TESTLOCAL, encoding='binary') as fileobj:
with get_readable_fileobj(fileobj, encoding='UTF-8') as fileobj2:
fileobj2.seek(1)
fileobj.seek(1)
# Theoretically, fileobj2 should be closed already here but it is not.
# See https://github.com/astropy/astropy/pull/8675.
# UNCOMMENT THIS WHEN PYTHON FINALLY LETS IT HAPPEN.
#assert fileobj2.closed
assert fileobj.closed and fileobj2.closed
|
bdc9e0ce7dd7f9e949ba04b42dd42674abfb3949e01446bde7ba21e7b0a44787 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
from astropy.utils import minversion
__all__ = ['NUMPY_LT_1_14', 'NUMPY_LT_1_14_1', 'NUMPY_LT_1_14_2',
'NUMPY_LT_1_16', 'NUMPY_LT_1_17']
# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)
NUMPY_LT_1_14 = not minversion('numpy', '1.14')
NUMPY_LT_1_14_1 = not minversion('numpy', '1.14.1')
NUMPY_LT_1_14_2 = not minversion('numpy', '1.14.2')
NUMPY_LT_1_16 = not minversion('numpy', '1.16')
NUMPY_LT_1_17 = not minversion('numpy', '1.17')
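# --- Illustrative usage sketch (not part of the upstream module) -------------
# These flags are imported by other astropy modules to gate version-dependent
# workarounds.  The function below is a hypothetical example of such a
# consumer, not real astropy code:
def _example_numpy_compat_branch(value):
    """Hypothetical example of branching on a NUMPY_LT_* flag."""
    if NUMPY_LT_1_14:
        # Older numpy printed floats differently; force a stable format here.
        return '{0:.16g}'.format(value)
    return repr(value)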
|
21e916fd9a3bc58eaf7f069a32b9c033f2314b8b91696a2e870dd4a14fed62a6 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import io
import pytest
try:
import matplotlib.pyplot as plt
except ImportError:
HAS_PLT = False
else:
HAS_PLT = True
from astropy import units as u
from astropy.coordinates import Angle
from astropy.visualization.units import quantity_support
@pytest.mark.skipif('not HAS_PLT')
def test_units():
plt.figure()
with quantity_support():
buff = io.BytesIO()
plt.plot([1, 2, 3] * u.m, [3, 4, 5] * u.kg, label='label')
plt.plot([105, 210, 315] * u.cm, [3050, 3025, 3010] * u.g)
plt.legend()
# Also test fill_between, which requires actual conversion to ndarray
# with numpy >=1.10 (#4654).
plt.fill_between([1, 3] * u.m, [3, 5] * u.kg, [3050, 3010] * u.g)
plt.savefig(buff, format='svg')
assert plt.gca().xaxis.get_units() == u.m
assert plt.gca().yaxis.get_units() == u.kg
plt.clf()
@pytest.mark.skipif('not HAS_PLT')
def test_units_errorbar():
pytest.importorskip("matplotlib", minversion="2.2")
plt.figure()
with quantity_support():
x = [1, 2, 3] * u.s
y = [1, 2, 3] * u.m
yerr = [3, 2, 1] * u.cm
fig, ax = plt.subplots()
ax.errorbar(x, y, yerr=yerr)
assert ax.xaxis.get_units() == u.s
assert ax.yaxis.get_units() == u.m
plt.clf()
@pytest.mark.skipif('not HAS_PLT')
def test_incompatible_units():
# NOTE: minversion check does not work properly for matplotlib dev.
try:
# https://github.com/matplotlib/matplotlib/pull/13005
from matplotlib.units import ConversionError
except ImportError:
err_type = u.UnitConversionError
else:
err_type = ConversionError
plt.figure()
with quantity_support():
plt.plot([1, 2, 3] * u.m)
with pytest.raises(err_type):
plt.plot([105, 210, 315] * u.kg)
plt.clf()
@pytest.mark.skipif('not HAS_PLT')
def test_quantity_subclass():
"""Check that subclasses are recognized.
This sadly is not done by matplotlib.units itself, though
there is a PR to change it:
https://github.com/matplotlib/matplotlib/pull/13536
"""
plt.figure()
with quantity_support():
plt.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
plt.scatter([105, 210, 315] * u.arcsec, [3050, 3025, 3010] * u.g)
plt.plot(Angle([105, 210, 315], u.arcsec), [3050, 3025, 3010] * u.g)
assert plt.gca().xaxis.get_units() == u.deg
assert plt.gca().yaxis.get_units() == u.kg
@pytest.mark.skipif('not HAS_PLT')
def test_nested():
with quantity_support():
with quantity_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.deg), [3, 4, 5] * u.kg)
assert ax.xaxis.get_units() == u.deg
assert ax.yaxis.get_units() == u.kg
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(Angle([1, 2, 3], u.arcsec), [3, 4, 5] * u.pc)
assert ax.xaxis.get_units() == u.arcsec
assert ax.yaxis.get_units() == u.pc
|
715c366f8f0c70499f85dac2e0715d2532af98b5626ae587871edb8b1aacfd66 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
pytest.importorskip('matplotlib') # noqa
import matplotlib.pyplot as plt
from astropy.time import Time
from astropy.visualization.time import time_support
def get_ticklabels(axis):
axis.figure.canvas.draw()
return [x.get_text() for x in axis.get_ticklabels()]
# We first check that we get the expected labels for different time intervals
# for standard ISO formatting. This is a way to check both the locator and
# formatter code.
RANGE_CASES = [
# Interval of many years
(('2014-03-22T12:30:30.9', '2077-03-22T12:30:32.1'),
['2020-01-01',
'2040-01-01',
'2060-01-01']),
# Interval of a few years
(('2014-03-22T12:30:30.9', '2017-03-22T12:30:32.1'),
['2015-01-01',
'2016-01-01',
'2017-01-01']),
# Interval of just under a year
(('2014-03-22T12:30:30.9', '2015-01-22T12:30:32.1'),
['2014-05-01',
'2014-10-01']),
# Interval of just over a month
(('2014-03-22T12:30:30.9', '2014-04-23T12:30:32.1'),
['2014-04-01']),
# Interval of just under a month
(('2014-03-22T12:30:30.9', '2014-04-21T12:30:32.1'),
['2014-03-24',
'2014-04-03',
'2014-04-13']),
# Interval of just over an hour
(('2014-03-22T12:30:30.9', '2014-03-22T13:31:30.9'),
['2014-03-22T12:40:00.000',
'2014-03-22T13:00:00.000',
'2014-03-22T13:20:00.000']),
# Interval of just under an hour
(('2014-03-22T12:30:30.9', '2014-03-22T13:28:30.9'),
['2014-03-22T12:40:00.000',
'2014-03-22T13:00:00.000',
'2014-03-22T13:20:00.000']),
# Interval of a few minutes
(('2014-03-22T12:30:30.9', '2014-03-22T12:38:30.9'),
['2014-03-22T12:33:00.000',
'2014-03-22T12:36:00.000']),
# Interval of a few seconds
(('2014-03-22T12:30:30.9', '2014-03-22T12:30:40.9'),
['2014-03-22T12:30:33.000',
'2014-03-22T12:30:36.000',
'2014-03-22T12:30:39.000']),
# Interval of a couple of seconds
(('2014-03-22T12:30:30.9', '2014-03-22T12:30:32.1'),
['2014-03-22T12:30:31.000',
'2014-03-22T12:30:31.500',
'2014-03-22T12:30:32.000']),
# Interval of under a second
(('2014-03-22T12:30:30.89', '2014-03-22T12:30:31.19'),
['2014-03-22T12:30:30.900',
'2014-03-22T12:30:31.000',
'2014-03-22T12:30:31.100']),
]
@pytest.mark.parametrize(('interval', 'expected'), RANGE_CASES)
def test_formatter_locator(interval, expected):
# Check that the ticks and labels returned for the above cases are correct.
with time_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time(interval[0]), Time(interval[1]))
assert get_ticklabels(ax.xaxis) == expected
FORMAT_CASES = [
('byear', ['2020', '2040', '2060']),
('byear_str', ['B2020.000', 'B2040.000', 'B2060.000']),
('cxcsec', ['1000000000', '1500000000', '2000000000', '2500000000']),
('decimalyear', ['2020', '2040', '2060']),
('fits', ['2020-01-01T00:00:00.000', '2040-01-01T00:00:00.000', '2060-01-01T00:00:00.000']),
('gps', ['1500000000', '2000000000', '2500000000', '3000000000']),
('iso', ['2020-01-01 00:00:00.000', '2040-01-01 00:00:00.000', '2060-01-01 00:00:00.000']),
('isot', ['2020-01-01T00:00:00.000', '2040-01-01T00:00:00.000', '2060-01-01T00:00:00.000']),
('jd', ['2458000', '2464000', '2470000', '2476000']),
('jyear', ['2020', '2040', '2060']),
('jyear_str', ['J2020.000', 'J2040.000', 'J2060.000']),
('mjd', ['60000', '66000', '72000', '78000']),
('plot_date', ['738000', '744000', '750000', '756000']),
('unix', ['1500000000', '2000000000', '2500000000', '3000000000']),
('yday', ['2020:001:00:00:00.000', '2040:001:00:00:00.000', '2060:001:00:00:00.000']),
]
@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES)
def test_formats(format, expected):
# Check that the locators/formatters work fine for all time formats
with time_support(format=format, simplify=False):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9'), Time('2077-03-22T12:30:32.1'))
assert get_ticklabels(ax.xaxis) == expected
assert ax.get_xlabel() == 'Time ({0})'.format(format)
@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES)
def test_auto_formats(format, expected):
# Check that the format/scale is taken from the first time used.
with time_support(simplify=False):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time(Time('2014-03-22T12:30:30.9'), format=format),
Time('2077-03-22T12:30:32.1'))
assert get_ticklabels(ax.xaxis) == expected
assert ax.get_xlabel() == 'Time ({0})'.format(format)
FORMAT_CASES_SIMPLIFY = [
('fits', ['2020-01-01', '2040-01-01', '2060-01-01']),
('iso', ['2020-01-01', '2040-01-01', '2060-01-01']),
('isot', ['2020-01-01', '2040-01-01', '2060-01-01']),
('yday', ['2020', '2040', '2060']),
]
@pytest.mark.parametrize(('format', 'expected'), FORMAT_CASES_SIMPLIFY)
def test_formats_simplify(format, expected):
# Check the use of the simplify= option
with time_support(format=format, simplify=True):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9'), Time('2077-03-22T12:30:32.1'))
assert get_ticklabels(ax.xaxis) == expected
def test_plot():
# Make sure that plot() works properly
with time_support():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9'), Time('2077-03-22T12:30:32.1'))
ax.plot(Time(['2015-03-22T12:30:30.9',
'2018-03-22T12:30:30.9',
'2021-03-22T12:30:30.9']))
def test_nested():
with time_support(format='iso', simplify=False):
with time_support(format='yday', simplify=True):
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9'), Time('2077-03-22T12:30:32.1'))
assert get_ticklabels(ax.xaxis) == ['2020', '2040', '2060']
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(Time('2014-03-22T12:30:30.9'), Time('2077-03-22T12:30:32.1'))
assert get_ticklabels(ax.xaxis) == ['2020-01-01 00:00:00.000',
'2040-01-01 00:00:00.000',
'2060-01-01 00:00:00.000']
|
0775543fd1003fb5faa5f2168b2f093fcca41e21472165579c950825370bce83 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Rectangle
from matplotlib import rc_context
from astropy import units as u
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy.visualization.wcsaxes.patches import SphericalCircle
from astropy.visualization.wcsaxes import WCSAxes
from . import datasets
from astropy.tests.image_tests import IMAGE_REFERENCE_DIR
from astropy.visualization.wcsaxes.frame import EllipticalFrame
class BaseImageTests:
@classmethod
def setup_class(cls):
cls._data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
msx_header = os.path.join(cls._data_dir, 'msx_header')
cls.msx_header = fits.Header.fromtextfile(msx_header)
rosat_header = os.path.join(cls._data_dir, 'rosat_header')
cls.rosat_header = fits.Header.fromtextfile(rosat_header)
twoMASS_k_header = os.path.join(cls._data_dir, '2MASS_k_header')
cls.twoMASS_k_header = fits.Header.fromtextfile(twoMASS_k_header)
cube_header = os.path.join(cls._data_dir, 'cube_header')
cls.cube_header = fits.Header.fromtextfile(cube_header)
slice_header = os.path.join(cls._data_dir, 'slice_header')
cls.slice_header = fits.Header.fromtextfile(slice_header)
class TestBasic(BaseImageTests):
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_image_plot(self):
# Test for plotting image and also setting values of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=1.5, style={})
@pytest.mark.parametrize('axisbelow', [True, False, 'line'])
def test_axisbelow(self, axisbelow):
# Test that tick marks, labels, and gridlines are drawn with the
# correct zorder controlled by the axisbelow property.
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=WCS(self.msx_header), aspect='equal')
ax.set_axisbelow(axisbelow)
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_ticks([-0.30, 0., 0.20] * u.degree, size=5, width=1)
ax.grid()
# Add an image (default zorder=0).
ax.imshow(np.zeros((64, 64)))
# Add a patch (default zorder=1).
r = Rectangle((30., 50.), 60., 50., facecolor='green', edgecolor='red')
ax.add_patch(r)
# Add a line (default zorder=2).
ax.plot([32, 128], [32, 128], linewidth=10)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_contour_overlay(self):
# Test for overlaying contours on images
hdu_msx = datasets.fetch_msx_hdu()
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contour(hdu_msx.data, transform=ax.get_transform(wcs_msx),
colors='orange', levels=[2.5e-5, 5e-5, 1.e-4])
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0., 720.)
ax.set_ylim(0., 720.)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_contourf_overlay(self):
# Test for overlaying filled contours on images
hdu_msx = datasets.fetch_msx_hdu()
wcs_msx = WCS(self.msx_header)
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
# Overplot contour
ax.contourf(hdu_msx.data, transform=ax.get_transform(wcs_msx),
levels=[2.5e-5, 5e-5, 1.e-4])
ax.coords[0].set_ticks(size=5, width=1)
ax.coords[1].set_ticks(size=5, width=1)
ax.set_xlim(0., 720.)
ax.set_ylim(0., 720.)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_overlay_features_image(self):
# Test for overlaying grid, changing format of ticks, setting spacing
# and number of ticks
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.25, 0.25, 0.65, 0.65],
projection=WCS(self.msx_header), aspect='equal')
# Change the format of the ticks
ax.coords[0].set_major_formatter('dd:mm:ss')
ax.coords[1].set_major_formatter('dd:mm:ss.ssss')
# Overlay grid on image
ax.grid(color='red', alpha=1.0, lw=1, linestyle='dashed')
# Set the spacing of ticks on the 'glon' axis to 4 arcsec
ax.coords['glon'].set_ticks(spacing=4 * u.arcsec, size=5, width=1)
# Set the number of ticks on the 'glat' axis to 9
ax.coords['glat'].set_ticks(number=9, size=5, width=1)
# Set labels on axes
ax.coords['glon'].set_axislabel('Galactic Longitude', minpad=1.6)
ax.coords['glat'].set_axislabel('Galactic Latitude', minpad=-0.75)
# Change the frame linewidth and color
ax.coords.frame.set_color('red')
ax.coords.frame.set_linewidth(2)
assert ax.coords.frame.get_color() == 'red'
assert ax.coords.frame.get_linewidth() == 2
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_curvilinear_grid_patches_image(self):
# Overlay curvilinear grid and patches on image
fig = plt.figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.rosat_header), aspect='equal')
ax.set_xlim(-0.5, 479.5)
ax.set_ylim(-0.5, 239.5)
ax.grid(color='black', alpha=1.0, lw=1, linestyle='dashed')
p = Circle((300, 100), radius=40, ec='yellow', fc='none')
ax.add_patch(p)
p = Circle((30., 20.), radius=20., ec='orange', fc='none',
transform=ax.get_transform('world'))
ax.add_patch(p)
p = Circle((60., 50.), radius=20., ec='red', fc='none',
transform=ax.get_transform('fk5'))
ax.add_patch(p)
p = Circle((40., 60.), radius=20., ec='green', fc='none',
transform=ax.get_transform('galactic'))
ax.add_patch(p)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_cube_slice_image(self):
# Test for cube slicing
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_axislabel('Velocity m/s')
ax.coords[1].set_ticks(spacing=0.2 * u.deg, width=1)
ax.coords[2].set_ticks(spacing=400 * u.m / u.s, width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid')
ax.coords[2].grid(grid_type='contours', color='red', linestyle='solid')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_cube_slice_image_lonlat(self):
# Test for cube slicing. Here we test with longitude and latitude since
# there is some longitude-specific code in _update_grid_contour.
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=('x', 'y', 50), aspect='equal')
ax.set_xlim(-0.5, 106.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[0].grid(grid_type='contours', color='blue', linestyle='solid')
ax.coords[1].grid(grid_type='contours', color='red', linestyle='solid')
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_plot_coord(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord(266 * u.deg, -29 * u.deg)
ax.plot_coord(c, 'o')
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_plot_line(self):
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes([0.15, 0.15, 0.8, 0.8],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 720.5)
ax.set_ylim(-0.5, 720.5)
c = SkyCoord([266, 266.8] * u.deg, [-29, -28.9] * u.deg)
ax.plot_coord(c)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_changed_axis_units(self):
# Test to see if changing the units of axis works
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_major_formatter('x.xx')
ax.coords[2].set_format_unit(u.km / u.s)
ax.coords[2].set_axislabel('Velocity km/s')
ax.coords[1].set_ticks(width=1)
ax.coords[2].set_ticks(width=1)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_minor_ticks(self):
# Test for drawing minor ticks
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8],
projection=WCS(self.cube_header),
slices=(50, 'y', 'x'), aspect='equal')
ax.set_xlim(-0.5, 52.5)
ax.set_ylim(-0.5, 106.5)
ax.coords[2].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
ax.coords[2].display_minor_ticks(True)
ax.coords[1].display_minor_ticks(True)
ax.coords[2].set_minor_frequency(3)
ax.coords[1].set_minor_frequency(10)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_ticks_labels(self):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.1, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.coords[0].set_ticks(size=10, color='blue', alpha=0.2, width=1)
ax.coords[1].set_ticks(size=20, color='red', alpha=0.9, width=1)
ax.coords[0].set_ticks_position('all')
ax.coords[1].set_ticks_position('all')
ax.coords[0].set_axislabel('X-axis', size=20)
ax.coords[1].set_axislabel('Y-axis', color='green', size=25,
weight='regular', style='normal',
family='cmtt10')
ax.coords[0].set_axislabel_position('t')
ax.coords[1].set_axislabel_position('r')
ax.coords[0].set_ticklabel(color='purple', size=15, alpha=1,
weight='light', style='normal',
family='cmss10')
ax.coords[1].set_ticklabel(color='black', size=18, alpha=0.9,
weight='bold', family='cmr10')
ax.coords[0].set_ticklabel_position('all')
ax.coords[1].set_ticklabel_position('r')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_rcparams(self):
# Test custom rcParams
with rc_context({
'axes.labelcolor': 'purple',
'axes.labelsize': 14,
'axes.labelweight': 'bold',
'axes.linewidth': 3,
'axes.facecolor': '0.5',
'axes.edgecolor': 'green',
'xtick.color': 'red',
'xtick.labelsize': 8,
'xtick.direction': 'in',
'xtick.minor.visible': True,
'xtick.minor.size': 5,
'xtick.major.size': 20,
'xtick.major.width': 3,
'xtick.major.pad': 10,
'grid.color': 'blue',
'grid.linestyle': ':',
'grid.linewidth': 1,
'grid.alpha': 0.5}):
fig = plt.figure(figsize=(6, 6))
ax = WCSAxes(fig, [0.15, 0.1, 0.7, 0.7], wcs=None)
fig.add_axes(ax)
ax.set_xlim(-0.5, 2)
ax.set_ylim(-0.5, 2)
ax.grid()
ax.set_xlabel('X label')
ax.set_ylabel('Y label')
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_tick_angles(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels. Addresses #45, #46.
w = WCS()
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color='gray', alpha=0.5, linestyle='solid')
ax.coords['ra'].set_ticks(color='red', size=20)
ax.coords['dec'].set_ticks(color='red', size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_tick_angles_non_square_axes(self):
# Test that tick marks point in the correct direction, even when the
# axes limits extend only over a few FITS pixels, and the axes are
# non-square.
w = WCS()
w.wcs.ctype = ['RA---TAN', 'DEC--TAN']
w.wcs.crval = [90, 70]
w.wcs.cdelt = [16, 16]
w.wcs.crpix = [1, 1]
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
fig = plt.figure(figsize=(6, 3))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], projection=w)
ax.set_xlim(1, -1)
ax.set_ylim(-1, 1)
ax.grid(color='gray', alpha=0.5, linestyle='solid')
ax.coords['ra'].set_ticks(color='red', size=20)
ax.coords['dec'].set_ticks(color='red', size=20)
# In previous versions, all angle axes defaulted to being displayed in
# degrees. We now automatically show RA axes in hour angle units, but
# for backward-compatibility with previous reference images we
# explicitly use degrees here.
ax.coords[0].set_format_unit(u.degree)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_set_coord_type(self):
# Test for setting coord_type
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6],
projection=WCS(self.msx_header),
aspect='equal')
ax.set_xlim(-0.5, 148.5)
ax.set_ylim(-0.5, 148.5)
ax.coords[0].set_coord_type('scalar')
ax.coords[1].set_coord_type('scalar')
ax.coords[0].set_major_formatter('x.xxx')
ax.coords[1].set_major_formatter('x.xxx')
ax.coords[0].set_ticklabel(exclude_overlapping=True)
ax.coords[1].set_ticklabel(exclude_overlapping=True)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_ticks_regression(self):
# Regression test for a bug that caused ticks aligned exactly with a
# sampled frame point to not appear. This also checks that tick labels
# don't get added more than once, and that no error occurs when e.g.
# the top part of the frame is all at the same coordinate as one of the
# potential ticks (which causes the tick angle calculation to return
# NaN).
wcs = WCS(self.slice_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5],
projection=wcs, aspect='auto')
limits = wcs.wcs_world2pix([0, 0], [35e3, 80e3], 0)[1]
ax.set_ylim(*limits)
ax.coords[0].set_ticks(spacing=0.002 * u.deg)
ax.coords[1].set_ticks(spacing=5 * u.km / u.s)
ax.coords[0].set_ticklabel(alpha=0.5) # to see multiple labels
ax.coords[1].set_ticklabel(alpha=0.5)
ax.coords[0].set_ticklabel_position('all')
ax.coords[1].set_ticklabel_position('all')
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_axislabels_regression(self):
# Regression test for a bug that meant that if tick labels were made
# invisible with ``set_visible(False)``, they were still added to the
# list of bounding boxes for tick labels, but with default values of 0
# to 1, which caused issues.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='auto')
ax.coords[0].set_axislabel("Label 1")
ax.coords[1].set_axislabel("Label 2")
ax.coords[1].set_axislabel_visibility_rule('always')
ax.coords[1].ticklabels.set_visible(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_noncelestial_angular(self, tmpdir):
        # Regression test for a bug triggered when passing a WCS that had
        # angular axes and using set_coord_type to set the coordinates to
        # longitude/latitude, but where the WCS wasn't recognized as celestial.
        # In that case the WCS units are not converted to deg, so we can't
        # assume that the transform will always return degrees.
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['solar-x', 'solar-y']
wcs.wcs.cunit = ['arcsec', 'arcsec']
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1, projection=wcs)
ax.imshow(np.zeros([1024, 1024]), origin='lower')
ax.coords[0].set_coord_type('longitude', coord_wrap=180)
ax.coords[1].set_coord_type('latitude')
ax.coords[0].set_major_formatter('s.s')
ax.coords[1].set_major_formatter('s.s')
ax.coords[0].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.coords[1].set_format_unit(u.arcsec, show_decimal_unit=False)
ax.grid(color='white', ls='solid')
# Force drawing (needed for format_coord)
fig.savefig(tmpdir.join('nothing').strpath)
assert ax.format_coord(512, 512) == '513.0 513.0 (world)'
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
savefig_kwargs={'bbox_inches': 'tight'},
tolerance=0, style={})
def test_patches_distortion(self, tmpdir):
# Check how patches get distorted (and make sure that scatter markers
# and SphericalCircle don't)
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.25, 0.25, 0.5, 0.5], projection=wcs, aspect='equal')
# Pixel coordinates
r = Rectangle((30., 50.), 60., 50., edgecolor='green', facecolor='none')
ax.add_patch(r)
# FK5 coordinates
r = Rectangle((266.4, -28.9), 0.3, 0.3, edgecolor='cyan', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(r)
# FK5 coordinates
c = Circle((266.4, -29.1), 0.15, edgecolor='magenta', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(c)
# Pixel coordinates
ax.scatter([40, 100, 130], [30, 130, 60], s=100, edgecolor='red', facecolor=(1, 0, 0, 0.5))
# World coordinates (should not be distorted)
ax.scatter(266.78238, -28.769255, transform=ax.get_transform('fk5'), s=300,
edgecolor='red', facecolor='none')
# World coordinates (should not be distorted)
r = SphericalCircle((266.4 * u.deg, -29.1 * u.deg), 0.15 * u.degree,
edgecolor='purple', facecolor='none',
transform=ax.get_transform('fk5'))
ax.add_patch(r)
ax.coords[0].set_ticklabel_visible(False)
ax.coords[1].set_ticklabel_visible(False)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_elliptical_frame(self):
# Regression test for a bug (astropy/astropy#6063) that caused labels to
# be incorrectly simplified.
wcs = WCS(self.msx_header)
fig = plt.figure(figsize=(5, 3))
ax = fig.add_axes([0.2, 0.2, 0.6, 0.6], projection=wcs, frame_class=EllipticalFrame)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_hms_labels(self):
        # This tests the appearance of the hms superscripts in tick labels
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.3, 0.2, 0.65, 0.6],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={'text.usetex': True})
def test_latex_labels(self):
fig = plt.figure(figsize=(3, 3))
ax = fig.add_axes([0.3, 0.2, 0.65, 0.6],
projection=WCS(self.twoMASS_k_header),
aspect='equal')
ax.set_xlim(-0.5, 0.5)
ax.set_ylim(-0.5, 0.5)
ax.coords[0].set_ticks(spacing=0.2 * 15 * u.arcsec)
return fig
@pytest.mark.remote_data(source='astropy')
@pytest.mark.mpl_image_compare(baseline_dir=IMAGE_REFERENCE_DIR,
tolerance=0, style={})
def test_tick_params(self):
        # This is a test to make sure that tick_params works correctly. We try
        # to test as much as possible with a single reference image.
wcs = WCS()
wcs.wcs.ctype = ['lon', 'lat']
fig = plt.figure(figsize=(6, 6))
# The first subplot tests:
# - that plt.tick_params works
# - that by default both axes are changed
# - changing the tick direction and appearance, the label appearance and padding
ax = fig.add_subplot(2, 2, 1, projection=wcs)
plt.tick_params(direction='in', length=20, width=5, pad=6, labelsize=6,
color='red', labelcolor='blue')
# The second subplot tests:
# - that specifying grid parameters doesn't actually cause the grid to
# be shown (as expected)
# - that axis= can be given integer coordinates or their string name
# - that the tick positioning works (bottom/left/top/right)
# Make sure that we can pass things that can index coords
ax = fig.add_subplot(2, 2, 2, projection=wcs)
plt.tick_params(axis=0, direction='in', length=20, width=5, pad=4, labelsize=6,
color='red', labelcolor='blue', bottom=True, grid_color='purple')
plt.tick_params(axis='lat', direction='out', labelsize=8,
color='blue', labelcolor='purple', left=True, right=True,
grid_color='red')
# The third subplot tests:
# - that ax.tick_params works
# - that the grid has the correct settings once shown explicitly
# - that we can use axis='x' and axis='y'
ax = fig.add_subplot(2, 2, 3, projection=wcs)
ax.tick_params(axis='x', direction='in', length=20, width=5, pad=20, labelsize=6,
color='red', labelcolor='blue', bottom=True,
grid_color='purple')
ax.tick_params(axis='y', direction='out', labelsize=8,
color='blue', labelcolor='purple', left=True, right=True,
grid_color='red')
plt.grid()
# The final subplot tests:
# - that we can use tick_params on a specific coordinate
# - that the label positioning can be customized
# - that the colors argument works
# - that which='minor' works
ax = fig.add_subplot(2, 2, 4, projection=wcs)
ax.coords[0].tick_params(length=4, pad=2, colors='orange', labelbottom=True,
labeltop=True, labelsize=10)
ax.coords[1].display_minor_ticks(True)
ax.coords[1].tick_params(which='minor', length=6)
return fig
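
# --- Illustrative sketch (not part of the original test suite) --------------
# The image tests above depend on remote reference headers (``self.msx_header``
# and friends).  The hypothetical helper below shows the same plotting calls
# (``plot_coord``, ``get_transform`` and ``SphericalCircle``) on a small
# synthetic TAN WCS, assuming only that astropy and matplotlib are installed.
def _example_standalone_wcsaxes_plot():
    import matplotlib.pyplot as plt
    from astropy import units as u
    from astropy.coordinates import SkyCoord
    from astropy.wcs import WCS
    from astropy.visualization.wcsaxes import SphericalCircle

    # Synthetic celestial WCS; the reference values are chosen arbitrarily
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    wcs.wcs.crval = [266.4, -29.0]
    wcs.wcs.crpix = [50., 50.]
    wcs.wcs.cdelt = [-0.01, 0.01]

    fig = plt.figure(figsize=(4, 4))
    ax = fig.add_subplot(1, 1, 1, projection=wcs)
    ax.set_xlim(-0.5, 99.5)
    ax.set_ylim(-0.5, 99.5)

    # Overlay a marker and a patch defined in world (FK5) coordinates
    ax.plot_coord(SkyCoord(266.4 * u.deg, -29.0 * u.deg), 'o')
    ax.add_patch(SphericalCircle((266.4 * u.deg, -29.0 * u.deg), 0.2 * u.deg,
                                 edgecolor='red', facecolor='none',
                                 transform=ax.get_transform('fk5')))
    return fig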
|
3355b6aee568d30f3735a5d4882fcd02d1a7637f1062162cde34da5aaed90899 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy._erfa import core as erfa
from astropy.tests.helper import catch_warnings
def test_erfa_wrapper():
"""
Runs a set of tests that mostly make sure vectorization is
working as expected
"""
jd = np.linspace(2456855.5, 2456855.5+1.0/24.0/60.0, 60*2+1)
ra = np.linspace(0.0, np.pi*2.0, 5)
dec = np.linspace(-np.pi/2.0, np.pi/2.0, 4)
aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, jd, 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5)
assert aob.shape == (121,)
aob, zob, hob, dob, rob, eo = erfa.atco13(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, jd[0], 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5)
assert aob.shape == ()
aob, zob, hob, dob, rob, eo = erfa.atco13(ra[:, None, None], dec[None, :, None], 0.0, 0.0, 0.0, 0.0, jd[None, None, :], 0.0, 0.0, 0.0, np.pi/4.0, 0.0, 0.0, 0.0, 1014.0, 0.0, 0.0, 0.5)
    assert aob.shape == (5, 4, 121)
iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd, 0.0)
assert iy.shape == (121,)
assert ihmsf.shape == (121,)
assert ihmsf.dtype == erfa.dt_hmsf
iy, im, id, ihmsf = erfa.d2dtf("UTC", 3, jd[0], 0.0)
assert iy.shape == ()
assert ihmsf.shape == ()
assert ihmsf.dtype == erfa.dt_hmsf
def test_angle_ops():
sign, idmsf = erfa.a2af(6, -np.pi)
assert sign == b'-'
assert idmsf.item() == (180, 0, 0, 0)
sign, ihmsf = erfa.a2tf(6, np.pi)
assert sign == b'+'
assert ihmsf.item() == (12, 0, 0, 0)
rad = erfa.af2a('-', 180, 0, 0.0)
np.testing.assert_allclose(rad, -np.pi)
rad = erfa.tf2a('+', 12, 0, 0.0)
np.testing.assert_allclose(rad, np.pi)
rad = erfa.anp(3.*np.pi)
np.testing.assert_allclose(rad, np.pi)
rad = erfa.anpm(3.*np.pi)
np.testing.assert_allclose(rad, -np.pi)
sign, ihmsf = erfa.d2tf(1, -1.5)
assert sign == b'-'
assert ihmsf.item() == (36, 0, 0, 0)
days = erfa.tf2d('+', 3, 0, 0.0)
np.testing.assert_allclose(days, 0.125)
def test_spherical_cartesian():
theta, phi = erfa.c2s([0.0, np.sqrt(2.0), np.sqrt(2.0)])
np.testing.assert_allclose(theta, np.pi/2.0)
np.testing.assert_allclose(phi, np.pi/4.0)
theta, phi, r = erfa.p2s([0.0, np.sqrt(2.0), np.sqrt(2.0)])
np.testing.assert_allclose(theta, np.pi/2.0)
np.testing.assert_allclose(phi, np.pi/4.0)
np.testing.assert_allclose(r, 2.0)
pv = np.array(([0.0, np.sqrt(2.0), np.sqrt(2.0)], [1.0, 0.0, 0.0]),
dtype=erfa.dt_pv)
theta, phi, r, td, pd, rd = erfa.pv2s(pv)
np.testing.assert_allclose(theta, np.pi/2.0)
np.testing.assert_allclose(phi, np.pi/4.0)
np.testing.assert_allclose(r, 2.0)
np.testing.assert_allclose(td, -np.sqrt(2.0)/2.0)
np.testing.assert_allclose(pd, 0.0)
np.testing.assert_allclose(rd, 0.0)
c = erfa.s2c(np.pi/2.0, np.pi/4.0)
np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0], atol=1e-14)
c = erfa.s2p(np.pi/2.0, np.pi/4.0, 1.0)
np.testing.assert_allclose(c, [0.0, np.sqrt(2.0)/2.0, np.sqrt(2.0)/2.0], atol=1e-14)
pv = erfa.s2pv(np.pi/2.0, np.pi/4.0, 2.0, np.sqrt(2.0)/2.0, 0.0, 0.0)
np.testing.assert_allclose(pv['p'], [0.0, np.sqrt(2.0), np.sqrt(2.0)], atol=1e-14)
np.testing.assert_allclose(pv['v'], [-1.0, 0.0, 0.0], atol=1e-14)
def test_errwarn_reporting():
"""
Test that the ERFA error reporting mechanism works as it should
"""
# no warning
erfa.dat(1990, 1, 1, 0.5)
# check warning is raised for a scalar
with catch_warnings() as w:
erfa.dat(100, 1, 1, 0.5)
assert len(w) == 1
assert w[0].category == erfa.ErfaWarning
assert '1 of "dubious year (Note 1)"' in str(w[0].message)
# and that the count is right for a vector.
with catch_warnings() as w:
erfa.dat([100, 200, 1990], 1, 1, 0.5)
assert len(w) == 1
assert w[0].category == erfa.ErfaWarning
assert '2 of "dubious year (Note 1)"' in str(w[0].message)
try:
erfa.dat(1990, [1, 34, 2], [1, 1, 43], 0.5)
except erfa.ErfaError as e:
if '1 of "bad day (Note 3)", 1 of "bad month"' not in e.args[0]:
assert False, 'Raised the correct type of error, but wrong message: ' + e.args[0]
try:
erfa.dat(200, [1, 34, 2], [1, 1, 43], 0.5)
except erfa.ErfaError as e:
if 'warning' in e.args[0]:
assert False, 'Raised the correct type of error, but there were warnings mixed in: ' + e.args[0]
def test_vector_inouts():
"""
    Test that ERFA functions consume vector inputs and produce vector outputs correctly
"""
# values are from test_erfa.c t_ab function
pnat = [-0.76321968546737951,
-0.60869453983060384,
-0.21676408580639883]
v = [2.1044018893653786e-5,
-8.9108923304429319e-5,
-3.8633714797716569e-5]
s = 0.99980921395708788
bm1 = 0.99999999506209258
expected = [-0.7631631094219556269,
-0.6087553082505590832,
-0.2167926269368471279]
res = erfa.ab(pnat, v, s, bm1)
assert res.shape == (3,)
np.testing.assert_allclose(res, expected)
res2 = erfa.ab([pnat]*4, v, s, bm1)
assert res2.shape == (4, 3)
np.testing.assert_allclose(res2, [expected]*4)
# here we stride an array and also do it Fortran-order to make sure
# it all still works correctly with non-contig arrays
pnata = np.array(pnat)
arrin = np.array([pnata, pnata/2, pnata/3, pnata/4, pnata/5]*4, order='F')
res3 = erfa.ab(arrin[::5], v, s, bm1)
assert res3.shape == (4, 3)
np.testing.assert_allclose(res3, [expected]*4)
def test_pv_in():
jd1 = 2456165.5
jd2 = 0.401182685
pv = np.empty((), dtype=erfa.dt_pv)
pv['p'] = [-6241497.16,
401346.896,
-1251136.04]
pv['v'] = [-29.264597,
-455.021831,
0.0266151194]
astrom = erfa.apcs13(jd1, jd2, pv)
assert astrom.shape == ()
# values from t_erfa_c
np.testing.assert_allclose(astrom['pmt'], 12.65133794027378508)
np.testing.assert_allclose(astrom['em'], 1.010428384373318379)
np.testing.assert_allclose(astrom['eb'], [0.9012691529023298391,
-.4173999812023068781,
-.1809906511146821008])
np.testing.assert_allclose(astrom['bpn'], np.eye(3))
# first make sure it *fails* if we mess with the input orders
pvbad = np.empty_like(pv)
pvbad['p'], pvbad['v'] = pv['v'], pv['p']
astrombad = erfa.apcs13(jd1, jd2, pvbad)
assert not np.allclose(astrombad['em'], 1.010428384373318379)
pvarr = np.array([pv]*3)
astrom2 = erfa.apcs13(jd1, jd2, pvarr)
assert astrom2.shape == (3,)
np.testing.assert_allclose(astrom2['em'], 1.010428384373318379)
# try striding of the input array to make non-contiguous
pvmatarr = np.array([pv]*9)[::3]
astrom3 = erfa.apcs13(jd1, jd2, pvmatarr)
assert astrom3.shape == (3,)
np.testing.assert_allclose(astrom3['em'], 1.010428384373318379)
def test_structs():
"""
    Check that ERFA C structs are correctly produced and consumed
"""
am, eo = erfa.apci13(2456165.5, [0.401182685, 1])
assert am.shape == (2, )
assert am.dtype == erfa.dt_eraASTROM
assert eo.shape == (2, )
# a few spotchecks from test_erfa.c
np.testing.assert_allclose(am[0]['pmt'], 12.65133794027378508)
np.testing.assert_allclose(am[0]['v'], [0.4289638897157027528e-4,
0.8115034002544663526e-4,
0.3517555122593144633e-4])
ri, di = erfa.atciqz(2.71, 0.174, am[0])
np.testing.assert_allclose(ri, 2.709994899247599271)
np.testing.assert_allclose(di, 0.1728740720983623469)
def test_float32_input():
# Regression test for gh-8615
xyz = np.array([[1, 0, 0], [0.9, 0.1, 0]])
out64 = erfa.p2s(xyz)
out32 = erfa.p2s(xyz.astype('f4'))
np.testing.assert_allclose(out32, out64, rtol=1.e-5)
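
# --- Illustrative sketch (not part of the original test suite) --------------
# A brief, self-contained recap of the broadcasting behaviour exercised above:
# the wrappers behave like numpy ufuncs (scalars in, scalars out; arrays in,
# arrays out), and structured dtypes such as ``erfa.dt_pv`` vectorize too.
def _example_erfa_broadcasting():
    angles = np.linspace(0.0, 4.0 * np.pi, 5)
    wrapped = erfa.anp(angles)  # element-wise normalization, shape preserved
    assert wrapped.shape == angles.shape
    # A (3,)-shaped array of pv structs gives (3,)-shaped outputs
    pv = np.zeros((3,), dtype=erfa.dt_pv)
    pv['p'] = [1.0, 0.0, 0.0]
    theta, phi, r, td, pd, rd = erfa.pv2s(pv)
    assert theta.shape == (3,)
    return wrapped, theta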
|
44f888ceea1ef92eb14fb0dad50facf0cf3a87415b7cf3428f9dcbf02808649d | import os
import abc
import numpy as np
__all__ = ['BaseLowLevelWCS', 'validate_physical_types']
class BaseLowLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the low-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def pixel_n_dim(self):
"""
The number of axes in the pixel coordinate system.
"""
@property
@abc.abstractmethod
def world_n_dim(self):
"""
The number of axes in the world coordinate system.
"""
@property
@abc.abstractmethod
def world_axis_physical_types(self):
"""
An iterable of strings describing the physical type for each world axis.
These should be names from the VO UCD1+ controlled Vocabulary
(http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
arbitrary string. Alternatively, if the physical type is
unknown/undefined, an element can be `None`.
"""
@property
@abc.abstractmethod
def world_axis_units(self):
"""
        An iterable of strings giving the units of the world coordinates for each
axis.
The strings should follow the `IVOA VOUnit standard
<http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
specification document, units that do not follow this standard are still
allowed, but just not recommended).
"""
@abc.abstractmethod
def pixel_to_world_values(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays as
input, and pixel coordinates should be zero-based. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays in units given by
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Note that pixel coordinates are
assumed to be 0 at the center of the first pixel in each dimension. If a
pixel is in a region where the WCS is not defined, NaN can be returned.
The coordinates should be specified in the ``(x, y)`` order, where for
an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def array_index_to_world_values(self, *index_arrays):
"""
Convert array indices to world coordinates.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that
the indices should be given in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def world_to_pixel_values(self, *world_arrays):
"""
Convert world coordinates to pixel coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays as
input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays. Note that pixel
coordinates are assumed to be 0 at the center of the first pixel in each
dimension. If a world coordinate does not have a matching pixel
coordinate, NaN can be returned. The coordinates should be returned in
the ``(x, y)`` order, where for an image, ``x`` is the horizontal
coordinate and ``y`` is the vertical coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def world_to_array_index_values(self, *world_arrays):
"""
Convert world coordinates to array indices.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that
the indices should be returned in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). The indices should be
returned as rounded integers.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@property
@abc.abstractmethod
def world_axis_object_components(self):
"""
A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information
on constructing high-level objects for the world coordinates.
Each element of the list is a tuple with three items:
        * The first element is a name for the world object this world array
corresponds to, which *must* match the string names used in
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might
appear twice because two world arrays might correspond to a single
world object (e.g. a celestial coordinate might have both “ra” and
“dec” arrays, which correspond to a single sky coordinate object).
* The second element is either a string keyword argument name or a
positional index for the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.
        * The third element is a string giving the name of the property
to access on the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in order to get numerical
values.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
@property
@abc.abstractmethod
def world_axis_object_classes(self):
"""
A dictionary giving information on constructing high-level objects for
the world coordinates.
Each key of the dictionary is a string key from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a
tuple with three elements:
* The first element of the tuple must be a class or a string specifying
the fully-qualified name of a class, which will specify the actual
Python object to be created.
        * The second element should be a tuple specifying the positional
          arguments required to initialize the class. If
          `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the
          world coordinates should be passed as a positional argument, this
          tuple should include `None` placeholders for the world coordinates.
* The last tuple element must be a dictionary with the keyword
arguments required to initialize the class.
Note that we don't require the classes to be Astropy classes since there
is no guarantee that Astropy will have all the classes to represent all
kinds of world coordinates. Furthermore, we recommend that the output be
kept as human-readable as possible.
The classes used here should have the ability to do conversions by
passing an instance as the first argument to the same class with
different arguments (e.g. ``Time(Time(...), scale='tai')``). This is
a requirement for the implementation of the high-level interface.
The second and third tuple elements for each value of this dictionary
can in turn contain either instances of classes, or if necessary can
contain serialized versions that should take the same form as the main
classes described above (a tuple with three elements with the fully
qualified name of the class, then the positional arguments and the
keyword arguments). For low-level API objects implemented in Python, we
recommend simply returning the actual objects (not the serialized form)
for optimal performance. Implementations should either always or never
use serialized classes to represent Python objects, and should indicate
which of these they follow using the
`~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
        <https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
    # The following properties have default fallback implementations, so they
    # are not abstract.
@property
def array_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``
order (the convention for arrays in Python).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
"""
return None
@property
def pixel_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``
order (where for an image, ``x`` is the horizontal coordinate and ``y``
is the vertical coordinate).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
If you are interested in getting a shape that is comparable to that of
a Numpy array, you should use
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.
"""
return None
@property
def pixel_bounds(self):
"""
The bounds (in pixel coordinates) inside which the WCS is defined,
as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` ``(min, max)`` tuples.
The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
order. WCS solutions are sometimes only guaranteed to be accurate
within a certain range of pixel values, for example when defining a
WCS that includes fitted distortions. This is an optional property,
and it should return `None` if a shape is not known or relevant.
"""
return None
@property
def axis_correlation_matrix(self):
"""
Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that indicates using booleans
whether a given world coordinate depends on a given pixel coordinate.
This defaults to a matrix where all elements are `True` in the absence of
any further information. For completely independent axes, the diagonal
would be `True` and all other entries `False`.
"""
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
@property
def serialized_classes(self):
"""
Indicates whether Python objects are given in serialized form or as
actual Python objects.
"""
return False
def __str__(self):
# Overall header
s = '{0} Transformation\n\n'.format(self.__class__.__name__)
s += ('This transformation has {0} pixel and {1} world dimensions\n\n'
.format(self.pixel_n_dim, self.world_n_dim))
s += 'Array shape (Numpy order): {0}\n\n'.format(self.array_shape)
# Pixel dimensions table
array_shape = self.array_shape or (0,)
pixel_shape = self.pixel_shape or (None,) * self.pixel_n_dim
# Find largest between header size and value length
pixel_dim_width = max(9, len(str(self.pixel_n_dim)))
pixel_siz_width = max(9, len(str(max(array_shape))))
s += (('{0:' + str(pixel_dim_width) + 's}').format('Pixel Dim') + ' ' +
('{0:' + str(pixel_siz_width) + 's}').format('Data size') + ' ' +
'Bounds\n')
for ipix in range(self.pixel_n_dim):
s += (('{0:' + str(pixel_dim_width) + 'd}').format(ipix) + ' ' +
(" "*5 + str(None) if pixel_shape[ipix] is None else
('{0:' + str(pixel_siz_width) + 'd}').format(pixel_shape[ipix])) + ' ' +
'{0:s}'.format(str(None if self.pixel_bounds is None else self.pixel_bounds[ipix]) + '\n'))
s += '\n'
# World dimensions table
# Find largest between header size and value length
world_dim_width = max(9, len(str(self.world_n_dim)))
world_typ_width = max(13, max(len(x) if x is not None else 0 for x in self.world_axis_physical_types))
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') + ' ' +
('{0:' + str(world_typ_width) + 's}').format('Physical Type') + ' ' +
'Units\n')
for iwrl in range(self.world_n_dim):
if self.world_axis_physical_types[iwrl] is not None:
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) + ' ' +
('{0:' + str(world_typ_width) + 's}').format(self.world_axis_physical_types[iwrl]) + ' ' +
'{0:s}'.format(self.world_axis_units[iwrl] + '\n'))
else:
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) + ' ' +
('{0:' + str(world_typ_width) + 's}').format('None') + ' ' +
'{0:s}'.format('unknown' + '\n'))
s += '\n'
# Axis correlation matrix
pixel_dim_width = max(3, len(str(self.world_n_dim)))
s += 'Correlation between pixel and world axes:\n\n'
s += (' ' * world_dim_width + ' ' +
('{0:^' + str(self.pixel_n_dim * 5 - 2) + 's}').format('Pixel Dim') +
'\n')
s += (('{0:' + str(world_dim_width) + 's}').format('World Dim') +
''.join([' ' + ('{0:' + str(pixel_dim_width) + 'd}').format(ipix)
for ipix in range(self.pixel_n_dim)]) +
'\n')
matrix = self.axis_correlation_matrix
matrix_str = np.empty(matrix.shape, dtype='U3')
matrix_str[matrix] = 'yes'
matrix_str[~matrix] = 'no'
for iwrl in range(self.world_n_dim):
s += (('{0:' + str(world_dim_width) + 'd}').format(iwrl) +
''.join([' ' + ('{0:>' + str(pixel_dim_width) + 's}').format(matrix_str[iwrl, ipix])
for ipix in range(self.pixel_n_dim)]) +
'\n')
# Make sure we get rid of the extra whitespace at the end of some lines
return '\n'.join([l.rstrip() for l in s.splitlines()])
__repr__ = __str__
UCDS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'ucds.txt')
with open(UCDS_FILE) as f:
VALID_UCDS = set([x.strip() for x in f.read().splitlines()[1:]])
def validate_physical_types(physical_types):
"""
Validate a list of physical types against the UCD1+ standard
"""
for physical_type in physical_types:
if (physical_type is not None and
physical_type not in VALID_UCDS and
not physical_type.startswith('custom:')):
raise ValueError("Invalid physical type: {0}".format(physical_type))
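
# --- Illustrative sketch (not part of the astropy source) -------------------
# A hypothetical, minimal concrete subclass of the ABC above: a single linear
# spectral axis.  It only exists to show which members an implementation must
# provide; the axis values ('em.wl', nm) are illustrative assumptions.
class _ToyLinearSpectralWCS(BaseLowLevelWCS):

    def __init__(self, crval=500., cdelt=1.):
        self._crval = crval  # world value of pixel 0 (assumed to be in nm)
        self._cdelt = cdelt  # world increment per pixel (assumed to be in nm)

    @property
    def pixel_n_dim(self):
        return 1

    @property
    def world_n_dim(self):
        return 1

    @property
    def world_axis_physical_types(self):
        return ['em.wl']

    @property
    def world_axis_units(self):
        return ['nm']

    def pixel_to_world_values(self, *pixel_arrays):
        return self._crval + self._cdelt * np.asarray(pixel_arrays[0])

    def array_index_to_world_values(self, *index_arrays):
        # For a one-dimensional WCS, array indices and pixel coordinates agree
        return self.pixel_to_world_values(*index_arrays)

    def world_to_pixel_values(self, *world_arrays):
        return (np.asarray(world_arrays[0]) - self._crval) / self._cdelt

    def world_to_array_index_values(self, *world_arrays):
        return np.round(self.world_to_pixel_values(*world_arrays)).astype(int)

    @property
    def world_axis_object_components(self):
        # The single world axis maps onto a Quantity via its ``value`` attribute
        return [('spectral', 0, 'value')]

    @property
    def world_axis_object_classes(self):
        from astropy.units import Quantity
        return {'spectral': (Quantity, (), {'unit': 'nm'})}
# For example, ``_ToyLinearSpectralWCS().pixel_to_world_values([0, 1, 2])``
# gives ``array([500., 501., 502.])``.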
|
d009b396e00d0fc63f068279ae38137dda32273a7ebb2a48101b166e3150db2d | import abc
from collections import defaultdict, OrderedDict
import numpy as np
from .utils import deserialize_class
__all__ = ['BaseHighLevelWCS', 'HighLevelWCSMixin']
def rec_getattr(obj, att):
for a in att.split('.'):
obj = getattr(obj, a)
return obj
def default_order(components):
order = []
for key, _, _ in components:
if key not in order:
order.append(key)
return order
class BaseHighLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the high-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def low_level_wcs(self):
"""
Returns a reference to the underlying low-level WCS object.
"""
@abc.abstractmethod
def pixel_to_world(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates (represented by
high-level objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` for pixel
indexing and ordering conventions.
"""
@abc.abstractmethod
def array_index_to_world(self, *index_arrays):
"""
Convert array indices to world coordinates (represented by Astropy
objects).
If a single high-level object is used to represent the world coordinates
(i.e., if ``len(wcs.world_axis_object_classes) == 1``), it is returned
as-is (not in a tuple/list), otherwise a tuple of high-level objects is
returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_index_to_world_values` for
pixel indexing and ordering conventions.
"""
@abc.abstractmethod
def world_to_pixel(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to pixel
coordinates.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` for pixel
indexing and ordering conventions.
"""
@abc.abstractmethod
def world_to_array_index(self, *world_objects):
"""
Convert world coordinates (represented by Astropy objects) to array
indices.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned. See
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_array_index_values` for
pixel indexing and ordering conventions. The indices should be returned
as rounded integers.
"""
class HighLevelWCSMixin(BaseHighLevelWCS):
"""
Mix-in class that automatically provides the high-level WCS API for the
low-level WCS object given by the `~HighLevelWCSMixin.low_level_wcs`
property.
"""
@property
def low_level_wcs(self):
return self
def world_to_pixel(self, *world_objects):
# Cache the classes and components since this may be expensive
serialized_classes = self.low_level_wcs.world_axis_object_classes
components = self.low_level_wcs.world_axis_object_components
# Deserialize world_axis_object_classes using the default order
classes = OrderedDict()
for key in default_order(components):
if self.low_level_wcs.serialized_classes:
classes[key] = deserialize_class(serialized_classes[key],
construct=False)
else:
classes[key] = serialized_classes[key]
# Check that the number of classes matches the number of inputs
if len(world_objects) != len(classes):
raise ValueError("Number of world inputs ({0}) does not match "
"expected ({1})".format(len(world_objects), len(classes)))
        # Determine whether the classes can be uniquely matched, that is,
        # whether each world input matches exactly one of the expected classes.
world_by_key = {}
unique_match = True
for w in world_objects:
matches = []
for key, (klass, _, _) in classes.items():
if isinstance(w, klass):
matches.append(key)
if len(matches) == 1:
world_by_key[matches[0]] = w
else:
unique_match = False
break
# If the match is not unique, the order of the classes needs to match,
# whereas if all classes are unique, we can still intelligently match
# them even if the order is wrong.
objects = {}
if unique_match:
for key, (klass, args, kwargs) in classes.items():
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(world_by_key[key], SkyCoord):
if 'frame' in kwargs:
objects[key] = world_by_key[key].transform_to(kwargs['frame'])
else:
objects[key] = world_by_key[key]
else:
objects[key] = klass(world_by_key[key], *args, **kwargs)
else:
for ikey, key in enumerate(classes):
klass, args, kwargs = classes[key]
w = world_objects[ikey]
if not isinstance(w, klass):
raise ValueError("Expected the following order of world "
"arguments: {0}".format(', '.join([k.__name__ for (k, _, _) in classes.values()])))
# FIXME: For now SkyCoord won't auto-convert upon initialization
# https://github.com/astropy/astropy/issues/7689
from astropy.coordinates import SkyCoord
if isinstance(w, SkyCoord):
if 'frame' in kwargs:
objects[key] = w.transform_to(kwargs['frame'])
else:
objects[key] = w
else:
objects[key] = klass(w, *args, **kwargs)
# We now extract the attributes needed for the world values
world = []
for key, _, attr in components:
world.append(rec_getattr(objects[key], attr))
# Finally we convert to pixel coordinates
pixel = self.low_level_wcs.world_to_pixel_values(*world)
return pixel
def pixel_to_world(self, *pixel_arrays):
# Compute the world coordinate values
world = self.low_level_wcs.pixel_to_world_values(*pixel_arrays)
if self.world_n_dim == 1:
world = (world,)
# Cache the classes and components since this may be expensive
components = self.low_level_wcs.world_axis_object_components
classes = self.low_level_wcs.world_axis_object_classes
# Deserialize classes
if self.low_level_wcs.serialized_classes:
classes_new = {}
for key, value in classes.items():
classes_new[key] = deserialize_class(value, construct=False)
classes = classes_new
args = defaultdict(list)
kwargs = defaultdict(dict)
for i, (key, attr, _) in enumerate(components):
if isinstance(attr, str):
kwargs[key][attr] = world[i]
else:
while attr > len(args[key]) - 1:
args[key].append(None)
args[key][attr] = world[i]
result = []
for key in default_order(components):
klass, ar, kw = classes[key]
result.append(klass(*args[key], *ar, **kwargs[key], **kw))
if len(result) == 1:
return result[0]
else:
return result
def array_index_to_world(self, *index_arrays):
return self.pixel_to_world(*index_arrays[::-1])
def world_to_array_index(self, *world_objects):
if self.pixel_n_dim == 1:
return np.round(self.world_to_pixel(*world_objects)).astype(int)
else:
return tuple(np.round(self.world_to_pixel(*world_objects)[::-1]).astype(int).tolist())
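
# --- Illustrative sketch (not part of the astropy source) -------------------
# ``HighLevelWCSMixin`` is what gives a low-level WCS the object-based methods
# above.  The hypothetical helper below assumes ``astropy.wcs.WCS`` (which
# mixes this class in) and shows the round trip between pixel values and a
# high-level ``SkyCoord``.
def _example_high_level_round_trip():
    from astropy.wcs import WCS
    wcs = WCS(naxis=2)
    wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    wcs.wcs.crval = [10., 20.]
    wcs.wcs.crpix = [1., 1.]
    wcs.wcs.cdelt = [-0.01, 0.01]
    # The two celestial world axes map onto a single high-level object (see
    # world_axis_object_components), so pixel_to_world returns one SkyCoord
    coord = wcs.pixel_to_world(0, 0)
    # world_to_pixel accepts the high-level object back
    x, y = wcs.world_to_pixel(coord)
    return coord, (x, y)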
|
1833bd43ce3ff19e69a62ed2d30a990a15bd0ab204bc1dd0085a5a4d41b90eba | import pytest
from numpy.testing import assert_equal, assert_allclose
from astropy.wcs import WCS
from astropy.io.fits import Header
from astropy.coordinates import SkyCoord, Galactic
from astropy.units import Quantity
from astropy.wcs.wcsapi.sliced_low_level_wcs import SlicedLowLevelWCS, sanitize_slices
import astropy.units as u
# To test the slicing we start off from standard FITS WCS
# objects since those implement the low-level API. We create
# a WCS for a spectral cube with axes in non-standard order
# and with correlated celestial axes and an uncorrelated
# spectral axis.
HEADER_SPECTRAL_CUBE = """
NAXIS = 3
NAXIS1 = 10
NAXIS2 = 20
NAXIS3 = 30
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
WCS_SPECTRAL_CUBE = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))
WCS_SPECTRAL_CUBE.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
@pytest.mark.parametrize("item, ndim, expected", (
([Ellipsis, 10], 4, [slice(None)] * 3 + [10]),
([10, slice(20, 30)], 5, [10, slice(20, 30)] + [slice(None)] * 3),
([10, Ellipsis, 8], 10, [10] + [slice(None)] * 8 + [8])
))
def test_sanitize_slice(item, ndim, expected):
new_item = sanitize_slices(item, ndim)
# FIXME: do we still need the first two since the third assert
# should cover it all?
assert len(new_item) == ndim
assert all(isinstance(i, (slice, int)) for i in new_item)
assert new_item == expected
EXPECTED_ELLIPSIS_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 10)
Pixel Dim Data size Bounds
0 10 (-1, 11)
1 20 (-2, 18)
2 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_ellipsis():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 10)
assert wcs.pixel_shape == (10, 20, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-2, 18), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_ELLIPSIS_REPR.strip()
EXPECTED_SPECTRAL_SLICE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 2 pixel and 2 world dimensions
Array shape (Numpy order): (30, 10)
Pixel Dim Data size Bounds
0 10 (-1, 11)
1 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1
0 yes yes
1 yes yes
"""
def test_spectral_slice():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [slice(None), 10])
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 2
assert wcs.array_shape == (30, 10)
assert wcs.pixel_shape == (10, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, True], [True, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert_allclose(wcs.pixel_to_world_values(29, 44), (10, 25))
assert_allclose(wcs.array_index_to_world_values(44, 29), (10, 25))
assert_allclose(wcs.world_to_pixel_values(10, 25), (29., 44.))
assert_equal(wcs.world_to_array_index_values(10, 25), (44, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_SPECTRAL_SLICE_REPR.strip()
EXPECTED_SPECTRAL_RANGE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 6, 10)
Pixel Dim Data size Bounds
0 10 (-1, 11)
1 6 (-6, 14)
2 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_spectral_range():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [slice(None), slice(4, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 6, 10)
assert wcs.pixel_shape == (10, 6, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(29, 35, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 35, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 35., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 35, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-6, 14), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_SPECTRAL_RANGE_REPR.strip()
EXPECTED_CELESTIAL_SLICE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 2 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20)
Pixel Dim Data size Bounds
0 20 (-2, 18)
1 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1
0 no yes
1 yes no
2 no yes
"""
def test_celestial_slice():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [Ellipsis, 5])
assert wcs.pixel_n_dim == 2
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20)
assert wcs.pixel_shape == (20, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[False, True], [True, False], [False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(39, 44), (12.4, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39), (12.4, 20, 25))
assert_allclose(wcs.world_to_pixel_values(12.4, 20, 25), (39., 44.))
assert_equal(wcs.world_to_array_index_values(12.4, 20, 25), (44, 39))
assert_equal(wcs.pixel_bounds, [(-2, 18), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_CELESTIAL_SLICE_REPR.strip()
EXPECTED_CELESTIAL_RANGE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 5)
Pixel Dim Data size Bounds
0 5 (-6, 6)
1 20 (-2, 18)
2 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_celestial_range():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE, [Ellipsis, slice(5, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 5)
assert wcs.pixel_shape == (5, 20, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(24, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 24), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (24., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 24))
assert_equal(wcs.pixel_bounds, [(-6, 6), (-2, 18), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_CELESTIAL_RANGE_REPR.strip()
# Now try with a 90 degree rotation
WCS_SPECTRAL_CUBE_ROT = WCS(Header.fromstring(HEADER_SPECTRAL_CUBE, sep='\n'))
WCS_SPECTRAL_CUBE_ROT.wcs.pc = [[0, 0, 1], [0, 1, 0], [1, 0, 0]]
WCS_SPECTRAL_CUBE_ROT.wcs.crval[0] = 0
WCS_SPECTRAL_CUBE_ROT.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
EXPECTED_CELESTIAL_RANGE_ROT_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 5)
Pixel Dim Data size Bounds
0 5 (-6, 6)
1 20 (-2, 18)
2 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_celestial_range_rot():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE_ROT, [Ellipsis, slice(5, 10)])
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 5)
assert wcs.pixel_shape == (5, 20, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(14, 29, 34), (1, 15, 24))
assert_allclose(wcs.array_index_to_world_values(34, 29, 14), (1, 15, 24))
assert_allclose(wcs.world_to_pixel_values(1, 15, 24), (14., 29., 34.))
assert_equal(wcs.world_to_array_index_values(1, 15, 24), (34, 29, 14))
assert_equal(wcs.pixel_bounds, [(-6, 6), (-2, 18), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_CELESTIAL_RANGE_ROT_REPR.strip()
HEADER_NO_SHAPE_CUBE = """
NAXIS = 3
CTYPE1 = GLAT-CAR
CTYPE2 = FREQ
CTYPE3 = GLON-CAR
CRVAL1 = 10
CRVAL2 = 20
CRVAL3 = 25
CRPIX1 = 30
CRPIX2 = 40
CRPIX3 = 45
CDELT1 = -0.1
CDELT2 = 0.5
CDELT3 = 0.1
CUNIT1 = deg
CUNIT2 = Hz
CUNIT3 = deg
"""
WCS_NO_SHAPE_CUBE = WCS(Header.fromstring(HEADER_NO_SHAPE_CUBE, sep='\n'))
EXPECTED_NO_SHAPE_REPR = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): None
Pixel Dim Data size Bounds
0 None None
1 None None
2 None None
World Dim Physical Type Units
0 pos.galactic.lat deg
1 em.freq Hz
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_no_array_shape():
wcs = SlicedLowLevelWCS(WCS_NO_SHAPE_CUBE, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape is None
assert wcs.pixel_shape is None
assert wcs.world_axis_physical_types == ['pos.galactic.lat', 'em.freq', 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True], [False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('freq', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert wcs.world_axis_object_classes['freq'][0] is Quantity
assert wcs.world_axis_object_classes['freq'][1] == ()
assert wcs.world_axis_object_classes['freq'][2] == {'unit': 'Hz'}
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert str(wcs) == repr(wcs) == EXPECTED_NO_SHAPE_REPR.strip()
# Testing the WCS object having some physical types as None/Unknown
HEADER_SPECTRAL_CUBE_NONE_TYPES = {
'CTYPE1': 'GLAT-CAR',
'CUNIT1': 'deg',
'CDELT1': -0.1,
'CRPIX1': 30,
'CRVAL1': 10,
'NAXIS1': 10,
'CTYPE2': '',
'CUNIT2': 'Hz',
'CDELT2': 0.5,
'CRPIX2': 40,
'CRVAL2': 20,
'NAXIS2': 20,
'CTYPE3': 'GLON-CAR',
'CUNIT3': 'deg',
'CDELT3': 0.1,
'CRPIX3': 45,
'CRVAL3': 25,
'NAXIS3': 30
}
WCS_SPECTRAL_CUBE_NONE_TYPES = WCS(header=HEADER_SPECTRAL_CUBE_NONE_TYPES)
WCS_SPECTRAL_CUBE_NONE_TYPES.pixel_bounds = [(-1, 11), (-2, 18), (5, 15)]
EXPECTED_ELLIPSIS_REPR_NONE_TYPES = """
SlicedLowLevelWCS Transformation
This transformation has 3 pixel and 3 world dimensions
Array shape (Numpy order): (30, 20, 10)
Pixel Dim Data size Bounds
0 10 (-1, 11)
1 20 (-2, 18)
2 30 (5, 15)
World Dim Physical Type Units
0 pos.galactic.lat deg
1 None unknown
2 pos.galactic.lon deg
Correlation between pixel and world axes:
Pixel Dim
World Dim 0 1 2
0 yes no yes
1 no yes no
2 yes no yes
"""
def test_ellipsis_none_types():
wcs = SlicedLowLevelWCS(WCS_SPECTRAL_CUBE_NONE_TYPES, Ellipsis)
assert wcs.pixel_n_dim == 3
assert wcs.world_n_dim == 3
assert wcs.array_shape == (30, 20, 10)
assert wcs.pixel_shape == (10, 20, 30)
assert wcs.world_axis_physical_types == ['pos.galactic.lat', None, 'pos.galactic.lon']
assert wcs.world_axis_units == ['deg', 'Hz', 'deg']
assert_equal(wcs.axis_correlation_matrix, [[True, False, True],
[False, True, False], [True, False, True]])
assert wcs.world_axis_object_components == [('celestial', 1, 'spherical.lat.degree'),
('world', 0, 'value'),
('celestial', 0, 'spherical.lon.degree')]
assert wcs.world_axis_object_classes['celestial'][0] is SkyCoord
assert wcs.world_axis_object_classes['celestial'][1] == ()
assert isinstance(wcs.world_axis_object_classes['celestial'][2]['frame'], Galactic)
assert wcs.world_axis_object_classes['celestial'][2]['unit'] is u.deg
assert_allclose(wcs.pixel_to_world_values(29, 39, 44), (10, 20, 25))
assert_allclose(wcs.array_index_to_world_values(44, 39, 29), (10, 20, 25))
assert_allclose(wcs.world_to_pixel_values(10, 20, 25), (29., 39., 44.))
assert_equal(wcs.world_to_array_index_values(10, 20, 25), (44, 39, 29))
assert_equal(wcs.pixel_bounds, [(-1, 11), (-2, 18), (5, 15)])
assert str(wcs) == repr(wcs) == EXPECTED_ELLIPSIS_REPR_NONE_TYPES.strip()
|
bfd15a35e09a0114a0c9698b11408eb2d890f99932092e10302ab719671aad9d | import functools
import pytest
import numpy as np
from astropy.time import Time, TimeDelta
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
tiny = 2. ** -52
dt_tiny = TimeDelta(tiny, format='jd')
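# For reference: 2 ** -52 is the relative precision of a float64 (~2.2e-16).
# Expressed as a fraction of a day this is 2 ** -52 * 86400 s ~= 1.9e-11 s,
# i.e. the ~20 ps quoted in the tolerances above.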
def test_abs_jd2_always_less_than_half():
"""Make jd2 approach +/-0.5, and check that it doesn't go over."""
t1 = Time(2400000.5, [-tiny, +tiny], format='jd')
assert np.all(t1.jd1 % 1 == 0)
assert np.all(abs(t1.jd2) < 0.5)
t2 = Time(2400000., [[0.5-tiny, 0.5+tiny],
[-0.5-tiny, -0.5+tiny]], format='jd')
assert np.all(t2.jd1 % 1 == 0)
assert np.all(abs(t2.jd2) < 0.5)
def test_addition():
"""Check that an addition at the limit of precision (2^-52) is seen"""
t = Time(2455555., 0.5, format='jd', scale='utc')
t_dt = t + dt_tiny
assert t_dt.jd1 == t.jd1 and t_dt.jd2 != t.jd2
# Check that the addition is exactly reversed by the corresponding subtraction
t2 = t_dt - dt_tiny
assert t2.jd1 == t.jd1 and t2.jd2 == t.jd2
def test_mult_div():
"""Test precision with multiply and divide"""
dt_small = 6 * dt_tiny
# pick a number that will leave remainder if divided by 6.
dt_big = TimeDelta(20000., format='jd')
dt_big_small_by_6 = (dt_big + dt_small) / 6.
dt_frac = dt_big_small_by_6 - TimeDelta(3333., format='jd')
assert allclose_jd2(dt_frac.jd2, 0.33333333333333354)
def test_init_variations():
"""Check that 3 ways of specifying a time + small offset are equivalent"""
dt_tiny_sec = dt_tiny.jd2 * 86400.
t1 = Time(1e11, format='cxcsec') + dt_tiny
t2 = Time(1e11, dt_tiny_sec, format='cxcsec')
t3 = Time(dt_tiny_sec, 1e11, format='cxcsec')
assert t1.jd1 == t2.jd1
assert t1.jd2 == t3.jd2
def test_precision_exceeds_64bit():
"""
Check that Time object really holds more precision than float64 by looking at the
(naively) summed 64-bit result and asserting equality at the bit level.
"""
t1 = Time(1.23456789e11, format='cxcsec')
t2 = t1 + dt_tiny
assert t1.jd == t2.jd
def test_through_scale_change():
"""Check that precision holds through scale change (cxcsec is TT)"""
t0 = Time(1.0, format='cxcsec')
t1 = Time(1.23456789e11, format='cxcsec')
dt_tt = t1 - t0
dt_tai = t1.tai - t0.tai
assert allclose_jd(dt_tt.jd1, dt_tai.jd1)
assert allclose_jd2(dt_tt.jd2, dt_tai.jd2)
def test_iso_init():
"""Check when initializing from ISO date"""
t1 = Time('2000:001:00:00:00.00000001', scale='tai')
t2 = Time('3000:001:13:00:00.00000002', scale='tai')
dt = t2 - t1
assert allclose_jd2(dt.jd2, 13. / 24. + 1e-8 / 86400. - 1.0)
def test_jd1_is_mult_of_one():
"""
Check that jd1 is a multiple of 1.
"""
t1 = Time('2000:001:00:00:00.00000001', scale='tai')
assert np.round(t1.jd1) == t1.jd1
t1 = Time(1.23456789, 12345678.90123456, format='jd', scale='tai')
assert np.round(t1.jd1) == t1.jd1
@pytest.mark.xfail
def test_precision_neg():
"""
Check precision when jd1 is negative. Currently fails because ERFA routines use a
test like jd1 > jd2 to decide which component to update. Should be
abs(jd1) > abs(jd2).
"""
t1 = Time(-100000.123456, format='jd', scale='tt')
assert np.round(t1.jd1) == t1.jd1
t1_tai = t1.tai
assert np.round(t1_tai.jd1) == t1_tai.jd1
def test_precision_epoch():
"""
Check that input via epoch also has full precision, i.e., against
regression on https://github.com/astropy/astropy/pull/366
"""
t_utc = Time(range(1980, 2001), format='jyear', scale='utc')
t_tai = Time(range(1980, 2001), format='jyear', scale='tai')
dt = t_utc - t_tai
assert allclose_sec(dt.sec, np.round(dt.sec))
def test_leap_seconds_rounded_correctly():
"""Regression tests against #2083, where a leap second was rounded
incorrectly by the underlying ERFA routine."""
t = Time(['2012-06-30 23:59:59.413',
'2012-07-01 00:00:00.413'], scale='ut1', precision=3).utc
assert np.all(t.iso == np.array(['2012-06-30 23:59:60.000',
'2012-07-01 00:00:00.000']))
# with the bug, both yielded '2012-06-30 23:59:60.000'
|
919aa6b59fbb099c9224e774ff3cf672bc37b9513c689c0577238187aacd998e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import functools
import pytest
import numpy as np
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.table import Column
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
class TestTimeQuantity():
"""Test Interaction of Time with Quantities"""
def test_valid_quantity_input(self):
"""Test Time formats that are allowed to take quantity input."""
q = 2450000.125*u.day
t1 = Time(q, format='jd', scale='utc')
assert t1.value == q.value
q2 = q.to(u.second)
t2 = Time(q2, format='jd', scale='utc')
assert t2.value == q.value == q2.to_value(u.day)
q3 = q-2400000.5*u.day
t3 = Time(q3, format='mjd', scale='utc')
assert t3.value == q3.value
# test we can deal with two quantity arguments, with different units
qs = 24.*36.*u.second
t4 = Time(q3, qs, format='mjd', scale='utc')
assert t4.value == (q3+qs).to_value(u.day)
qy = 1990.*u.yr
ty1 = Time(qy, format='jyear', scale='utc')
assert ty1.value == qy.value
ty2 = Time(qy.to(u.day), format='jyear', scale='utc')
assert ty2.value == qy.value
qy2 = 10.*u.yr
tcxc = Time(qy2, format='cxcsec')
assert tcxc.value == qy2.to_value(u.second)
tgps = Time(qy2, format='gps')
assert tgps.value == qy2.to_value(u.second)
tunix = Time(qy2, format='unix')
assert tunix.value == qy2.to_value(u.second)
qd = 2000.*365.*u.day
tplt = Time(qd, format='plot_date', scale='utc')
assert tplt.value == qd.value
def test_invalid_quantity_input(self):
with pytest.raises(u.UnitsError):
Time(2450000.*u.m, format='jd', scale='utc')
with pytest.raises(u.UnitsError):
Time(2450000.*u.dimensionless_unscaled, format='jd', scale='utc')
def test_column_with_and_without_units(self):
"""Ensure a Column without a unit is treated as an array [#3648]"""
a = np.arange(50000., 50010.)
ta = Time(a, format='mjd')
c1 = Column(np.arange(50000., 50010.), name='mjd')
tc1 = Time(c1, format='mjd')
assert np.all(ta == tc1)
c2 = Column(np.arange(50000., 50010.), name='mjd', unit='day')
tc2 = Time(c2, format='mjd')
assert np.all(ta == tc2)
c3 = Column(np.arange(50000., 50010.), name='mjd', unit='m')
with pytest.raises(u.UnitsError):
Time(c3, format='mjd')
def test_no_quantity_input_allowed(self):
"""Time formats that are not allowed to take Quantity input."""
qy = 1990.*u.yr
for fmt in ('iso', 'yday', 'datetime', 'byear',
'byear_str', 'jyear_str'):
with pytest.raises(ValueError):
Time(qy, format=fmt, scale='utc')
def test_valid_quantity_operations(self):
"""Check that adding a time-valued quantity to a Time gives a Time"""
t0 = Time(100000., format='cxcsec')
q1 = 10.*u.second
t1 = t0 + q1
assert isinstance(t1, Time)
assert t1.value == t0.value+q1.to_value(u.second)
q2 = 1.*u.day
t2 = t0 - q2
assert allclose_sec(t2.value, t0.value-q2.to_value(u.second))
# check broadcasting
q3 = np.arange(15.).reshape(3, 5) * u.hour
t3 = t0 - q3
assert t3.shape == q3.shape
assert allclose_sec(t3.value, t0.value-q3.to_value(u.second))
def test_invalid_quantity_operations(self):
"""Check that comparisons of Time with quantities does not work
(even for time-like, since we cannot compare Time to TimeDelta)"""
with pytest.raises(TypeError):
Time(100000., format='cxcsec') > 10.*u.m
with pytest.raises(TypeError):
Time(100000., format='cxcsec') > 10.*u.second
class TestTimeDeltaQuantity():
"""Test interaction of TimeDelta with Quantities"""
def test_valid_quantity_input(self):
"""Test that TimeDelta can take quantity input."""
q = 500.25*u.day
dt1 = TimeDelta(q, format='jd')
assert dt1.value == q.value
dt2 = TimeDelta(q, format='sec')
assert dt2.value == q.to_value(u.second)
dt3 = TimeDelta(q)
assert dt3.value == q.value
def test_invalid_quantity_input(self):
with pytest.raises(u.UnitsError):
TimeDelta(2450000.*u.m, format='jd')
with pytest.raises(u.UnitsError):
Time(2450000.*u.dimensionless_unscaled, format='jd', scale='utc')
with pytest.raises(TypeError):
TimeDelta(100, format='sec') > 10.*u.m
def test_quantity_output(self):
q = 500.25*u.day
dt = TimeDelta(q)
assert dt.to(u.day) == q
assert dt.to_value(u.day) == q.value
assert dt.to(u.second).value == q.to_value(u.second)
assert dt.to_value(u.second) == q.to_value(u.second)
with pytest.raises(u.UnitsError):
dt.to(u.m)
with pytest.raises(u.UnitsError):
dt.to_value(u.m)
def test_valid_quantity_operations1(self):
"""Check adding/substracting/comparing a time-valued quantity works
with a TimeDelta. Addition/subtraction should give TimeDelta"""
t0 = TimeDelta(106400., format='sec')
q1 = 10.*u.second
t1 = t0 + q1
assert isinstance(t1, TimeDelta)
assert t1.value == t0.value+q1.to_value(u.second)
q2 = 1.*u.day
t2 = t0 - q2
assert isinstance(t2, TimeDelta)
assert allclose_sec(t2.value, t0.value-q2.to_value(u.second))
# now comparisons
assert t0 > q1
assert t0 < 1.*u.yr
# and broadcasting
q3 = np.arange(12.).reshape(4, 3) * u.hour
t3 = t0 + q3
assert isinstance(t3, TimeDelta)
assert t3.shape == q3.shape
assert allclose_sec(t3.value, t0.value + q3.to_value(u.second))
def test_valid_quantity_operations2(self):
"""Check that TimeDelta is treated as a quantity where possible."""
t0 = TimeDelta(100000., format='sec')
f = 1./t0
assert isinstance(f, u.Quantity)
assert f.unit == 1./u.day
g = 10.*u.m/u.second**2
v = t0 * g
assert isinstance(v, u.Quantity)
assert u.allclose(v, t0.sec * g.value * u.m / u.second)
q = np.log10(t0/u.second)
assert isinstance(q, u.Quantity)
assert q.value == np.log10(t0.sec)
s = 1.*u.m
v = s/t0
assert isinstance(v, u.Quantity)
assert u.allclose(v, 1. / t0.sec * u.m / u.s)
t = 1.*u.s
t2 = t0 * t
assert isinstance(t2, u.Quantity)
assert u.allclose(t2, t0.sec * u.s ** 2)
t3 = [1] / t0
assert isinstance(t3, u.Quantity)
assert u.allclose(t3, 1 / (t0.sec * u.s))
# broadcasting
t1 = TimeDelta(np.arange(100000., 100012.).reshape(6, 2), format='sec')
f = np.array([1., 2.]) * u.cycle * u.Hz
phase = f * t1
assert isinstance(phase, u.Quantity)
assert phase.shape == t1.shape
assert u.allclose(phase, t1.sec * f.value * u.cycle)
q = t0 * t1
assert isinstance(q, u.Quantity)
assert np.all(q == t0.to(u.day) * t1.to(u.day))
q = t1 / t0
assert isinstance(q, u.Quantity)
assert np.all(q == t1.to(u.day) / t0.to(u.day))
def test_valid_quantity_operations3(self):
"""Test a TimeDelta remains one if possible."""
t0 = TimeDelta(10., format='jd')
q = 10. * u.one
t1 = q * t0
assert isinstance(t1, TimeDelta)
assert t1 == TimeDelta(100., format='jd')
t2 = t0 * q
assert isinstance(t2, TimeDelta)
assert t2 == TimeDelta(100., format='jd')
t3 = t0 / q
assert isinstance(t3, TimeDelta)
assert t3 == TimeDelta(1., format='jd')
q2 = 1. * u.percent
t4 = t0 * q2
assert isinstance(t4, TimeDelta)
assert abs(t4 - TimeDelta(0.1, format='jd')) < 1. * u.ns
q3 = 1. * u.hr / (36. * u.s)
t5 = q3 * t0
        assert isinstance(t5, TimeDelta)
assert abs(t5 - TimeDelta(1000., format='jd')) < 1. * u.ns
# Test multiplication with a unit.
t6 = t0 * u.one
assert isinstance(t6, TimeDelta)
assert t6 == TimeDelta(10., format='jd')
t7 = u.one * t0
assert isinstance(t7, TimeDelta)
assert t7 == TimeDelta(10., format='jd')
t8 = t0 * ''
assert isinstance(t8, TimeDelta)
assert t8 == TimeDelta(10., format='jd')
t9 = '' * t0
assert isinstance(t9, TimeDelta)
assert t9 == TimeDelta(10., format='jd')
t10 = t0 / u.one
assert isinstance(t10, TimeDelta)
        assert t10 == TimeDelta(10., format='jd')
t11 = t0 / ''
assert isinstance(t11, TimeDelta)
assert t11 == TimeDelta(10., format='jd')
t12 = t0 / [1]
assert isinstance(t12, TimeDelta)
assert t12 == TimeDelta(10., format='jd')
t13 = [1] * t0
assert isinstance(t13, TimeDelta)
assert t13 == TimeDelta(10., format='jd')
def test_invalid_quantity_operations(self):
"""Check comparisons of TimeDelta with non-time quantities fails."""
with pytest.raises(TypeError):
TimeDelta(100000., format='sec') > 10.*u.m
def test_invalid_quantity_operations2(self):
"""Check that operations with non-time/quantity fail."""
td = TimeDelta(100000., format='sec')
with pytest.raises(TypeError):
td * object()
with pytest.raises(TypeError):
td / object()
def test_invalid_quantity_broadcast(self):
"""Check broadcasting rules in interactions with Quantity."""
t0 = TimeDelta(np.arange(12.).reshape(4, 3), format='sec')
with pytest.raises(ValueError):
t0 + np.arange(4.) * u.s
class TestDeltaAttributes():
def test_delta_ut1_utc(self):
t = Time('2010-01-01 00:00:00', format='iso', scale='utc', precision=6)
t.delta_ut1_utc = 0.3 * u.s
assert t.ut1.iso == '2010-01-01 00:00:00.300000'
t.delta_ut1_utc = 0.4 / 60. * u.minute
assert t.ut1.iso == '2010-01-01 00:00:00.400000'
with pytest.raises(u.UnitsError):
t.delta_ut1_utc = 0.4 * u.m
# Also check that a TimeDelta works.
t.delta_ut1_utc = TimeDelta(0.3, format='sec')
assert t.ut1.iso == '2010-01-01 00:00:00.300000'
t.delta_ut1_utc = TimeDelta(0.5/24./3600., format='jd')
assert t.ut1.iso == '2010-01-01 00:00:00.500000'
def test_delta_tdb_tt(self):
t = Time('2010-01-01 00:00:00', format='iso', scale='tt', precision=6)
t.delta_tdb_tt = 20. * u.second
assert t.tdb.iso == '2010-01-01 00:00:20.000000'
t.delta_tdb_tt = 30. / 60. * u.minute
assert t.tdb.iso == '2010-01-01 00:00:30.000000'
with pytest.raises(u.UnitsError):
t.delta_tdb_tt = 0.4 * u.m
# Also check that a TimeDelta works.
t.delta_tdb_tt = TimeDelta(40., format='sec')
assert t.tdb.iso == '2010-01-01 00:00:40.000000'
t.delta_tdb_tt = TimeDelta(50./24./3600., format='jd')
assert t.tdb.iso == '2010-01-01 00:00:50.000000'
@pytest.mark.parametrize('q1, q2', ((5e8*u.s, None),
(5e17*u.ns, None),
(4e8*u.s, 1e17*u.ns),
(4e14*u.us, 1e17*u.ns)))
def test_quantity_conversion_rounding(q1, q2):
"""Check that no rounding errors are incurred by unit conversion.
This occurred before as quantities in seconds were converted to days
before trying to split them into two-part doubles. See gh-7622.
"""
t = Time('2001-01-01T00:00:00.', scale='tai')
expected = Time('2016-11-05T00:53:20.', scale='tai')
if q2 is None:
t0 = t + q1
else:
t0 = t + q1 + q2
assert abs(t0 - expected) < 20 * u.ps
dt1 = TimeDelta(q1, q2)
t1 = t + dt1
assert abs(t1 - expected) < 20 * u.ps
dt2 = TimeDelta(q1, q2, format='sec')
t2 = t + dt2
assert abs(t2 - expected) < 20 * u.ps
|
795ac1595045742d91339457f460071ba027e5a450ee61fa34a15ae63c6b75fb | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS
from astropy.units.core import (
UnitsError, UnitConversionError, UnitTypeError,
dimensionless_unscaled, get_current_unit_registry)
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity."""
try:
scale = from_unit._to(to_unit)
except UnitsError:
return from_unit._apply_equivalencies(
from_unit, to_unit, get_current_unit_registry().equivalencies)
except AttributeError:
raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'"
.format(from_unit, to_unit))
if scale == 1.:
return None
else:
return lambda val: scale * val
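# For example (illustrative only, assuming ``import astropy.units as u``):
# ``get_converter(u.km, u.m)`` returns a function that multiplies its argument
# by 1000., while ``get_converter(u.m, u.m)`` returns `None`, since no scaling
# is needed.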
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
# By default, we try adjusting unit2 to unit1, so that the result will
# be unit1 as well. But if there is no second unit, we have to try
# adjusting unit1 (to dimensionless, see below).
if unit2 is None:
if unit1 is None:
# No units for any input -- e.g., np.add(a1, a2, out=q)
return converters, dimensionless_unscaled
changeable = 0
# swap units.
unit2 = unit1
unit1 = None
elif unit2 is unit1:
# ensure identical units is fast ("==" is slow, so avoid that).
return converters, unit1
else:
changeable = 1
# Try to get a converter from unit2 to unit1.
if unit1 is None:
try:
converters[changeable] = get_converter(unit2,
dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[1-changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
"Can only apply '{0}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
return converters, unit1
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is being passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
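# A minimal sketch of this convention (values illustrative, assuming
# ``import astropy.units as u``; the helpers themselves are defined below):
#
#     helper_sqrt(np.sqrt, u.m ** 2)        # -> ([None], u.m)
#     helper_invariant(np.absolute, u.s)    # -> ([None], u.s)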
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
def helper_square(f, unit):
return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit ** -1 if unit is not None else dimensionless_unscaled)
one_half = 0.5 # faster than Fraction(1, 2)
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
return ([None], unit ** one_half if unit is not None
else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit ** one_third if unit is not None
else dimensionless_unscaled))
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return ([get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled))
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)],
dimensionless_unscaled)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_dimensionless_to_radian(f, unit):
from astropy.units.si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_degree_to_radian(f, unit):
from astropy.units.si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_degree(f, unit):
from astropy.units.si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_dimensionless(f, unit):
from astropy.units.si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError("Can only apply '{0}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
return [None], (None, None)
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take a two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
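# A minimal sketch of the two-argument convention (values illustrative,
# assuming ``import astropy.units as u``; the helpers are defined below):
#
#     helper_multiplication(np.multiply, u.m, u.s)   # -> ([None, None], u.m * u.s)
#     helper_division(np.divide, u.m, u.s)           # -> ([None, None], u.m / u.s)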
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a "
"dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply 'heaviside' function with a "
"dimensionless second argument.")
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (get_converter(unit1, dimensionless_unscaled)
if unit1 is not None else None)
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
return ([converter1, converter2], dimensionless_unscaled)
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few us; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from astropy.units.si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
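# For example (illustrative, assuming ``import astropy.units as u``):
# ``helper_divmod(np.divmod, u.m, u.cm)`` converts the second argument to
# metres and reports the output units as ``(dimensionless_unscaled, u.m)``,
# i.e. a dimensionless quotient and a remainder in metres.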
def helper_clip(f, unit1, unit2, unit3):
# Treat the array being clipped as primary.
converters = [None]
if unit1 is None:
result_unit = dimensionless_unscaled
try:
converters += [(None if unit is None else
get_converter(unit, dimensionless_unscaled))
for unit in (unit2, unit3)]
except UnitsError:
raise UnitConversionError(
"Can only apply '{0}' function to quantities with "
"compatible dimensions".format(f.__name__))
else:
result_unit = unit1
for unit in unit2, unit3:
try:
converter = get_converter(_d(unit), result_unit)
except UnitsError:
if unit is None:
# special case: OK if unitless number is zero, inf, nan
converters.append(False)
else:
raise UnitConversionError(
"Can only apply '{0}' function to quantities with "
"compatible dimensions".format(f.__name__))
else:
converters.append(converter)
return converters, result_unit
# list of ufuncs:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs
UNSUPPORTED_UFUNCS |= {
np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
np.logical_not}
for name in 'isnat', 'gcd', 'lcm':
# isnat was introduced in numpy 1.14, gcd+lcm in 1.15
ufunc = getattr(np, name, None)
if isinstance(ufunc, np.ufunc):
UNSUPPORTED_UFUNCS |= {ufunc}
# SINGLE ARGUMENT UFUNCS
# ufuncs that return a boolean and do not care about the unit
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
np.spacing, np.rint, np.floor, np.ceil, np.trunc,
np.positive)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
np.log10, np.log2, np.log1p)
# As found out in gh-7058, some numpy 1.13 conda installations also provide
# np.erf, even though upstream doesn't have it. We include it if present.
if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc):
dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh,
np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
np.minimum, np.fmin, np.fmax, np.nextafter,
np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
if isinstance(getattr(np, 'matmul', None), np.ufunc):
UFUNC_HELPERS[np.matmul] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
# Check for clip ufunc; note that np.clip is a wrapper function, not the ufunc.
if isinstance(getattr(np.core.umath, 'clip', None), np.ufunc):
UFUNC_HELPERS[np.core.umath.clip] = helper_clip
|
aeed7b1bc7a611f03398cfbb2ed10c82a796fd8276cb65ff98ea6ace6557a590 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Converters for Quantity."""
import numpy as np
from astropy.units.core import (UnitsError, UnitConversionError, UnitTypeError,
dimensionless_unscaled)
__all__ = ['can_have_arbitrary_unit', 'converters_and_unit',
'check_output', 'UFUNC_HELPERS', 'UNSUPPORTED_UFUNCS']
class UfuncHelpers(dict):
"""Registry of unit conversion functions to help ufunc evaluation.
Based on dict for quick access, but with a missing method to load
helpers for additional modules such as scipy.special and erfa.
Such modules should be registered using ``register_module``.
"""
UNSUPPORTED = set()
def register_module(self, module, names, importer):
"""Register (but do not import) a set of ufunc helpers.
Parameters
----------
module : str
Name of the module with the ufuncs (e.g., 'scipy.special').
names : iterable of str
Names of the module ufuncs for which helpers are available.
importer : callable
Function that imports the ufuncs and returns a dict of helpers
keyed by those ufuncs. If the value is `None`, the ufunc is
explicitly *not* supported.
"""
self.modules[module] = {'names': names,
'importer': importer}
@property
def modules(self):
"""Modules for which helpers are available (but not yet loaded)."""
if not hasattr(self, '_modules'):
self._modules = {}
return self._modules
def import_module(self, module):
"""Import the helpers from the given module using its helper function.
Parameters
----------
module : str
Name of the module. Has to have been registered beforehand.
"""
module_info = self.modules.pop(module)
self.update(module_info['importer']())
def __missing__(self, ufunc):
"""Called if a ufunc is not found.
Check if the ufunc is in any of the available modules, and, if so,
import the helpers for that module.
"""
if ufunc in self.UNSUPPORTED:
raise TypeError("Cannot use ufunc '{0}' with quantities"
.format(ufunc.__name__))
for module, module_info in list(self.modules.items()):
if ufunc.__name__ in module_info['names']:
# A ufunc with the same name is supported by this module.
# Of course, this doesn't necessarily mean it is the
                # right module. So, we try to let the importer do its work.
# If it fails (e.g., for `scipy.special`), then that's
# fine, just raise the TypeError. If it succeeds, but
# the ufunc is not found, that is also fine: we will
# enter __missing__ again and either find another
# module or get the TypeError there.
try:
self.import_module(module)
except ImportError:
pass
else:
return self[ufunc]
raise TypeError("unknown ufunc {0}. If you believe this ufunc "
"should be supported, please raise an issue on "
"https://github.com/astropy/astropy"
.format(ufunc.__name__))
def __setitem__(self, key, value):
# Implementation note: in principle, we could just let `None`
# mean that something is not implemented, but this means an
# extra if clause for the output, slowing down the common
# path where a ufunc is supported.
if value is None:
self.UNSUPPORTED |= {key}
self.pop(key, None)
else:
super().__setitem__(key, value)
self.UNSUPPORTED -= {key}
UFUNC_HELPERS = UfuncHelpers()
UNSUPPORTED_UFUNCS = UFUNC_HELPERS.UNSUPPORTED
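# Illustrative sketch of the registration pattern (the package, ufunc, and
# helper below are hypothetical, not registrations astropy actually performs):
#
#     def _import_hypothetical_helpers():
#         from hypothetical_package import special_ufunc
#         # each helper maps (ufunc, unit(s)) -> (converters, result unit)
#         return {special_ufunc: lambda f, unit: ([None], unit)}
#
#     UFUNC_HELPERS.register_module('hypothetical_package', ['special_ufunc'],
#                                   _import_hypothetical_helpers)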
def can_have_arbitrary_unit(value):
"""Test whether the items in value can have arbitrary units
Numbers whose value does not change upon a unit change, i.e.,
zero, infinity, or not-a-number
Parameters
----------
value : number or array
Returns
-------
`True` if each member is either zero or not finite, `False` otherwise
"""
return np.all(np.logical_or(np.equal(value, 0.), ~np.isfinite(value)))
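# For example (illustrative values): ``can_have_arbitrary_unit(0.)`` and
# ``can_have_arbitrary_unit(np.array([0., np.inf, np.nan]))`` are `True`,
# while ``can_have_arbitrary_unit(1.)`` is `False`.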
def converters_and_unit(function, method, *args):
"""Determine the required converters and the unit of the ufunc result.
Converters are functions required to convert to a ufunc's expected unit,
e.g., radian for np.sin; or to ensure units of two inputs are consistent,
e.g., for np.add. In these examples, the unit of the result would be
dimensionless_unscaled for np.sin, and the same consistent unit for np.add.
Parameters
----------
function : `~numpy.ufunc`
Numpy universal function
method : str
Method with which the function is evaluated, e.g.,
'__call__', 'reduce', etc.
*args : Quantity or other ndarray subclass
Input arguments to the function
Raises
------
TypeError : when the specified function cannot be used with Quantities
(e.g., np.logical_or), or when the routine does not know how to handle
the specified function (in which case an issue should be raised on
https://github.com/astropy/astropy).
UnitTypeError : when the conversion to the required (or consistent) units
is not possible.
"""
# Check whether we support this ufunc, by getting the helper function
# (defined in helpers) which returns a list of function(s) that convert the
# input(s) to the unit required for the ufunc, as well as the unit the
# result will have (a tuple of units if there are multiple outputs).
ufunc_helper = UFUNC_HELPERS[function]
if method == '__call__' or (method == 'outer' and function.nin == 2):
# Find out the units of the arguments passed to the ufunc; usually,
# at least one is a quantity, but for two-argument ufuncs, the second
# could also be a Numpy array, etc. These are given unit=None.
units = [getattr(arg, 'unit', None) for arg in args]
# Determine possible conversion functions, and the result unit.
converters, result_unit = ufunc_helper(function, *units)
if any(converter is False for converter in converters):
# for multi-argument ufuncs with a quantity and a non-quantity,
# the quantity normally needs to be dimensionless, *except*
# if the non-quantity can have arbitrary unit, i.e., when it
# is all zero, infinity or NaN. In that case, the non-quantity
# can just have the unit of the quantity
# (this allows, e.g., `q > 0.` independent of unit)
try:
# Don't fold this loop in the test above: this rare case
# should not make the common case slower.
for i, converter in enumerate(converters):
if converter is not False:
continue
if can_have_arbitrary_unit(args[i]):
converters[i] = None
else:
raise UnitConversionError(
"Can only apply '{0}' function to "
"dimensionless quantities when other "
"argument is not a quantity (unless the "
"latter is all zero/infinity/nan)"
.format(function.__name__))
except TypeError:
                # can_have_arbitrary_unit failed: arg could not be compared
# with zero or checked to be finite. Then, ufunc will fail too.
raise TypeError("Unsupported operand type(s) for ufunc {0}: "
"'{1}'".format(function.__name__,
','.join([arg.__class__.__name__
for arg in args])))
# In the case of np.power and np.float_power, the unit itself needs to
# be modified by an amount that depends on one of the input values,
# so we need to treat this as a special case.
# TODO: find a better way to deal with this.
if result_unit is False:
if units[0] is None or units[0] == dimensionless_unscaled:
result_unit = dimensionless_unscaled
else:
if units[1] is None:
p = args[1]
else:
p = args[1].to(dimensionless_unscaled).value
try:
result_unit = units[0] ** p
except ValueError as exc:
# Changing the unit does not work for, e.g., array-shaped
# power, but this is OK if we're (scaled) dimensionless.
try:
converters[0] = units[0]._get_converter(
dimensionless_unscaled)
except UnitConversionError:
raise exc
else:
result_unit = dimensionless_unscaled
else: # methods for which the unit should stay the same
nin = function.nin
unit = getattr(args[0], 'unit', None)
if method == 'at' and nin <= 2:
if nin == 1:
units = [unit]
else:
units = [unit, getattr(args[2], 'unit', None)]
converters, result_unit = ufunc_helper(function, *units)
# ensure there is no 'converter' for indices (2nd argument)
converters.insert(1, None)
elif method in {'reduce', 'accumulate', 'reduceat'} and nin == 2:
converters, result_unit = ufunc_helper(function, unit, unit)
converters = converters[:1]
if method == 'reduceat':
# add 'scale' for indices (2nd argument)
converters += [None]
else:
if method in {'reduce', 'accumulate',
'reduceat', 'outer'} and nin != 2:
raise ValueError("{0} only supported for binary functions"
.format(method))
raise TypeError("Unexpected ufunc method {0}. If this should "
"work, please raise an issue on"
"https://github.com/astropy/astropy"
.format(method))
# for all but __call__ method, scaling is not allowed
if unit is not None and result_unit is None:
raise TypeError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as the result is not a "
"Quantity.".format(function.__name__, method))
if (converters[0] is not None or
(unit is not None and unit is not result_unit and
(not result_unit.is_equivalent(unit) or
result_unit.to(unit) != 1.))):
# NOTE: this cannot be the more logical UnitTypeError, since
            # then things like np.cumprod will no longer fail (they check
# for TypeError).
raise UnitsError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as it would change the unit."
.format(function.__name__, method))
return converters, result_unit
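# A minimal sketch of the result for a plain two-input call (illustrative,
# assuming ``import astropy.units as u``):
#
#     converters, unit = converters_and_unit(np.add, '__call__',
#                                            1. * u.km, 1. * u.m)
#     # converters[0] is None; converters[1] scales the second input from
#     # metres to kilometres (multiplies by 0.001); unit is u.km.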
def check_output(output, unit, inputs, function=None):
"""Check that function output can be stored in the output array given.
Parameters
----------
output : array or `~astropy.units.Quantity` or tuple
Array that should hold the function output (or tuple of such arrays).
unit : `~astropy.units.Unit` or None, or tuple
Unit that the output will have, or `None` for pure numbers (should be
tuple of same if output is a tuple of outputs).
inputs : tuple
Any input arguments. These should be castable to the output.
function : callable
The function that will be producing the output. If given, used to
give a more informative error message.
Returns
-------
arrays : `~numpy.ndarray` view of ``output`` (or tuple of such views).
Raises
------
UnitTypeError : If ``unit`` is inconsistent with the class of ``output``
TypeError : If the ``inputs`` cannot be cast safely to ``output``.
"""
if isinstance(output, tuple):
return tuple(check_output(output_, unit_, inputs, function)
for output_, unit_ in zip(output, unit))
# ``None`` indicates no actual array is needed. This can happen, e.g.,
# with np.modf(a, out=(None, b)).
if output is None:
return None
if hasattr(output, '__quantity_subclass__'):
# Check that we're not trying to store a plain Numpy array or a
# Quantity with an inconsistent unit (e.g., not angular for Angle).
if unit is None:
raise TypeError("Cannot store non-quantity output{0} in {1} "
"instance".format(
(" from {0} function".format(function.__name__)
if function is not None else ""),
type(output)))
if output.__quantity_subclass__(unit)[0] is not type(output):
raise UnitTypeError(
"Cannot store output with unit '{0}'{1} "
"in {2} instance. Use {3} instance instead."
.format(unit, (" from {0} function".format(function.__name__)
if function is not None else ""), type(output),
output.__quantity_subclass__(unit)[0]))
# Turn into ndarray, so we do not loop into array_wrap/array_ufunc
# if the output is used to store results of a function.
output = output.view(np.ndarray)
else:
# output is not a Quantity, so cannot obtain a unit.
if not (unit is None or unit is dimensionless_unscaled):
raise UnitTypeError("Cannot store quantity with dimension "
"{0}in a non-Quantity instance."
.format("" if function is None else
"resulting from {0} function "
.format(function.__name__)))
# check we can handle the dtype (e.g., that we are not int
# when float is required).
if not np.can_cast(np.result_type(*inputs), output.dtype,
casting='same_kind'):
raise TypeError("Arguments cannot be cast safely to inplace "
"output with dtype={0}".format(output.dtype))
return output
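# A minimal sketch of the two common outcomes (illustrative, assuming
# ``import astropy.units as u``):
#
#     out = np.empty(3)
#     check_output(out, None, (np.arange(3.),))   # returns ``out`` itself
#     check_output(out, u.m, (np.arange(3.),))    # raises UnitTypeError, since
#                                                 # a plain ndarray cannot hold 'm'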
|
1e6aa17460f74d2c339c911a8e894f9c87bb1a0ac3a5ac41469b44359352279a | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Quantity class and related.
"""
import copy
import pickle
import decimal
from fractions import Fraction
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_equal,
assert_array_almost_equal)
from astropy.tests.helper import catch_warnings, raises
from astropy.utils import isiterable, minversion
from astropy.utils.compat import NUMPY_LT_1_14
from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyWarning
from astropy import units as u
from astropy.units.quantity import _UNIT_NOT_INITIALISED
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from distutils.version import LooseVersion
MATPLOTLIB_LT_15 = LooseVersion(matplotlib.__version__) < LooseVersion("1.5")
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
""" The Quantity class will represent a number + unit + uncertainty """
class TestQuantityCreation:
def test_1(self):
# create objects through operations with Unit objects:
quantity = 11.42 * u.meter # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = u.meter * 11.42 # returns a Quantity object
assert isinstance(quantity, u.Quantity)
quantity = 11.42 / u.meter
assert isinstance(quantity, u.Quantity)
quantity = u.meter / 11.42
assert isinstance(quantity, u.Quantity)
quantity = 11.42 * u.meter / u.second
assert isinstance(quantity, u.Quantity)
with pytest.raises(TypeError):
quantity = 182.234 + u.meter
with pytest.raises(TypeError):
quantity = 182.234 - u.meter
with pytest.raises(TypeError):
quantity = 182.234 % u.meter
def test_2(self):
# create objects using the Quantity constructor:
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, "cm")
q3 = u.Quantity(11.412)
# By default quantities that don't specify a unit are unscaled
# dimensionless
assert q3.unit == u.Unit(1)
with pytest.raises(TypeError):
q4 = u.Quantity(object(), unit=u.m)
def test_3(self):
# with pytest.raises(u.UnitsError):
with pytest.raises(ValueError): # Until @mdboom fixes the errors in units
q1 = u.Quantity(11.412, unit="testingggg")
def test_nan_inf(self):
# Not-a-number
q = u.Quantity('nan', unit='cm')
assert np.isnan(q.value)
q = u.Quantity('NaN', unit='cm')
assert np.isnan(q.value)
q = u.Quantity('-nan', unit='cm') # float() allows this
assert np.isnan(q.value)
q = u.Quantity('nan cm')
assert np.isnan(q.value)
assert q.unit == u.cm
# Infinity
q = u.Quantity('inf', unit='cm')
assert np.isinf(q.value)
q = u.Quantity('-inf', unit='cm')
assert np.isinf(q.value)
q = u.Quantity('inf cm')
assert np.isinf(q.value)
assert q.unit == u.cm
q = u.Quantity('Infinity', unit='cm') # float() allows this
assert np.isinf(q.value)
# make sure these strings don't parse...
with pytest.raises(TypeError):
q = u.Quantity('', unit='cm')
with pytest.raises(TypeError):
q = u.Quantity('spam', unit='cm')
def test_unit_property(self):
# test getting and setting 'unit' attribute
q1 = u.Quantity(11.4, unit=u.meter)
with pytest.raises(AttributeError):
q1.unit = u.cm
def test_preserve_dtype(self):
"""Test that if an explicit dtype is given, it is used, while if not,
numbers are converted to float (including decimal.Decimal, which
numpy converts to an object; closes #1419)
"""
# If dtype is specified, use it, but if not, convert int, bool to float
q1 = u.Quantity(12, unit=u.m / u.s, dtype=int)
assert q1.dtype == int
q2 = u.Quantity(q1)
assert q2.dtype == float
assert q2.value == float(q1.value)
assert q2.unit == q1.unit
# but we should preserve float32
a3 = np.array([1., 2.], dtype=np.float32)
q3 = u.Quantity(a3, u.yr)
assert q3.dtype == a3.dtype
# items stored as objects by numpy should be converted to float
# by default
q4 = u.Quantity(decimal.Decimal('10.25'), u.m)
assert q4.dtype == float
q5 = u.Quantity(decimal.Decimal('10.25'), u.m, dtype=object)
assert q5.dtype == object
def test_copy(self):
# By default, a new quantity is constructed, but not if copy=False
a = np.arange(10.)
q0 = u.Quantity(a, unit=u.m / u.s)
assert q0.base is not a
q1 = u.Quantity(a, unit=u.m / u.s, copy=False)
assert q1.base is a
q2 = u.Quantity(q0)
assert q2 is not q0
assert q2.base is not q0.base
q2 = u.Quantity(q0, copy=False)
assert q2 is q0
assert q2.base is q0.base
q3 = u.Quantity(q0, q0.unit, copy=False)
assert q3 is q0
assert q3.base is q0.base
q4 = u.Quantity(q0, u.cm / u.s, copy=False)
assert q4 is not q0
assert q4.base is not q0.base
def test_subok(self):
"""Test subok can be used to keep class, or to insist on Quantity"""
class MyQuantitySubclass(u.Quantity):
pass
myq = MyQuantitySubclass(np.arange(10.), u.m)
# try both with and without changing the unit
assert type(u.Quantity(myq)) is u.Quantity
assert type(u.Quantity(myq, subok=True)) is MyQuantitySubclass
assert type(u.Quantity(myq, u.km)) is u.Quantity
assert type(u.Quantity(myq, u.km, subok=True)) is MyQuantitySubclass
def test_order(self):
"""Test that order is correctly propagated to np.array"""
ac = np.array(np.arange(10.), order='C')
qcc = u.Quantity(ac, u.m, order='C')
assert qcc.flags['C_CONTIGUOUS']
qcf = u.Quantity(ac, u.m, order='F')
assert qcf.flags['F_CONTIGUOUS']
qca = u.Quantity(ac, u.m, order='A')
assert qca.flags['C_CONTIGUOUS']
# check it works also when passing in a quantity
assert u.Quantity(qcc, order='C').flags['C_CONTIGUOUS']
assert u.Quantity(qcc, order='A').flags['C_CONTIGUOUS']
assert u.Quantity(qcc, order='F').flags['F_CONTIGUOUS']
af = np.array(np.arange(10.), order='F')
qfc = u.Quantity(af, u.m, order='C')
assert qfc.flags['C_CONTIGUOUS']
qff = u.Quantity(ac, u.m, order='F')
assert qff.flags['F_CONTIGUOUS']
qfa = u.Quantity(af, u.m, order='A')
assert qfa.flags['F_CONTIGUOUS']
assert u.Quantity(qff, order='C').flags['C_CONTIGUOUS']
assert u.Quantity(qff, order='A').flags['F_CONTIGUOUS']
assert u.Quantity(qff, order='F').flags['F_CONTIGUOUS']
def test_ndmin(self):
"""Test that ndmin is correctly propagated to np.array"""
a = np.arange(10.)
q1 = u.Quantity(a, u.m, ndmin=1)
assert q1.ndim == 1 and q1.shape == (10,)
q2 = u.Quantity(a, u.m, ndmin=2)
assert q2.ndim == 2 and q2.shape == (1, 10)
# check it works also when passing in a quantity
q3 = u.Quantity(q1, u.m, ndmin=3)
assert q3.ndim == 3 and q3.shape == (1, 1, 10)
def test_non_quantity_with_unit(self):
"""Test that unit attributes in objects get recognized."""
class MyQuantityLookalike(np.ndarray):
pass
a = np.arange(3.)
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = 'm'
q1 = u.Quantity(mylookalike)
assert isinstance(q1, u.Quantity)
assert q1.unit is u.m
assert np.all(q1.value == a)
q2 = u.Quantity(mylookalike, u.mm)
assert q2.unit is u.mm
assert np.all(q2.value == 1000.*a)
q3 = u.Quantity(mylookalike, copy=False)
assert np.all(q3.value == mylookalike)
q3[2] = 0
assert q3[2] == 0.
assert mylookalike[2] == 0.
mylookalike = a.copy().view(MyQuantityLookalike)
mylookalike.unit = u.m
q4 = u.Quantity(mylookalike, u.mm, copy=False)
q4[2] = 0
assert q4[2] == 0.
assert mylookalike[2] == 2.
mylookalike.unit = 'nonsense'
with pytest.raises(TypeError):
u.Quantity(mylookalike)
def test_creation_via_view(self):
# This works but is no better than 1. * u.m
q1 = 1. << u.m
assert isinstance(q1, u.Quantity)
assert q1.unit == u.m
assert q1.value == 1.
# With an array, we get an actual view.
a2 = np.arange(10.)
q2 = a2 << u.m / u.s
assert isinstance(q2, u.Quantity)
assert q2.unit == u.m / u.s
assert np.all(q2.value == a2)
a2[9] = 0.
assert np.all(q2.value == a2)
# But with a unit change we get a copy.
q3 = q2 << u.mm / u.s
assert isinstance(q3, u.Quantity)
assert q3.unit == u.mm / u.s
assert np.all(q3.value == a2 * 1000.)
a2[8] = 0.
assert q3[8].value == 8000.
# Without a unit change, we do get a view.
q4 = q2 << q2.unit
a2[7] = 0.
assert np.all(q4.value == a2)
with pytest.raises(u.UnitsError):
q2 << u.s
# But one can do an in-place unit change.
a2_copy = a2.copy()
q2 <<= u.mm / u.s
assert q2.unit == u.mm / u.s
# Of course, this changes a2 as well.
assert np.all(q2.value == a2)
# Sanity check on the values.
assert np.all(q2.value == a2_copy * 1000.)
a2[8] = -1.
# Using quantities, one can also work with strings.
q5 = q2 << 'km/hr'
assert q5.unit == u.km / u.hr
assert np.all(q5 == q2)
# Finally, we can use scalar quantities as units.
not_quite_a_foot = 30. * u.cm
a6 = np.arange(5.)
q6 = a6 << not_quite_a_foot
assert q6.unit == u.Unit(not_quite_a_foot)
assert np.all(q6.to_value(u.cm) == 30. * a6)
def test_rshift_warns(self):
with pytest.raises(TypeError), \
catch_warnings() as warning_lines:
1 >> u.m
assert len(warning_lines) == 1
assert warning_lines[0].category == AstropyWarning
assert 'is not implemented' in str(warning_lines[0].message)
q = 1. * u.km
with pytest.raises(TypeError), \
catch_warnings() as warning_lines:
q >> u.m
assert len(warning_lines) == 1
assert warning_lines[0].category == AstropyWarning
assert 'is not implemented' in str(warning_lines[0].message)
with pytest.raises(TypeError), \
catch_warnings() as warning_lines:
q >>= u.m
assert len(warning_lines) == 1
assert warning_lines[0].category == AstropyWarning
assert 'is not implemented' in str(warning_lines[0].message)
with pytest.raises(TypeError), \
catch_warnings() as warning_lines:
1. >> q
assert len(warning_lines) == 1
assert warning_lines[0].category == AstropyWarning
assert 'is not implemented' in str(warning_lines[0].message)
class TestQuantityOperations:
q1 = u.Quantity(11.42, u.meter)
q2 = u.Quantity(8.0, u.centimeter)
def test_addition(self):
# Take units from left object, q1
new_quantity = self.q1 + self.q2
assert new_quantity.value == 11.5
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 + self.q1
assert new_quantity.value == 1150.0
assert new_quantity.unit == u.centimeter
new_q = u.Quantity(1500.1, u.m) + u.Quantity(13.5, u.km)
assert new_q.unit == u.m
assert new_q.value == 15000.1
def test_subtraction(self):
# Take units from left object, q1
new_quantity = self.q1 - self.q2
assert new_quantity.value == 11.34
assert new_quantity.unit == u.meter
# Take units from left object, q2
new_quantity = self.q2 - self.q1
assert new_quantity.value == -1134.0
assert new_quantity.unit == u.centimeter
def test_multiplication(self):
# Take units from left object, q1
new_quantity = self.q1 * self.q2
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.meter * u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 * self.q1
assert new_quantity.value == 91.36
assert new_quantity.unit == (u.centimeter * u.meter)
# Multiply with a number
new_quantity = 15. * self.q1
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
# Multiply with a number
new_quantity = self.q1 * 15.
assert new_quantity.value == 171.3
assert new_quantity.unit == u.meter
def test_division(self):
# Take units from left object, q1
new_quantity = self.q1 / self.q2
assert_array_almost_equal(new_quantity.value, 1.4275, decimal=5)
assert new_quantity.unit == (u.meter / u.centimeter)
# Take units from left object, q2
new_quantity = self.q2 / self.q1
assert_array_almost_equal(new_quantity.value, 0.70052539404553416,
decimal=16)
assert new_quantity.unit == (u.centimeter / u.meter)
q1 = u.Quantity(11.4, unit=u.meter)
q2 = u.Quantity(10.0, unit=u.second)
new_quantity = q1 / q2
assert_array_almost_equal(new_quantity.value, 1.14, decimal=10)
assert new_quantity.unit == (u.meter / u.second)
# divide with a number
new_quantity = self.q1 / 10.
assert new_quantity.value == 1.142
assert new_quantity.unit == u.meter
# divide with a number
new_quantity = 11.42 / self.q1
assert new_quantity.value == 1.
assert new_quantity.unit == u.Unit("1/m")
def test_commutativity(self):
"""Regression test for issue #587."""
new_q = u.Quantity(11.42, 'm*s')
assert self.q1 * u.s == u.s * self.q1 == new_q
assert self.q1 / u.s == u.Quantity(11.42, 'm/s')
assert u.s / self.q1 == u.Quantity(1 / 11.42, 's/m')
def test_power(self):
# raise quantity to a power
new_quantity = self.q1 ** 2
assert_array_almost_equal(new_quantity.value, 130.4164, decimal=5)
assert new_quantity.unit == u.Unit("m^2")
new_quantity = self.q1 ** 3
assert_array_almost_equal(new_quantity.value, 1489.355288, decimal=7)
assert new_quantity.unit == u.Unit("m^3")
def test_matrix_multiplication(self):
a = np.eye(3)
q = a * u.m
result1 = q @ a
assert np.all(result1 == q)
result2 = a @ q
assert np.all(result2 == q)
result3 = q @ q
assert np.all(result3 == a * u.m ** 2)
# less trivial case.
q2 = np.array([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]) / u.s
result4 = q @ q2
assert np.all(result4 == np.matmul(a, q2.value) * q.unit * q2.unit)
def test_unary(self):
# Test the minus unary operator
new_quantity = -self.q1
assert new_quantity.value == -self.q1.value
assert new_quantity.unit == self.q1.unit
new_quantity = -(-self.q1)
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
# Test the plus unary operator
new_quantity = +self.q1
assert new_quantity.value == self.q1.value
assert new_quantity.unit == self.q1.unit
def test_abs(self):
q = 1. * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == q.value
assert new_quantity.unit == q.unit
q = -1. * u.m / u.s
new_quantity = abs(q)
assert new_quantity.value == -q.value
assert new_quantity.unit == q.unit
def test_incompatible_units(self):
""" When trying to add or subtract units that aren't compatible, throw an error """
q1 = u.Quantity(11.412, unit=u.meter)
q2 = u.Quantity(21.52, unit=u.second)
with pytest.raises(u.UnitsError):
new_q = q1 + q2
def test_non_number_type(self):
q1 = u.Quantity(11.412, unit=u.meter)
with pytest.raises(TypeError) as exc:
q1 + {'a': 1}
assert exc.value.args[0].startswith(
"Unsupported operand type(s) for ufunc add:")
with pytest.raises(TypeError):
q1 + u.meter
def test_dimensionless_operations(self):
# test conversion to dimensionless
dq = 3. * u.m / u.km
dq1 = dq + 1. * u.mm / u.km
assert dq1.value == 3.001
assert dq1.unit == dq.unit
dq2 = dq + 1.
assert dq2.value == 1.003
assert dq2.unit == u.dimensionless_unscaled
# this test will check that operations with dimensionless Quantities
# don't work
with pytest.raises(u.UnitsError):
self.q1 + u.Quantity(0.1, unit=u.Unit(""))
with pytest.raises(u.UnitsError):
self.q1 - u.Quantity(0.1, unit=u.Unit(""))
# and test that scaling of integers works
q = u.Quantity(np.array([1, 2, 3]), u.m / u.km, dtype=int)
q2 = q + np.array([4, 5, 6])
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value, np.array([4.001, 5.002, 6.003]))
# but not if doing it inplace
with pytest.raises(TypeError):
q += np.array([1, 2, 3])
# except if it is actually possible
q = np.array([1, 2, 3]) * u.km / u.m
q += np.array([4, 5, 6])
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == np.array([1004, 2005, 3006]))
def test_complicated_operation(self):
""" Perform a more complicated test """
from astropy.units import imperial
# Multiple units
distance = u.Quantity(15., u.meter)
time = u.Quantity(11., u.second)
velocity = (distance / time).to(imperial.mile / u.hour)
assert_array_almost_equal(
velocity.value, 3.05037, decimal=5)
G = u.Quantity(6.673E-11, u.m ** 3 / u.kg / u.s ** 2)
new_q = ((1. / (4. * np.pi * G)).to(u.pc ** -3 / u.s ** -2 * u.kg))
# Area
side1 = u.Quantity(11., u.centimeter)
side2 = u.Quantity(7., u.centimeter)
area = side1 * side2
assert_array_almost_equal(area.value, 77., decimal=15)
assert area.unit == u.cm * u.cm
def test_comparison(self):
# equality/ non-equality is straightforward for quantity objects
assert (1 / (u.cm * u.cm)) == 1 * u.cm ** -2
assert 1 * u.m == 100 * u.cm
assert 1 * u.m != 1 * u.cm
# when one is a unit, Quantity does not know what to do,
# but unit is fine with it, so it still works
unit = u.cm**3
q = 1. * unit
assert q.__eq__(unit) is NotImplemented
assert unit.__eq__(q) is True
assert q == unit
q = 1000. * u.mm**3
assert q == unit
# mismatched types should never work
assert not 1. * u.cm == 1.
assert 1. * u.cm != 1.
        # evaluating the truth value of a Quantity (an implicit comparison
        # with zero) should raise a deprecation warning
for quantity in (1. * u.cm, 1. * u.dimensionless_unscaled):
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
bool(quantity)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == 'The truth value of '
'a Quantity is ambiguous. In the future this will '
'raise a ValueError.')
def test_numeric_converters(self):
# float, int, long, and __index__ should only work for single
# quantities, of appropriate type, and only if they are dimensionless.
# for index, this should be unscaled as well
# (Check on __index__ is also a regression test for #1557)
# quantities with units should never convert, or be usable as an index
q1 = u.Quantity(1, u.m)
converter_err_msg = ("only dimensionless scalar quantities "
"can be converted to Python scalars")
index_err_msg = ("only integer dimensionless scalar quantities "
"can be converted to a Python index")
with pytest.raises(TypeError) as exc:
float(q1)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q1)
assert exc.value.args[0] == converter_err_msg
        # We used to test `q1 * ['a', 'b', 'c']` here, but that it worked
        # at all was a really odd confluence of bugs.  Since it doesn't work
# in numpy >=1.10 any more, just go directly for `__index__` (which
# makes the test more similar to the `int`, `long`, etc., tests).
with pytest.raises(TypeError) as exc:
q1.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless but scaled is OK, however
q2 = u.Quantity(1.23, u.m / u.km)
assert float(q2) == float(q2.to_value(u.dimensionless_unscaled))
assert int(q2) == int(q2.to_value(u.dimensionless_unscaled))
with pytest.raises(TypeError) as exc:
q2.__index__()
assert exc.value.args[0] == index_err_msg
# dimensionless unscaled is OK, though for index needs to be int
q3 = u.Quantity(1.23, u.dimensionless_unscaled)
assert float(q3) == 1.23
assert int(q3) == 1
with pytest.raises(TypeError) as exc:
q3.__index__()
assert exc.value.args[0] == index_err_msg
# integer dimensionless unscaled is good for all
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert float(q4) == 2.
assert int(q4) == 2
assert q4.__index__() == 2
# but arrays are not OK
q5 = u.Quantity([1, 2], u.m)
with pytest.raises(TypeError) as exc:
float(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
int(q5)
assert exc.value.args[0] == converter_err_msg
with pytest.raises(TypeError) as exc:
q5.__index__()
assert exc.value.args[0] == index_err_msg
# See https://github.com/numpy/numpy/issues/5074
# It seems unlikely this will be resolved, so xfail'ing it.
@pytest.mark.xfail(reason="list multiplication only works for numpy <=1.10")
def test_numeric_converter_to_index_in_practice(self):
"""Test that use of __index__ actually works."""
q4 = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
assert q4 * ['a', 'b', 'c'] == ['a', 'b', 'c', 'a', 'b', 'c']
def test_array_converters(self):
# Scalar quantity
q = u.Quantity(1.23, u.m)
assert np.all(np.array(q) == np.array([1.23]))
# Array quantity
q = u.Quantity([1., 2., 3.], u.m)
assert np.all(np.array(q) == np.array([1., 2., 3.]))
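# A minimal sketch (illustrative only, not part of astropy's API) of the
# conversion rules exercised in test_numeric_converters above: float(), int()
# and __index__() succeed only for scalar quantities that are (or can be made)
# dimensionless, and __index__() additionally needs an unscaled integer value.
def _numeric_conversion_sketch():
    from astropy import units as u
    q = u.Quantity(1.23, u.m / u.km)  # dimensionless once the scale is applied
    assert float(q) == float(q.to_value(u.dimensionless_unscaled))
    n = u.Quantity(2, u.dimensionless_unscaled, dtype=int)
    assert n.__index__() == 2  # usable as a Python index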
def test_quantity_conversion():
q1 = u.Quantity(0.1, unit=u.meter)
value = q1.value
assert value == 0.1
value_in_km = q1.to_value(u.kilometer)
assert value_in_km == 0.0001
new_quantity = q1.to(u.kilometer)
assert new_quantity.value == 0.0001
with pytest.raises(u.UnitsError):
q1.to(u.zettastokes)
with pytest.raises(u.UnitsError):
q1.to_value(u.zettastokes)
def test_quantity_value_views():
q1 = u.Quantity([1., 2.], unit=u.meter)
# views if the unit is the same.
v1 = q1.value
v1[0] = 0.
assert np.all(q1 == [0., 2.] * u.meter)
v2 = q1.to_value()
v2[1] = 3.
assert np.all(q1 == [0., 3.] * u.meter)
v3 = q1.to_value('m')
v3[0] = 1.
assert np.all(q1 == [1., 3.] * u.meter)
v4 = q1.to_value('cm')
v4[0] = 0.
# copy if different unit.
assert np.all(q1 == [1., 3.] * u.meter)
def test_quantity_conversion_with_equiv():
q1 = u.Quantity(0.1, unit=u.meter)
v2 = q1.to_value(u.Hz, equivalencies=u.spectral())
assert_allclose(v2, 2997924580.0)
q2 = q1.to(u.Hz, equivalencies=u.spectral())
assert_allclose(q2.value, v2)
q1 = u.Quantity(0.4, unit=u.arcsecond)
v2 = q1.to_value(u.au, equivalencies=u.parallax())
q2 = q1.to(u.au, equivalencies=u.parallax())
v3 = q2.to_value(u.arcminute, equivalencies=u.parallax())
q3 = q2.to(u.arcminute, equivalencies=u.parallax())
assert_allclose(v2, 515662.015)
assert_allclose(q2.value, v2)
assert q2.unit == u.au
assert_allclose(v3, 0.0066666667)
assert_allclose(q3.value, v3)
assert q3.unit == u.arcminute
def test_quantity_conversion_equivalency_passed_on():
class MySpectral(u.Quantity):
_equivalencies = u.spectral()
def __quantity_view__(self, obj, unit):
return obj.view(MySpectral)
def __quantity_instance__(self, *args, **kwargs):
return MySpectral(*args, **kwargs)
q1 = MySpectral([1000, 2000], unit=u.Hz)
q2 = q1.to(u.nm)
assert q2.unit == u.nm
q3 = q2.to(u.Hz)
assert q3.unit == u.Hz
assert_allclose(q3.value, q1.value)
q4 = MySpectral([1000, 2000], unit=u.nm)
q5 = q4.to(u.Hz).to(u.nm)
assert q5.unit == u.nm
assert_allclose(q4.value, q5.value)
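# A minimal sketch of the subclassing pattern exercised just above: a Quantity
# subclass can carry default equivalencies through the ``_equivalencies`` class
# attribute, so ``to()`` works without passing ``equivalencies=`` explicitly.
# The class and function names here are illustrative only.
def _default_equivalencies_sketch():
    from astropy import units as u
    class SpectralQuantity(u.Quantity):
        _equivalencies = u.spectral()
    q = SpectralQuantity(500., unit=u.nm)
    # a plain Quantity would need q.to(u.THz, equivalencies=u.spectral())
    assert q.to(u.THz).unit == u.THz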
# Regression test for issue #2315, divide-by-zero error when examining 0*unit
def test_self_equivalency():
assert u.deg.is_equivalent(0*u.radian)
assert u.deg.is_equivalent(1*u.radian)
def test_si():
q1 = 10. * u.m * u.s ** 2 / (200. * u.ms) ** 2 # 250 meters
assert q1.si.value == 250
assert q1.si.unit == u.m
q = 10. * u.m # 10 meters
assert q.si.value == 10
assert q.si.unit == u.m
    q = 10. / u.m  # 10 per meter
assert q.si.value == 10
assert q.si.unit == (1 / u.m)
def test_cgs():
q1 = 10. * u.cm * u.s ** 2 / (200. * u.ms) ** 2 # 250 centimeters
assert q1.cgs.value == 250
assert q1.cgs.unit == u.cm
    q = 10. * u.m  # 10 meters (= 1000 cm)
assert q.cgs.value == 1000
assert q.cgs.unit == u.cm
    q = 10. / u.cm  # 10 per centimeter
assert q.cgs.value == 10
assert q.cgs.unit == (1 / u.cm)
q = 10. * u.Pa # 10 pascals
assert q.cgs.value == 100
assert q.cgs.unit == u.barye
class TestQuantityComparison:
def test_quantity_equality(self):
with catch_warnings(DeprecationWarning) as w:
assert u.Quantity(1000, unit='m') == u.Quantity(1, unit='km')
assert not (u.Quantity(1, unit='m') == u.Quantity(1, unit='km'))
# for ==, !=, return False, True if units do not match
assert (u.Quantity(1100, unit=u.m) != u.Quantity(1, unit=u.s)) is True
assert (u.Quantity(1100, unit=u.m) == u.Quantity(1, unit=u.s)) is False
assert (u.Quantity(0, unit=u.m) == u.Quantity(0, unit=u.s)) is False
# But allow comparison with 0, +/-inf if latter unitless
assert u.Quantity(0, u.m) == 0.
assert u.Quantity(1, u.m) != 0.
assert u.Quantity(1, u.m) != np.inf
assert u.Quantity(np.inf, u.m) == np.inf
assert len(w) == 0
def test_quantity_equality_array(self):
with catch_warnings(DeprecationWarning) as w:
a = u.Quantity([0., 1., 1000.], u.m)
b = u.Quantity(1., u.km)
eq = a == b
ne = a != b
assert np.all(eq == [False, False, True])
assert np.all(eq != ne)
            # For mismatched units, == should just give False and != give True
c = u.Quantity(1., u.s)
eq = a == c
ne = a != c
assert eq is False
assert ne is True
# Constants are treated as dimensionless, so False too.
eq = a == 1.
ne = a != 1.
assert eq is False
assert ne is True
# But 0 can have any units, so we can compare.
eq = a == 0
ne = a != 0
assert np.all(eq == [True, False, False])
assert np.all(eq != ne)
# But we do not extend that to arrays; they should have the same unit.
d = np.array([0, 1., 1000.])
eq = a == d
ne = a != d
assert eq is False
assert ne is True
assert len(w) == 0
def test_quantity_comparison(self):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) < u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) > u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) < u.Quantity(1, unit=u.second)
assert u.Quantity(1100, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) >= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(900, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
assert u.Quantity(1000, unit=u.meter) <= u.Quantity(1, unit=u.kilometer)
with pytest.raises(u.UnitsError):
assert u.Quantity(
1100, unit=u.meter) >= u.Quantity(1, unit=u.second)
with pytest.raises(u.UnitsError):
assert u.Quantity(1100, unit=u.meter) <= u.Quantity(1, unit=u.second)
assert u.Quantity(1200, unit=u.meter) != u.Quantity(1, unit=u.kilometer)
class TestQuantityDisplay:
scalarintq = u.Quantity(1, unit='m', dtype=int)
scalarfloatq = u.Quantity(1.3, unit='m')
arrq = u.Quantity([1, 2.3, 8.9], unit='m')
scalar_complex_q = u.Quantity(complex(1.0, 2.0))
scalar_big_complex_q = u.Quantity(complex(1.0, 2.0e27) * 1e25)
scalar_big_neg_complex_q = u.Quantity(complex(-1.0, -2.0e27) * 1e36)
arr_complex_q = u.Quantity(np.arange(3) * (complex(-1.0, -2.0e27) * 1e36))
big_arr_complex_q = u.Quantity(np.arange(125) * (complex(-1.0, -2.0e27) * 1e36))
def test_dimensionless_quantity_repr(self):
q2 = u.Quantity(1., unit='m-1')
q3 = u.Quantity(1, unit='m-1', dtype=int)
if NUMPY_LT_1_14:
assert repr(self.scalarintq * q2) == "<Quantity 1.0>"
assert repr(self.arrq * q2) == "<Quantity [ 1. , 2.3, 8.9]>"
else:
assert repr(self.scalarintq * q2) == "<Quantity 1.>"
assert repr(self.arrq * q2) == "<Quantity [1. , 2.3, 8.9]>"
assert repr(self.scalarintq * q3) == "<Quantity 1>"
def test_dimensionless_quantity_str(self):
q2 = u.Quantity(1., unit='m-1')
q3 = u.Quantity(1, unit='m-1', dtype=int)
assert str(self.scalarintq * q2) == "1.0"
assert str(self.scalarintq * q3) == "1"
if NUMPY_LT_1_14:
assert str(self.arrq * q2) == "[ 1. 2.3 8.9]"
else:
assert str(self.arrq * q2) == "[1. 2.3 8.9]"
def test_dimensionless_quantity_format(self):
q1 = u.Quantity(3.14)
assert format(q1, '.2f') == '3.14'
def test_scalar_quantity_str(self):
assert str(self.scalarintq) == "1 m"
assert str(self.scalarfloatq) == "1.3 m"
def test_scalar_quantity_repr(self):
assert repr(self.scalarintq) == "<Quantity 1 m>"
assert repr(self.scalarfloatq) == "<Quantity 1.3 m>"
def test_array_quantity_str(self):
if NUMPY_LT_1_14:
assert str(self.arrq) == "[ 1. 2.3 8.9] m"
else:
assert str(self.arrq) == "[1. 2.3 8.9] m"
def test_array_quantity_repr(self):
if NUMPY_LT_1_14:
assert repr(self.arrq) == "<Quantity [ 1. , 2.3, 8.9] m>"
else:
assert repr(self.arrq) == "<Quantity [1. , 2.3, 8.9] m>"
def test_scalar_quantity_format(self):
assert format(self.scalarintq, '02d') == "01 m"
assert format(self.scalarfloatq, '.1f') == "1.3 m"
assert format(self.scalarfloatq, '.0f') == "1 m"
def test_uninitialized_unit_format(self):
bad_quantity = np.arange(10.).view(u.Quantity)
assert str(bad_quantity).endswith(_UNIT_NOT_INITIALISED)
assert repr(bad_quantity).endswith(_UNIT_NOT_INITIALISED + '>')
def test_to_string(self):
qscalar = u.Quantity(1.5e14, 'm/s')
# __str__ is the default `format`
assert str(qscalar) == qscalar.to_string()
res = 'Quantity as KMS: 150000000000.0 km / s'
assert "Quantity as KMS: {0}".format(qscalar.to_string(unit=u.km / u.s)) == res
res = r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$'
assert qscalar.to_string(format="latex") == res
res = r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$'
assert qscalar.to_string(format="latex", subfmt="inline") == res
res = r'$\displaystyle 1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$'
assert qscalar.to_string(format="latex", subfmt="display") == res
def test_repr_latex(self):
from astropy.units.quantity import conf
q2scalar = u.Quantity(1.5e14, 'm/s')
assert self.scalarintq._repr_latex_() == r'$1 \; \mathrm{m}$'
assert self.scalarfloatq._repr_latex_() == r'$1.3 \; \mathrm{m}$'
assert (q2scalar._repr_latex_() ==
r'$1.5 \times 10^{14} \; \mathrm{\frac{m}{s}}$')
assert self.arrq._repr_latex_() == r'$[1,~2.3,~8.9] \; \mathrm{m}$'
# Complex quantities
assert self.scalar_complex_q._repr_latex_() == r'$(1+2i) \; \mathrm{}$'
assert (self.scalar_big_complex_q._repr_latex_() ==
r'$(1 \times 10^{25}+2 \times 10^{52}i) \; \mathrm{}$')
assert (self.scalar_big_neg_complex_q._repr_latex_() ==
r'$(-1 \times 10^{36}-2 \times 10^{63}i) \; \mathrm{}$')
assert (self.arr_complex_q._repr_latex_() ==
(r'$[(0-0i),~(-1 \times 10^{36}-2 \times 10^{63}i),'
r'~(-2 \times 10^{36}-4 \times 10^{63}i)] \; \mathrm{}$'))
assert r'\dots' in self.big_arr_complex_q._repr_latex_()
qmed = np.arange(100)*u.m
qbig = np.arange(1000)*u.m
qvbig = np.arange(10000)*1e9*u.m
pops = np.get_printoptions()
oldlat = conf.latex_array_threshold
try:
# check precision behavior
q = u.Quantity(987654321.123456789, 'm/s')
qa = np.array([7.89123, 123456789.987654321, 0]) * u.cm
np.set_printoptions(precision=8)
assert q._repr_latex_() == r'$9.8765432 \times 10^{8} \; \mathrm{\frac{m}{s}}$'
assert qa._repr_latex_() == r'$[7.89123,~1.2345679 \times 10^{8},~0] \; \mathrm{cm}$'
np.set_printoptions(precision=2)
assert q._repr_latex_() == r'$9.9 \times 10^{8} \; \mathrm{\frac{m}{s}}$'
assert qa._repr_latex_() == r'$[7.9,~1.2 \times 10^{8},~0] \; \mathrm{cm}$'
# check thresholding behavior
conf.latex_array_threshold = 100 # should be default
lsmed = qmed._repr_latex_()
assert r'\dots' not in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
conf.latex_array_threshold = 1001
lsmed = qmed._repr_latex_()
assert r'\dots' not in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' not in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
conf.latex_array_threshold = -1 # means use the numpy threshold
np.set_printoptions(threshold=99)
lsmed = qmed._repr_latex_()
assert r'\dots' in lsmed
lsbig = qbig._repr_latex_()
assert r'\dots' in lsbig
lsvbig = qvbig._repr_latex_()
assert r'\dots' in lsvbig
finally:
# prevent side-effects from influencing other tests
np.set_printoptions(**pops)
conf.latex_array_threshold = oldlat
qinfnan = [np.inf, -np.inf, np.nan] * u.m
assert qinfnan._repr_latex_() == r'$[\infty,~-\infty,~{\rm NaN}] \; \mathrm{m}$'
def test_decompose():
q1 = 5 * u.N
assert q1.decompose() == (5 * u.kg * u.m * u.s ** -2)
def test_decompose_regression():
"""
Regression test for bug #1163
If decompose was called multiple times on a Quantity with an array and a
scale != 1, the result changed every time. This is because the value was
being referenced not copied, then modified, which changed the original
value.
"""
q = np.array([1, 2, 3]) * u.m / (2. * u.km)
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
assert np.all(q == np.array([1, 2, 3]) * u.m / (2. * u.km))
assert np.all(q.decompose().value == np.array([0.0005, 0.001, 0.0015]))
def test_arrays():
"""
    Test using quantities with array values
"""
qsec = u.Quantity(np.arange(10), u.second)
assert isinstance(qsec.value, np.ndarray)
assert not qsec.isscalar
# len and indexing should work for arrays
assert len(qsec) == len(qsec.value)
qsecsub25 = qsec[2:5]
assert qsecsub25.unit == qsec.unit
assert isinstance(qsecsub25, u.Quantity)
assert len(qsecsub25) == 3
    # make sure isscalar, len, and indexing behave correctly for non-arrays.
qsecnotarray = u.Quantity(10., u.second)
assert qsecnotarray.isscalar
with pytest.raises(TypeError):
len(qsecnotarray)
with pytest.raises(TypeError):
qsecnotarray[0]
qseclen0array = u.Quantity(np.array(10), u.second, dtype=int)
# 0d numpy array should act basically like a scalar
assert qseclen0array.isscalar
with pytest.raises(TypeError):
len(qseclen0array)
with pytest.raises(TypeError):
qseclen0array[0]
assert isinstance(qseclen0array.value, int)
a = np.array([(1., 2., 3.), (4., 5., 6.), (7., 8., 9.)],
dtype=[('x', float),
('y', float),
('z', float)])
qkpc = u.Quantity(a, u.kpc)
assert not qkpc.isscalar
qkpc0 = qkpc[0]
assert qkpc0.value == a[0]
assert qkpc0.unit == qkpc.unit
assert isinstance(qkpc0, u.Quantity)
assert qkpc0.isscalar
qkpcx = qkpc['x']
assert np.all(qkpcx.value == a['x'])
assert qkpcx.unit == qkpc.unit
assert isinstance(qkpcx, u.Quantity)
assert not qkpcx.isscalar
qkpcx1 = qkpc['x'][1]
assert qkpcx1.unit == qkpc.unit
assert isinstance(qkpcx1, u.Quantity)
assert qkpcx1.isscalar
qkpc1x = qkpc[1]['x']
assert qkpc1x.isscalar
assert qkpc1x == qkpcx1
# can also create from lists, will auto-convert to arrays
qsec = u.Quantity(list(range(10)), u.second)
assert isinstance(qsec.value, np.ndarray)
# quantity math should work with arrays
assert_array_equal((qsec * 2).value, (np.arange(10) * 2))
assert_array_equal((qsec / 2).value, (np.arange(10) / 2))
    # quantity addition/subtraction should *not* work with plain arrays, because
    # the unit would be ambiguous
with pytest.raises(u.UnitsError):
assert_array_equal((qsec + 2).value, (np.arange(10) + 2))
with pytest.raises(u.UnitsError):
assert_array_equal((qsec - 2).value, (np.arange(10) + 2))
# should create by unit multiplication, too
qsec2 = np.arange(10) * u.second
qsec3 = u.second * np.arange(10)
assert np.all(qsec == qsec2)
assert np.all(qsec2 == qsec3)
# make sure numerical-converters fail when arrays are present
with pytest.raises(TypeError):
float(qsec)
with pytest.raises(TypeError):
int(qsec)
def test_array_indexing_slicing():
q = np.array([1., 2., 3.]) * u.m
assert q[0] == 1. * u.m
assert np.all(q[0:2] == u.Quantity([1., 2.], u.m))
def test_array_setslice():
q = np.array([1., 2., 3.]) * u.m
q[1:2] = np.array([400.]) * u.cm
assert np.all(q == np.array([1., 4., 3.]) * u.m)
def test_inverse_quantity():
"""
Regression test from issue #679
"""
q = u.Quantity(4., u.meter / u.second)
qot = q / 2
toq = 2 / q
npqot = q / np.array(2)
assert npqot.value == 2.0
assert npqot.unit == (u.meter / u.second)
assert qot.value == 2.0
assert qot.unit == (u.meter / u.second)
assert toq.value == 0.5
assert toq.unit == (u.second / u.meter)
def test_quantity_mutability():
q = u.Quantity(9.8, u.meter / u.second / u.second)
with pytest.raises(AttributeError):
q.value = 3
with pytest.raises(AttributeError):
q.unit = u.kg
def test_quantity_initialized_with_quantity():
q1 = u.Quantity(60, u.second)
q2 = u.Quantity(q1, u.minute)
assert q2.value == 1
q3 = u.Quantity([q1, q2], u.second)
assert q3[0].value == 60
assert q3[1].value == 60
q4 = u.Quantity([q2, q1])
assert q4.unit == q2.unit
assert q4[0].value == 1
assert q4[1].value == 1
def test_quantity_string_unit():
q1 = 1. * u.m / 's'
assert q1.value == 1
assert q1.unit == (u.m / u.s)
q2 = q1 * "m"
assert q2.unit == ((u.m * u.m) / u.s)
@raises(ValueError)
def test_quantity_invalid_unit_string():
"foo" * u.m
def test_implicit_conversion():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
assert_allclose(q.centimeter, 100)
assert_allclose(q.cm, 100)
assert_allclose(q.parsec, 3.240779289469756e-17)
def test_implicit_conversion_autocomplete():
q = u.Quantity(1.0, u.meter)
# Manually turn this on to simulate what might happen in a subclass
q._include_easy_conversion_members = True
q.foo = 42
attrs = dir(q)
assert 'centimeter' in attrs
assert 'cm' in attrs
assert 'parsec' in attrs
assert 'foo' in attrs
assert 'to' in attrs
assert 'value' in attrs
# Something from the base class, object
assert '__setattr__' in attrs
with pytest.raises(AttributeError):
q.l
def test_quantity_iterability():
    """Regression test for issue #878.
Scalar quantities should not be iterable and should raise a type error on
iteration.
"""
q1 = [15.0, 17.0] * u.m
assert isiterable(q1)
q2 = next(iter(q1))
assert q2 == 15.0 * u.m
assert not isiterable(q2)
pytest.raises(TypeError, iter, q2)
def test_copy():
q1 = u.Quantity(np.array([[1., 2., 3.], [4., 5., 6.]]), unit=u.m)
q2 = q1.copy()
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
q3 = q1.copy(order='F')
assert q3.flags['F_CONTIGUOUS']
assert np.all(q1.value == q3.value)
assert q1.unit == q3.unit
assert q1.dtype == q3.dtype
assert q1.value is not q3.value
q4 = q1.copy(order='C')
assert q4.flags['C_CONTIGUOUS']
assert np.all(q1.value == q4.value)
assert q1.unit == q4.unit
assert q1.dtype == q4.dtype
assert q1.value is not q4.value
def test_deepcopy():
q1 = u.Quantity(np.array([1., 2., 3.]), unit=u.m)
q2 = copy.deepcopy(q1)
assert isinstance(q2, u.Quantity)
assert np.all(q1.value == q2.value)
assert q1.unit == q2.unit
assert q1.dtype == q2.dtype
assert q1.value is not q2.value
def test_equality_numpy_scalar():
"""
A regression test to ensure that numpy scalars are correctly compared
(which originally failed due to the lack of ``__array_priority__``).
"""
assert 10 != 10. * u.m
assert np.int64(10) != 10 * u.m
assert 10 * u.m != np.int64(10)
def test_quantity_pickleability():
"""
Testing pickleability of quantity
"""
q1 = np.arange(10) * u.m
q2 = pickle.loads(pickle.dumps(q1))
assert np.all(q1.value == q2.value)
assert q1.unit.is_equivalent(q2.unit)
assert q1.unit == q2.unit
def test_quantity_initialisation_from_string():
q = u.Quantity('1')
assert q.unit == u.dimensionless_unscaled
assert q.value == 1.
q = u.Quantity('1.5 m/s')
assert q.unit == u.m/u.s
assert q.value == 1.5
assert u.Unit(q) == u.Unit('1.5 m/s')
q = u.Quantity('.5 m')
assert q == u.Quantity(0.5, u.m)
q = u.Quantity('-1e1km')
assert q == u.Quantity(-10, u.km)
q = u.Quantity('-1e+1km')
assert q == u.Quantity(-10, u.km)
q = u.Quantity('+.5km')
assert q == u.Quantity(.5, u.km)
q = u.Quantity('+5e-1km')
assert q == u.Quantity(.5, u.km)
q = u.Quantity('5', u.m)
assert q == u.Quantity(5., u.m)
q = u.Quantity('5 km', u.m)
assert q.value == 5000.
assert q.unit == u.m
q = u.Quantity('5Em')
assert q == u.Quantity(5., u.Em)
with pytest.raises(TypeError):
u.Quantity('')
with pytest.raises(TypeError):
u.Quantity('m')
with pytest.raises(TypeError):
u.Quantity('1.2.3 deg')
with pytest.raises(TypeError):
u.Quantity('1+deg')
with pytest.raises(TypeError):
u.Quantity('1-2deg')
with pytest.raises(TypeError):
u.Quantity('1.2e-13.3m')
with pytest.raises(TypeError):
u.Quantity(['5'])
with pytest.raises(TypeError):
u.Quantity(np.array(['5']))
with pytest.raises(ValueError):
u.Quantity('5E')
with pytest.raises(ValueError):
u.Quantity('5 foo')
def test_unsupported():
q1 = np.arange(10) * u.m
with pytest.raises(TypeError):
q2 = np.bitwise_and(q1, q1)
def test_unit_identity():
q = 1.0 * u.hour
assert q.unit is u.hour
def test_quantity_to_view():
q1 = np.array([1000, 2000]) * u.m
q2 = q1.to(u.km)
assert q1.value[0] == 1000
assert q2.value[0] == 1
@raises(ValueError)
def test_quantity_tuple_power():
(5.0 * u.m) ** (1, 2)
def test_quantity_fraction_power():
q = (25.0 * u.m**2) ** Fraction(1, 2)
assert q.value == 5.
assert q.unit == u.m
# Regression check to ensure we didn't create an object type by raising
# the value of the quantity to a Fraction. [#3922]
assert q.dtype.kind == 'f'
def test_inherit_docstrings():
assert u.Quantity.argmax.__doc__ == np.ndarray.argmax.__doc__
def test_quantity_from_table():
"""
Checks that units from tables are respected when converted to a Quantity.
This also generically checks the use of *anything* with a `unit` attribute
passed into Quantity
"""
from astropy.table import Table
t = Table(data=[np.arange(5), np.arange(5)], names=['a', 'b'])
t['a'].unit = u.kpc
qa = u.Quantity(t['a'])
assert qa.unit == u.kpc
assert_array_equal(qa.value, t['a'])
qb = u.Quantity(t['b'])
assert qb.unit == u.dimensionless_unscaled
assert_array_equal(qb.value, t['b'])
# This does *not* auto-convert, because it's not necessarily obvious that's
# desired. Instead we revert to standard `Quantity` behavior
qap = u.Quantity(t['a'], u.pc)
assert qap.unit == u.pc
assert_array_equal(qap.value, t['a'] * 1000)
qbp = u.Quantity(t['b'], u.pc)
assert qbp.unit == u.pc
assert_array_equal(qbp.value, t['b'])
def test_assign_slice_with_quantity_like():
# Regression tests for gh-5961
from astropy.table import Table, Column
# first check directly that we can use a Column to assign to a slice.
c = Column(np.arange(10.), unit=u.mm)
q = u.Quantity(c)
q[:2] = c[:2]
# next check that we do not fail the original problem.
t = Table()
t['x'] = np.arange(10) * u.mm
t['y'] = np.ones(10) * u.mm
assert type(t['x']) is Column
xy = np.vstack([t['x'], t['y']]).T * u.mm
ii = [0, 2, 4]
assert xy[ii, 0].unit == t['x'][ii].unit
# should not raise anything
xy[ii, 0] = t['x'][ii]
def test_insert():
"""
Test Quantity.insert method. This does not test the full capabilities
of the underlying np.insert, but hits the key functionality for
Quantity.
"""
q = [1, 2] * u.m
# Insert a compatible float with different units
q2 = q.insert(0, 1 * u.km)
assert np.all(q2.value == [1000, 1, 2])
assert q2.unit is u.m
assert q2.dtype.kind == 'f'
if minversion(np, '1.8.0'):
q2 = q.insert(1, [1, 2] * u.km)
assert np.all(q2.value == [1, 1000, 2000, 2])
assert q2.unit is u.m
# Cannot convert 1.5 * u.s to m
with pytest.raises(u.UnitsError):
q.insert(1, 1.5 * u.s)
# Tests with multi-dim quantity
q = [[1, 2], [3, 4]] * u.m
q2 = q.insert(1, [10, 20] * u.m, axis=0)
assert np.all(q2.value == [[1, 2],
[10, 20],
[3, 4]])
q2 = q.insert(1, [10, 20] * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2],
[3, 20, 4]])
q2 = q.insert(1, 10 * u.m, axis=1)
assert np.all(q2.value == [[1, 10, 2],
[3, 10, 4]])
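# A short usage sketch of the behaviour tested above (names illustrative only):
# values passed to Quantity.insert are converted to the unit of the original
# quantity, and the dtype may be upcast (e.g. int -> float) to hold the
# converted values.
def _insert_sketch():
    from astropy import units as u
    q = [1, 2] * u.m                # integer-valued quantity
    q2 = q.insert(0, 50 * u.cm)     # converted to m on insertion
    assert q2.unit is u.m
    assert q2.dtype.kind == 'f'     # upcast so that 0.5 m can be represented
    assert q2[0] == 0.5 * u.m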
def test_repr_array_of_quantity():
"""
Test print/repr of object arrays of Quantity objects with different
units.
Regression test for the issue first reported in
https://github.com/astropy/astropy/issues/3777
"""
a = np.array([1 * u.m, 2 * u.s], dtype=object)
if NUMPY_LT_1_14:
assert repr(a) == 'array([<Quantity 1.0 m>, <Quantity 2.0 s>], dtype=object)'
assert str(a) == '[<Quantity 1.0 m> <Quantity 2.0 s>]'
else:
assert repr(a) == 'array([<Quantity 1. m>, <Quantity 2. s>], dtype=object)'
assert str(a) == '[<Quantity 1. m> <Quantity 2. s>]'
class TestSpecificTypeQuantity:
def setup(self):
class Length(u.SpecificTypeQuantity):
_equivalent_unit = u.m
class Length2(Length):
_default_unit = u.m
class Length3(Length):
_unit = u.m
self.Length = Length
self.Length2 = Length2
self.Length3 = Length3
def test_creation(self):
l = self.Length(np.arange(10.)*u.km)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.) * u.hour)
with pytest.raises(u.UnitTypeError):
self.Length(np.arange(10.))
l2 = self.Length2(np.arange(5.))
assert type(l2) is self.Length2
assert l2._default_unit is self.Length2._default_unit
with pytest.raises(u.UnitTypeError):
self.Length3(np.arange(10.))
def test_view(self):
l = (np.arange(5.) * u.km).view(self.Length)
assert type(l) is self.Length
with pytest.raises(u.UnitTypeError):
(np.arange(5.) * u.s).view(self.Length)
v = np.arange(5.).view(self.Length)
assert type(v) is self.Length
assert v._unit is None
l3 = np.ones((2, 2)).view(self.Length3)
assert type(l3) is self.Length3
assert l3.unit is self.Length3._unit
def test_operation_precedence_and_fallback(self):
l = self.Length(np.arange(5.)*u.cm)
sum1 = l + 1.*u.m
assert type(sum1) is self.Length
sum2 = 1.*u.km + l
assert type(sum2) is self.Length
sum3 = l + l
assert type(sum3) is self.Length
res1 = l * (1.*u.m)
assert type(res1) is u.Quantity
res2 = l * l
assert type(res2) is u.Quantity
@pytest.mark.skipif('not HAS_MATPLOTLIB')
@pytest.mark.xfail('MATPLOTLIB_LT_15')
class TestQuantityMatplotlib:
    """Test that passing quantities to matplotlib works.
TODO: create PNG output and check against reference image
once `astropy.wcsaxes` is merged, which provides
the machinery for this.
See https://github.com/astropy/astropy/issues/1881
See https://github.com/astropy/astropy/pull/2139
"""
def test_plot(self):
data = u.Quantity([4, 5, 6], 's')
plt.plot(data)
def test_scatter(self):
x = u.Quantity([4, 5, 6], 'second')
y = [1, 3, 4] * u.m
plt.scatter(x, y)
def test_unit_class_override():
class MyQuantity(u.Quantity):
pass
my_unit = u.Unit("my_deg", u.deg)
my_unit._quantity_class = MyQuantity
q1 = u.Quantity(1., my_unit)
assert type(q1) is u.Quantity
q2 = u.Quantity(1., my_unit, subok=True)
assert type(q2) is MyQuantity
class QuantityMimic:
def __init__(self, value, unit):
self.value = value
self.unit = unit
def __array__(self):
return np.array(self.value)
class QuantityMimic2(QuantityMimic):
def to(self, unit):
return u.Quantity(self.value, self.unit).to(unit)
def to_value(self, unit):
return u.Quantity(self.value, self.unit).to_value(unit)
class TestQuantityMimics:
"""Test Quantity Mimics that are not ndarray subclasses."""
@pytest.mark.parametrize('Mimic', (QuantityMimic, QuantityMimic2))
def test_mimic_input(self, Mimic):
value = np.arange(10.)
mimic = Mimic(value, u.m)
q = u.Quantity(mimic)
assert q.unit == u.m
assert np.all(q.value == value)
q2 = u.Quantity(mimic, u.cm)
assert q2.unit == u.cm
assert np.all(q2.value == 100 * value)
@pytest.mark.parametrize('Mimic', (QuantityMimic, QuantityMimic2))
def test_mimic_setting(self, Mimic):
mimic = Mimic([1., 2.], u.m)
q = u.Quantity(np.arange(10.), u.cm)
q[8:] = mimic
assert np.all(q[:8].value == np.arange(8.))
assert np.all(q[8:].value == [100., 200.])
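# A minimal sketch of the duck-typing protocol exercised by the mimic classes
# above: an object exposing ``value`` and ``unit`` attributes (plus
# ``__array__``, and optionally ``to``/``to_value``) can initialize a Quantity
# or be assigned into a slice of one.  ``BareMimic`` is an illustrative name.
def _mimic_sketch():
    import numpy as np
    from astropy import units as u
    class BareMimic:
        def __init__(self, value, unit):
            self.value = value
            self.unit = unit
        def __array__(self):
            return np.array(self.value)
    q = u.Quantity(BareMimic([1., 2.], u.m), u.cm)
    assert np.all(q.value == [100., 200.])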
14af49c875e963079f266826b5b7e61618031733746160f0dd39fbef50f9d3a8
# The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import warnings
from collections import namedtuple
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from astropy.units import quantity_helper as qh
from astropy._erfa import ufunc as erfa_ufunc
from astropy.tests.helper import raises
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
testcase = namedtuple('testcase', ['f', 'q_in', 'q_out'])
testexc = namedtuple('testexc', ['f', 'q_in', 'exc', 'msg'])
testwarn = namedtuple('testwarn', ['f', 'q_in', 'wfilter'])
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
    # careful with the following line: it would break on a function returning
    # a single tuple (as opposed to a tuple of return values)
results = (results, ) if type(results) != tuple else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.E-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
class TestUfuncHelpers:
# Note that this test should work even if scipy is present, since
# the scipy.special ufuncs are only loaded on demand.
# The test passes independently of whether erfa is already loaded
# (which will be the case for a full test, since coordinates uses it).
def test_coverage(self):
"""Test that we cover all ufunc's"""
all_np_ufuncs = set([ufunc for ufunc in np.core.umath.__dict__.values()
if isinstance(ufunc, np.ufunc)])
all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS |
set(qh.UFUNC_HELPERS.keys()))
# Check that every numpy ufunc is covered.
assert all_np_ufuncs - all_q_ufuncs == set()
# Check that all ufuncs we cover come from numpy or erfa.
# (Since coverage for erfa is incomplete, we do not check
# this the other way).
all_erfa_ufuncs = set([ufunc for ufunc in erfa_ufunc.__dict__.values()
if isinstance(ufunc, np.ufunc)])
assert (all_q_ufuncs - all_np_ufuncs - all_erfa_ufuncs == set())
def test_scipy_registered(self):
# Should be registered as existing even if scipy is not available.
assert 'scipy.special' in qh.UFUNC_HELPERS.modules
def test_removal_addition(self):
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = None
assert np.add not in qh.UFUNC_HELPERS
assert np.add in qh.UNSUPPORTED_UFUNCS
qh.UFUNC_HELPERS[np.add] = qh.UFUNC_HELPERS[np.subtract]
assert np.add in qh.UFUNC_HELPERS
assert np.add not in qh.UNSUPPORTED_UFUNCS
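# A minimal sketch of the registry manipulation shown in test_removal_addition
# above: assigning None in UFUNC_HELPERS marks a ufunc as unsupported, while
# assigning an existing helper (re-)enables it.  Wrapping the change in
# try/finally keeps other tests unaffected; the function name is illustrative.
def _ufunc_helper_registry_sketch():
    original_helper = qh.UFUNC_HELPERS[np.add]
    try:
        qh.UFUNC_HELPERS[np.add] = None      # now treated as unsupported
        assert np.add in qh.UNSUPPORTED_UFUNCS
    finally:
        qh.UFUNC_HELPERS[np.add] = original_helper
    assert np.add in qh.UFUNC_HELPERS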
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize('tc', (
testcase(
f=np.sin,
q_in=(30. * u.degree, ),
q_out=(0.5*u.dimensionless_unscaled, )
),
testcase(
f=np.sin,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([0., 1. / np.sqrt(2.), 1.]) * u.one, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(30. * u.degree), ),
q_out=(np.radians(30.) * u.radian, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, )
),
testcase(
f=np.cos,
q_in=(np.pi / 3. * u.radian, ),
q_out=(0.5 * u.dimensionless_unscaled, )
),
testcase(
f=np.cos,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([1., 1. / np.sqrt(2.), 0.]) * u.one, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
),
testcase(
f=np.tan,
q_in=(np.pi / 3. * u.radian, ),
q_out=(np.sqrt(3.) * u.dimensionless_unscaled, )
),
testcase(
f=np.tan,
q_in=(np.array([0., 45., 135., 180.]) * u.degree, ),
q_out=(np.array([0., 1., -1., 0.]) * u.dimensionless_unscaled, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10., 30., 70., 80.]) * u.degree), ),
q_out=(np.radians(np.array([10., 30., 70., 80.]) * u.degree), )
),
testcase(
f=np.arctan2,
q_in=(np.array([10., 30., 70., 80.]) * u.m, 2.0 * u.km),
q_out=(np.arctan2(np.array([10., 30., 70., 80.]),
2000.) * u.radian, )
),
testcase(
f=np.arctan2,
q_in=((np.array([10., 80.]) * u.m / (2.0 * u.km)).to(u.one), 1.),
q_out=(np.arctan2(np.array([10., 80.]) / 2000., 1.) * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.radians,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.radians,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.rad2deg,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.rad2deg,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
)
))
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize('te', (
testexc(
f=np.deg2rad,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.radians,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.rad2deg,
q_in=(3. * u.m),
exc=TypeError,
msg=None
),
testexc(
f=np.degrees,
q_in=(3. * u.m),
exc=TypeError,
msg=None
),
testexc(
f=np.sin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units"
),
testexc(
f=np.arcsin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities"
),
testexc(
f=np.cos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units"
),
testexc(
f=np.arccos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities"
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units"
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1. * u.s),
exc=u.UnitsError,
msg="compatible dimensions"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.),
exc=u.UnitsError,
msg="dimensionless quantities when other arg"
)
))
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize('tw', (
testwarn(
f=np.arcsin,
q_in=(27. * u.pc / (15 * u.kpc), ),
wfilter='error'
),
))
def test_testwarns(self, tw):
return test_testwarn(tw)
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s
assert np.multiply(4. * u.m, 2.) == 8. * u.m
assert np.multiply(4., 2. / u.s) == 8. / u.s
def test_multiply_array(self):
assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) ==
np.arange(0, 6., 2.) * u.m / u.s)
@pytest.mark.skipif(not isinstance(getattr(np, 'matmul', None), np.ufunc),
reason="np.matmul is not yet a gufunc")
def test_matmul(self):
q = np.arange(3.) * u.m
r = np.matmul(q, q)
assert r == 5. * u.m ** 2
# less trivial case.
q1 = np.eye(3) * u.m
q2 = np.array([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]],
[[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]]]) / u.s
r2 = np.matmul(q1, q2)
assert np.all(r2 == np.matmul(q1.value, q2.value) * q1.unit * q2.unit)
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4. * u.m, 2. * u.s) == function(4., 2.) * u.m / u.s
assert function(4. * u.m, 2.) == function(4., 2.) * u.m
assert function(4., 2. * u.s) == function(4., 2.) / u.s
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(function(np.arange(3.) * u.m, 2. * u.s) ==
function(np.arange(3.), 2.) * u.m / u.s)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1., 2., 3.]) * u.m
divisor = np.array([3., 4., 5.]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
assert_allclose(quotient.value, [13., 19., 23.])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4. * u.m) == 2. * u.m ** 0.5
def test_sqrt_array(self):
assert np.all(np.sqrt(np.array([1., 4., 9.]) * u.m)
== np.array([1., 2., 3.]) * u.m ** 0.5)
def test_square_scalar(self):
assert np.square(4. * u.m) == 16. * u.m ** 2
def test_square_array(self):
assert np.all(np.square(np.array([1., 2., 3.]) * u.m)
== np.array([1., 4., 9.]) * u.m ** 2)
def test_reciprocal_scalar(self):
assert np.reciprocal(4. * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(np.reciprocal(np.array([1., 2., 4.]) * u.m)
== np.array([1., 0.5, 0.25]) / u.m)
def test_heaviside_scalar(self):
assert np.heaviside(0. * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert np.heaviside(0. * u.s,
25 * u.percent) == 0.25 * u.dimensionless_unscaled
assert np.heaviside(2. * u.J, 0.25) == 1. * u.dimensionless_unscaled
def test_heaviside_array(self):
values = np.array([-1., 0., 0., +1.])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
assert np.all(np.heaviside(values * u.m,
halfway * u.dimensionless_unscaled) ==
[0, 0.25, 0.75, +1.] * u.dimensionless_unscaled)
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_scalar(self, function):
assert function(8. * u.m**3) == 2. * u.m
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_array(self, function):
# Calculate cbrt on both sides since on Windows the cube root of 64
        # does not exactly equal 4.  See #4388.
values = np.array([1., 8., 64.])
assert np.all(function(values * u.m**3) ==
function(values) * u.m)
def test_power_scalar(self):
assert np.power(4. * u.m, 2.) == 16. * u.m ** 2
assert np.power(4., 200. * u.cm / u.m) == \
u.Quantity(16., u.dimensionless_unscaled)
# regression check on #1696
assert np.power(4. * u.m, 0.) == 1. * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(np.power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
# float_power only introduced in numpy 1.12
@pytest.mark.skipif("not hasattr(np, 'float_power')")
def test_float_power_array(self):
assert np.all(np.float_power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.float_power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
@raises(ValueError)
def test_power_array_array(self):
np.power(4. * u.m, [2., 4.])
@raises(ValueError)
def test_power_array_array2(self):
np.power([2., 4.] * u.m, [2., 4.])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2., 4.] * u.m / u.m
powers = [2., 4.]
res = np.power(q, powers)
assert np.all(res.value == q.value ** powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2., 4.] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2 ** 2
assert np.all(res3.value == q2.value ** 2)
assert res3.unit == q2.unit ** 2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError) as exc:
np.power(3., 4. * u.m)
assert "raise something to a dimensionless" in exc.value.args[0]
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.) == 3. * u.m
assert np.copysign(3 * u.m, 1. * u.s) == 3. * u.m
assert np.copysign(3 * u.m, -1.) == -3. * u.m
assert np.copysign(3 * u.m, -1. * u.s) == -3. * u.m
def test_copysign_array(self):
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1.) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1. * u.m) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s,
np.array([-2., 2., -4.]) * u.m) ==
np.array([-1., 2., -3.]) * u.s)
q = np.copysign(np.array([1., 2., 3.]), -3 * u.m)
assert np.all(q == np.array([-1., -2., -3.]))
assert not isinstance(q, u.Quantity)
def test_ldexp_scalar(self):
assert np.ldexp(4. * u.m, 2) == 16. * u.m
def test_ldexp_array(self):
assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1])
== np.array([8., 8., 6.]) * u.m)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3. * u.m, 4.)
with pytest.raises(TypeError):
np.ldexp(3., u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_scalar(self, function):
q = function(3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value
== function(np.array([1. / 3., 1. / 2., 1.])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
def test_modf_scalar(self):
q = np.modf(9. * u.m / (600. * u.cm))
assert q == (0.5 * u.dimensionless_unscaled,
1. * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.) * u.m / (500. * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3. * u.m / (6. * u.m))
assert q == (np.array(0.5), np.array(0.0))
def test_frexp_array(self):
q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d
in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))
def test_frexp_invalid_units(self):
        # Can't use frexp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
np.frexp(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
# also does not work on quantities that can be made dimensionless
with pytest.raises(TypeError) as exc:
np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm), 1.)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
function(np.array([100. / 3., 100. / 2., 100.]), 1.))
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(TypeError) as exc:
function(1. * u.km / u.s, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
class TestInvariantUfuncs:
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.fabs,
np.conj, np.conjugate,
np.negative, np.spacing, np.rint,
np.floor, np.ceil, np.positive])
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.conjugate,
np.negative, np.rint,
np.floor, np.ceil])
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_one_arbitrary(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
arbitrary_unit_value = np.array([0.])
q_o = ufunc(q_i1, arbitrary_unit_value)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary_unit_value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestComparisonUfuncs:
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(q_o2 == ufunc((q_i1 / q_i2)
.to_value(u.dimensionless_unscaled), 2.))
# comparison with 0., inf, nan is OK even for dimensional quantities
for arbitrary_unit_value in (0., np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value*np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0., np.inf, np.nan]))
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestInplaceUfuncs:
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value/10., out=s)
assert check is s
assert np.all(check.value == np.arcsin(value/10.))
assert check.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100. * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# And now, with numpy >= 1.13, one can also replace input with
# first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.
assert check is s
assert np.all(check.value == value / 2.)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2. * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1. * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2. / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1., 2., 3.]) * u.dimensionless_unscaled
np.add(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert np.all(s.value == np.array([3., 6., 9.]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert_allclose(s.value, np.arctan2(1., 2.))
assert s.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.*u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1. * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
        The first two tests check that float32 is preserved (closes #3976).
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += (20.*u.km)
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
@pytest.mark.skipif(not hasattr(np.core.umath, 'clip'),
reason='no clip ufunc available')
class TestClip:
"""Test the clip ufunc.
    In numpy, this is hidden behind a function that does not do
    backwards-compatibility checks.  We explicitly test the ufunc here.
"""
def setup(self):
self.clip = np.core.umath.clip
def test_clip_simple(self):
q = np.arange(-1., 10.) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
result = self.clip(q, q_min, q_max)
assert result.unit == q.unit
expected = self.clip(q.value, q_min.to_value(q.unit),
q_max.to_value(q.unit)) * q.unit
assert np.all(result == expected)
def test_clip_unitless_parts(self):
q = np.arange(-1., 10.) * u.m
qlim = 0.0055 * u.km
# one-sided
result1 = self.clip(q, -np.inf, qlim)
expected1 = self.clip(q.value, -np.inf, qlim.to_value(q.unit)) * q.unit
assert np.all(result1 == expected1)
result2 = self.clip(q, qlim, np.inf)
expected2 = self.clip(q.value, qlim.to_value(q.unit), np.inf) * q.unit
assert np.all(result2 == expected2)
# Zero
result3 = self.clip(q, np.zeros(q.shape), qlim)
expected3 = self.clip(q.value, 0, qlim.to_value(q.unit)) * q.unit
assert np.all(result3 == expected3)
# Two unitless parts, array-shaped.
result4 = self.clip(q, np.zeros(q.shape), np.full(q.shape, np.inf))
expected4 = self.clip(q.value, 0, np.inf) * q.unit
assert np.all(result4 == expected4)
def test_clip_dimensionless(self):
q = np.arange(-1., 10.) * u.dimensionless_unscaled
result = self.clip(q, 200 * u.percent, 5.)
expected = self.clip(q, 2., 5.)
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_ndarray(self):
a = np.arange(-1., 10.)
result = self.clip(a, 200 * u.percent, 5. * u.dimensionless_unscaled)
assert isinstance(result, u.Quantity)
expected = self.clip(a, 2., 5.) * u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_quantity_inplace(self):
q = np.arange(-1., 10.) * u.m
q_min = 125 * u.cm
q_max = 0.0055 * u.km
expected = self.clip(q.value, q_min.to_value(q.unit),
q_max.to_value(q.unit)) * q.unit
result = self.clip(q, q_min, q_max, out=q)
assert result is q
assert np.all(result == expected)
def test_clip_ndarray_dimensionless_output(self):
a = np.arange(-1., 10.)
q = np.zeros_like(a) * u.m
expected = self.clip(a, 2., 5.) * u.dimensionless_unscaled
result = self.clip(a, 200 * u.percent, 5. * u.dimensionless_unscaled,
out=q)
assert result is q
assert result.unit == u.dimensionless_unscaled
assert np.all(result == expected)
def test_clip_errors(self):
q = np.arange(-1., 10.) * u.m
with pytest.raises(u.UnitsError):
self.clip(q, 0, 1*u.s)
with pytest.raises(u.UnitsError):
self.clip(q.value, 0, 1*u.s)
with pytest.raises(u.UnitsError):
self.clip(q, -1, 0.)
with pytest.raises(u.UnitsError):
self.clip(q, 0., 1.)
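# Illustrative sketch (not part of the original test suite): the unit handling
# exercised by TestClip above is also what one gets through the public
# ``Quantity.clip`` method; bounds in different but compatible units are
# converted to the unit of the array before clipping.
def _demo_quantity_clip():  # pragma: no cover - documentation example only
    q = np.arange(-1., 10.) * u.m
    # 125 cm and 0.0055 km become 1.25 m and 5.5 m before clipping.
    return q.clip(125 * u.cm, 0.0055 * u.km)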
class TestUfuncAt:
"""Test that 'at' method for ufuncs (calculates in-place at given indices)
For Quantities, since calculations are in-place, it makes sense only
if the result is still a quantity, and if the unit does not have to change
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.*u.km)
np.add.at(check, i, 1000.)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.*u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1*u.s)
# but be fine if it does not
s = np.arange(10.) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.*u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.) * u.m
np.multiply.at(s, i, 2.)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.*u.km)
class TestUfuncReduceReduceatAccumulate:
"""Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs
For Quantities, it makes sense only if the unit does not have to change
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense,
# as intermediate result is not even a Quantity
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
class TestUfuncOuter:
"""Test 'outer' methods for ufuncs
Just a few spot checks, since it uses the same code as the regular
ufunc call
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.) * u.m
s2 = np.arange(2.) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
if HAS_SCIPY:
from scipy import special as sps
def test_scipy_registration():
"""Check that scipy gets loaded upon first use."""
assert sps.erf not in qh.UFUNC_HELPERS
sps.erf(1. * u.percent)
assert sps.erf in qh.UFUNC_HELPERS
class TestScipySpecialUfuncs:
erf_like_ufuncs = (
sps.erf, sps.gamma, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.erfc, sps.erfcx, sps.erfi, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize('function', (sps.radian, ))
def test_radian(self, function):
q1 = function(180. * u.degree, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0. * u.degree, 30. * u.arcmin, 0. * u.arcsec)
assert_allclose(q2.value, (30. * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0. * u.degree, 0. * u.arcmin, 30. * u.arcsec)
assert_allclose(q3.value, (30. * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3. * u.radian, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q4.value, 3.)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3. * u.m, 2. * u.s, 1. * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2. * u.m / (2. * u.m), 3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_array(self, function):
q = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(
np.ones(3),
np.array([1. / 3., 1. / 2., 1.]))
)
# should also work on quantities that can be made dimensionless
q2 = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.ones(3),
np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(1. * u.kg, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
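# Illustrative sketch (not part of the original test suite): the in-place ufunc
# behaviour covered by the classes above, shown directly on a Quantity.
def _demo_inplace_ufunc():  # pragma: no cover - documentation example only
    s = np.arange(10.) * u.m
    # The second operand is converted to the unit of the output array,
    # so adding 1 km bumps every element by 1000 m.
    np.add(s, 1. * u.km, out=s)
    return s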
|
5cede9baf482798fc9ea1612650d91c64d6d0c41db2804edf87e6ffd62b83aa7 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Function Units and Quantities."""
from abc import ABCMeta, abstractmethod
import numpy as np
from astropy.units import (Unit, UnitBase, UnitsError, UnitTypeError, UnitConversionError,
dimensionless_unscaled, Quantity)
__all__ = ['FunctionUnitBase', 'FunctionQuantity']
SUPPORTED_UFUNCS = set(getattr(np.core.umath, ufunc) for ufunc in (
'isfinite', 'isinf', 'isnan', 'sign', 'signbit',
'rint', 'floor', 'ceil', 'trunc',
'_ones_like', 'ones_like', 'positive') if hasattr(np.core.umath, ufunc))
# TODO: the following could work if helper changed relative to Quantity:
# - spacing should return dimensionless, not same unit
# - negative should negate unit too,
# - add, subtract, comparisons can work if units added/subtracted
SUPPORTED_FUNCTIONS = set(getattr(np, function) for function in
('clip', 'trace', 'mean', 'min', 'max', 'round'))
# subclassing UnitBase or CompositeUnit was found to be problematic, requiring
# a large number of overrides. Hence, define new class.
class FunctionUnitBase(metaclass=ABCMeta):
"""Abstract base class for function units.
Function units are functions containing a physical unit, such as dB(mW).
Most of the arithmetic operations on function units are defined in this
base class.
While instantiation is defined, this class should not be used directly.
Rather, subclasses should be used that override the abstract properties
`_default_function_unit` and `_quantity_class`, and the abstract methods
`from_physical` and `to_physical`.
Parameters
----------
physical_unit : `~astropy.units.Unit` or `string`
Unit that is encapsulated within the function unit.
If not given, dimensionless.
function_unit : `~astropy.units.Unit` or `string`
By default, the same as the function unit set by the subclass.
"""
# ↓↓↓ the following four need to be set by subclasses
# Make this a property so we can ensure subclasses define it.
@property
@abstractmethod
def _default_function_unit(self):
"""Default function unit corresponding to the function.
This property should be overridden by subclasses, with, e.g.,
`~astropy.units.MagUnit` returning `~astropy.units.mag`.
"""
# This has to be a property because the function quantity will not be
# known at unit definition time, as it gets defined after.
@property
@abstractmethod
def _quantity_class(self):
"""Function quantity class corresponding to this function unit.
This property should be overridden by subclasses, with, e.g.,
`~astropy.units.MagUnit` returning `~astropy.units.Magnitude`.
"""
@abstractmethod
def from_physical(self, x):
"""Transformation from value in physical to value in function units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
@abstractmethod
def to_physical(self, x):
"""Transformation from value in function to value in physical units.
This method should be overridden by subclasses. It is used to
provide automatic transformations using an equivalency.
"""
# ↑↑↑ the above four need to be set by subclasses
# have priority over arrays, regular units, and regular quantities
__array_priority__ = 30000
def __init__(self, physical_unit=None, function_unit=None):
if physical_unit is None:
self._physical_unit = dimensionless_unscaled
else:
self._physical_unit = Unit(physical_unit)
if (not isinstance(self._physical_unit, UnitBase) or
self._physical_unit.is_equivalent(
self._default_function_unit)):
raise UnitConversionError("Unit {0} is not a physical unit."
.format(self._physical_unit))
if function_unit is None:
self._function_unit = self._default_function_unit
else:
# any function unit should be equivalent to subclass default
function_unit = Unit(getattr(function_unit, 'function_unit',
function_unit))
if function_unit.is_equivalent(self._default_function_unit):
self._function_unit = function_unit
else:
raise UnitConversionError(
"Cannot initialize '{0}' instance with function unit '{1}'"
", as it is not equivalent to default function unit '{2}'."
.format(self.__class__.__name__, function_unit,
self._default_function_unit))
def _copy(self, physical_unit=None):
"""Copy oneself, possibly with a different physical unit."""
if physical_unit is None:
physical_unit = self.physical_unit
return self.__class__(physical_unit, self.function_unit)
@property
def physical_unit(self):
return self._physical_unit
@property
def function_unit(self):
return self._function_unit
@property
def equivalencies(self):
"""List of equivalencies between function and physical units.
Uses the `from_physical` and `to_physical` methods.
"""
return [(self, self.physical_unit,
self.to_physical, self.from_physical)]
# ↓↓↓ properties/methods required to behave like a unit
def decompose(self, bases=set()):
"""Copy the current unit with the physical unit decomposed.
For details, see `~astropy.units.UnitBase.decompose`.
"""
return self._copy(self.physical_unit.decompose(bases))
@property
def si(self):
"""Copy the current function unit with the physical unit in SI."""
return self._copy(self.physical_unit.si)
@property
def cgs(self):
"""Copy the current function unit with the physical unit in CGS."""
return self._copy(self.physical_unit.cgs)
def _get_physical_type_id(self):
"""Get physical type corresponding to physical unit."""
return self.physical_unit._get_physical_type_id()
@property
def physical_type(self):
"""Return the physical type of the physical unit (e.g., 'length')."""
return self.physical_unit.physical_type
def is_equivalent(self, other, equivalencies=[]):
"""
Returns `True` if this unit is equivalent to ``other``.
Parameters
----------
other : unit object or string or tuple
The unit to convert to. If a tuple of units is specified, this
method returns true if the unit matches any of those in the tuple.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is in addition to the built-in equivalencies between the
function unit and the physical one, as well as possible global
defaults set by, e.g., `~astropy.units.set_enabled_equivalencies`.
Use `None` to turn off any global equivalencies.
Returns
-------
bool
"""
if isinstance(other, tuple):
return any(self.is_equivalent(u, equivalencies=equivalencies)
for u in other)
other_physical_unit = getattr(other, 'physical_unit', (
dimensionless_unscaled if self.function_unit.is_equivalent(other)
else other))
return self.physical_unit.is_equivalent(other_physical_unit,
equivalencies)
def to(self, other, value=1., equivalencies=[]):
"""
Return the converted values in the specified unit.
Parameters
----------
other : `~astropy.units.Unit` object, `~astropy.units.function.FunctionUnitBase` object or string
The unit to convert to.
value : scalar int or float, or sequence convertible to array, optional
Value(s) in the current unit to be converted to the specified unit.
If not provided, defaults to 1.0.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
This list is meant to treat only equivalencies between different
physical units; the built-in equivalency between the function
unit and the physical one is automatically taken into account.
Returns
-------
values : scalar or array
Converted value(s). Input value sequences are returned as
numpy arrays.
Raises
------
UnitsError
If units are inconsistent.
"""
# conversion to one's own physical unit should be fastest
if other is self.physical_unit:
return self.to_physical(value)
other_function_unit = getattr(other, 'function_unit', other)
if self.function_unit.is_equivalent(other_function_unit):
# when other is an equivalent function unit:
# first convert physical units to other's physical units
other_physical_unit = getattr(other, 'physical_unit',
dimensionless_unscaled)
if self.physical_unit != other_physical_unit:
value_other_physical = self.physical_unit.to(
other_physical_unit, self.to_physical(value),
equivalencies)
# make function unit again, in own system
value = self.from_physical(value_other_physical)
# convert possible difference in function unit (e.g., dex->dB)
return self.function_unit.to(other_function_unit, value)
else:
try:
# when other is not a function unit
return self.physical_unit.to(other, self.to_physical(value),
equivalencies)
except UnitConversionError as e:
if self.function_unit == Unit('mag'):
# One can get to raw magnitudes via math that strips the dimensions off.
# Include extra information in the exception to remind users of this.
msg = "Did you perhaps subtract magnitudes so the unit got lost?"
e.args += (msg,)
raise e
else:
raise
def is_unity(self):
return False
def __eq__(self, other):
return (self.physical_unit == getattr(other, 'physical_unit',
dimensionless_unscaled) and
self.function_unit == getattr(other, 'function_unit', other))
def __ne__(self, other):
return not self.__eq__(other)
def __rlshift__(self, other):
"""Unit converstion operator ``<<``"""
try:
return self._quantity_class(other, self, copy=False, subok=True)
except Exception:
return NotImplemented
def __mul__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit * other
else:
raise UnitsError("Cannot multiply a function unit "
"with a physical dimension with any unit.")
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(other, unit=self)
except Exception:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
def __div__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return self.function_unit / other
else:
raise UnitsError("Cannot divide a function unit "
"with a physical dimension by any unit.")
else:
# Anything not like a unit, try initialising as a function quantity.
try:
return self._quantity_class(1./other, unit=self)
except Exception:
return NotImplemented
def __rdiv__(self, other):
if isinstance(other, (str, UnitBase, FunctionUnitBase)):
if self.physical_unit == dimensionless_unscaled:
# If dimensionless, drop back to normal unit and retry.
return other / self.function_unit
else:
raise UnitsError("Cannot divide a function unit "
"with a physical dimension into any unit")
else:
# Don't know what to do with anything not like a unit.
return NotImplemented
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __pow__(self, power):
if power == 0:
return dimensionless_unscaled
elif power == 1:
return self._copy()
if self.physical_unit == dimensionless_unscaled:
return self.function_unit ** power
raise UnitsError("Cannot raise a function unit "
"with a physical dimension to any power but 0 or 1.")
def __pos__(self):
return self._copy()
def to_string(self, format='generic'):
"""
Output the unit in the given format as a string.
The physical unit is appended, within parentheses, to the function
unit, as in "dB(mW)", with both units set using the given format
Parameters
----------
format : `astropy.units.format.Base` instance or str
The name of a format or a formatter object. If not
provided, defaults to the generic format.
"""
if format not in ('generic', 'unscaled', 'latex'):
raise ValueError("Function units cannot be written in {0} format. "
"Only 'generic', 'unscaled' and 'latex' are "
"supported.".format(format))
self_str = self.function_unit.to_string(format)
pu_str = self.physical_unit.to_string(format)
if pu_str == '':
pu_str = '1'
if format == 'latex':
self_str += r'$\mathrm{{\left( {0} \right)}}$'.format(
pu_str[1:-1]) # need to strip leading and trailing "$"
else:
self_str += '({0})'.format(pu_str)
return self_str
def __str__(self):
"""Return string representation for unit."""
self_str = str(self.function_unit)
pu_str = str(self.physical_unit)
if pu_str:
self_str += '({0})'.format(pu_str)
return self_str
def __repr__(self):
# By default, try to give a representation using `Unit(<string>)`,
# with string such that parsing it would give the correct FunctionUnit.
if callable(self.function_unit):
return 'Unit("{0}")'.format(self.to_string())
else:
return '{0}("{1}"{2})'.format(
self.__class__.__name__, self.physical_unit,
"" if self.function_unit is self._default_function_unit
else ', unit="{0}"'.format(self.function_unit))
def _repr_latex_(self):
"""
Generate latex representation of unit name. This is used by
the IPython notebook to print a unit with a nice layout.
Returns
-------
Latex string
"""
return self.to_string('latex')
def __hash__(self):
return hash((self.function_unit, self.physical_unit))
class FunctionQuantity(Quantity):
"""A representation of a (scaled) function of a number with a unit.
Function quantities are quantities whose units are functions containing a
physical unit, such as dB(mW). Most of the arithmetic operations on
function quantities are defined in this base class.
While instantiation is also defined here, this class should not be
instantiated directly. Rather, subclasses should be made which have
``_unit_class`` pointing back to the corresponding function unit class.
Parameters
----------
value : number, sequence of convertible items, `~astropy.units.Quantity`, or `~astropy.units.function.FunctionQuantity`
The numerical value of the function quantity. If a number or
a `~astropy.units.Quantity` with a function unit, it will be converted
to ``unit`` and the physical unit will be inferred from ``unit``.
If a `~astropy.units.Quantity` with just a physical unit, it will be
converted to the function unit, after, if necessary, converting it to
the physical unit inferred from ``unit``.
unit : string, `~astropy.units.UnitBase` or `~astropy.units.function.FunctionUnitBase` instance, optional
For an `~astropy.units.function.FunctionUnitBase` instance, the
physical unit will be taken from it; for other input, it will be
inferred from ``value``. By default, ``unit`` is set by the subclass.
dtype : `~numpy.dtype`, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. Ignored
if the input does not need to be converted and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be of the
class used. Otherwise, subclasses will be passed through.
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`~astropy.units.Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not a `~astropy.units.function.FunctionUnitBase`
or `~astropy.units.Unit` object, or a parseable string unit.
"""
_unit_class = None
"""Default `~astropy.units.function.FunctionUnitBase` subclass.
This should be overridden by subclasses.
"""
# Ensure priority over ndarray, regular Unit & Quantity, and FunctionUnit.
__array_priority__ = 40000
# Define functions that work on FunctionQuantity.
_supported_ufuncs = SUPPORTED_UFUNCS
_supported_functions = SUPPORTED_FUNCTIONS
def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# Convert possible string input to a (function) unit.
unit = Unit(unit)
if not isinstance(unit, FunctionUnitBase):
# By default, use value's physical unit.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# if iterable, see if first item has a unit
# (mixed lists fail in super call below).
try:
value_unit = getattr(value[0], 'unit')
except Exception:
pass
physical_unit = getattr(value_unit, 'physical_unit', value_unit)
unit = cls._unit_class(physical_unit, function_unit=unit)
# initialise!
return super().__new__(cls, value, unit, dtype=dtype, copy=copy,
order=order, subok=subok, ndmin=ndmin)
# ↓↓↓ properties not found in Quantity
@property
def physical(self):
"""The physical quantity corresponding the function one."""
return self.to(self.unit.physical_unit)
@property
def _function_view(self):
"""View as Quantity with function unit, dropping the physical unit.
Use `~astropy.units.quantity.Quantity.value` for just the value.
"""
return self._new_view(unit=self.unit.function_unit)
# ↓↓↓ methods overridden to change the behavior
@property
def si(self):
"""Return a copy with the physical unit in SI units."""
return self.__class__(self.physical.si)
@property
def cgs(self):
"""Return a copy with the physical unit in CGS units."""
return self.__class__(self.physical.cgs)
def decompose(self, bases=[]):
"""Generate a new `FunctionQuantity` with the physical unit decomposed.
For details, see `~astropy.units.Quantity.decompose`.
"""
return self.__class__(self.physical.decompose(bases))
# ↓↓↓ methods overridden to add additional behavior
def __quantity_subclass__(self, unit):
if isinstance(unit, FunctionUnitBase):
return self.__class__, True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if not isinstance(unit, self._unit_class):
# Have to take care of, e.g., (10*u.mag).view(u.Magnitude)
try:
# "or 'nonsense'" ensures `None` breaks, just in case.
unit = self._unit_class(function_unit=unit or 'nonsense')
except Exception:
raise UnitTypeError(
"{0} instances require {1} function units"
.format(type(self).__name__, self._unit_class.__name__) +
", so cannot set it to '{0}'.".format(unit))
self._unit = unit
def __array_ufunc__(self, function, method, *inputs, **kwargs):
# TODO: it would be more logical to have this in Quantity already,
# instead of in UFUNC_HELPERS, where it cannot be overridden.
# And really it should just return NotImplemented, since possibly
# another argument might know what to do.
if function not in self._supported_ufuncs:
raise UnitTypeError(
"Cannot use ufunc '{0}' with function quantities"
.format(function.__name__))
return super().__array_ufunc__(function, method, *inputs, **kwargs)
# ↓↓↓ methods overridden to change behavior
def __mul__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view * other
raise UnitTypeError("Cannot multiply function quantities which "
"are not dimensionless with anything.")
def __truediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view / other
raise UnitTypeError("Cannot divide function quantities which "
"are not dimensionless by anything.")
def __rtruediv__(self, other):
if self.unit.physical_unit == dimensionless_unscaled:
return self._function_view.__rdiv__(other)
raise UnitTypeError("Cannot divide function quantities which "
"are not dimensionless into anything.")
def _comparison(self, other, comparison_func):
"""Do a comparison between self and other, raising UnitsError when
other cannot be converted to self because it has different physical
unit, and returning NotImplemented when there are other errors."""
try:
# will raise a UnitsError if physical units not equivalent
other_in_own_unit = self._to_own_unit(other, check_precision=False)
except UnitsError as exc:
if self.unit.physical_unit != dimensionless_unscaled:
raise exc
try:
other_in_own_unit = self._function_view._to_own_unit(
other, check_precision=False)
except Exception:
raise exc
except Exception:
return NotImplemented
return comparison_func(other_in_own_unit)
def __eq__(self, other):
try:
return self._comparison(other, self.value.__eq__)
except UnitsError:
return False
def __ne__(self, other):
try:
return self._comparison(other, self.value.__ne__)
except UnitsError:
return True
def __gt__(self, other):
return self._comparison(other, self.value.__gt__)
def __ge__(self, other):
return self._comparison(other, self.value.__ge__)
def __lt__(self, other):
return self._comparison(other, self.value.__lt__)
def __le__(self, other):
return self._comparison(other, self.value.__le__)
def __lshift__(self, other):
"""Unit converstion operator `<<`"""
try:
other = Unit(other, parse_strict='silent')
except UnitTypeError:
return NotImplemented
return self.__class__(self, other, copy=False, subok=True)
# Ensure Quantity methods are used only if they make sense.
def _wrap_function(self, function, *args, **kwargs):
if function in self._supported_functions:
return super()._wrap_function(function, *args, **kwargs)
# For dimensionless, we can convert to regular quantities.
if all(arg.unit.physical_unit == dimensionless_unscaled
for arg in (self,) + args
if (hasattr(arg, 'unit') and
hasattr(arg.unit, 'physical_unit'))):
args = tuple(getattr(arg, '_function_view', arg) for arg in args)
return self._function_view._wrap_function(function, *args, **kwargs)
raise TypeError("Cannot use method that uses function '{0}' with "
"function quantities that are not dimensionless."
.format(function.__name__))
# Override functions that are supported but do not use _wrap_function
# in Quantity.
def max(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.max, axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.min, axis, out=out, keepdims=keepdims)
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.sum, axis, dtype, out=out,
keepdims=keepdims)
def cumsum(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.cumsum, axis, dtype, out=out)
def clip(self, a_min, a_max, out=None):
return self._wrap_function(np.clip, self._to_own_unit(a_min),
self._to_own_unit(a_max), out=out)
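# Illustrative sketch (not part of this module): concrete subclasses such as
# ``MagUnit``/``Magnitude`` in the public `astropy.units` namespace build on
# the machinery above.  The names below are assumed from that public API.
if __name__ == '__main__':  # pragma: no cover - documentation example only
    import astropy.units as u
    m = u.Magnitude(-5., u.mag(u.ct / u.s))
    # ``physical`` undoes the logarithmic function unit:
    # -5 mag corresponds to a factor of 100 in count rate.
    print(m.physical)  # -> 100.0 ct / s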
|
0266f9e2298aae96e27a909e8f8623db9b1804837b8d5cadac4a06ad7cb49a8d | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains the coordinate frames actually implemented by astropy.
Users shouldn't use this module directly, but rather import from the
`astropy.coordinates` module. While it is likely to exist for the long-term,
the existence of this package and details of its organization should be
considered an implementation detail, and is not guaranteed to hold for future
versions of astropy.
Notes
-----
The builtin frame classes are all imported automatically into this package's
namespace, so there's no need to access the sub-modules directly.
To implement a new frame in Astropy, a developer should add the frame as a new
module in this package. Any "self" transformations (i.e., those that transform
from one frame to another frame of the same class) should be included in that
module. Transformation functions connecting the new frame to other frames
should be in a separate module, which should be imported in this package's
``__init__.py`` to ensure the transformations are hooked up when this package is
imported. Placing the transformation functions in separate modules avoids
circular dependencies, because they need references to the frame classes.
"""
from .baseradec import BaseRADecFrame
from .icrs import ICRS
from .fk5 import FK5
from .fk4 import FK4, FK4NoETerms
from .galactic import Galactic
from .galactocentric import Galactocentric
from .lsr import LSR, GalacticLSR
from .supergalactic import Supergalactic
from .altaz import AltAz
from .gcrs import GCRS, PrecessedGeocentric
from .cirs import CIRS
from .itrs import ITRS
from .hcrs import HCRS
from .ecliptic import * # there are a lot of these so we don't list them all explicitly
from .skyoffset import SkyOffsetFrame
# need to import transformations so that they get registered in the graph
from . import icrs_fk5_transforms
from . import fk4_fk5_transforms
from . import galactic_transforms
from . import supergalactic_transforms
from . import icrs_cirs_transforms
from . import cirs_observed_transforms
from . import intermediate_rotation_transforms
from . import ecliptic_transforms
from astropy.coordinates.baseframe import frame_transform_graph
# we define an __all__ because otherwise the transformation modules
# get included
__all__ = ['ICRS', 'FK5', 'FK4', 'FK4NoETerms', 'Galactic', 'Galactocentric',
'Supergalactic', 'AltAz', 'GCRS', 'CIRS', 'ITRS', 'HCRS',
'PrecessedGeocentric', 'GeocentricMeanEcliptic',
'BarycentricMeanEcliptic', 'HeliocentricMeanEcliptic',
'GeocentricTrueEcliptic', 'BarycentricTrueEcliptic',
'HeliocentricTrueEcliptic',
'SkyOffsetFrame', 'GalacticLSR', 'LSR',
'BaseEclipticFrame', 'BaseRADecFrame', 'make_transform_graph_docs',
'HeliocentricEclipticIAU76', 'CustomBarycentricEcliptic']
def make_transform_graph_docs(transform_graph):
"""
Generates a string that can be used in other docstrings to include a
transformation graph, showing the available transforms and
coordinate systems.
Parameters
----------
transform_graph : `~.coordinates.TransformGraph`
Returns
-------
docstring : str
A string that can be added to the end of a docstring to show the
transform graph.
"""
from textwrap import dedent
coosys = [transform_graph.lookup_name(item) for
item in transform_graph.get_names()]
# currently, all of the priorities are set to 1, so we don't need to show
# them in the transform graph.
graphstr = transform_graph.to_dot_graph(addnodes=coosys,
priorities=False)
docstr = """
The diagram below shows all of the built in coordinate systems,
their aliases (useful for converting other coordinates to them using
attribute-style access) and the pre-defined transformations between
them. The user is free to override any of these transformations by
defining new transformations between these systems, but the
pre-defined transformations should be sufficient for typical usage.
The color of an edge in the graph (i.e. the transformations between two
frames) is set by the type of transformation; the legend box defines the
mapping from transform class name to color.
.. Wrap the graph in a div with a custom class to allow theming.
.. container:: frametransformgraph
.. graphviz::
"""
docstr = dedent(docstr) + ' ' + graphstr.replace('\n', '\n ')
# colors are in dictionary at the bottom of transformations.py
from astropy.coordinates.transformations import trans_to_color
html_list_items = []
for cls, color in trans_to_color.items():
block = u"""
<li style='list-style: none;'>
<p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;">
<b>{0}:</b>
<span style="font-size: 24px; color: {1};"><b>➝</b></span>
</p>
</li>
""".format(cls.__name__, color)
html_list_items.append(block)
graph_legend = u"""
.. raw:: html
<ul>
{}
</ul>
""".format("\n".join(html_list_items))
docstr = docstr + dedent(graph_legend)
return docstr
_transform_graph_docs = make_transform_graph_docs(frame_transform_graph)
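# Illustrative sketch (not part of this module): the transform graph populated
# by the imports above can be inspected directly, e.g. to check that a frame
# alias resolves to the expected class.
if __name__ == '__main__':  # pragma: no cover - documentation example only
    print('icrs' in frame_transform_graph.get_names())        # True
    print(frame_transform_graph.lookup_name('icrs') is ICRS)  # True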
|
3f27209690f2382ffe67a7a9396a08cd1324dfbd2a41a89e3b9ea06c7ba6b71d | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Contains the transformation functions for getting from ICRS/HCRS to CIRS and
anything in between (currently that means GCRS)
"""
import numpy as np
from astropy import units as u
from astropy.coordinates.baseframe import frame_transform_graph
from astropy.coordinates.transformations import FunctionTransformWithFiniteDifference, AffineTransform
from astropy.coordinates.representation import (SphericalRepresentation, CartesianRepresentation,
UnitSphericalRepresentation, CartesianDifferential)
from astropy import _erfa as erfa
from .icrs import ICRS
from .gcrs import GCRS
from .cirs import CIRS
from .hcrs import HCRS
from .utils import get_jd12, aticq, atciqz, get_cip, prepare_earth_position_vel
# First the ICRS/CIRS related transforms
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, CIRS)
def icrs_to_cirs(icrs_coo, cirs_frame):
# first set up the astrometry context for ICRS<->CIRS
jd1, jd2 = get_jd12(cirs_frame.obstime, 'tdb')
x, y, s = get_cip(jd1, jd2)
earth_pv, earth_heliocentric = prepare_earth_position_vel(cirs_frame.obstime)
astrom = erfa.apci(jd1, jd2, earth_pv, earth_heliocentric, x, y, s)
if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just do the infinite-distance/no parallax calculation
usrepr = icrs_coo.represent_as(UnitSphericalRepresentation)
i_ra = usrepr.lon.to_value(u.radian)
i_dec = usrepr.lat.to_value(u.radian)
cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom)
newrep = UnitSphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),
lon=u.Quantity(cirs_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, we first offset for parallax to get the
# astrometric coordinate direction and *then* run the ERFA transform for
# no parallax/PM. This ensures reversibility and is more sensible for
# objects inside the solar system.
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newcart = icrs_coo.cartesian - astrom_eb
srepr = newcart.represent_as(SphericalRepresentation)
i_ra = srepr.lon.to_value(u.radian)
i_dec = srepr.lat.to_value(u.radian)
cirs_ra, cirs_dec = atciqz(i_ra, i_dec, astrom)
newrep = SphericalRepresentation(lat=u.Quantity(cirs_dec, u.radian, copy=False),
lon=u.Quantity(cirs_ra, u.radian, copy=False),
distance=srepr.distance, copy=False)
return cirs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, ICRS)
def cirs_to_icrs(cirs_coo, icrs_frame):
srepr = cirs_coo.represent_as(SphericalRepresentation)
cirs_ra = srepr.lon.to_value(u.radian)
cirs_dec = srepr.lat.to_value(u.radian)
# set up the astrometry context for ICRS<->cirs and then convert to
# astrometric coordinate direction
jd1, jd2 = get_jd12(cirs_coo.obstime, 'tdb')
x, y, s = get_cip(jd1, jd2)
earth_pv, earth_heliocentric = prepare_earth_position_vel(cirs_coo.obstime)
astrom = erfa.apci(jd1, jd2, earth_pv, earth_heliocentric, x, y, s)
i_ra, i_dec = aticq(cirs_ra, cirs_dec, astrom)
if cirs_coo.data.get_name() == 'unitspherical' or cirs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, apply the parallax/offset to the SSB as the
# last step - ensures round-tripping with the icrs_to_cirs transform
# the distance in intermedrep is *not* a real distance as it does not
# include the offset back to the SSB
intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False)
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newrep = intermedrep + astrom_eb
return icrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, CIRS, CIRS)
def cirs_to_cirs(from_coo, to_frame):
if np.all(from_coo.obstime == to_frame.obstime):
return to_frame.realize_frame(from_coo.data)
else:
# the CIRS<->CIRS transform actually goes through ICRS. This has a
# subtle implication that a point in CIRS is uniquely determined
# by the corresponding astrometric ICRS coordinate *at its
# current time*. This has some subtle implications in terms of GR, but
# is sort of glossed over in the current scheme because we are dropping
# distances anyway.
return from_coo.transform_to(ICRS).transform_to(to_frame)
# Now the GCRS-related transforms to/from ICRS
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, ICRS, GCRS)
def icrs_to_gcrs(icrs_coo, gcrs_frame):
# first set up the astrometry context for ICRS<->GCRS. There are a few steps...
# get the position and velocity arrays for the observatory. Need to
# have xyz in last dimension, and pos/vel in one-but-last.
# (Note could use np.stack once our minimum numpy version is >=1.10.)
obs_pv = erfa.pav2pv(
gcrs_frame.obsgeoloc.get_xyz(xyz_axis=-1).to_value(u.m),
gcrs_frame.obsgeovel.get_xyz(xyz_axis=-1).to_value(u.m/u.s))
# find the position and velocity of earth
jd1, jd2 = get_jd12(gcrs_frame.obstime, 'tdb')
earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_frame.obstime)
# get astrometry context object, astrom.
astrom = erfa.apcs(jd1, jd2, obs_pv, earth_pv, earth_heliocentric)
if icrs_coo.data.get_name() == 'unitspherical' or icrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just do the infinite-distance/no parallax calculation
usrepr = icrs_coo.represent_as(UnitSphericalRepresentation)
i_ra = usrepr.lon.to_value(u.radian)
i_dec = usrepr.lat.to_value(u.radian)
gcrs_ra, gcrs_dec = atciqz(i_ra, i_dec, astrom)
newrep = UnitSphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False),
lon=u.Quantity(gcrs_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, we first offset for parallax to get the
# BCRS coordinate direction and *then* run the ERFA transform for no
# parallax/PM. This ensures reversibility and is more sensible for
# objects inside the solar system.
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newcart = icrs_coo.cartesian - astrom_eb
srepr = newcart.represent_as(SphericalRepresentation)
i_ra = srepr.lon.to_value(u.radian)
i_dec = srepr.lat.to_value(u.radian)
gcrs_ra, gcrs_dec = atciqz(i_ra, i_dec, astrom)
newrep = SphericalRepresentation(lat=u.Quantity(gcrs_dec, u.radian, copy=False),
lon=u.Quantity(gcrs_ra, u.radian, copy=False),
distance=srepr.distance, copy=False)
return gcrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
GCRS, ICRS)
def gcrs_to_icrs(gcrs_coo, icrs_frame):
srepr = gcrs_coo.represent_as(SphericalRepresentation)
gcrs_ra = srepr.lon.to_value(u.radian)
gcrs_dec = srepr.lat.to_value(u.radian)
# set up the astrometry context for ICRS<->GCRS and then convert to BCRS
# coordinate direction
obs_pv = erfa.pav2pv(
gcrs_coo.obsgeoloc.get_xyz(xyz_axis=-1).to_value(u.m),
gcrs_coo.obsgeovel.get_xyz(xyz_axis=-1).to_value(u.m/u.s))
jd1, jd2 = get_jd12(gcrs_coo.obstime, 'tdb')
earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_coo.obstime)
astrom = erfa.apcs(jd1, jd2, obs_pv, earth_pv, earth_heliocentric)
i_ra, i_dec = aticq(gcrs_ra, gcrs_dec, astrom)
if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
copy=False)
else:
# When there is a distance, apply the parallax/offset to the SSB as the
# last step - ensures round-tripping with the icrs_to_gcrs transform
# the distance in intermedrep is *not* a real distance as it does not
# include the offset back to the SSB
intermedrep = SphericalRepresentation(lat=u.Quantity(i_dec, u.radian, copy=False),
lon=u.Quantity(i_ra, u.radian, copy=False),
distance=srepr.distance,
copy=False)
astrom_eb = CartesianRepresentation(astrom['eb'], unit=u.au,
xyz_axis=-1, copy=False)
newrep = intermedrep + astrom_eb
return icrs_frame.realize_frame(newrep)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, GCRS)
def gcrs_to_gcrs(from_coo, to_frame):
if (np.all(from_coo.obstime == to_frame.obstime)
and np.all(from_coo.obsgeoloc == to_frame.obsgeoloc)):
return to_frame.realize_frame(from_coo.data)
else:
# like CIRS, we do this self-transform via ICRS
return from_coo.transform_to(ICRS).transform_to(to_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, GCRS, HCRS)
def gcrs_to_hcrs(gcrs_coo, hcrs_frame):
if np.any(gcrs_coo.obstime != hcrs_frame.obstime):
# if the GCRS obstime and HCRS obstime are not the same, we first
# have to move to a GCRS where they are.
frameattrs = gcrs_coo.get_frame_attr_names()
frameattrs['obstime'] = hcrs_frame.obstime
gcrs_coo = gcrs_coo.transform_to(GCRS(**frameattrs))
srepr = gcrs_coo.represent_as(SphericalRepresentation)
gcrs_ra = srepr.lon.to_value(u.radian)
gcrs_dec = srepr.lat.to_value(u.radian)
# set up the astrometry context for ICRS<->GCRS and then convert to ICRS
# coordinate direction
obs_pv = erfa.pav2pv(
gcrs_coo.obsgeoloc.get_xyz(xyz_axis=-1).to_value(u.m),
gcrs_coo.obsgeovel.get_xyz(xyz_axis=-1).to_value(u.m/u.s))
jd1, jd2 = get_jd12(hcrs_frame.obstime, 'tdb')
earth_pv, earth_heliocentric = prepare_earth_position_vel(gcrs_coo.obstime)
astrom = erfa.apcs(jd1, jd2, obs_pv, earth_pv, earth_heliocentric)
i_ra, i_dec = aticq(gcrs_ra, gcrs_dec, astrom)
# convert to Quantity objects
i_ra = u.Quantity(i_ra, u.radian, copy=False)
i_dec = u.Quantity(i_dec, u.radian, copy=False)
if gcrs_coo.data.get_name() == 'unitspherical' or gcrs_coo.data.to_cartesian().x.unit == u.one:
# if no distance, just use the coordinate direction to yield the
# infinite-distance/no parallax answer
newrep = UnitSphericalRepresentation(lat=i_dec, lon=i_ra, copy=False)
else:
# When there is a distance, apply the parallax/offset to the
# Heliocentre as the last step to ensure round-tripping with the
# hcrs_to_gcrs transform
# Note that the distance in intermedrep is *not* a real distance as it
# does not include the offset back to the Heliocentre
intermedrep = SphericalRepresentation(lat=i_dec, lon=i_ra,
distance=srepr.distance,
copy=False)
# astrom['eh'] and astrom['em'] contain the Sun-to-observer unit vector
# and distance, respectively. Shapes are (X, 3) and (X), where (X) is the
# shape resulting from broadcasting the shape of the times object
# against the shape of the pv array.
# broadcast em to eh and scale eh
eh = astrom['eh'] * astrom['em'][..., np.newaxis]
eh = CartesianRepresentation(eh, unit=u.au, xyz_axis=-1, copy=False)
newrep = intermedrep.to_cartesian() + eh
return hcrs_frame.realize_frame(newrep)
_NEED_ORIGIN_HINT = ("The input {0} coordinates do not have length units. This "
"probably means you created coordinates with lat/lon but "
"no distance. Heliocentric<->ICRS transforms cannot "
"function in this case because there is an origin shift.")
@frame_transform_graph.transform(AffineTransform, HCRS, ICRS)
def hcrs_to_icrs(hcrs_coo, icrs_frame):
# this is just an origin translation so without a distance it cannot go ahead
if isinstance(hcrs_coo.data, UnitSphericalRepresentation):
raise u.UnitsError(_NEED_ORIGIN_HINT.format(hcrs_coo.__class__.__name__))
if hcrs_coo.data.differentials:
from astropy.coordinates.solar_system import get_body_barycentric_posvel
bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel('sun',
hcrs_coo.obstime)
bary_sun_vel = bary_sun_vel.represent_as(CartesianDifferential)
bary_sun_pos = bary_sun_pos.with_differentials(bary_sun_vel)
else:
from astropy.coordinates.solar_system import get_body_barycentric
bary_sun_pos = get_body_barycentric('sun', hcrs_coo.obstime)
bary_sun_vel = None
return None, bary_sun_pos
@frame_transform_graph.transform(AffineTransform, ICRS, HCRS)
def icrs_to_hcrs(icrs_coo, hcrs_frame):
# this is just an origin translation so without a distance it cannot go ahead
if isinstance(icrs_coo.data, UnitSphericalRepresentation):
raise u.UnitsError(_NEED_ORIGIN_HINT.format(icrs_coo.__class__.__name__))
if icrs_coo.data.differentials:
from astropy.coordinates.solar_system import get_body_barycentric_posvel
bary_sun_pos, bary_sun_vel = get_body_barycentric_posvel('sun',
hcrs_frame.obstime)
bary_sun_pos = -bary_sun_pos
bary_sun_vel = -bary_sun_vel.represent_as(CartesianDifferential)
bary_sun_pos = bary_sun_pos.with_differentials(bary_sun_vel)
else:
from astropy.coordinates.solar_system import get_body_barycentric
bary_sun_pos = -get_body_barycentric('sun', hcrs_frame.obstime)
bary_sun_vel = None
return None, bary_sun_pos
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference, HCRS, HCRS)
def hcrs_to_hcrs(from_coo, to_frame):
if np.all(from_coo.obstime == to_frame.obstime):
return to_frame.realize_frame(from_coo.data)
else:
# like CIRS, we do this self-transform via ICRS
return from_coo.transform_to(ICRS).transform_to(to_frame)
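# Illustrative sketch (not part of this module): once this module has been
# imported (which `astropy.coordinates` does automatically), the transforms
# registered above are used implicitly through ``transform_to``.
if __name__ == '__main__':  # pragma: no cover - documentation example only
    from astropy.time import Time
    icrs = ICRS(ra=10. * u.deg, dec=20. * u.deg)
    # The same direction expressed in the CIRS frame at the given epoch.
    print(icrs.transform_to(CIRS(obstime=Time('J2010'))))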
|
88c5b3f41144fb318be8d5e13a0ad03dc0c80b0dc5f6a8e5b93d60b9b45b419f |
import pytest
import numpy as np
from urllib.error import HTTPError
from astropy.time import Time
from astropy import units as u
from astropy.constants import c
from astropy.coordinates.builtin_frames import GCRS
from astropy.coordinates.earth import EarthLocation
from astropy.coordinates.sky_coordinate import SkyCoord
from astropy.coordinates.solar_system import (get_body, get_moon, BODY_NAME_TO_KERNEL_SPEC,
_apparent_position_in_true_coordinates,
get_body_barycentric, get_body_barycentric_posvel)
from astropy.coordinates.funcs import get_sun
from astropy.tests.helper import assert_quantity_allclose
from astropy.units import allclose as quantity_allclose
from astropy.utils.data import download_file
try:
import jplephem # pylint: disable=W0611
except ImportError:
HAS_JPLEPHEM = False
else:
HAS_JPLEPHEM = True
try:
from skyfield.api import load # pylint: disable=W0611
except ImportError:
HAS_SKYFIELD = False
else:
HAS_SKYFIELD = True
de432s_separation_tolerance_planets = 5*u.arcsec
de432s_separation_tolerance_moon = 5*u.arcsec
de432s_distance_tolerance = 20*u.km
skyfield_angular_separation_tolerance = 1*u.arcsec
skyfield_separation_tolerance = 10*u.km
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_SKYFIELD')
def test_positions_skyfield():
"""
Test positions against those generated by skyfield.
"""
t = Time('1980-03-25 00:00')
location = None
# skyfield ephemeris
planets = load('de421.bsp')
ts = load.timescale()
mercury, jupiter, moon = planets['mercury'], planets['jupiter barycenter'], planets['moon']
earth = planets['earth']
skyfield_t = ts.from_astropy(t)
if location is not None:
earth = earth.topos(latitude_degrees=location.lat.to_value(u.deg),
longitude_degrees=location.lon.to_value(u.deg),
elevation_m=location.height.to_value(u.m))
skyfield_mercury = earth.at(skyfield_t).observe(mercury).apparent()
skyfield_jupiter = earth.at(skyfield_t).observe(jupiter).apparent()
skyfield_moon = earth.at(skyfield_t).observe(moon).apparent()
if location is not None:
obsgeoloc, obsgeovel = location.get_gcrs_posvel(t)
frame = GCRS(obstime=t, obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
else:
frame = GCRS(obstime=t)
ra, dec, dist = skyfield_mercury.radec(epoch='date')
skyfield_mercury = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
ra, dec, dist = skyfield_jupiter.radec(epoch='date')
skyfield_jupiter = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
ra, dec, dist = skyfield_moon.radec(epoch='date')
skyfield_moon = SkyCoord(ra.to(u.deg), dec.to(u.deg), distance=dist.to(u.km),
frame=frame)
moon_astropy = get_moon(t, location, ephemeris='de430')
mercury_astropy = get_body('mercury', t, location, ephemeris='de430')
jupiter_astropy = get_body('jupiter', t, location, ephemeris='de430')
# convert to true equator and equinox
jupiter_astropy = _apparent_position_in_true_coordinates(jupiter_astropy)
mercury_astropy = _apparent_position_in_true_coordinates(mercury_astropy)
moon_astropy = _apparent_position_in_true_coordinates(moon_astropy)
assert (moon_astropy.separation(skyfield_moon) <
skyfield_angular_separation_tolerance)
assert (moon_astropy.separation_3d(skyfield_moon) < skyfield_separation_tolerance)
assert (jupiter_astropy.separation(skyfield_jupiter) <
skyfield_angular_separation_tolerance)
assert (jupiter_astropy.separation_3d(skyfield_jupiter) <
skyfield_separation_tolerance)
assert (mercury_astropy.separation(skyfield_mercury) <
skyfield_angular_separation_tolerance)
assert (mercury_astropy.separation_3d(skyfield_mercury) <
skyfield_separation_tolerance)
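# Illustrative sketch (not part of the original test suite): the high-level
# API exercised by the comparisons above, using the bundled 'builtin'
# ephemeris so no download is required.
def _demo_get_body():  # pragma: no cover - documentation example only
    t = Time('1980-03-25 00:00')
    jupiter = get_body('jupiter', t, ephemeris='builtin')
    return jupiter.ra, jupiter.dec, jupiter.distance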
class TestPositionsGeocentric:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup(self):
self.t = Time('1980-03-25 00:00')
self.frame = GCRS(obstime=self.t)
# Results returned by JPL Horizons web interface
self.horizons = {
'mercury': SkyCoord(ra='22h41m47.78s', dec='-08d29m32.0s',
distance=c*6.323037*u.min, frame=self.frame),
'moon': SkyCoord(ra='07h32m02.62s', dec='+18d34m05.0s',
distance=c*0.021921*u.min, frame=self.frame),
'jupiter': SkyCoord(ra='10h17m12.82s', dec='+12d02m57.0s',
distance=c*37.694557*u.min, frame=self.frame),
'sun': SkyCoord(ra='00h16m31.00s', dec='+01d47m16.9s',
distance=c*8.294858*u.min, frame=self.frame)}
@pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'),
(('mercury', 7.*u.arcsec, 1000*u.km),
('jupiter', 78.*u.arcsec, 76000*u.km),
('moon', 20.*u.arcsec, 80*u.km),
('sun', 5.*u.arcsec, 11.*u.km)))
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c, for Jupiter and
Mercury, and that quoted in Meeus "Astronomical Algorithms" (1998) for the Moon.
"""
astropy = get_body(body, self.t, ephemeris='builtin')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('body', ('mercury', 'jupiter', 'sun'))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris='de432s')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_planets)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_de432s_moon(self):
astropy = get_moon(self.t, ephemeris='de432s')
horizons = self.horizons['moon']
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_moon)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
class TestPositionKittPeak:
"""
Test positions against those generated by JPL Horizons accessed on
2016-03-28, with refraction turned on.
"""
def setup(self):
kitt_peak = EarthLocation.from_geodetic(lon=-111.6*u.deg,
lat=31.963333333333342*u.deg,
height=2120*u.m)
self.t = Time('2014-09-25T00:00', location=kitt_peak)
obsgeoloc, obsgeovel = kitt_peak.get_gcrs_posvel(self.t)
self.frame = GCRS(obstime=self.t,
obsgeoloc=obsgeoloc, obsgeovel=obsgeovel)
# Results returned by JPL Horizons web interface
self.horizons = {
'mercury': SkyCoord(ra='13h38m58.50s', dec='-13d34m42.6s',
distance=c*7.699020*u.min, frame=self.frame),
'moon': SkyCoord(ra='12h33m12.85s', dec='-05d17m54.4s',
distance=c*0.022054*u.min, frame=self.frame),
'jupiter': SkyCoord(ra='09h09m55.55s', dec='+16d51m57.8s',
distance=c*49.244937*u.min, frame=self.frame)}
@pytest.mark.parametrize(('body', 'sep_tol', 'dist_tol'),
(('mercury', 7.*u.arcsec, 500*u.km),
('jupiter', 78.*u.arcsec, 82000*u.km)))
def test_erfa_planet(self, body, sep_tol, dist_tol):
"""Test predictions using erfa/plan94.
Accuracies are maximum deviations listed in erfa/plan94.c.
"""
# Add uncertainty in position of Earth
dist_tol = dist_tol + 1300 * u.km
astropy = get_body(body, self.t, ephemeris='builtin')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert astropy.separation(horizons) < sep_tol
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=dist_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('body', ('mercury', 'jupiter'))
def test_de432s_planet(self, body):
astropy = get_body(body, self.t, ephemeris='de432s')
horizons = self.horizons[body]
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_planets)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_de432s_moon(self):
astropy = get_moon(self.t, ephemeris='de432s')
horizons = self.horizons['moon']
# convert to true equator and equinox
astropy = _apparent_position_in_true_coordinates(astropy)
# Assert sky coordinates are close.
assert (astropy.separation(horizons) <
de432s_separation_tolerance_moon)
# Assert distances are close.
assert_quantity_allclose(astropy.distance, horizons.distance,
atol=de432s_distance_tolerance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('bodyname', ('mercury', 'jupiter'))
def test_custom_kernel_spec_body(self, bodyname):
"""
Checks that giving a kernel specifier instead of a body name works
"""
coord_by_name = get_body(bodyname, self.t, ephemeris='de432s')
kspec = BODY_NAME_TO_KERNEL_SPEC[bodyname]
coord_by_kspec = get_body(kspec, self.t, ephemeris='de432s')
assert_quantity_allclose(coord_by_name.ra, coord_by_kspec.ra)
assert_quantity_allclose(coord_by_name.dec, coord_by_kspec.dec)
assert_quantity_allclose(coord_by_name.distance, coord_by_kspec.distance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('time', (Time('1960-01-12 00:00'),
Time('1980-03-25 00:00'),
Time('2010-10-13 00:00')))
def test_get_sun_consistency(time):
"""
Test that the sun from JPL and the builtin get_sun match
"""
sun_jpl_gcrs = get_body('sun', time, ephemeris='de432s')
builtin_get_sun = get_sun(time)
sep = builtin_get_sun.separation(sun_jpl_gcrs)
assert sep < 0.1*u.arcsec
def test_get_moon_nonscalar_regression():
"""
Test that the builtin ephemeris works with non-scalar times.
See Issue #5069.
"""
times = Time(["2015-08-28 03:30", "2015-09-05 10:30"])
# the following line will raise an Exception if the bug recurs.
get_moon(times, ephemeris='builtin')
def test_barycentric_pos_posvel_same():
# Check that the two routines give identical results.
ep1 = get_body_barycentric('earth', Time('2016-03-20T12:30:00'))
ep2, _ = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00'))
assert np.all(ep1.xyz == ep2.xyz)
def test_earth_barycentric_velocity_rough():
# Check that a time near the equinox gives roughly the right result.
ep, ev = get_body_barycentric_posvel('earth', Time('2016-03-20T12:30:00'))
assert_quantity_allclose(ep.xyz, [-1., 0., 0.]*u.AU, atol=0.01*u.AU)
expected = u.Quantity([0.*u.one,
np.cos(23.5*u.deg),
np.sin(23.5*u.deg)]) * -30. * u.km / u.s
assert_quantity_allclose(ev.xyz, expected, atol=1.*u.km/u.s)
def test_earth_barycentric_velocity_multi_d():
# Might as well test it with a multidimensional array too.
t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.
ep, ev = get_body_barycentric_posvel('earth', t)
# note: assert_quantity_allclose doesn't like the shape mismatch.
# this is a problem with np.testing.assert_allclose.
assert quantity_allclose(ep.get_xyz(xyz_axis=-1),
[[-1., 0., 0.], [+1., 0., 0.]]*u.AU,
atol=0.06*u.AU)
expected = u.Quantity([0.*u.one,
np.cos(23.5*u.deg),
np.sin(23.5*u.deg)]) * ([[-30.], [30.]] * u.km / u.s)
assert quantity_allclose(ev.get_xyz(xyz_axis=-1), expected,
atol=2.*u.km/u.s)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize(('body', 'pos_tol', 'vel_tol'),
(('mercury', 1000.*u.km, 1.*u.km/u.s),
('jupiter', 100000.*u.km, 2.*u.km/u.s),
('earth', 10*u.km, 10*u.mm/u.s)))
def test_barycentric_velocity_consistency(body, pos_tol, vel_tol):
# Tolerances are about 1.5 times the rms listed for plan94 and epv00,
# except for Mercury (which nominally is 334 km rms)
t = Time('2016-03-20T12:30:00')
ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')
dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
# Might as well test it with a multidimensional array too.
t = Time('2016-03-20T12:30:00') + np.arange(8.).reshape(2, 2, 2) * u.yr / 2.
ep, ev = get_body_barycentric_posvel(body, t, ephemeris='builtin')
dp, dv = get_body_barycentric_posvel(body, t, ephemeris='de432s')
assert_quantity_allclose(ep.xyz, dp.xyz, atol=pos_tol)
assert_quantity_allclose(ev.xyz, dv.xyz, atol=vel_tol)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
@pytest.mark.parametrize('time', (Time('1960-01-12 00:00'),
Time('1980-03-25 00:00'),
Time('2010-10-13 00:00')))
def test_url_or_file_ephemeris(time):
# URL for ephemeris de432s used for testing:
url = 'http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp'
# Pass the ephemeris directly as a URL.
coord_by_url = get_body('earth', time, ephemeris=url)
# Translate the URL to the cached location on the filesystem.
# Since we just used the url above, it should already have been downloaded.
filepath = download_file(url, cache=True)
# Get the coordinates using the file path directly:
coord_by_filepath = get_body('earth', time, ephemeris=filepath)
# Using the URL or filepath should give exactly the same results:
assert_quantity_allclose(coord_by_url.ra, coord_by_filepath.ra)
assert_quantity_allclose(coord_by_url.dec, coord_by_filepath.dec)
assert_quantity_allclose(coord_by_url.distance, coord_by_filepath.distance)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_url_ephemeris_wrong_input():
# Try loading a non-existing URL:
time = Time('1960-01-12 00:00')
with pytest.raises(HTTPError):
get_body('earth', time, ephemeris='http://data.astropy.org/path/to/nonexisting/file.bsp')
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_file_ephemeris_wrong_input():
time = Time('1960-01-12 00:00')
# Try loading a non-existing file:
with pytest.raises(ValueError):
get_body('earth', time, ephemeris='/path/to/nonexisting/file.bsp')
# Try loading a file that does exist, but is not an ephemeris file:
with pytest.raises(ValueError):
get_body('earth', time, ephemeris=__file__)
|
fde8370144b2030415f4f1e849d7ca0dde31dd48b11b36b7fb726108f06734c4 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from astropy import units as u
from astropy.coordinates.distances import Distance
from astropy.coordinates.builtin_frames import (ICRS, FK5, FK4, FK4NoETerms, Galactic,
Supergalactic, Galactocentric, HCRS, GCRS, LSR)
from astropy.coordinates import SkyCoord
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.coordinates import EarthLocation, CartesianRepresentation
from astropy.time import Time
from astropy.units import allclose
# used below in the next parametrized test
m31_sys = [ICRS, FK5, FK4, Galactic]
m31_coo = [(10.6847929, 41.2690650), (10.6847929, 41.2690650), (10.0004738, 40.9952444), (121.1744050, -21.5729360)]
m31_dist = Distance(770, u.kpc)
convert_precision = 1 * u.arcsec
roundtrip_precision = 1e-4 * u.degree
dist_precision = 1e-9 * u.kpc
m31_params = []
for i in range(len(m31_sys)):
for j in range(len(m31_sys)):
if i < j:
m31_params.append((m31_sys[i], m31_sys[j], m31_coo[i], m31_coo[j]))
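# e.g. the first generated entry pairs ICRS with FK5 and their coordinates
# listed above:
#     (ICRS, FK5, (10.6847929, 41.2690650), (10.6847929, 41.2690650))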
@pytest.mark.parametrize(('fromsys', 'tosys', 'fromcoo', 'tocoo'), m31_params)
def test_m31_coord_transforms(fromsys, tosys, fromcoo, tocoo):
"""
This tests a variety of coordinate conversions for the Chandra point-source
catalog location of M31 from NED.
"""
coo1 = fromsys(ra=fromcoo[0]*u.deg, dec=fromcoo[1]*u.deg, distance=m31_dist)
coo2 = coo1.transform_to(tosys)
if tosys is FK4:
coo2_prec = coo2.transform_to(FK4(equinox=Time('B1950')))
assert (coo2_prec.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec
assert (coo2_prec.spherical.lat - tocoo[1]*u.deg) < convert_precision
else:
assert (coo2.spherical.lon - tocoo[0]*u.deg) < convert_precision # <1 arcsec
assert (coo2.spherical.lat - tocoo[1]*u.deg) < convert_precision
assert coo1.distance.unit == u.kpc
assert coo2.distance.unit == u.kpc
assert m31_dist.unit == u.kpc
assert (coo2.distance - m31_dist) < dist_precision
# check round-tripping
coo1_2 = coo2.transform_to(fromsys)
assert (coo1_2.spherical.lon - fromcoo[0]*u.deg) < roundtrip_precision
assert (coo1_2.spherical.lat - fromcoo[1]*u.deg) < roundtrip_precision
assert (coo1_2.distance - m31_dist) < dist_precision
def test_precession():
"""
Ensures that FK4 and FK5 coordinates precess their equinoxes
"""
j2000 = Time('J2000')
b1950 = Time('B1950')
j1975 = Time('J1975')
b1975 = Time('B1975')
fk4 = FK4(ra=1*u.radian, dec=0.5*u.radian)
assert fk4.equinox.byear == b1950.byear
fk4_2 = fk4.transform_to(FK4(equinox=b1975))
assert fk4_2.equinox.byear == b1975.byear
fk5 = FK5(ra=1*u.radian, dec=0.5*u.radian)
assert fk5.equinox.jyear == j2000.jyear
fk5_2 = fk5.transform_to(FK4(equinox=j1975))
assert fk5_2.equinox.jyear == j1975.jyear
def test_fk5_galactic():
"""
Check that FK5 -> Galactic gives the same as FK5 -> FK4 -> Galactic.
"""
fk5 = FK5(ra=1*u.deg, dec=2*u.deg)
direct = fk5.transform_to(Galactic)
indirect = fk5.transform_to(FK4).transform_to(Galactic)
assert direct.separation(indirect).degree < 1.e-10
direct = fk5.transform_to(Galactic)
indirect = fk5.transform_to(FK4NoETerms).transform_to(Galactic)
assert direct.separation(indirect).degree < 1.e-10
def test_galactocentric():
# when z_sun=0, transformation should be very similar to Galactic
icrs_coord = ICRS(ra=np.linspace(0, 360, 10)*u.deg,
dec=np.linspace(-90, 90, 10)*u.deg,
distance=1.*u.kpc)
g_xyz = icrs_coord.transform_to(Galactic).cartesian.xyz
gc_xyz = icrs_coord.transform_to(Galactocentric(z_sun=0*u.kpc)).cartesian.xyz
diff = np.abs(g_xyz - gc_xyz)
assert allclose(diff[0], 8.3*u.kpc, atol=1E-5*u.kpc)
assert allclose(diff[1:], 0*u.kpc, atol=1E-5*u.kpc)
# generate some test coordinates
g = Galactic(l=[0, 0, 45, 315]*u.deg, b=[-45, 45, 0, 0]*u.deg,
distance=[np.sqrt(2)]*4*u.kpc)
xyz = g.transform_to(Galactocentric(galcen_distance=1.*u.kpc, z_sun=0.*u.pc)).cartesian.xyz
true_xyz = np.array([[0, 0, -1.], [0, 0, 1], [0, 1, 0], [0, -1, 0]]).T*u.kpc
assert allclose(xyz.to(u.kpc), true_xyz.to(u.kpc), atol=1E-5*u.kpc)
# check that ND arrays work
# from Galactocentric to Galactic
x = np.linspace(-10., 10., 100) * u.kpc
y = np.linspace(-10., 10., 100) * u.kpc
z = np.zeros_like(x)
g1 = Galactocentric(x=x, y=y, z=z)
g2 = Galactocentric(x=x.reshape(100, 1, 1), y=y.reshape(100, 1, 1),
z=z.reshape(100, 1, 1))
g1t = g1.transform_to(Galactic)
g2t = g2.transform_to(Galactic)
assert_allclose(g1t.cartesian.xyz, g2t.cartesian.xyz[:, :, 0, 0])
# from Galactic to Galactocentric
l = np.linspace(15, 30., 100) * u.deg
b = np.linspace(-10., 10., 100) * u.deg
d = np.ones_like(l.value) * u.kpc
g1 = Galactic(l=l, b=b, distance=d)
g2 = Galactic(l=l.reshape(100, 1, 1), b=b.reshape(100, 1, 1),
distance=d.reshape(100, 1, 1))
g1t = g1.transform_to(Galactocentric)
g2t = g2.transform_to(Galactocentric)
np.testing.assert_almost_equal(g1t.cartesian.xyz.value,
g2t.cartesian.xyz.value[:, :, 0, 0])
def test_supergalactic():
"""
Check Galactic<->Supergalactic and Galactic<->ICRS conversion.
"""
# Check supergalactic North pole.
npole = Galactic(l=47.37*u.degree, b=+6.32*u.degree)
assert allclose(npole.transform_to(Supergalactic).sgb.deg, +90, atol=1e-9)
# Check the origin of supergalactic longitude.
lon0 = Supergalactic(sgl=0*u.degree, sgb=0*u.degree)
lon0_gal = lon0.transform_to(Galactic)
assert allclose(lon0_gal.l.deg, 137.37, atol=1e-9)
assert allclose(lon0_gal.b.deg, 0, atol=1e-9)
# Test Galactic<->ICRS with some positions that appear in Foley et al. 2008
# (http://adsabs.harvard.edu/abs/2008A%26A...484..143F)
# GRB 021219
supergalactic = Supergalactic(sgl=29.91*u.degree, sgb=+73.72*u.degree)
icrs = SkyCoord('18h50m27s +31d57m17s')
assert supergalactic.separation(icrs) < 0.005 * u.degree
# GRB 030320
supergalactic = Supergalactic(sgl=-174.44*u.degree, sgb=+46.17*u.degree)
icrs = SkyCoord('17h51m36s -25d18m52s')
assert supergalactic.separation(icrs) < 0.005 * u.degree
class TestHCRS():
"""
Check HCRS<->ICRS coordinate conversions.
Uses ICRS Solar positions predicted by get_body_barycentric; with `t1` and
`tarr` as defined below, the ICRS Solar positions were predicted using, e.g.
coord.ICRS(coord.get_body_barycentric(tarr, 'sun')).
"""
def setup(self):
self.t1 = Time("2013-02-02T23:00")
self.t2 = Time("2013-08-02T23:00")
self.tarr = Time(["2013-02-02T23:00", "2013-08-02T23:00"])
self.sun_icrs_scalar = ICRS(ra=244.52984668*u.deg,
dec=-22.36943723*u.deg,
distance=406615.66347377*u.km)
# array of positions corresponds to times in `tarr`
self.sun_icrs_arr = ICRS(ra=[244.52989062, 271.40976248]*u.deg,
dec=[-22.36943605, -25.07431079]*u.deg,
distance=[406615.66347377, 375484.13558956]*u.km)
# corresponding HCRS positions
self.sun_hcrs_t1 = HCRS(CartesianRepresentation([0.0, 0.0, 0.0] * u.km),
obstime=self.t1)
twod_rep = CartesianRepresentation([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]] * u.km)
self.sun_hcrs_tarr = HCRS(twod_rep, obstime=self.tarr)
self.tolerance = 5*u.km
def test_from_hcrs(self):
# test scalar transform
transformed = self.sun_hcrs_t1.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_scalar)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
# test non-scalar positions and times
transformed = self.sun_hcrs_tarr.transform_to(ICRS())
separation = transformed.separation_3d(self.sun_icrs_arr)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
def test_from_icrs(self):
# scalar positions
transformed = self.sun_icrs_scalar.transform_to(HCRS(obstime=self.t1))
separation = transformed.separation_3d(self.sun_hcrs_t1)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
# nonscalar positions
transformed = self.sun_icrs_arr.transform_to(HCRS(obstime=self.tarr))
separation = transformed.separation_3d(self.sun_hcrs_tarr)
assert_allclose(separation, 0*u.km, atol=self.tolerance)
class TestHelioBaryCentric():
"""
Check GCRS<->Heliocentric and Barycentric coordinate conversions.
Uses the WHT observing site (information grabbed from data/sites.json).
"""
def setup(self):
wht = EarthLocation(342.12*u.deg, 28.758333333333333*u.deg, 2327*u.m)
self.obstime = Time("2013-02-02T23:00")
self.wht_itrs = wht.get_itrs(obstime=self.obstime)
@pytest.mark.remote_data
def test_heliocentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
helio = gcrs.transform_to(HCRS(obstime=self.obstime))
# Check it doesn't change from previous times.
previous = [-1.02597256e+11, 9.71725820e+10, 4.21268419e+10] * u.m
assert_allclose(helio.cartesian.xyz, previous)
# And that it agrees with SLALIB to within 14km
helio_slalib = [-0.685820296, 0.6495585893, 0.2816005464] * u.au
assert np.sqrt(((helio.cartesian.xyz -
helio_slalib)**2).sum()) < 14. * u.km
@pytest.mark.remote_data
def test_barycentric(self):
gcrs = self.wht_itrs.transform_to(GCRS(obstime=self.obstime))
bary = gcrs.transform_to(ICRS())
previous = [-1.02758958e+11, 9.68331109e+10, 4.19720938e+10] * u.m
assert_allclose(bary.cartesian.xyz, previous)
# And that it agrees with SLALIB answer to within 14km
bary_slalib = [-0.6869012079, 0.6472893646, 0.2805661191] * u.au
assert np.sqrt(((bary.cartesian.xyz -
bary_slalib)**2).sum()) < 14. * u.km
def test_lsr_sanity():
# random numbers, but zero velocity in ICRS frame
icrs = ICRS(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
lsr = icrs.transform_to(LSR)
lsr_diff = lsr.data.differentials['s']
cart_lsr_vel = lsr_diff.represent_as(CartesianRepresentation, base=lsr.data)
lsr_vel = ICRS(cart_lsr_vel)
gal_lsr = lsr_vel.transform_to(Galactic).cartesian.xyz
assert allclose(gal_lsr.to(u.km/u.s, u.dimensionless_angles()),
lsr.v_bary.d_xyz)
# moving with LSR velocity
lsr = LSR(ra=15.1241*u.deg, dec=17.5143*u.deg, distance=150.12*u.pc,
pm_ra_cosdec=0*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
icrs = lsr.transform_to(ICRS)
icrs_diff = icrs.data.differentials['s']
cart_vel = icrs_diff.represent_as(CartesianRepresentation, base=icrs.data)
vel = ICRS(cart_vel)
gal_icrs = vel.transform_to(Galactic).cartesian.xyz
assert allclose(gal_icrs.to(u.km/u.s, u.dimensionless_angles()),
-lsr.v_bary.d_xyz)
def test_hcrs_icrs_differentials():
# Regression to ensure that we can transform velocities from HCRS to LSR.
# Numbers taken from the original issue, gh-6835.
hcrs = HCRS(ra=8.67*u.deg, dec=53.09*u.deg, distance=117*u.pc,
pm_ra_cosdec=4.8*u.mas/u.yr, pm_dec=-15.16*u.mas/u.yr,
radial_velocity=23.42*u.km/u.s)
icrs = hcrs.transform_to(ICRS)
# The position and velocity should not change much
assert allclose(hcrs.cartesian.xyz, icrs.cartesian.xyz, rtol=1e-8)
assert allclose(hcrs.velocity.d_xyz, icrs.velocity.d_xyz, rtol=1e-2)
hcrs2 = icrs.transform_to(HCRS)
# The values should round trip
assert allclose(hcrs.cartesian.xyz, hcrs2.cartesian.xyz, rtol=1e-12)
assert allclose(hcrs.velocity.d_xyz, hcrs2.velocity.d_xyz, rtol=1e-12)
|
46e65bf9d0295309bb07a8f4c36fade178302dd10fcdbd270efa4cf942831110 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for coordinates-related bugs that don't have an obvious other
place to live
"""
import io
import copy
import pytest
import numpy as np
from astropy import units as u
from astropy.coordinates import (AltAz, EarthLocation, SkyCoord, get_sun, ICRS,
GeocentricMeanEcliptic, Longitude, Latitude, GCRS, HCRS, CIRS,
get_moon, FK4, FK4NoETerms, BaseCoordinateFrame, ITRS,
QuantityAttribute, UnitSphericalRepresentation,
SphericalRepresentation, CartesianRepresentation,
FunctionTransform)
from astropy.coordinates.sites import get_builtin_sites
from astropy.time import Time
from astropy.utils import iers
from astropy.table import Table
from astropy.tests.helper import assert_quantity_allclose, catch_warnings
from .test_matching import HAS_SCIPY, OLDER_SCIPY
from astropy.units import allclose as quantity_allclose
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
def test_regression_5085():
"""
PR #5085 was put in place to fix the following issue.
Issue: https://github.com/astropy/astropy/issues/5069
At root was the transformation of Ecliptic coordinates with
non-scalar times.
"""
# Note: for regression test, we need to be sure that we use UTC for the
# epoch, even though more properly that should be TT; but the "expected"
# values were calculated using that.
j2000 = Time('J2000', scale='utc')
times = Time(["2015-08-28 03:30", "2015-09-05 10:30", "2015-09-15 18:35"])
latitudes = Latitude([3.9807075, -5.00733806, 1.69539491]*u.deg)
longitudes = Longitude([311.79678613, 72.86626741, 199.58698226]*u.deg)
distances = u.Quantity([0.00243266, 0.0025424, 0.00271296]*u.au)
coo = GeocentricMeanEcliptic(lat=latitudes,
lon=longitudes,
distance=distances, obstime=times, equinox=times)
# expected result
ras = Longitude([310.50095400, 314.67109920, 319.56507428]*u.deg)
decs = Latitude([-18.25190443, -17.1556676, -15.71616522]*u.deg)
distances = u.Quantity([1.78309901, 1.710874, 1.61326649]*u.au)
expected_result = GCRS(ra=ras, dec=decs,
distance=distances, obstime=j2000).cartesian.xyz
actual_result = coo.transform_to(GCRS(obstime=j2000)).cartesian.xyz
assert_quantity_allclose(expected_result, actual_result)
@pytest.mark.remote_data
def test_regression_3920():
"""
Issue: https://github.com/astropy/astropy/issues/3920
"""
loc = EarthLocation.from_geodetic(0*u.deg, 0*u.deg, 0)
time = Time('2010-1-1')
aa = AltAz(location=loc, obstime=time)
sc = SkyCoord(10*u.deg, 3*u.deg)
assert sc.transform_to(aa).shape == tuple()
# That part makes sense: the input is a scalar so the output is too
sc2 = SkyCoord(10*u.deg, 3*u.deg, 1*u.AU)
assert sc2.transform_to(aa).shape == tuple()
# in 3920 that assert fails, because the shape is (1,)
# check that the same behavior occurs even if transform is from low-level classes
icoo = ICRS(sc.data)
icoo2 = ICRS(sc2.data)
assert icoo.transform_to(aa).shape == tuple()
assert icoo2.transform_to(aa).shape == tuple()
@pytest.mark.remote_data
def test_regression_3938():
"""
Issue: https://github.com/astropy/astropy/issues/3938
"""
# Set up list of targets - we don't use `from_name` here to avoid
# remote_data requirements, but it does the same thing
# vega = SkyCoord.from_name('Vega')
vega = SkyCoord(279.23473479*u.deg, 38.78368896*u.deg)
# capella = SkyCoord.from_name('Capella')
capella = SkyCoord(79.17232794*u.deg, 45.99799147*u.deg)
# sirius = SkyCoord.from_name('Sirius')
sirius = SkyCoord(101.28715533*u.deg, -16.71611586*u.deg)
targets = [vega, capella, sirius]
# Feed list of targets into SkyCoord
combined_coords = SkyCoord(targets)
# Set up AltAz frame
time = Time('2012-01-01 00:00:00')
location = EarthLocation('10d', '45d', 0)
aa = AltAz(location=location, obstime=time)
combined_coords.transform_to(aa)
# in 3938 the above yields ``UnitConversionError: '' (dimensionless) and 'pc' (length) are not convertible``
def test_regression_3998():
"""
Issue: https://github.com/astropy/astropy/issues/3998
"""
time = Time('2012-01-01 00:00:00')
assert time.isscalar
sun = get_sun(time)
assert sun.isscalar
# in 3998, the above yields False - `sun` is a length-1 vector
assert sun.obstime is time
@pytest.mark.remote_data
def test_regression_4033():
"""
Issue: https://github.com/astropy/astropy/issues/4033
"""
# alb = SkyCoord.from_name('Albireo')
alb = SkyCoord(292.68033548*u.deg, 27.95968007*u.deg)
alb_wdist = SkyCoord(alb, distance=133*u.pc)
# de = SkyCoord.from_name('Deneb')
de = SkyCoord(310.35797975*u.deg, 45.28033881*u.deg)
de_wdist = SkyCoord(de, distance=802*u.pc)
aa = AltAz(location=EarthLocation(lat=45*u.deg, lon=0*u.deg), obstime='2010-1-1')
deaa = de.transform_to(aa)
albaa = alb.transform_to(aa)
alb_wdistaa = alb_wdist.transform_to(aa)
de_wdistaa = de_wdist.transform_to(aa)
# these work fine
sepnod = deaa.separation(albaa)
sepwd = deaa.separation(alb_wdistaa)
assert_quantity_allclose(sepnod, 22.2862*u.deg, rtol=1e-6)
assert_quantity_allclose(sepwd, 22.2862*u.deg, rtol=1e-6)
# parallax should be present when distance added
assert np.abs(sepnod - sepwd) > 1*u.marcsec
# in 4033, the following fail with a recursion error
assert_quantity_allclose(de_wdistaa.separation(alb_wdistaa), 22.2862*u.deg, rtol=1e-3)
assert_quantity_allclose(alb_wdistaa.separation(deaa), 22.2862*u.deg, rtol=1e-3)
@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
@pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old')
def test_regression_4082():
"""
Issue: https://github.com/astropy/astropy/issues/4082
"""
from astropy.coordinates import search_around_sky, search_around_3d
cat = SkyCoord([10.076, 10.00455], [18.54746, 18.54896], unit='deg')
search_around_sky(cat[0:1], cat, seplimit=u.arcsec * 60, storekdtree=False)
# in the issue, this raises a TypeError
# also check 3d for good measure, although it's not really affected by this bug directly
cat3d = SkyCoord([10.076, 10.00455]*u.deg, [18.54746, 18.54896]*u.deg, distance=[0.1, 1.5]*u.kpc)
search_around_3d(cat3d[0:1], cat3d, 1*u.kpc, storekdtree=False)
def test_regression_4210():
"""
Issue: https://github.com/astropy/astropy/issues/4210
Related PR with actual change: https://github.com/astropy/astropy/pull/4211
"""
crd = SkyCoord(0*u.deg, 0*u.deg, distance=1*u.AU)
ecl = crd.geocentricmeanecliptic
# bug was that "lambda", which at the time was the name of the geocentric
# ecliptic longitude, is a reserved keyword. So this just makes sure the
    # new names are all valid
ecl.lon
# and for good measure, check the other ecliptic systems are all the same
# names for their attributes
from astropy.coordinates.builtin_frames import ecliptic
for frame_name in ecliptic.__all__:
eclcls = getattr(ecliptic, frame_name)
eclobj = eclcls(1*u.deg, 2*u.deg, 3*u.AU)
eclobj.lat
eclobj.lon
eclobj.distance
def test_regression_futuretimes_4302():
"""
Checks that an error is not raised for future times not covered by IERS
tables (at least in a simple transform like CIRS->ITRS that simply requires
the UTC<->UT1 conversion).
Relevant comment: https://github.com/astropy/astropy/pull/4302#discussion_r44836531
"""
from astropy.utils.exceptions import AstropyWarning
# this is an ugly hack to get the warning to show up even if it has already
# appeared
from astropy.coordinates.builtin_frames import utils
if hasattr(utils, '__warningregistry__'):
utils.__warningregistry__.clear()
with catch_warnings() as found_warnings:
future_time = Time('2511-5-1')
c = CIRS(1*u.deg, 2*u.deg, obstime=future_time)
c.transform_to(ITRS(obstime=future_time))
if not isinstance(iers.IERS_Auto.iers_table, iers.IERS_Auto):
saw_iers_warnings = False
for w in found_warnings:
if issubclass(w.category, AstropyWarning):
if '(some) times are outside of range covered by IERS table' in str(w.message):
saw_iers_warnings = True
break
assert saw_iers_warnings, 'Never saw IERS warning'
def test_regression_4996():
# this part is the actual regression test
deltat = np.linspace(-12, 12, 1000)*u.hour
times = Time('2012-7-13 00:00:00') + deltat
suncoo = get_sun(times)
assert suncoo.shape == (len(times),)
# and this is an additional test to make sure more complex arrays work
times2 = Time('2012-7-13 00:00:00') + deltat.reshape(10, 20, 5)
suncoo2 = get_sun(times2)
assert suncoo2.shape == times2.shape
# this is intentionally not allclose - they should be *exactly* the same
assert np.all(suncoo.ra.ravel() == suncoo2.ra.ravel())
def test_regression_4293():
"""Really just an extra test on FK4 no e, after finding that the units
were not always taken correctly. This test is against explicitly doing
the transformations on pp170 of Explanatory Supplement to the Astronomical
Almanac (Seidelmann, 2005).
See https://github.com/astropy/astropy/pull/4293#issuecomment-234973086
"""
# Check all over sky, but avoiding poles (note that FK4 did not ignore
    # e terms within 10° of the poles... see p170 of explan.supp.).
ra, dec = np.meshgrid(np.arange(0, 359, 45), np.arange(-80, 81, 40))
fk4 = FK4(ra.ravel() * u.deg, dec.ravel() * u.deg)
Dc = -0.065838*u.arcsec
Dd = +0.335299*u.arcsec
# Dc * tan(obliquity), as given on p.170
Dctano = -0.028553*u.arcsec
fk4noe_dec = (fk4.dec - (Dd*np.cos(fk4.ra) -
Dc*np.sin(fk4.ra))*np.sin(fk4.dec) -
Dctano*np.cos(fk4.dec))
fk4noe_ra = fk4.ra - (Dc*np.cos(fk4.ra) +
Dd*np.sin(fk4.ra)) / np.cos(fk4.dec)
fk4noe = fk4.transform_to(FK4NoETerms)
# Tolerance here just set to how well the coordinates match, which is much
# better than the claimed accuracy of <1 mas for this first-order in
# v_earth/c approximation.
# Interestingly, if one divides by np.cos(fk4noe_dec) in the ra correction,
# the match becomes good to 2 μas.
assert_quantity_allclose(fk4noe.ra, fk4noe_ra, atol=11.*u.uas, rtol=0)
assert_quantity_allclose(fk4noe.dec, fk4noe_dec, atol=3.*u.uas, rtol=0)
@pytest.mark.remote_data
def test_regression_4926():
times = Time('2010-01-1') + np.arange(20)*u.day
green = get_builtin_sites()['greenwich']
# this is the regression test
moon = get_moon(times, green)
# this is an additional test to make sure the GCRS->ICRS transform works for complex shapes
moon.transform_to(ICRS())
# and some others to increase coverage of transforms
moon.transform_to(HCRS(obstime="J2000"))
moon.transform_to(HCRS(obstime=times))
def test_regression_5209():
"check that distances are not lost on SkyCoord init"
time = Time('2015-01-01')
moon = get_moon(time)
new_coord = SkyCoord([moon])
assert_quantity_allclose(new_coord[0].distance, moon.distance)
@pytest.mark.remote_data
def test_regression_5133():
N = 1000
np.random.seed(12345)
lon = np.random.uniform(-10, 10, N) * u.deg
lat = np.random.uniform(50, 52, N) * u.deg
alt = np.random.uniform(0, 10., N) * u.km
time = Time('2010-1-1')
objects = EarthLocation.from_geodetic(lon, lat, height=alt)
itrs_coo = objects.get_itrs(time)
homes = [EarthLocation.from_geodetic(lon=-1 * u.deg, lat=52 * u.deg, height=h)
for h in (0, 1000, 10000)*u.km]
altaz_frames = [AltAz(obstime=time, location=h) for h in homes]
altaz_coos = [itrs_coo.transform_to(f) for f in altaz_frames]
# they should all be different
for coo in altaz_coos[1:]:
assert not quantity_allclose(coo.az, coo.az[0])
assert not quantity_allclose(coo.alt, coo.alt[0])
@pytest.mark.remote_data
def test_itrs_vals_5133():
time = Time('2010-1-1')
el = EarthLocation.from_geodetic(lon=20*u.deg, lat=45*u.deg, height=0*u.km)
lons = [20, 30, 20]*u.deg
lats = [44, 45, 45]*u.deg
alts = [0, 0, 10]*u.km
coos = [EarthLocation.from_geodetic(lon, lat, height=alt).get_itrs(time)
for lon, lat, alt in zip(lons, lats, alts)]
aaf = AltAz(obstime=time, location=el)
aacs = [coo.transform_to(aaf) for coo in coos]
assert all([coo.isscalar for coo in aacs])
# the ~1 arcsec tolerance is b/c aberration makes it not exact
assert_quantity_allclose(aacs[0].az, 180*u.deg, atol=1*u.arcsec)
assert aacs[0].alt < 0*u.deg
assert aacs[0].distance > 50*u.km
# it should *not* actually be 90 degrees, b/c constant latitude is not
# straight east anywhere except the equator... but should be close-ish
assert_quantity_allclose(aacs[1].az, 90*u.deg, atol=5*u.deg)
assert aacs[1].alt < 0*u.deg
assert aacs[1].distance > 50*u.km
assert_quantity_allclose(aacs[2].alt, 90*u.deg, atol=1*u.arcsec)
assert_quantity_allclose(aacs[2].distance, 10*u.km)
@pytest.mark.remote_data
def test_regression_simple_5133():
t = Time('J2010')
obj = EarthLocation(-1*u.deg, 52*u.deg, height=[100., 0.]*u.km)
home = EarthLocation(-1*u.deg, 52*u.deg, height=10.*u.km)
aa = obj.get_itrs(t).transform_to(AltAz(obstime=t, location=home))
# az is more-or-less undefined for straight up or down
assert_quantity_allclose(aa.alt, [90, -90]*u.deg, rtol=1e-5)
assert_quantity_allclose(aa.distance, [90, 10]*u.km)
def test_regression_5743():
sc = SkyCoord([5, 10], [20, 30], unit=u.deg,
obstime=['2017-01-01T00:00', '2017-01-01T00:10'])
assert sc[0].obstime.shape == tuple()
@pytest.mark.remote_data
def test_regression_5889_5890():
# ensure we can represent all Representations and transform to ND frames
greenwich = EarthLocation(
*u.Quantity([3980608.90246817, -102.47522911, 4966861.27310067],
unit=u.m))
times = Time("2017-03-20T12:00:00") + np.linspace(-2, 2, 3)*u.hour
moon = get_moon(times, location=greenwich)
targets = SkyCoord([350.7*u.deg, 260.7*u.deg], [18.4*u.deg, 22.4*u.deg])
targs2d = targets[:, np.newaxis]
targs2d.transform_to(moon)
def test_regression_6236():
# sunpy changes its representation upon initialisation of a frame,
# including via `realize_frame`. Ensure this works.
class MyFrame(BaseCoordinateFrame):
default_representation = CartesianRepresentation
my_attr = QuantityAttribute(default=0, unit=u.m)
class MySpecialFrame(MyFrame):
def __init__(self, *args, **kwargs):
_rep_kwarg = kwargs.get('representation_type', None)
super().__init__(*args, **kwargs)
if not _rep_kwarg:
self.representation_type = self.default_representation
self._data = self.data.represent_as(self.representation_type)
rep1 = UnitSphericalRepresentation([0., 1]*u.deg, [2., 3.]*u.deg)
rep2 = SphericalRepresentation([10., 11]*u.deg, [12., 13.]*u.deg,
[14., 15.]*u.kpc)
mf1 = MyFrame(rep1, my_attr=1.*u.km)
mf2 = mf1.realize_frame(rep2)
# Normally, data is stored as is, but the representation gets set to a
# default, even if a different representation instance was passed in.
# realize_frame should do the same. Just in case, check attrs are passed.
assert mf1.data is rep1
assert mf2.data is rep2
assert mf1.representation_type is CartesianRepresentation
assert mf2.representation_type is CartesianRepresentation
assert mf2.my_attr == mf1.my_attr
    # It should be independent of whether I set the representation explicitly
mf3 = MyFrame(rep1, my_attr=1.*u.km, representation_type='unitspherical')
mf4 = mf3.realize_frame(rep2)
assert mf3.data is rep1
assert mf4.data is rep2
assert mf3.representation_type is UnitSphericalRepresentation
assert mf4.representation_type is CartesianRepresentation
assert mf4.my_attr == mf3.my_attr
# This should be enough to help sunpy, but just to be sure, a test
# even closer to what is done there, i.e., transform the representation.
msf1 = MySpecialFrame(rep1, my_attr=1.*u.km)
msf2 = msf1.realize_frame(rep2)
assert msf1.data is not rep1 # Gets transformed to Cartesian.
assert msf2.data is not rep2
assert type(msf1.data) is CartesianRepresentation
assert type(msf2.data) is CartesianRepresentation
assert msf1.representation_type is CartesianRepresentation
assert msf2.representation_type is CartesianRepresentation
assert msf2.my_attr == msf1.my_attr
# And finally a test where the input is not transformed.
msf3 = MySpecialFrame(rep1, my_attr=1.*u.km,
representation_type='unitspherical')
msf4 = msf3.realize_frame(rep2)
assert msf3.data is rep1
assert msf4.data is not rep2
assert msf3.representation_type is UnitSphericalRepresentation
assert msf4.representation_type is CartesianRepresentation
assert msf4.my_attr == msf3.my_attr
@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
@pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old')
def test_regression_6347():
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
sc2 = SkyCoord([1.1, 2.1]*u.deg, [3.1, 4.1]*u.deg)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_sky(sc2, 10*u.arcmin)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_sky(sc2, 1*u.arcmin)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_sky(sc2, 10*u.arcmin)
assert len(d2d_10) == 2
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
@pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old')
def test_regression_6347_3d():
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, [5, 6]*u.kpc)
sc2 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, [5.1, 6.1]*u.kpc)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_3d(sc2, 500*u.pc)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_3d(sc2, 50*u.pc)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_3d(sc2, 500*u.pc)
assert len(d2d_10) > 0
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
def test_regression_6300():
"""Check that importing old frame attribute names from astropy.coordinates
still works. See comments at end of #6300
"""
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.coordinates import CartesianRepresentation
from astropy.coordinates import (TimeFrameAttribute, QuantityFrameAttribute,
CartesianRepresentationFrameAttribute)
with catch_warnings() as found_warnings:
attr = TimeFrameAttribute(default=Time("J2000"))
for w in found_warnings:
if issubclass(w.category, AstropyDeprecationWarning):
break
else:
assert False, "Deprecation warning not raised"
with catch_warnings() as found_warnings:
attr = QuantityFrameAttribute(default=5*u.km)
for w in found_warnings:
if issubclass(w.category, AstropyDeprecationWarning):
break
else:
assert False, "Deprecation warning not raised"
with catch_warnings() as found_warnings:
attr = CartesianRepresentationFrameAttribute(
            default=CartesianRepresentation([5, 6, 7]*u.kpc))
for w in found_warnings:
if issubclass(w.category, AstropyDeprecationWarning):
break
else:
assert False, "Deprecation warning not raised"
@pytest.mark.remote_data
def test_gcrs_itrs_cartesian_repr():
# issue 6436: transformation failed if coordinate representation was
# Cartesian
gcrs = GCRS(CartesianRepresentation((859.07256, -4137.20368, 5295.56871),
unit='km'), representation_type='cartesian')
gcrs.transform_to(ITRS)
@pytest.mark.skipif('not HAS_YAML')
def test_regression_6446():
# this succeeds even before 6446:
sc1 = SkyCoord([1, 2], [3, 4], unit='deg')
t1 = Table([sc1])
sio1 = io.StringIO()
t1.write(sio1, format='ascii.ecsv')
# but this fails due to the 6446 bug
c1 = SkyCoord(1, 3, unit='deg')
c2 = SkyCoord(2, 4, unit='deg')
sc2 = SkyCoord([c1, c2])
t2 = Table([sc2])
sio2 = io.StringIO()
t2.write(sio2, format='ascii.ecsv')
assert sio1.getvalue() == sio2.getvalue()
def test_regression_6448():
"""
This tests the more narrow problem reported in 6446 that 6448 is meant to
fix. `test_regression_6446` also covers this, but this test is provided
so that this is still tested even if YAML isn't installed.
"""
sc1 = SkyCoord([1, 2], [3, 4], unit='deg')
# this should always succeed even prior to 6448
assert sc1.galcen_v_sun is None
c1 = SkyCoord(1, 3, unit='deg')
c2 = SkyCoord(2, 4, unit='deg')
sc2 = SkyCoord([c1, c2])
# without 6448 this fails
assert sc2.galcen_v_sun is None
def test_regression_6597():
frame_name = 'galactic'
c1 = SkyCoord(1, 3, unit='deg', frame=frame_name)
c2 = SkyCoord(2, 4, unit='deg', frame=frame_name)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame_name
def test_regression_6597_2():
"""
This tests the more subtle flaw that #6597 indirectly uncovered: that even
in the case that the frames are ra/dec, they still might be the wrong *kind*
"""
frame = FK4(equinox='J1949')
c1 = SkyCoord(1, 3, unit='deg', frame=frame)
c2 = SkyCoord(2, 4, unit='deg', frame=frame)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame.name
@pytest.mark.remote_data
def test_regression_6697():
"""
Test for regression of a bug in get_gcrs_posvel that introduced errors at the 1m/s level.
Comparison data is derived from calculation in PINT
https://github.com/nanograv/PINT/blob/master/pint/erfautils.py
"""
pint_vels = CartesianRepresentation(*(348.63632871, -212.31704928, -0.60154936), unit=u.m/u.s)
location = EarthLocation(*(5327448.9957829, -1718665.73869569, 3051566.90295403), unit=u.m)
t = Time(2458036.161966612, format='jd')
obsgeopos, obsgeovel = location.get_gcrs_posvel(t)
delta = (obsgeovel-pint_vels).norm()
assert delta < 1*u.cm/u.s
def test_regression_8138():
sc = SkyCoord(1*u.deg, 2*u.deg)
newframe = GCRS()
sc2 = sc.transform_to(newframe)
assert newframe.is_equivalent_frame(sc2.frame)
def test_regression_8276():
from astropy.coordinates import baseframe
with pytest.raises(TypeError) as excinfo:
class MyFrame(BaseCoordinateFrame):
a = QuantityAttribute(unit=u.m)
# note that the remainder of this with clause does not get executed
# because an exception is raised here. A future PR is planned to
# allow the default to be left off, after which the rest of this
# test will get executed, so it is being left in place. See
# https://github.com/astropy/astropy/pull/8300 for more info
        # we save the transform graph so that it doesn't accidentally mess with other tests
old_transform_graph = baseframe.frame_transform_graph
try:
baseframe.frame_transform_graph = copy.copy(baseframe.frame_transform_graph)
# as reported in 8276, this fails right here because registering the
# transform tries to create a frame attribute
@baseframe.frame_transform_graph.transform(FunctionTransform, MyFrame, AltAz)
def trans(my_frame_coord, altaz_frame):
pass
# should also be able to *create* the Frame at this point
MyFrame()
finally:
baseframe.frame_transform_graph = old_transform_graph
assert "missing 1 required positional argument: 'default'" in str(excinfo.value)
def test_regression_8615():
# note this is a "higher-level" symptom of the problem
# _erfa/tests/test_erfa:test_float32_input is testing for, but is kept here
# due to being a more practical version of the issue.
crf = CartesianRepresentation(np.array([3, 0, 4], dtype=float) * u.pc)
srf = SphericalRepresentation.from_cartesian(crf) # does not error in 8615
cr = CartesianRepresentation(np.array([3, 0, 4], dtype='f4') * u.pc)
sr = SphericalRepresentation.from_cartesian(cr) # errors in 8615
assert_quantity_allclose(sr.distance, 5 * u.pc)
assert_quantity_allclose(srf.distance, 5 * u.pc)
|
db83cab250c8b87109ebc70cbf2f6529856270670122eb15eee80157ab8eae5c | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import collections
import copy
import itertools
import re
import warnings
from .card import Card, _pad, KEYWORD_LENGTH, UNDEFINED
from .file import _File
from .util import (encode_ascii, decode_ascii, fileobj_closed,
fileobj_is_binary, path_like)
from ._utils import parse_header
from astropy.utils import isiterable
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.decorators import deprecated_renamed_argument
BLOCK_SIZE = 2880 # the FITS block size
# This regular expression can match a *valid* END card which just consists of
# the string 'END' followed by all spaces, or an *invalid* end card which
# consists of END, followed by any character that is *not* a valid character
# for a valid FITS keyword (that is, this is not a keyword like 'ENDER' which
# starts with 'END' but is not 'END'), followed by any arbitrary bytes. An
# invalid end card may also consist of just 'END' with no trailing bytes.
HEADER_END_RE = re.compile(encode_ascii(
r'(?:(?P<valid>END {77}) *)|(?P<invalid>END$|END {0,76}[^A-Z0-9_-])'))
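# For illustration (hypothetical header fragments, not taken from a real
# file), the first alternative matches a correctly padded END card while the
# second catches common corruptions:
#   b'END' + b' ' * 77      -> 'valid' group
#   b"END = 'oops'" + b' '  -> 'invalid' group (non-keyword char after END)
#   b'ENDER   = 1'          -> no match; ENDER is a legitimate keyword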
# According to the FITS standard the only characters that may appear in a
# header record are the restricted ASCII chars from 0x20 through 0x7E.
VALID_HEADER_CHARS = set(map(chr, range(0x20, 0x7F)))
END_CARD = 'END' + ' ' * 77
__doctest_skip__ = ['Header', 'Header.comments', 'Header.fromtextfile',
'Header.totextfile', 'Header.set', 'Header.update']
class Header:
"""
FITS header class. This class exposes both a dict-like interface and a
list-like interface to FITS headers.
The header may be indexed by keyword and, like a dict, the associated value
will be returned. When the header contains cards with duplicate keywords,
only the value of the first card with the given keyword will be returned.
It is also possible to use a 2-tuple as the index in the form (keyword,
n)--this returns the n-th value with that keyword, in the case where there
are duplicate keywords.
For example::
>>> header['NAXIS']
0
>>> header[('FOO', 1)] # Return the value of the second FOO keyword
'foo'
The header may also be indexed by card number::
>>> header[0] # Return the value of the first card in the header
'T'
Commentary keywords such as HISTORY and COMMENT are special cases: When
indexing the Header object with either 'HISTORY' or 'COMMENT' a list of all
the HISTORY/COMMENT values is returned::
>>> header['HISTORY']
This is the first history entry in this header.
This is the second history entry in this header.
...
See the Astropy documentation for more details on working with headers.
"""
def __init__(self, cards=[], copy=False):
"""
Construct a `Header` from an iterable and/or text file.
Parameters
----------
cards : A list of `Card` objects, optional
The cards to initialize the header with. Also allowed are other
`Header` (or `dict`-like) objects.
.. versionchanged:: 1.2
Allowed ``cards`` to be a `dict`-like object.
copy : bool, optional
If ``True`` copies the ``cards`` if they were another `Header`
instance.
Default is ``False``.
.. versionadded:: 1.3
"""
self.clear()
if isinstance(cards, Header):
if copy:
cards = cards.copy()
cards = cards.cards
elif isinstance(cards, dict):
cards = cards.items()
for card in cards:
self.append(card, end=True)
self._modified = False
def __len__(self):
return len(self._cards)
def __iter__(self):
for card in self._cards:
yield card.keyword
def __contains__(self, keyword):
if keyword in self._keyword_indices or keyword in self._rvkc_indices:
# For the most common case (single, standard form keyword lookup)
# this will work and is an O(1) check. If it fails that doesn't
# guarantee absence, just that we have to perform the full set of
# checks in self._cardindex
return True
try:
self._cardindex(keyword)
except (KeyError, IndexError):
return False
return True
def __getitem__(self, key):
if isinstance(key, slice):
return Header([copy.copy(c) for c in self._cards[key]])
elif self._haswildcard(key):
return Header([copy.copy(self._cards[idx])
for idx in self._wildcardmatch(key)])
elif (isinstance(key, str) and
key.upper() in Card._commentary_keywords):
key = key.upper()
# Special case for commentary cards
return _HeaderCommentaryCards(self, key)
if isinstance(key, tuple):
keyword = key[0]
else:
keyword = key
card = self._cards[self._cardindex(key)]
if card.field_specifier is not None and keyword == card.rawkeyword:
# This is RVKC; if only the top-level keyword was specified return
# the raw value, not the parsed out float value
return card.rawvalue
value = card.value
if value == UNDEFINED:
return None
return value
def __setitem__(self, key, value):
if self._set_slice(key, value, self):
return
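        # A value may be assigned either as a bare scalar or as a
        # (value, comment) tuple, e.g. (an illustrative sketch; the keyword
        # names are arbitrary):
        #     header['OBSERVER'] = 'Edwin Hubble'
        #     header['EXPTIME'] = (300, 'exposure time in seconds')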
if isinstance(value, tuple):
if not (0 < len(value) <= 2):
raise ValueError(
'A Header item may be set with either a scalar value, '
'a 1-tuple containing a scalar value, or a 2-tuple '
'containing a scalar value and comment string.')
if len(value) == 1:
value, comment = value[0], None
if value is None:
value = UNDEFINED
elif len(value) == 2:
value, comment = value
if value is None:
value = UNDEFINED
if comment is None:
comment = ''
else:
comment = None
card = None
if isinstance(key, int):
card = self._cards[key]
elif isinstance(key, tuple):
card = self._cards[self._cardindex(key)]
if value is None:
value = UNDEFINED
if card:
card.value = value
if comment is not None:
card.comment = comment
if card._modified:
self._modified = True
else:
# If we get an IndexError that should be raised; we don't allow
# assignment to non-existing indices
self._update((key, value, comment))
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# This is very inefficient but it's not a commonly used feature.
# If someone out there complains that they make heavy use of slice
# deletions and it's too slow, well, we can worry about it then
            # [the solution is not too complicated--it would be to wait until all
# the cards are deleted before updating _keyword_indices rather
# than updating it once for each card that gets deleted]
if isinstance(key, slice):
indices = range(*key.indices(len(self)))
# If the slice step is backwards we want to reverse it, because
# it will be reversed in a few lines...
if key.step and key.step < 0:
indices = reversed(indices)
else:
indices = self._wildcardmatch(key)
for idx in reversed(indices):
del self[idx]
return
elif isinstance(key, str):
# delete ALL cards with the same keyword name
key = Card.normalize_keyword(key)
indices = self._keyword_indices
if key not in self._keyword_indices:
indices = self._rvkc_indices
if key not in indices:
# if keyword is not present raise KeyError.
                # To delete a keyword without caring whether it is present,
                # Header.remove(keyword) can be used with the optional argument ignore_missing=True
raise KeyError("Keyword '{}' not found.".format(key))
for idx in reversed(indices[key]):
# Have to copy the indices list since it will be modified below
del self[idx]
return
idx = self._cardindex(key)
card = self._cards[idx]
keyword = card.keyword
del self._cards[idx]
keyword = Card.normalize_keyword(keyword)
indices = self._keyword_indices[keyword]
indices.remove(idx)
if not indices:
del self._keyword_indices[keyword]
# Also update RVKC indices if necessary :/
if card.field_specifier is not None:
indices = self._rvkc_indices[card.rawkeyword]
indices.remove(idx)
if not indices:
del self._rvkc_indices[card.rawkeyword]
# We also need to update all other indices
self._updateindices(idx, increment=False)
self._modified = True
def __repr__(self):
return self.tostring(sep='\n', endcard=False, padding=False)
def __str__(self):
return self.tostring()
def __eq__(self, other):
"""
Two Headers are equal only if they have the exact same string
representation.
"""
return str(self) == str(other)
def __add__(self, other):
temp = self.copy(strip=False)
temp.extend(other)
return temp
def __iadd__(self, other):
self.extend(other)
return self
def _ipython_key_completions_(self):
return self.__iter__()
@property
def cards(self):
"""
The underlying physical cards that make up this Header; it can be
looked at, but it should not be modified directly.
"""
return _CardAccessor(self)
@property
def comments(self):
"""
View the comments associated with each keyword, if any.
For example, to see the comment on the NAXIS keyword:
>>> header.comments['NAXIS']
number of data axes
Comments can also be updated through this interface:
>>> header.comments['NAXIS'] = 'Number of data axes'
"""
return _HeaderComments(self)
@property
def _modified(self):
"""
Whether or not the header has been modified; this is a property so that
it can also check each card for modifications--cards may have been
        modified directly without the containing header otherwise knowing.
"""
modified_cards = any(c._modified for c in self._cards)
if modified_cards:
# If any cards were modified then by definition the header was
# modified
self.__dict__['_modified'] = True
return self.__dict__['_modified']
@_modified.setter
def _modified(self, val):
self.__dict__['_modified'] = val
@classmethod
def fromstring(cls, data, sep=''):
"""
Creates an HDU header from a byte string containing the entire header
data.
Parameters
----------
data : str or bytes
String or bytes containing the entire header. In the case of bytes
they will be decoded using latin-1 (only plain ASCII characters are
allowed in FITS headers but latin-1 allows us to retain any invalid
bytes that might appear in malformatted FITS files).
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file). In general this is only used in cases where a header was
printed as text (e.g. with newlines after each card) and you want
to create a new `Header` from it by copy/pasting.
Examples
--------
>>> from astropy.io.fits import Header
>>> hdr = Header({'SIMPLE': True})
>>> Header.fromstring(hdr.tostring()) == hdr
True
If you want to create a `Header` from printed text it's not necessary
to have the exact binary structure as it would appear in a FITS file,
with the full 80 byte card length. Rather, each "card" can end in a
newline and does not have to be padded out to a full card length as
long as it "looks like" a FITS header:
>>> hdr = Header.fromstring(\"\"\"\\
... SIMPLE = T / conforms to FITS standard
... BITPIX = 8 / array data type
... NAXIS = 0 / number of array dimensions
... EXTEND = T
... \"\"\", sep='\\n')
>>> hdr['SIMPLE']
True
>>> hdr['BITPIX']
8
>>> len(hdr)
4
Returns
-------
header
A new `Header` instance.
"""
cards = []
# If the card separator contains characters that may validly appear in
# a card, the only way to unambiguously distinguish between cards is to
# require that they be Card.length long. However, if the separator
# contains non-valid characters (namely \n) the cards may be split
# immediately at the separator
require_full_cardlength = set(sep).issubset(VALID_HEADER_CHARS)
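        # For example, sep='' (raw FITS data) leaves this True, forcing fixed
        # 80-character card boundaries, while sep='\n' (header printed as
        # text) makes it False because '\n' is not a valid header character.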
if isinstance(data, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place--accepting it here still gives us the
# opportunity to display warnings later during validation
CONTINUE = b'CONTINUE'
END = b'END'
end_card = END_CARD.encode('ascii')
sep = sep.encode('latin1')
empty = b''
else:
CONTINUE = 'CONTINUE'
END = 'END'
end_card = END_CARD
empty = ''
# Split the header into individual cards
idx = 0
image = []
while idx < len(data):
if require_full_cardlength:
end_idx = idx + Card.length
else:
try:
end_idx = data.index(sep, idx)
except ValueError:
end_idx = len(data)
next_image = data[idx:end_idx]
idx = end_idx + len(sep)
if image:
if next_image[:8] == CONTINUE:
image.append(next_image)
continue
cards.append(Card.fromstring(empty.join(image)))
if require_full_cardlength:
if next_image == end_card:
image = []
break
else:
if next_image.split(sep)[0].rstrip() == END:
image = []
break
image = [next_image]
# Add the last image that was found before the end, if any
if image:
cards.append(Card.fromstring(empty.join(image)))
return cls._fromcards(cards)
@classmethod
def fromfile(cls, fileobj, sep='', endcard=True, padding=True):
"""
Similar to :meth:`Header.fromstring`, but reads the header string from
a given file-like object or filename.
Parameters
----------
fileobj : str, file-like
A filename or an open file-like object from which a FITS header is
to be read. For open file handles the file pointer must be at the
beginning of the header.
sep : str, optional
The string separating cards from each other, such as a newline. By
default there is no card separator (as is the case in a raw FITS
file).
endcard : bool, optional
If True (the default) the header must end with an END card in order
to be considered valid. If an END card is not found an
`OSError` is raised.
padding : bool, optional
If True (the default) the header will be required to be padded out
to a multiple of 2880, the FITS header block size. Otherwise any
padding, or lack thereof, is ignored.
Returns
-------
header
A new `Header` instance.
"""
close_file = False
if isinstance(fileobj, path_like):
# If sep is non-empty we are trying to read a header printed to a
# text file, so open in text mode by default to support newline
# handling; if a binary-mode file object is passed in, the user is
# then on their own w.r.t. newline handling.
#
# Otherwise assume we are reading from an actual FITS file and open
# in binary mode.
if sep:
fileobj = open(fileobj, 'r', encoding='latin1')
else:
fileobj = open(fileobj, 'rb')
close_file = True
try:
is_binary = fileobj_is_binary(fileobj)
def block_iter(nbytes):
while True:
data = fileobj.read(nbytes)
if data:
yield data
else:
break
return cls._from_blocks(block_iter, is_binary, sep, endcard,
padding)[1]
finally:
if close_file:
fileobj.close()
@classmethod
def _fromcards(cls, cards):
header = cls()
for idx, card in enumerate(cards):
header._cards.append(card)
keyword = Card.normalize_keyword(card.keyword)
header._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
header._rvkc_indices[card.rawkeyword].append(idx)
header._modified = False
return header
@classmethod
def _from_blocks(cls, block_iter, is_binary, sep, endcard, padding):
"""
The meat of `Header.fromfile`; in a separate method so that
`Header.fromfile` itself is just responsible for wrapping file
handling. Also used by `_BaseHDU.fromstring`.
``block_iter`` should be a callable which, given a block size n
(typically 2880 bytes as used by the FITS standard) returns an iterator
of byte strings of that block size.
``is_binary`` specifies whether the returned blocks are bytes or text
Returns both the entire header *string*, and the `Header` object
returned by Header.fromstring on that string.
"""
actual_block_size = _block_size(sep)
clen = Card.length + len(sep)
blocks = block_iter(actual_block_size)
# Read the first header block.
try:
block = next(blocks)
except StopIteration:
raise EOFError()
if not is_binary:
# TODO: There needs to be error handling at *this* level for
# non-ASCII characters; maybe at this stage decoding latin-1 might
# be safer
block = encode_ascii(block)
read_blocks = []
is_eof = False
end_found = False
# continue reading header blocks until END card or EOF is reached
while True:
# find the END card
end_found, block = cls._find_end_card(block, clen)
read_blocks.append(decode_ascii(block))
if end_found:
break
try:
block = next(blocks)
except StopIteration:
is_eof = True
break
if not block:
is_eof = True
break
if not is_binary:
block = encode_ascii(block)
if not end_found and is_eof and endcard:
# TODO: Pass this error to validation framework as an ERROR,
# rather than raising an exception
raise OSError('Header missing END card.')
header_str = ''.join(read_blocks)
_check_padding(header_str, actual_block_size, is_eof,
check_block_size=padding)
return header_str, cls.fromstring(header_str, sep=sep)
@classmethod
def _find_end_card(cls, block, card_len):
"""
Utility method to search a header block for the END card and handle
invalid END cards.
        This method can also return a modified copy of the input header block
in case an invalid end card needs to be sanitized.
"""
for mo in HEADER_END_RE.finditer(block):
# Ensure the END card was found, and it started on the
# boundary of a new card (see ticket #142)
if mo.start() % card_len != 0:
continue
# This must be the last header block, otherwise the
# file is malformatted
if mo.group('invalid'):
offset = mo.start()
trailing = block[offset + 3:offset + card_len - 3].rstrip()
if trailing:
trailing = repr(trailing).lstrip('ub')
# TODO: Pass this warning up to the validation framework
warnings.warn(
'Unexpected bytes trailing END keyword: {0}; these '
'bytes will be replaced with spaces on write.'.format(
trailing), AstropyUserWarning)
else:
# TODO: Pass this warning up to the validation framework
warnings.warn(
'Missing padding to end of the FITS block after the '
'END keyword; additional spaces will be appended to '
'the file upon writing to pad out to {0} '
'bytes.'.format(BLOCK_SIZE), AstropyUserWarning)
# Sanitize out invalid END card now that the appropriate
# warnings have been issued
block = (block[:offset] + encode_ascii(END_CARD) +
block[offset + len(END_CARD):])
return True, block
return False, block
def tostring(self, sep='', endcard=True, padding=True):
r"""
Returns a string representation of the header.
By default this uses no separator between cards, adds the END card, and
pads the string with spaces to the next multiple of 2880 bytes. That
is, it returns the header exactly as it would appear in a FITS file.
Parameters
----------
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If True (default) adds the END card to the end of the header
string
padding : bool, optional
If True (default) pads the string with spaces out to the next
multiple of 2880 characters
Returns
-------
s : str
A string representing a FITS header.
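        For example, assuming `Header` has been imported from
        `astropy.io.fits`, a small header can be rendered one card per line
        without padding::
            >>> hdr = Header([('SIMPLE', True), ('BITPIX', 8)])
            >>> text = hdr.tostring(sep='\n', padding=False)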
"""
lines = []
for card in self._cards:
s = str(card)
# Cards with CONTINUE cards may be longer than 80 chars; so break
# them into multiple lines
while s:
lines.append(s[:Card.length])
s = s[Card.length:]
s = sep.join(lines)
if endcard:
s += sep + _pad('END')
if padding:
s += ' ' * _pad_length(len(s))
return s
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def tofile(self, fileobj, sep='', endcard=True, padding=True,
overwrite=False):
r"""
Writes the header to file or file-like object.
By default this writes the header exactly as it would be written to a
FITS file, with the END card included and padding to the next multiple
of 2880 bytes. However, aspects of this may be controlled.
Parameters
----------
fileobj : str, file, optional
Either the pathname of a file, or an open file handle or file-like
object
sep : str, optional
The character or string with which to separate cards. By default
there is no separator, but one could use ``'\\n'``, for example, to
separate each card with a new line
endcard : bool, optional
If `True` (default) adds the END card to the end of the header
string
padding : bool, optional
If `True` (default) pads the string with spaces out to the next
multiple of 2880 characters
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
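        For example, assuming ``hdr`` is an existing `Header` and
        ``'header.txt'`` is a writable placeholder path, the header could be
        dumped as plain text with::
            >>> hdr.tofile('header.txt', sep='\n', endcard=False,
            ...            padding=False, overwrite=True)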
"""
close_file = fileobj_closed(fileobj)
if not isinstance(fileobj, _File):
fileobj = _File(fileobj, mode='ostream', overwrite=overwrite)
try:
blocks = self.tostring(sep=sep, endcard=endcard, padding=padding)
actual_block_size = _block_size(sep)
if padding and len(blocks) % actual_block_size != 0:
raise OSError(
'Header size ({}) is not a multiple of block '
'size ({}).'.format(
len(blocks) - actual_block_size + BLOCK_SIZE,
BLOCK_SIZE))
if not fileobj.simulateonly:
fileobj.flush()
try:
offset = fileobj.tell()
except (AttributeError, OSError):
offset = 0
fileobj.write(blocks.encode('ascii'))
fileobj.flush()
finally:
if close_file:
fileobj.close()
@classmethod
def fromtextfile(cls, fileobj, endcard=False):
"""
Read a header from a simple text file or file-like object.
Equivalent to::
>>> Header.fromfile(fileobj, sep='\\n', endcard=False,
... padding=False)
See Also
--------
fromfile
"""
return cls.fromfile(fileobj, sep='\n', endcard=endcard, padding=False)
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def totextfile(self, fileobj, endcard=False, overwrite=False):
"""
Write the header as text to a file or a file-like object.
Equivalent to::
>>> Header.tofile(fileobj, sep='\\n', endcard=False,
... padding=False, overwrite=overwrite)
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
See Also
--------
tofile
"""
self.tofile(fileobj, sep='\n', endcard=endcard, padding=False,
overwrite=overwrite)
def clear(self):
"""
Remove all cards from the header.
"""
self._cards = []
self._keyword_indices = collections.defaultdict(list)
self._rvkc_indices = collections.defaultdict(list)
def copy(self, strip=False):
"""
Make a copy of the :class:`Header`.
.. versionchanged:: 1.3
`copy.copy` and `copy.deepcopy` on a `Header` will call this
method.
Parameters
----------
strip : bool, optional
If `True`, strip any headers that are specific to one of the
standard HDU types, so that this header can be used in a different
HDU.
Returns
-------
header
A new :class:`Header` instance.
"""
tmp = Header((copy.copy(card) for card in self._cards))
if strip:
tmp._strip()
return tmp
def __copy__(self):
return self.copy()
def __deepcopy__(self, *args, **kwargs):
return self.copy()
@classmethod
def fromkeys(cls, iterable, value=None):
"""
Similar to :meth:`dict.fromkeys`--creates a new `Header` from an
iterable of keywords and an optional default value.
This method is not likely to be particularly useful for creating real
world FITS headers, but it is useful for testing.
Parameters
----------
iterable
Any iterable that returns strings representing FITS keywords.
value : optional
A default value to assign to each keyword; must be a valid type for
FITS keywords.
Returns
-------
header
A new `Header` instance.
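        For example, a throwaway header containing a few undefined-value
        cards can be built with::
            >>> hdr = Header.fromkeys(['OBSERVER', 'OBJECT', 'EXPTIME'])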
"""
d = cls()
if not isinstance(value, tuple):
value = (value,)
for key in iterable:
d.append((key,) + value)
return d
def get(self, key, default=None):
"""
Similar to :meth:`dict.get`--returns the value associated with keyword
in the header, or a default value if the keyword is not found.
Parameters
----------
key : str
A keyword that may or may not be in the header.
default : optional
A default value to return if the keyword is not found in the
header.
Returns
-------
value
The value associated with the given keyword, or the default value
if the keyword is not in the header.
"""
try:
return self[key]
except (KeyError, IndexError):
return default
def set(self, keyword, value=None, comment=None, before=None, after=None):
"""
Set the value and/or comment and/or position of a specified keyword.
If the keyword does not already exist in the header, a new keyword is
created in the specified position, or appended to the end of the header
if no position is specified.
This method is similar to :meth:`Header.update` prior to Astropy v0.1.
.. note::
It should be noted that ``header.set(keyword, value)`` and
``header.set(keyword, value, comment)`` are equivalent to
``header[keyword] = value`` and
``header[keyword] = (value, comment)`` respectively.
New keywords can also be inserted relative to existing keywords
using, for example::
>>> header.insert('NAXIS1', ('NAXIS', 2, 'Number of axes'))
to insert before an existing keyword, or::
>>> header.insert('NAXIS', ('NAXIS1', 4096), after=True)
to insert after an existing keyword.
The only advantage of using :meth:`Header.set` is that it
easily replaces the old usage of :meth:`Header.update` both
conceptually and in terms of function signature.
Parameters
----------
keyword : str
A header keyword
value : str, optional
The value to set for the given keyword; if None the existing value
is kept, but '' may be used to set a blank value
comment : str, optional
The comment to set for the given keyword; if None the existing
comment is kept, but ``''`` may be used to set a blank comment
before : str, int, optional
Name of the keyword, or index of the `Card` before which this card
should be located in the header. The argument ``before`` takes
precedence over ``after`` if both specified.
after : str, int, optional
Name of the keyword, or index of the `Card` after which this card
should be located in the header.
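        For example, assuming ``hdr`` is an existing `Header`, a value and
        comment can be set in a single call, and a further card can be
        positioned relative to it::
            >>> hdr.set('OBJECT', 'M31', 'name of observed object')
            >>> hdr.set('OBSERVER', 'unknown', after='OBJECT')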
"""
# Create a temporary card that looks like the one being set; if the
# temporary card turns out to be a RVKC this will make it easier to
# deal with the idiosyncrasies thereof
        # Don't try to make a temporary card though if the keyword looks like
# it might be a HIERARCH card or is otherwise invalid--this step is
# only for validating RVKCs.
if (len(keyword) <= KEYWORD_LENGTH and
Card._keywd_FSC_RE.match(keyword) and
keyword not in self._keyword_indices):
new_card = Card(keyword, value, comment)
new_keyword = new_card.keyword
else:
new_keyword = keyword
if (new_keyword not in Card._commentary_keywords and
new_keyword in self):
if comment is None:
comment = self.comments[keyword]
if value is None:
value = self[keyword]
self[keyword] = (value, comment)
if before is not None or after is not None:
card = self._cards[self._cardindex(keyword)]
self._relativeinsert(card, before=before, after=after,
replace=True)
elif before is not None or after is not None:
self._relativeinsert((keyword, value, comment), before=before,
after=after)
else:
self[keyword] = (value, comment)
def items(self):
"""Like :meth:`dict.items`."""
for card in self._cards:
yield (card.keyword, card.value)
def keys(self):
"""
Like :meth:`dict.keys`--iterating directly over the `Header`
instance has the same behavior.
"""
for card in self._cards:
yield card.keyword
def values(self):
"""Like :meth:`dict.values`."""
for card in self._cards:
yield card.value
def pop(self, *args):
"""
Works like :meth:`list.pop` if no arguments or an index argument are
supplied; otherwise works like :meth:`dict.pop`.
"""
if len(args) > 2:
raise TypeError('Header.pop expected at most 2 arguments, got '
'{}'.format(len(args)))
if len(args) == 0:
key = -1
else:
key = args[0]
try:
value = self[key]
except (KeyError, IndexError):
if len(args) == 2:
return args[1]
raise
del self[key]
return value
def popitem(self):
"""Similar to :meth:`dict.popitem`."""
try:
k, v = next(self.items())
except StopIteration:
raise KeyError('Header is empty')
del self[k]
return k, v
def setdefault(self, key, default=None):
"""Similar to :meth:`dict.setdefault`."""
try:
return self[key]
except (KeyError, IndexError):
self[key] = default
return default
def update(self, *args, **kwargs):
"""
Update the Header with new keyword values, updating the values of
existing keywords and appending new keywords otherwise; similar to
`dict.update`.
`update` accepts either a dict-like object or an iterable. In the
former case the keys must be header keywords and the values may be
either scalar values or (value, comment) tuples. In the case of an
iterable the items must be (keyword, value) tuples or (keyword, value,
comment) tuples.
        Arbitrary keyword arguments are also accepted, in which case update() is
called again with the kwargs dict as its only argument. That is,
::
>>> header.update(NAXIS1=100, NAXIS2=100)
is equivalent to::
header.update({'NAXIS1': 100, 'NAXIS2': 100})
.. warning::
As this method works similarly to `dict.update` it is very
different from the ``Header.update()`` method in Astropy v0.1.
Use of the old API was
**deprecated** for a long time and is now removed. Most uses of the
old API can be replaced as follows:
* Replace ::
header.update(keyword, value)
with ::
header[keyword] = value
* Replace ::
header.update(keyword, value, comment=comment)
with ::
header[keyword] = (value, comment)
* Replace ::
header.update(keyword, value, before=before_keyword)
with ::
header.insert(before_keyword, (keyword, value))
* Replace ::
header.update(keyword, value, after=after_keyword)
with ::
header.insert(after_keyword, (keyword, value),
after=True)
See also :meth:`Header.set` which is a new method that provides an
interface similar to the old ``Header.update()`` and may help make
transition a little easier.
"""
if args:
other = args[0]
else:
other = None
def update_from_dict(k, v):
if not isinstance(v, tuple):
card = Card(k, v)
elif 0 < len(v) <= 2:
card = Card(*((k,) + v))
else:
raise ValueError(
'Header update value for key %r is invalid; the '
'value must be either a scalar, a 1-tuple '
'containing the scalar value, or a 2-tuple '
'containing the value and a comment string.' % k)
self._update(card)
if other is None:
pass
elif isinstance(other, Header):
for card in other.cards:
self._update(card)
elif hasattr(other, 'items'):
for k, v in other.items():
update_from_dict(k, v)
elif hasattr(other, 'keys'):
for k in other.keys():
update_from_dict(k, other[k])
else:
for idx, card in enumerate(other):
if isinstance(card, Card):
self._update(card)
elif isinstance(card, tuple) and (1 < len(card) <= 3):
self._update(Card(*card))
else:
raise ValueError(
'Header update sequence item #{} is invalid; '
'the item must either be a 2-tuple containing '
'a keyword and value, or a 3-tuple containing '
'a keyword, value, and comment string.'.format(idx))
if kwargs:
self.update(kwargs)
def append(self, card=None, useblanks=True, bottom=False, end=False):
"""
Appends a new keyword+value card to the end of the Header, similar
to `list.append`.
By default if the last cards in the Header have commentary keywords,
this will append the new keyword before the commentary (unless the new
keyword is also commentary).
Also differs from `list.append` in that it can be called with no
arguments: In this case a blank card is appended to the end of the
        Header.  In this case all the keyword arguments are ignored.
Parameters
----------
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple representing a
single header card; the comment is optional in which case a
2-tuple may be used
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
bottom : bool, optional
If True, instead of appending after the last non-commentary card,
append after the last non-blank card.
end : bool, optional
If True, ignore the useblanks and bottom options, and append at the
very end of the Header.
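        For example, assuming ``hdr`` is an existing `Header`::
            >>> hdr.append(('DATAMAX', 1000.0, 'maximum data value'))
            >>> hdr.append()   # appends a blank card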
"""
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
'The value appended to a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
if not end and card.is_blank:
# Blank cards should always just be appended to the end
end = True
if end:
self._cards.append(card)
idx = len(self._cards) - 1
else:
idx = len(self._cards) - 1
while idx >= 0 and self._cards[idx].is_blank:
idx -= 1
if not bottom and card.keyword not in Card._commentary_keywords:
while (idx >= 0 and
self._cards[idx].keyword in Card._commentary_keywords):
idx -= 1
idx += 1
self._cards.insert(idx, card)
self._updateindices(idx)
keyword = Card.normalize_keyword(card.keyword)
self._keyword_indices[keyword].append(idx)
if card.field_specifier is not None:
self._rvkc_indices[card.rawkeyword].append(idx)
if not end:
# If the appended card was a commentary card, and it was appended
# before existing cards with the same keyword, the indices for
# cards with that keyword may have changed
if not bottom and card.keyword in Card._commentary_keywords:
self._keyword_indices[keyword].sort()
            # Finally, if useblanks, delete blank cards from the end
if useblanks and self._countblanks():
                # Don't do this unless there is at least one blank card at the end
# of the header; we need to convert the card to its string
# image to see how long it is. In the vast majority of cases
# this will just be 80 (Card.length) but it may be longer for
# CONTINUE cards
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def extend(self, cards, strip=True, unique=False, update=False,
update_first=False, useblanks=True, bottom=False, end=False):
"""
Appends multiple keyword+value cards to the end of the header, similar
to `list.extend`.
Parameters
----------
cards : iterable
An iterable of (keyword, value, [comment]) tuples; see
`Header.append`.
strip : bool, optional
Remove any keywords that have meaning only to specific types of
HDUs, so that only more general keywords are added from extension
Header or Card list (default: `True`).
unique : bool, optional
If `True`, ensures that no duplicate keywords are appended;
keywords already in this header are simply discarded. The
exception is commentary keywords (COMMENT, HISTORY, etc.): they are
only treated as duplicates if their values match.
update : bool, optional
If `True`, update the current header with the values and comments
from duplicate keywords in the input header. This supersedes the
``unique`` argument. Commentary keywords are treated the same as
if ``unique=True``.
update_first : bool, optional
If the first keyword in the header is 'SIMPLE', and the first
keyword in the input header is 'XTENSION', the 'SIMPLE' keyword is
replaced by the 'XTENSION' keyword. Likewise if the first keyword
in the header is 'XTENSION' and the first keyword in the input
header is 'SIMPLE', the 'XTENSION' keyword is replaced by the
'SIMPLE' keyword. This behavior is otherwise dumb as to whether or
not the resulting header is a valid primary or extension header.
This is mostly provided to support backwards compatibility with the
old ``Header.fromTxtFile`` method, and only applies if
``update=True``.
useblanks, bottom, end : bool, optional
These arguments are passed to :meth:`Header.append` while appending
new cards to the header.
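        For example, assuming ``hdr`` and ``other_hdr`` are existing `Header`
        instances, the cards of the latter could be merged into the former
        while skipping keywords already present::
            >>> hdr.extend(other_hdr, unique=True)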
"""
temp = Header(cards)
if strip:
temp._strip()
if len(self):
first = self._cards[0].keyword
else:
first = None
# We don't immediately modify the header, because first we need to sift
# out any duplicates in the new header prior to adding them to the
# existing header, but while *allowing* duplicates from the header
# being extended from (see ticket #156)
extend_cards = []
for idx, card in enumerate(temp.cards):
keyword = card.keyword
if keyword not in Card._commentary_keywords:
if unique and not update and keyword in self:
continue
elif update:
if idx == 0 and update_first:
# Dumbly update the first keyword to either SIMPLE or
                        # XTENSION as the case may be, as was the case in
# Header.fromTxtFile
if ((keyword == 'SIMPLE' and first == 'XTENSION') or
(keyword == 'XTENSION' and first == 'SIMPLE')):
del self[0]
self.insert(0, card)
else:
self[keyword] = (card.value, card.comment)
elif keyword in self:
self[keyword] = (card.value, card.comment)
else:
extend_cards.append(card)
else:
extend_cards.append(card)
else:
if (unique or update) and keyword in self:
if card.is_blank:
extend_cards.append(card)
continue
for value in self[keyword]:
if value == card.value:
break
else:
extend_cards.append(card)
else:
extend_cards.append(card)
for card in extend_cards:
self.append(card, useblanks=useblanks, bottom=bottom, end=end)
def count(self, keyword):
"""
Returns the count of the given keyword in the header, similar to
`list.count` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword to count instances of in the header
"""
keyword = Card.normalize_keyword(keyword)
# We have to look before we leap, since otherwise _keyword_indices,
# being a defaultdict, will create an entry for the nonexistent keyword
if keyword not in self._keyword_indices:
raise KeyError("Keyword {!r} not found.".format(keyword))
return len(self._keyword_indices[keyword])
def index(self, keyword, start=None, stop=None):
"""
        Returns the index of the first instance of the given keyword in the
header, similar to `list.index` if the Header object is treated as a
list of keywords.
Parameters
----------
keyword : str
The keyword to look up in the list of all keywords in the header
start : int, optional
The lower bound for the index
stop : int, optional
The upper bound for the index
"""
if start is None:
start = 0
if stop is None:
stop = len(self._cards)
if stop < start:
step = -1
else:
step = 1
norm_keyword = Card.normalize_keyword(keyword)
for idx in range(start, stop, step):
if self._cards[idx].keyword.upper() == norm_keyword:
return idx
else:
            raise ValueError('The keyword {!r} is not in the '
                             'header.'.format(keyword))
def insert(self, key, card, useblanks=True, after=False):
"""
Inserts a new keyword+value card into the Header at a given location,
similar to `list.insert`.
Parameters
----------
key : int, str, or tuple
The index into the list of header keywords before which the
new keyword should be inserted, or the name of a keyword before
which the new keyword should be inserted. Can also accept a
(keyword, index) tuple for inserting around duplicate keywords.
card : str, tuple
A keyword or a (keyword, value, [comment]) tuple; see
`Header.append`
useblanks : bool, optional
If there are blank cards at the end of the Header, replace the
first blank card so that the total number of cards in the Header
does not increase. Otherwise preserve the number of blank cards.
after : bool, optional
If set to `True`, insert *after* the specified index or keyword,
rather than before it. Defaults to `False`.
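        For example, assuming ``hdr`` is an existing `Header` that already
        contains a ``NAXIS`` card::
            >>> hdr.insert('NAXIS', ('COMMENT', 'inserted before NAXIS'))
            >>> hdr.insert('NAXIS', ('BUNIT', 'adu'), after=True)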
"""
if not isinstance(key, int):
# Don't pass through ints to _cardindex because it will not take
# kindly to indices outside the existing number of cards in the
# header, which insert needs to be able to support (for example
# when inserting into empty headers)
idx = self._cardindex(key)
else:
idx = key
if after:
if idx == -1:
idx = len(self._cards)
else:
idx += 1
if idx >= len(self._cards):
# This is just an append (Though it must be an append absolutely to
# the bottom, ignoring blanks, etc.--the point of the insert method
# is that you get exactly what you asked for with no surprises)
self.append(card, end=True)
return
if isinstance(card, str):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
'The value inserted into a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: {!r}'.format(card))
self._cards.insert(idx, card)
keyword = card.keyword
# If idx was < 0, determine the actual index according to the rules
# used by list.insert()
if idx < 0:
idx += len(self._cards) - 1
if idx < 0:
idx = 0
# All the keyword indices above the insertion point must be updated
self._updateindices(idx)
keyword = Card.normalize_keyword(keyword)
self._keyword_indices[keyword].append(idx)
count = len(self._keyword_indices[keyword])
if count > 1:
# There were already keywords with this same name
if keyword not in Card._commentary_keywords:
warnings.warn(
'A {!r} keyword already exists in this header. Inserting '
'duplicate keyword.'.format(keyword), AstropyUserWarning)
self._keyword_indices[keyword].sort()
if card.field_specifier is not None:
# Update the index of RVKC as well
rvkc_indices = self._rvkc_indices[card.rawkeyword]
rvkc_indices.append(idx)
rvkc_indices.sort()
if useblanks:
self._useblanks(len(str(card)) // Card.length)
self._modified = True
def remove(self, keyword, ignore_missing=False, remove_all=False):
"""
Removes the first instance of the given keyword from the header similar
to `list.remove` if the Header object is treated as a list of keywords.
Parameters
----------
keyword : str
The keyword of which to remove the first instance in the header.
ignore_missing : bool, optional
When True, ignores missing keywords. Otherwise, if the keyword
is not present in the header a KeyError is raised.
remove_all : bool, optional
When True, all instances of keyword will be removed.
Otherwise only the first instance of the given keyword is removed.
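        For example, assuming ``hdr`` is an existing `Header`, every
        ``HISTORY`` card could be removed without raising when none exist::
            >>> hdr.remove('HISTORY', ignore_missing=True, remove_all=True)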
"""
keyword = Card.normalize_keyword(keyword)
if keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
if remove_all:
while keyword in self._keyword_indices:
del self[self._keyword_indices[keyword][0]]
elif not ignore_missing:
raise KeyError("Keyword '{}' not found.".format(keyword))
def rename_keyword(self, oldkeyword, newkeyword, force=False):
"""
Rename a card's keyword in the header.
Parameters
----------
oldkeyword : str or int
Old keyword or card index
newkeyword : str
New keyword
force : bool, optional
When `True`, if the new keyword already exists in the header, force
the creation of a duplicate keyword. Otherwise a
`ValueError` is raised.
"""
oldkeyword = Card.normalize_keyword(oldkeyword)
newkeyword = Card.normalize_keyword(newkeyword)
if newkeyword == 'CONTINUE':
raise ValueError('Can not rename to CONTINUE')
if (newkeyword in Card._commentary_keywords or
oldkeyword in Card._commentary_keywords):
if not (newkeyword in Card._commentary_keywords and
oldkeyword in Card._commentary_keywords):
raise ValueError('Regular and commentary keys can not be '
'renamed to each other.')
elif not force and newkeyword in self:
raise ValueError('Intended keyword {} already exists in header.'
.format(newkeyword))
idx = self.index(oldkeyword)
card = self._cards[idx]
del self[idx]
self.insert(idx, (newkeyword, card.value, card.comment))
def add_history(self, value, before=None, after=None):
"""
Add a ``HISTORY`` card.
Parameters
----------
value : str
History text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('HISTORY', value, before=before, after=after)
def add_comment(self, value, before=None, after=None):
"""
Add a ``COMMENT`` card.
Parameters
----------
value : str
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('COMMENT', value, before=before, after=after)
def add_blank(self, value='', before=None, after=None):
"""
Add a blank card.
Parameters
----------
value : str, optional
Text to be added.
before : str or int, optional
Same as in `Header.update`
after : str or int, optional
Same as in `Header.update`
"""
self._add_commentary('', value, before=before, after=after)
def _update(self, card):
"""
The real update code. If keyword already exists, its value and/or
comment will be updated. Otherwise a new card will be appended.
This will not create a duplicate keyword except in the case of
commentary cards. The only other way to force creation of a duplicate
is to use the insert(), append(), or extend() methods.
"""
keyword, value, comment = card
# Lookups for existing/known keywords are case-insensitive
keyword = keyword.upper()
if keyword.startswith('HIERARCH '):
keyword = keyword[9:]
if (keyword not in Card._commentary_keywords and
keyword in self._keyword_indices):
# Easy; just update the value/comment
idx = self._keyword_indices[keyword][0]
existing_card = self._cards[idx]
existing_card.value = value
if comment is not None:
# '' should be used to explicitly blank a comment
existing_card.comment = comment
if existing_card._modified:
self._modified = True
elif keyword in Card._commentary_keywords:
cards = self._splitcommentary(keyword, value)
if keyword in self._keyword_indices:
# Append after the last keyword of the same type
idx = self.index(keyword, start=len(self) - 1, stop=-1)
isblank = not (keyword or value or comment)
for c in reversed(cards):
self.insert(idx + 1, c, useblanks=(not isblank))
else:
for c in cards:
self.append(c, bottom=True)
else:
# A new keyword! self.append() will handle updating _modified
self.append(card)
def _cardindex(self, key):
"""Returns an index into the ._cards list given a valid lookup key."""
# This used to just set key = (key, 0) and then go on to act as if the
# user passed in a tuple, but it's much more common to just be given a
# string as the key, so optimize more for that case
if isinstance(key, str):
keyword = key
n = 0
elif isinstance(key, int):
# If < 0, determine the actual index
if key < 0:
key += len(self._cards)
if key < 0 or key >= len(self._cards):
raise IndexError('Header index out of range.')
return key
elif isinstance(key, slice):
return key
elif isinstance(key, tuple):
if (len(key) != 2 or not isinstance(key[0], str) or
not isinstance(key[1], int)):
raise ValueError(
'Tuple indices must be 2-tuples consisting of a '
'keyword string and an integer index.')
keyword, n = key
else:
raise ValueError(
'Header indices must be either a string, a 2-tuple, or '
'an integer.')
keyword = Card.normalize_keyword(keyword)
# Returns the index into _cards for the n-th card with the given
# keyword (where n is 0-based)
indices = self._keyword_indices.get(keyword, None)
if keyword and not indices:
if len(keyword) > KEYWORD_LENGTH or '.' in keyword:
raise KeyError("Keyword {!r} not found.".format(keyword))
else:
# Maybe it's a RVKC?
indices = self._rvkc_indices.get(keyword, None)
if not indices:
raise KeyError("Keyword {!r} not found.".format(keyword))
try:
return indices[n]
except IndexError:
raise IndexError('There are only {} {!r} cards in the '
'header.'.format(len(indices), keyword))
def _keyword_from_index(self, idx):
"""
Given an integer index, return the (keyword, repeat) tuple that index
refers to. For most keywords the repeat will always be zero, but it
may be greater than zero for keywords that are duplicated (especially
commentary keywords).
In a sense this is the inverse of self.index, except that it also
supports duplicates.
"""
if idx < 0:
idx += len(self._cards)
keyword = self._cards[idx].keyword
keyword = Card.normalize_keyword(keyword)
repeat = self._keyword_indices[keyword].index(idx)
return keyword, repeat
def _relativeinsert(self, card, before=None, after=None, replace=False):
"""
Inserts a new card before or after an existing card; used to
implement support for the legacy before/after keyword arguments to
Header.update().
If replace=True, move an existing card with the same keyword.
"""
if before is None:
insertionkey = after
else:
insertionkey = before
def get_insertion_idx():
if not (isinstance(insertionkey, int) and
insertionkey >= len(self._cards)):
idx = self._cardindex(insertionkey)
else:
idx = insertionkey
if before is None:
idx += 1
return idx
if replace:
# The card presumably already exists somewhere in the header.
# Check whether or not we actually have to move it; if it does need
# to be moved we just delete it and then it will be reinserted
# below
old_idx = self._cardindex(card.keyword)
insertion_idx = get_insertion_idx()
if (insertion_idx >= len(self._cards) and
old_idx == len(self._cards) - 1):
# The card would be appended to the end, but it's already at
# the end
return
if before is not None:
if old_idx == insertion_idx - 1:
return
elif after is not None and old_idx == insertion_idx:
return
del self[old_idx]
# Even if replace=True, the insertion idx may have changed since the
# old card was deleted
idx = get_insertion_idx()
if card[0] in Card._commentary_keywords:
cards = reversed(self._splitcommentary(card[0], card[1]))
else:
cards = [card]
for c in cards:
self.insert(idx, c)
def _updateindices(self, idx, increment=True):
"""
For all cards with index above idx, increment or decrement its index
value in the keyword_indices dict.
"""
if idx > len(self._cards):
# Save us some effort
return
increment = 1 if increment else -1
for index_sets in (self._keyword_indices, self._rvkc_indices):
for indices in index_sets.values():
for jdx, keyword_index in enumerate(indices):
if keyword_index >= idx:
indices[jdx] += increment
def _countblanks(self):
"""Returns the number of blank cards at the end of the Header."""
for idx in range(1, len(self._cards)):
if not self._cards[-idx].is_blank:
return idx - 1
return 0
def _useblanks(self, count):
for _ in range(count):
if self._cards[-1].is_blank:
del self[-1]
else:
break
def _haswildcard(self, keyword):
"""Return `True` if the input keyword contains a wildcard pattern."""
return (isinstance(keyword, str) and
(keyword.endswith('...') or '*' in keyword or '?' in keyword))
def _wildcardmatch(self, pattern):
"""
Returns a list of indices of the cards matching the given wildcard
pattern.
* '*' matches 0 or more characters
* '?' matches a single character
* '...' matches 0 or more of any non-whitespace character
"""
pattern = pattern.replace('*', r'.*').replace('?', r'.')
pattern = pattern.replace('...', r'\S*') + '$'
pattern_re = re.compile(pattern, re.I)
return [idx for idx, card in enumerate(self._cards)
if pattern_re.match(card.keyword)]
def _set_slice(self, key, value, target):
"""
Used to implement Header.__setitem__ and CardAccessor.__setitem__.
"""
if isinstance(key, slice) or self._haswildcard(key):
if isinstance(key, slice):
indices = range(*key.indices(len(target)))
else:
indices = self._wildcardmatch(key)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
target[idx] = val
return True
return False
def _splitcommentary(self, keyword, value):
"""
Given a commentary keyword and value, returns a list of the one or more
cards needed to represent the full value. This is primarily used to
create the multiple commentary cards needed to represent a long value
that won't fit into a single commentary card.
"""
# The maximum value in each card can be the maximum card length minus
        # the maximum key length (which can include spaces if the keyword
        # is shorter than 8 characters)
maxlen = Card.length - KEYWORD_LENGTH
valuestr = str(value)
if len(valuestr) <= maxlen:
# The value can fit in a single card
cards = [Card(keyword, value)]
else:
# The value must be split across multiple consecutive commentary
# cards
idx = 0
cards = []
while idx < len(valuestr):
cards.append(Card(keyword, valuestr[idx:idx + maxlen]))
idx += maxlen
return cards
def _strip(self):
"""
Strip cards specific to a certain kind of header.
Strip cards like ``SIMPLE``, ``BITPIX``, etc. so the rest of
the header can be used to reconstruct another kind of header.
"""
# TODO: Previously this only deleted some cards specific to an HDU if
# _hdutype matched that type. But it seemed simple enough to just
# delete all desired cards anyways, and just ignore the KeyErrors if
# they don't exist.
# However, it might be desirable to make this extendable somehow--have
# a way for HDU classes to specify some headers that are specific only
# to that type, and should be removed otherwise.
if 'NAXIS' in self:
naxis = self['NAXIS']
else:
naxis = 0
if 'TFIELDS' in self:
tfields = self['TFIELDS']
else:
tfields = 0
for idx in range(naxis):
try:
del self['NAXIS' + str(idx + 1)]
except KeyError:
pass
for name in ('TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE',
'TUNIT', 'TDISP', 'TDIM', 'THEAP', 'TBCOL'):
for idx in range(tfields):
try:
del self[name + str(idx + 1)]
except KeyError:
pass
for name in ('SIMPLE', 'XTENSION', 'BITPIX', 'NAXIS', 'EXTEND',
'PCOUNT', 'GCOUNT', 'GROUPS', 'BSCALE', 'BZERO',
'TFIELDS'):
try:
del self[name]
except KeyError:
pass
def _add_commentary(self, key, value, before=None, after=None):
"""
Add a commentary card.
If ``before`` and ``after`` are `None`, add to the last occurrence
of cards of the same name (except blank card). If there is no
card (or blank card), append at the end.
"""
if before is not None or after is not None:
self._relativeinsert((key, value), before=before,
after=after)
else:
self[key] = value
collections.abc.MutableSequence.register(Header)
collections.abc.MutableMapping.register(Header)
class _DelayedHeader:
"""
Descriptor used to create the Header object from the header string that
was stored in HDU._header_str when parsing the file.
"""
def __get__(self, obj, owner=None):
try:
return obj.__dict__['_header']
except KeyError:
if obj._header_str is not None:
hdr = Header.fromstring(obj._header_str)
obj._header_str = None
else:
raise AttributeError("'{}' object has no attribute '_header'"
.format(obj.__class__.__name__))
obj.__dict__['_header'] = hdr
return hdr
def __set__(self, obj, val):
obj.__dict__['_header'] = val
def __delete__(self, obj):
del obj.__dict__['_header']
class _BasicHeaderCards:
"""
    This class allows access to cards via the _BasicHeader.cards attribute.
    This is needed because during HDU class detection, some HDUs use
the .cards interface. Cards cannot be modified here as the _BasicHeader
object will be deleted once the HDU object is created.
"""
def __init__(self, header):
self.header = header
def __getitem__(self, key):
# .cards is a list of cards, so key here is an integer.
# get the keyword name from its index.
key = self.header._keys[key]
# then we get the card from the _BasicHeader._cards list, or parse it
# if needed.
try:
return self.header._cards[key]
except KeyError:
cardstr = self.header._raw_cards[key]
card = Card.fromstring(cardstr)
self.header._cards[key] = card
return card
class _BasicHeader(collections.abc.Mapping):
"""This class provides a fast header parsing, without all the additional
features of the Header class. Here only standard keywords are parsed, no
support for CONTINUE, HIERARCH, COMMENT, HISTORY, or rvkc.
The raw card images are stored and parsed only if needed. The idea is that
to create the HDU objects, only a small subset of standard cards is needed.
Once a card is parsed, which is deferred to the Card class, the Card object
is kept in a cache. This is useful because a small subset of cards is used
a lot in the HDU creation process (NAXIS, XTENSION, ...).
"""
def __init__(self, cards):
# dict of (keywords, card images)
self._raw_cards = cards
self._keys = list(cards.keys())
# dict of (keyword, Card object) storing the parsed cards
self._cards = {}
        # the _BasicHeaderCards object allows access to Card objects from
        # keyword indices
self.cards = _BasicHeaderCards(self)
self._modified = False
def __getitem__(self, key):
if isinstance(key, int):
key = self._keys[key]
try:
return self._cards[key].value
except KeyError:
# parse the Card and store it
cardstr = self._raw_cards[key]
self._cards[key] = card = Card.fromstring(cardstr)
return card.value
def __len__(self):
return len(self._raw_cards)
def __iter__(self):
return iter(self._raw_cards)
def index(self, keyword):
return self._keys.index(keyword)
@classmethod
def fromfile(cls, fileobj):
"""The main method to parse a FITS header from a file. The parsing is
done with the parse_header function implemented in Cython."""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'rb')
close_file = True
try:
header_str, cards = parse_header(fileobj)
_check_padding(header_str, BLOCK_SIZE, False)
return header_str, cls(cards)
finally:
if close_file:
fileobj.close()
class _CardAccessor:
"""
This is a generic class for wrapping a Header in such a way that you can
use the header's slice/filtering capabilities to return a subset of cards
and do something with them.
This is sort of the opposite notion of the old CardList class--whereas
Header used to use CardList to get lists of cards, this uses Header to get
lists of cards.
"""
# TODO: Consider giving this dict/list methods like Header itself
def __init__(self, header):
self._header = header
def __repr__(self):
return '\n'.join(repr(c) for c in self._header._cards)
def __len__(self):
return len(self._header._cards)
def __iter__(self):
return iter(self._header._cards)
def __eq__(self, other):
# If the `other` item is a scalar we will still treat it as equal if
# this _CardAccessor only contains one item
if not isiterable(other) or isinstance(other, str):
if len(self) == 1:
other = [other]
else:
return False
for a, b in itertools.zip_longest(self, other):
if a != b:
return False
else:
return True
def __ne__(self, other):
return not (self == other)
def __getitem__(self, item):
if isinstance(item, slice) or self._header._haswildcard(item):
return self.__class__(self._header[item])
idx = self._header._cardindex(item)
return self._header._cards[idx]
def _setslice(self, item, value):
"""
Helper for implementing __setitem__ on _CardAccessor subclasses; slices
should always be handled in this same way.
"""
if isinstance(item, slice) or self._header._haswildcard(item):
if isinstance(item, slice):
indices = range(*item.indices(len(self)))
else:
indices = self._header._wildcardmatch(item)
if isinstance(value, str) or not isiterable(value):
value = itertools.repeat(value, len(indices))
for idx, val in zip(indices, value):
self[idx] = val
return True
return False
collections.abc.Mapping.register(_CardAccessor)
collections.abc.Sequence.register(_CardAccessor)
class _HeaderComments(_CardAccessor):
"""
A class used internally by the Header class for the Header.comments
attribute access.
This object can be used to display all the keyword comments in the Header,
or look up the comments on specific keywords. It allows all the same forms
of keyword lookup as the Header class itself, but returns comments instead
of values.
"""
def __iter__(self):
for card in self._header._cards:
yield card.comment
def __repr__(self):
"""Returns a simple list of all keywords and their comments."""
keyword_length = KEYWORD_LENGTH
for card in self._header._cards:
keyword_length = max(keyword_length, len(card.keyword))
return '\n'.join('{:>{len}} {}'.format(c.keyword, c.comment,
len=keyword_length)
for c in self._header._cards)
def __getitem__(self, item):
"""
Slices and filter strings return a new _HeaderComments containing the
returned cards. Otherwise the comment of a single card is returned.
"""
item = super().__getitem__(item)
if isinstance(item, _HeaderComments):
# The item key was a slice
return item
return item.comment
def __setitem__(self, item, comment):
"""
Set/update the comment on specified card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, comment, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
idx = self._header._cardindex(item)
value = self._header[idx]
self._header[idx] = (value, comment)
class _HeaderCommentaryCards(_CardAccessor):
"""
This is used to return a list-like sequence over all the values in the
header for a given commentary keyword, such as HISTORY.
"""
def __init__(self, header, keyword=''):
super().__init__(header)
self._keyword = keyword
self._count = self._header.count(self._keyword)
self._indices = slice(self._count).indices(self._count)
# __len__ and __iter__ need to be overridden from the base class due to the
# different approach this class has to take for slicing
def __len__(self):
return len(range(*self._indices))
def __iter__(self):
for idx in range(*self._indices):
yield self._header[(self._keyword, idx)]
def __repr__(self):
return '\n'.join(self)
def __getitem__(self, idx):
if isinstance(idx, slice):
n = self.__class__(self._header, self._keyword)
n._indices = idx.indices(self._count)
return n
elif not isinstance(idx, int):
raise ValueError('{} index must be an integer'.format(self._keyword))
idx = list(range(*self._indices))[idx]
return self._header[(self._keyword, idx)]
def __setitem__(self, item, value):
"""
Set the value of a specified commentary card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, value, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
self._header[(self._keyword, item)] = value
def _block_size(sep):
"""
Determine the size of a FITS header block if a non-blank separator is used
between cards.
"""
return BLOCK_SIZE + (len(sep) * (BLOCK_SIZE // Card.length - 1))
def _pad_length(stringlen):
"""Bytes needed to pad the input stringlen to the next FITS block."""
return (BLOCK_SIZE - (stringlen % BLOCK_SIZE)) % BLOCK_SIZE
def _check_padding(header_str, block_size, is_eof, check_block_size=True):
# Strip any zero-padding (see ticket #106)
if header_str and header_str[-1] == '\0':
if is_eof and header_str.strip('\0') == '':
# TODO: Pass this warning to validation framework
warnings.warn(
'Unexpected extra padding at the end of the file. This '
'padding may not be preserved when saving changes.',
AstropyUserWarning)
raise EOFError()
else:
# Replace the illegal null bytes with spaces as required by
# the FITS standard, and issue a nasty warning
# TODO: Pass this warning to validation framework
warnings.warn(
'Header block contains null bytes instead of spaces for '
'padding, and is not FITS-compliant. Nulls may be '
'replaced with spaces upon writing.', AstropyUserWarning)
header_str.replace('\0', ' ')
if check_block_size and (len(header_str) % block_size) != 0:
# This error message ignores the length of the separator for
# now, but maybe it shouldn't?
actual_len = len(header_str) - block_size + BLOCK_SIZE
# TODO: Pass this error to validation framework
raise ValueError('Header size is not multiple of {0}: {1}'
.format(BLOCK_SIZE, actual_len))
|
f9eda3a430231b651b36096f8b017acf8b70de6e820062eeac9ca5725adf7108 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
"""
Convenience functions
=====================
The functions in this module provide shortcuts for some of the most basic
operations on FITS files, such as reading and updating the header. They are
included directly in the 'astropy.io.fits' namespace so that they can be used
like::
astropy.io.fits.getheader(...)
These functions are primarily for convenience when working with FITS files in
the command-line interpreter. If performing several operations on the same
file, such as in a script, it is better to *not* use these functions, as each
one must open and re-parse the file. In such cases it is better to use
:func:`astropy.io.fits.open` and work directly with the
:class:`astropy.io.fits.HDUList` object and underlying HDU objects.
Several of the convenience functions, such as `getheader` and `getdata` support
special arguments for selecting which extension HDU to use when working with a
multi-extension FITS file. There are a few supported argument formats for
selecting the extension. See the documentation for `getdata` for an
explanation of all the different formats.
.. warning::
All arguments to convenience functions other than the filename that are
*not* for selecting the extension HDU should be passed in as keyword
arguments. This is to avoid ambiguity and conflicts with the
extension arguments. For example, to set NAXIS=1 on the Primary HDU:
Wrong::
astropy.io.fits.setval('myimage.fits', 'NAXIS', 1)
The above example will try to set the NAXIS value on the first extension
HDU to blank. That is, the argument '1' is assumed to specify an extension
HDU.
Right::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1)
This will set the NAXIS keyword to 1 on the primary HDU (the default). To
specify the first extension HDU use::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1, ext=1)
This complexity arises out of the attempt to simultaneously support
multiple argument formats that were used in past versions of PyFITS.
Unfortunately, it is not possible to support all formats without
introducing some ambiguity. A future Astropy release may standardize
around a single format and officially deprecate the other formats.
"""
import operator
import os
import warnings
import numpy as np
from .diff import FITSDiff, HDUDiff
from .file import FILE_MODES, _File
from .hdu.base import _BaseHDU, _ValidHDU
from .hdu.hdulist import fitsopen, HDUList
from .hdu.image import PrimaryHDU, ImageHDU
from .hdu.table import BinTableHDU
from .header import Header
from .util import fileobj_closed, fileobj_name, fileobj_mode, _is_int
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.decorators import deprecated_renamed_argument
__all__ = ['getheader', 'getdata', 'getval', 'setval', 'delval', 'writeto',
'append', 'update', 'info', 'tabledump', 'tableload',
'table_to_hdu', 'printdiff']
def getheader(filename, *args, **kwargs):
"""
Get the header from an extension of a FITS file.
Parameters
----------
filename : file path, file object, or file like object
File to get header from. If an opened file object, its mode
        must be one of the following: rb, rb+, or ab+.
ext, extname, extver
The rest of the arguments are for extension specification. See the
`getdata` documentation for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
header : `Header` object
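    For example, with ``'in.fits'`` as a placeholder for an existing
    multi-extension file, the primary header or a named extension header
    could be fetched with::
        >>> prihdr = getheader('in.fits')
        >>> scihdr = getheader('in.fits', extname='sci')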
"""
mode, closed = _get_file_mode(filename)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
header = hdu.header
finally:
hdulist.close(closed=closed)
return header
def getdata(filename, *args, header=None, lower=None, upper=None, view=None,
**kwargs):
"""
Get the data from an extension of a FITS file (and optionally the
header).
Parameters
----------
filename : file path, file object, or file like object
File to get data from. If opened, mode must be one of the
        following: rb, rb+, or ab+.
ext
The rest of the arguments are for extension specification.
They are flexible and are best illustrated by examples.
No extra arguments implies the primary header::
getdata('in.fits')
By extension number::
getdata('in.fits', 0) # the primary header
getdata('in.fits', 2) # the second extension
getdata('in.fits', ext=2) # the second extension
By name, i.e., ``EXTNAME`` value (if unique)::
getdata('in.fits', 'sci')
getdata('in.fits', extname='sci') # equivalent
Note ``EXTNAME`` values are not case sensitive
        By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
getdata('in.fits', extname='sci', extver=2) # equivalent
getdata('in.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
getdata('in.fits', ext=('sci',1), extname='err', extver=2)
header : bool, optional
If `True`, return the data and the header of the specified HDU as a
tuple.
lower, upper : bool, optional
If ``lower`` or ``upper`` are `True`, the field names in the
returned data object will be converted to lower or upper case,
respectively.
view : ndarray, optional
When given, the data will be returned wrapped in the given ndarray
subclass by calling::
data.view(view)
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
array : array, record array or groups data object
Type depends on the type of the extension being referenced.
If the optional keyword ``header`` is set to `True`, this
function will return a (``data``, ``header``) tuple.
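    For example, the data and header of the first extension could be fetched
    in a single call (reusing the ``'in.fits'`` placeholder from above)::
        >>> data, hdr = getdata('in.fits', 1, header=True)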
"""
mode, closed = _get_file_mode(filename)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
data = hdu.data
if data is None and extidx == 0:
try:
hdu = hdulist[1]
data = hdu.data
except IndexError:
raise IndexError('No data in this HDU.')
if data is None:
raise IndexError('No data in this HDU.')
if header:
hdr = hdu.header
finally:
hdulist.close(closed=closed)
# Change case of names if requested
trans = None
if lower:
trans = operator.methodcaller('lower')
elif upper:
trans = operator.methodcaller('upper')
if trans:
if data.dtype.names is None:
# this data does not have fields
return
if data.dtype.descr[0][0] == '':
# this data does not have fields
return
data.dtype.names = [trans(n) for n in data.dtype.names]
# allow different views into the underlying ndarray. Keep the original
# view just in case there is a problem
if isinstance(view, type) and issubclass(view, np.ndarray):
data = data.view(view)
if header:
return data, hdr
else:
return data
def getval(filename, keyword, *args, **kwargs):
"""
Get a keyword's value from a header in a FITS file.
Parameters
----------
filename : file path, file object, or file like object
Name of the FITS file, or file object (if opened, mode must be
        one of the following: rb, rb+, or ab+).
keyword : str
Keyword name
ext, extname, extver
The rest of the arguments are for extension specification.
See `getdata` for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
Returns
-------
keyword value : str, int, or float
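    For example, with ``'in.fits'`` as a placeholder for an existing FITS
    file, the ``NAXIS`` value of the primary header could be read with::
        >>> naxis = getval('in.fits', 'NAXIS', ext=0)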
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
hdr = getheader(filename, *args, **kwargs)
return hdr[keyword]
def setval(filename, keyword, *args, value=None, comment=None, before=None,
after=None, savecomment=False, **kwargs):
"""
Set a keyword's value from a header in a FITS file.
    If the keyword already exists, its value/comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no ``before`` or
``after`` is specified, it will be appended at the end.
When updating more than one keyword in a file, this convenience
function is a much less efficient approach compared with opening
the file for update, modifying the header, and closing the file.
Parameters
----------
filename : file path, file object, or file like object
        Name of the FITS file, or file object.  If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str
Keyword name
value : str, int, float, optional
Keyword value (default: `None`, meaning don't modify)
comment : str, optional
Keyword comment, (default: `None`, meaning don't modify)
before : str, int, optional
Name of the keyword, or index of the card before which the new card
will be placed. The argument ``before`` takes precedence over
``after`` if both are specified (default: `None`).
after : str, int, optional
Name of the keyword, or index of the card after which the new card will
be placed. (default: `None`).
savecomment : bool, optional
When `True`, preserve the current comment for an existing keyword. The
argument ``savecomment`` takes precedence over ``comment`` if both
specified. If ``comment`` is not specified then the current comment
will automatically be preserved (default: `False`).
ext, extname, extver
The rest of the arguments are for extension specification.
See `getdata` for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
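    For example, with ``'in.fits'`` as a placeholder for an existing,
    writable FITS file, a keyword on the primary header could be added or
    updated with::
        >>> setval('in.fits', 'OBSERVER', value='Edwin Hubble',
        ...        comment='name of observer')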
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, 'update', *args, **kwargs)
try:
if keyword in hdulist[extidx].header and savecomment:
comment = None
hdulist[extidx].header.set(keyword, value, comment, before, after)
finally:
hdulist.close(closed=closed)
def delval(filename, keyword, *args, **kwargs):
"""
Delete all instances of keyword from a header in a FITS file.
Parameters
----------
filename : file path, file object, or file like object
        Name of the FITS file, or file object.  If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str, int
Keyword name or index
ext, extname, extver
The rest of the arguments are for extension specification.
See `getdata` for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies
``do_not_scale_image_data = True`` when opening the file so that values
can be retrieved from the unmodified header.
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, 'update', *args, **kwargs)
try:
del hdulist[extidx].header[keyword]
finally:
hdulist.close(closed=closed)
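# Illustrative usage sketch for `delval`; 'example.fits' and the keyword are
# hypothetical:
#
#     from astropy.io import fits
#     fits.delval('example.fits', 'OBSERVER', ext=0)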
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(filename, data, header=None, output_verify='exception',
overwrite=False, checksum=False):
"""
Create a new FITS file using the supplied data/header.
Parameters
----------
filename : file path, file object, or file like object
File to write to. If opened, must be opened in a writeable binary
mode such as 'wb' or 'ab+'.
data : array, record array, or groups data object
data to write to the new file
header : `Header` object, optional
the header associated with ``data``. If `None`, a header
of the appropriate type is created for the supplied data. This
argument is optional.
output_verify : str
Output verification option. Must be one of ``"fix"``, ``"silentfix"``,
``"ignore"``, ``"warn"``, or ``"exception"``. May also be any
combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
``"+warn"``, or ``"+exception"`` (e.g. ``"fix+warn"``). See :ref:`verify`
for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
checksum : bool, optional
If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
headers of all HDU's written to the file.
"""
hdu = _makehdu(data, header)
if hdu.is_image and not isinstance(hdu, PrimaryHDU):
hdu = PrimaryHDU(data, header=header)
hdu.writeto(filename, overwrite=overwrite, output_verify=output_verify,
checksum=checksum)
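# Illustrative usage sketch for `writeto`; the array contents and output file
# name are made up:
#
#     import numpy as np
#     from astropy.io import fits
#     data = np.zeros((10, 10), dtype=np.float32)
#     fits.writeto('new.fits', data, overwrite=True)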
def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
# Avoid circular imports
from .connect import is_column_keyword, REMOVE_KEYWORDS
from .column import python_to_tdisp
# Header to store Time related metadata
hdr = None
# Not all tables with mixin columns are supported
if table.has_mixin_columns:
# Import is done here, in order to avoid it at build time as erfa is not
# yet available then.
from astropy.table.column import BaseColumn
from astropy.time import Time
from astropy.units import Quantity
from .fitstime import time_to_fits
# Only those columns which are instances of BaseColumn, Quantity or Time can
# be written
unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity, Time))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError('cannot write table with mixin column(s) {0}'
.format(unsupported_names))
time_cols = table.columns.isinstance(Time)
if time_cols:
table, hdr = time_to_fits(table)
# Create a new HDU object
if table.masked:
# float column's default fill value needs to be NaN
for column in table.columns.values():
fill_value = column.get_fill_value()
if column.dtype.kind == 'f' and np.allclose(fill_value, 1e20):
column.set_fill_value(np.nan)
# TODO: it might be better to construct the FITS table directly from
# the Table columns, rather than go via a structured array.
table_hdu = BinTableHDU.from_columns(np.array(table.filled()), header=hdr, character_as_bytes=True)
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# in FITS (if at all possible)
int_formats = ('B', 'I', 'J', 'K')
if not (col.format in int_formats or
col.format.p_format in int_formats):
continue
# The astype is necessary because if the string column is less
# than one character, the fill value will be N/A by default which
# is too long, and so no values will get masked.
fill_value = table[col.name].get_fill_value()
col.null = fill_value.astype(table[col.name].dtype)
else:
table_hdu = BinTableHDU.from_columns(np.array(table.filled()), header=hdr, character_as_bytes=character_as_bytes)
# Set units and format display for output HDU
for col in table_hdu.columns:
if table[col.name].info.format is not None:
# check for boolean types, special format case
logical = table[col.name].info.dtype == bool
tdisp_format = python_to_tdisp(table[col.name].info.format,
logical_dtype=logical)
if tdisp_format is not None:
col.disp = tdisp_format
unit = table[col.name].unit
if unit is not None:
# Local imports to avoid importing units when it is not required,
# e.g. for command-line scripts
from astropy.units import Unit
from astropy.units.format.fits import UnitScaleError
try:
col.unit = unit.to_string(format='fits')
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
"The column '{0}' could not be stored in FITS format "
"because it has a scale '({1})' that "
"is not recognized by the FITS standard. Either scale "
"the data or change the units.".format(col.name, str(scale)))
except ValueError:
warnings.warn(
"The unit '{0}' could not be saved to FITS format".format(
unit.to_string()), AstropyUserWarning)
# Try creating a Unit to issue a warning if the unit is not FITS compliant
Unit(col.unit, format='fits', parse_strict='warn')
# Column-specific override keywords for coordinate columns
coord_meta = table.meta.pop('__coordinate_columns__', {})
for col_name, col_info in coord_meta.items():
col = table_hdu.columns[col_name]
# Set the column coordinate attributes from data saved earlier.
# Note: have to set all three, even if we have no data.
for attr in 'coord_type', 'coord_unit', 'time_ref_pos':
setattr(col, attr, col_info.get(attr, None))
for key, value in table.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
"Meta-data keyword {0} will be ignored since it conflicts "
"with a FITS reserved keyword".format(key), AstropyUserWarning)
# Convert to FITS format
if key == 'comments':
key = 'comment'
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
"Attribute `{0}` of type {1} cannot be added to "
"FITS Header - skipping".format(key, type(value)),
AstropyUserWarning)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
"Attribute `{0}` of type {1} cannot be added to FITS "
"Header - skipping".format(key, type(value)),
AstropyUserWarning)
return table_hdu
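# Illustrative usage sketch for `table_to_hdu`; the table contents and output
# file name are made up:
#
#     from astropy.table import Table
#     from astropy.io import fits
#     t = Table({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
#     hdu = fits.table_to_hdu(t)
#     hdu.writeto('table.fits', overwrite=True)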
def append(filename, data, header=None, checksum=False, verify=True, **kwargs):
"""
Append the header/data to FITS file if filename exists, create if not.
If only ``data`` is supplied, a minimal header is created.
Parameters
----------
filename : file path, file object, or file like object
File to write to. If opened, must be opened for update (rb+) unless it
is a new file, then it must be opened for append (ab+). A file or
`~gzip.GzipFile` object opened for update will be closed after return.
data : array, table, or group data object
the new data used for appending
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
checksum : bool, optional
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header
of the HDU when written to the file.
verify : bool, optional
When `True`, the existing FITS file will be read in to verify it for
correctness before appending. When `False`, content is simply appended
to the end of the file. Setting ``verify`` to `False` can be much
faster.
kwargs
Additional arguments are passed to:
- `~astropy.io.fits.writeto` if the file does not exist or is empty.
In this case ``output_verify`` is the only possible argument.
- `~astropy.io.fits.open` if ``verify`` is True or if ``filename``
is a file object.
- Otherwise no additional arguments can be used.
"""
name, closed, noexist_or_empty = _stat_filename_or_fileobj(filename)
if noexist_or_empty:
#
# The input file or file-like object either doesn't exist or is
# empty. Use the writeto convenience function to write the
# output to the empty object.
#
writeto(filename, data, header, checksum=checksum, **kwargs)
else:
hdu = _makehdu(data, header)
if isinstance(hdu, PrimaryHDU):
hdu = ImageHDU(data, header)
if verify or not closed:
f = fitsopen(filename, mode='append', **kwargs)
try:
f.append(hdu)
# Set a flag in the HDU so that only this HDU gets a checksum
# when writing the file.
hdu._output_checksum = checksum
finally:
f.close(closed=closed)
else:
f = _File(filename, mode='append')
try:
hdu._output_checksum = checksum
hdu._writeto(f)
finally:
f.close()
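# Illustrative usage sketch for `append`; 'existing.fits' and the data are
# hypothetical:
#
#     import numpy as np
#     from astropy.io import fits
#     fits.append('existing.fits', np.arange(100.0))  # appends a new image HDU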
def update(filename, data, *args, **kwargs):
"""
Update the specified extension with the input data/header.
Parameters
----------
filename : file path, file object, or file like object
File to update. If opened, mode must be update (rb+). An opened file
object or `~gzip.GzipFile` object will be closed upon return.
data : array, table, or group data object
the new data used for updating
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
ext, extname, extver
The rest of the arguments are flexible: the 3rd argument can be the
header associated with the data. If the 3rd argument is not a
`Header`, it (and other positional arguments) are assumed to be the
extension specification(s). Header and extension specs can also be
keyword arguments. For example::
update(file, dat, hdr, 'sci') # update the 'sci' extension
update(file, dat, 3) # update the 3rd extension
update(file, dat, hdr, 3) # update the 3rd extension
update(file, dat, 'sci', 2) # update the 2nd SCI extension
update(file, dat, 3, header=hdr) # update the 3rd extension
update(file, dat, header=hdr, ext=5) # update the 5th extension
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
"""
# The arguments to this function are a bit trickier to deal with than others
# in this module, since the documentation has promised that the header
# argument can be an optional positional argument.
if args and isinstance(args[0], Header):
header = args[0]
args = args[1:]
else:
header = None
# The header can also be a keyword argument--if both are provided the
# keyword takes precedence
header = kwargs.pop('header', header)
new_hdu = _makehdu(data, header)
closed = fileobj_closed(filename)
hdulist, _ext = _getext(filename, 'update', *args, **kwargs)
try:
hdulist[_ext] = new_hdu
finally:
hdulist.close(closed=closed)
def info(filename, output=None, **kwargs):
"""
Print the summary information on a FITS file.
This includes the name, type, length of header, data shape and type
for each extension.
Parameters
----------
filename : file path, file object, or file like object
FITS file to obtain info from. If opened, mode must be one of
the following: rb, rb+, or ab+ (i.e. the file must be readable).
output : file, bool, optional
A file-like object to write the output to. If ``False``, does not
output to a file and instead returns a list of tuples representing the
HDU info. Writes to ``sys.stdout`` by default.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function sets ``ignore_missing_end=True`` by default.
"""
mode, closed = _get_file_mode(filename, default='readonly')
# Set the default value for the ignore_missing_end parameter
if 'ignore_missing_end' not in kwargs:
kwargs['ignore_missing_end'] = True
f = fitsopen(filename, mode=mode, **kwargs)
try:
ret = f.info(output=output)
finally:
if closed:
f.close()
return ret
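# Illustrative usage sketch for `info`; 'example.fits' is hypothetical:
#
#     from astropy.io import fits
#     fits.info('example.fits')                       # prints summary to stdout
#     rows = fits.info('example.fits', output=False)  # returns a list of tuples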
def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for extension specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
They are flexible and are best illustrated by examples. In addition
to using these arguments positionally you can directly call the
keyword parameters ``ext``, ``extname``.
By extension number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
not case sensitive:
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
The primary use for the `printdiff` function is to allow a quick printout
of a FITS difference report; it writes to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
# Pop extension keywords
extension = {key: kwargs.pop(key) for key in ['ext', 'extname', 'extver']
if key in kwargs}
has_extensions = args or extension
if isinstance(inputa, str) and has_extensions:
# Use handy _getext to interpret any ext keywords, but
# we will need to close hdulista if opening inputb fails
modea, closeda = _get_file_mode(inputa)
modeb, closedb = _get_file_mode(inputb)
hdulista, extidxa = _getext(inputa, modea, *args, **extension)
# Have to close hdulista if opening inputb fails
try:
hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
except Exception:
hdulista.close(closed=closeda)
raise
try:
hdua = hdulista[extidxa]
hdub = hdulistb[extidxb]
# See below print for note
print(HDUDiff(hdua, hdub, **kwargs).report())
finally:
hdulista.close(closed=closeda)
hdulistb.close(closed=closedb)
# If input is not a string, can feed HDU objects or HDUList directly,
# but can't currently handle extensions
elif isinstance(inputa, _ValidHDU) and has_extensions:
raise ValueError("Cannot use extension keywords when providing an "
"HDU object.")
elif isinstance(inputa, _ValidHDU) and not has_extensions:
print(HDUDiff(inputa, inputb, **kwargs).report())
elif isinstance(inputa, HDUList) and has_extensions:
raise NotImplementedError("Extension specification with HDUList "
"objects not implemented.")
# This function is EXCLUSIVELY for printing the diff report to screen
# in a one-liner call, hence the use of print instead of logging
else:
print(FITSDiff(inputa, inputb, **kwargs).report())
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def tabledump(filename, datafile=None, cdfile=None, hfile=None, ext=1,
overwrite=False):
"""
Dump a table HDU to a file in ASCII format. The table may be
dumped in three separate files, one containing column definitions,
one containing header parameters, and one for table data.
Parameters
----------
filename : file path, file object or file-like object
Input fits file.
datafile : file path, file object or file-like object, optional
Output data file. The default is the root name of the input
fits file appended with an underscore, followed by the
extension number (ext), followed by the extension ``.txt``.
cdfile : file path, file object or file-like object, optional
Output column definitions file. The default is `None`, in which case
no column definitions output is produced.
hfile : file path, file object or file-like object, optional
Output header parameters file. The default is `None`, in which case
no header parameters output is produced.
ext : int
The number of the extension containing the table HDU to be
dumped.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
Notes
-----
The primary use for the `tabledump` function is to allow the table data
and parameters to be edited in a standard text editor. The
`tableload` function can be used to reassemble the table from the
three ASCII files.
"""
# allow file object to already be opened in any of the valid modes
# and leave the file in the same state (opened or closed) as when
# the function was called
mode, closed = _get_file_mode(filename, default='readonly')
f = fitsopen(filename, mode=mode)
# Create the default data file name if one was not provided
try:
if not datafile:
root, tail = os.path.splitext(f._file.name)
datafile = root + '_' + repr(ext) + '.txt'
# Dump the data from the HDU to the files
f[ext].dump(datafile, cdfile, hfile, overwrite)
finally:
if closed:
f.close()
if isinstance(tabledump.__doc__, str):
tabledump.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n ')
def tableload(datafile, cdfile, hfile=None):
"""
Create a table from the input ASCII files. The input is from up
to three separate files, one containing column definitions, one
containing header parameters, and one containing column data. The
header parameters file is not required. When the header
parameters file is absent a minimal header is constructed.
Parameters
----------
datafile : file path, file object or file-like object
Input data file containing the table data in ASCII format.
cdfile : file path, file object or file-like object
Input column definition file containing the names, formats,
display formats, physical units, multidimensional array
dimensions, undefined values, scale factors, and offsets
associated with the columns in the table.
hfile : file path, file object or file-like object, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table.
If `None`, a minimal header is constructed.
Notes
-----
The primary use for the `tableload` function is to allow the input of
table data and parameters from ASCII files that were edited in a
standard text editor. The `tabledump` function can be used to create the
initial ASCII files.
"""
return BinTableHDU.load(datafile, cdfile, hfile, replace=True)
if isinstance(tableload.__doc__, str):
tableload.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n ')
def _getext(filename, mode, *args, ext=None, extname=None, extver=None,
**kwargs):
"""
Open the input file, return the `HDUList` and the extension.
This supports several different styles of extension selection. See the
:func:`getdata()` documentation for the different possibilities.
"""
err_msg = ('Redundant/conflicting extension argument(s): {}'.format(
{'args': args, 'ext': ext, 'extname': extname,
'extver': extver}))
# This code would be much simpler if just one way of specifying an
# extension were picked. But now we need to support all possible ways for
# the time being.
if len(args) == 1:
# Must be either an extension number, an extension name, or an
# (extname, extver) tuple
if _is_int(args[0]) or (isinstance(args[0], tuple) and len(args[0]) == 2):
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
ext = args[0]
elif isinstance(args[0], str):
# The first arg is an extension name; it could still be valid
# to provide an extver kwarg
if ext is not None or extname is not None:
raise TypeError(err_msg)
extname = args[0]
else:
# Take whatever we have as the ext argument; we'll validate it
# below
ext = args[0]
elif len(args) == 2:
# Must be an extname and extver
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
extname = args[0]
extver = args[1]
elif len(args) > 2:
raise TypeError('Too many positional arguments.')
if (ext is not None and
not (_is_int(ext) or
(isinstance(ext, tuple) and len(ext) == 2 and
isinstance(ext[0], str) and _is_int(ext[1])))):
raise ValueError(
'The ext keyword must be either an extension number '
'(zero-indexed) or a (extname, extver) tuple.')
if extname is not None and not isinstance(extname, str):
raise ValueError('The extname argument must be a string.')
if extver is not None and not _is_int(extver):
raise ValueError('The extver argument must be an integer.')
if ext is None and extname is None and extver is None:
ext = 0
elif ext is not None and (extname is not None or extver is not None):
raise TypeError(err_msg)
elif extname:
if extver:
ext = (extname, extver)
else:
ext = (extname, 1)
elif extver and extname is None:
raise TypeError('extver alone cannot specify an extension.')
hdulist = fitsopen(filename, mode=mode, **kwargs)
return hdulist, ext
def _makehdu(data, header):
if header is None:
header = Header()
hdu = _BaseHDU._from_data(data, header)
if hdu.__class__ in (_BaseHDU, _ValidHDU):
# The HDU type was unrecognized, possibly due to a
# nonexistent/incomplete header
if ((isinstance(data, np.ndarray) and data.dtype.fields is not None) or
isinstance(data, np.recarray)):
hdu = BinTableHDU(data, header=header)
elif isinstance(data, np.ndarray):
hdu = ImageHDU(data, header=header)
else:
raise KeyError('Data must be a numpy array.')
return hdu
def _stat_filename_or_fileobj(filename):
closed = fileobj_closed(filename)
name = fileobj_name(filename) or ''
try:
loc = filename.tell()
except AttributeError:
loc = 0
noexist_or_empty = ((name and
(not os.path.exists(name) or
(os.path.getsize(name) == 0)))
or (not name and loc == 0))
return name, closed, noexist_or_empty
def _get_file_mode(filename, default='readonly'):
"""
Allow file object to already be opened in any of the valid modes and
leave the file in the same state (opened or closed) as when
the function was called.
"""
mode = default
closed = fileobj_closed(filename)
fmode = fileobj_mode(filename)
if fmode is not None:
mode = FILE_MODES.get(fmode)
if mode is None:
raise OSError(
"File mode of the input file object ({!r}) cannot be used to "
"read/write FITS files.".format(fmode))
return mode, closed
|
0928f1aa18907f0a458e7ab8a6a18a78475c67aaced6cb48e0cd1e9198859566 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import itertools
import io
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from functools import wraps
from astropy.utils import data
from distutils.version import LooseVersion
import numpy as np
from astropy.utils.exceptions import AstropyUserWarning
try:
# Support the Python 3.6 PathLike ABC where possible
from os import PathLike
path_like = (str, PathLike)
except ImportError:
path_like = (str,)
cmp = lambda a, b: (a > b) - (a < b)
all_integer_types = (int, np.integer)
class NotifierMixin:
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener:
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with suppress(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = '_update_{0}'.format(notification)
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
Exclude listeners when saving the listener's state, since they may be
ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super().__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state['_listeners'] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Example:
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter('__name__')):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.currentThread()
single_thread = (threading.activeCount() == 1 and
curr_thread.getName() == 'MainThread')
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn('KeyboardInterrupt ignored until {} is '
'complete!'.format(func.__name__),
AstropyUserWarning)
self.sigint_received = True
sigint_handler = SigintHandler()
# Define new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
"""
a, b = itertools.tee(iterable)
for _ in b:
# Just a little trick to advance b without having to catch
# StopIter if b happens to be empty
break
return zip(a, b)
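# Illustrative example of the module-local `pairwise` helper (values made up):
#
#     list(pairwise([1, 2, 3, 4]))  # -> [(1, 2), (2, 3), (3, 4)]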
def encode_ascii(s):
if isinstance(s, str):
return s.encode('ascii')
elif (isinstance(s, np.ndarray) and
issubclass(s.dtype.type, np.str_)):
ns = np.char.encode(s, 'ascii').view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
ns = ns.astype((np.bytes_, s.dtype.itemsize // 4))
return ns
elif (isinstance(s, np.ndarray) and
not issubclass(s.dtype.type, np.bytes_)):
raise TypeError('string operation on non-string array')
return s
def decode_ascii(s):
if isinstance(s, bytes):
try:
return s.decode('ascii')
except UnicodeDecodeError:
warnings.warn('non-ASCII characters are present in the FITS '
'file header and have been replaced by "?" '
'characters', AstropyUserWarning)
s = s.decode('ascii', errors='replace')
return s.replace(u'\ufffd', '?')
elif (isinstance(s, np.ndarray) and
issubclass(s.dtype.type, np.bytes_)):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
# Numpy apparently also has a bug that if a string array is
# empty, calling np.char.decode on it returns an empty float64
# array, so build the empty unicode array by hand instead.
dt = s.dtype.str.replace('S', 'U')
ns = np.array([], dtype=dt).view(type(s))
else:
ns = np.char.decode(s, 'ascii').view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((np.str_, s.dtype.itemsize))
return ns
elif (isinstance(s, np.ndarray) and
not issubclass(s.dtype.type, np.str_)):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError('string operation on non-string array')
return s
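# Illustrative round trips through `encode_ascii`/`decode_ascii` on scalars
# and a small array (values made up):
#
#     encode_ascii('SIMPLE')                # -> b'SIMPLE'
#     decode_ascii(b'SIMPLE')               # -> 'SIMPLE'
#     decode_ascii(np.array([b'T', b'F']))  # -> array(['T', 'F'], dtype='<U1')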
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, 'readable'):
return f.readable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'read'):
return False
if hasattr(f, 'mode') and not any(c in f.mode for c in 'r+'):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, 'writable'):
return f.writable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'write'):
return False
if hasattr(f, 'mode') and not any(c in f.mode for c in 'wa+'):
return False
# Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, 'buffer'):
return isfile(f.buffer)
elif hasattr(f, 'raw'):
return isfile(f.raw)
return False
def fileobj_open(filename, mode):
"""
A wrapper around the `open()` builtin.
This exists because `open()` returns an `io.BufferedReader` by default.
This is bad, because `io.BufferedReader` doesn't support random access,
which we need in some cases. We must call open with buffering=0 to get
a raw random-access file reader.
"""
return open(filename, mode, buffering=0)
def fileobj_name(f):
"""
Returns the 'name' of file-like object f, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, str):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, 'name'):
return f.name
elif hasattr(f, 'filename'):
return f.filename
elif hasattr(f, '__class__'):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if f is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, str):
return True
if hasattr(f, 'closed'):
return f.closed
elif hasattr(f, 'fileobj') and hasattr(f.fileobj, 'closed'):
return f.fileobj.closed
elif hasattr(f, 'fp') and hasattr(f.fp, 'closed'):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode'):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, 'fileobj_mode'):
return f.fileobj_mode
# PIL-Image -like investigate the fp (filebuffer)
elif hasattr(f, 'fp') and hasattr(f.fp, 'mode'):
fileobj = f.fp
# FILEIO -like (normal open(...)), keep as is.
elif hasattr(f, 'mode'):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return 'rb'
elif mode == gzip.WRITE:
return 'wb'
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if '+' in mode:
mode = mode.replace('+', '')
mode += '+'
return mode
def fileobj_is_binary(f):
"""
Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, 'binary'):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return 'b' in mode
else:
return True
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers.
"""
paragraphs = text.split('\n\n')
def maybe_fill(t):
if all(len(l) < width for l in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return '\n\n'.join(maybe_fill(p) for p in paragraphs)
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if (sys.platform == 'darwin' and
LooseVersion(platform.mac_ver()[0]) < LooseVersion('10.9')):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024 ** 3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.ndarray(buffer=s, dtype=dtype, shape=(count,))
# a copy is needed because an array constructed on top of a read-only
# bytes buffer is itself read-only
array = array.copy()
return array
_OSX_WRITE_LIMIT = (2 ** 32) - 1
_WIN_WRITE_LIMIT = (2 ** 31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : `~numpy.ndarray`
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
if isfile(outfile) and not isinstance(outfile, io.BufferedIOBase):
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (sys.platform == 'darwin' and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and
arr.nbytes % 4096 == 0):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith('win'):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx:idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
# it does not have the buffer interface, a TypeError should be raised,
# in which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, 'nditer'):
# nditer version for non-contiguous arrays
for item in np.nditer(arr, order='C'):
fileobj.write(item.tostring())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if ((sys.byteorder == 'little' and byteorder == '>')
or (sys.byteorder == 'big' and byteorder == '<')):
for item in arr.flat:
fileobj.write(item.byteswap().tostring())
else:
for item in arr.flat:
fileobj.write(item.tostring())
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
elif not binmode and not isinstance(f, str):
s = decode_ascii(s)
f.write(s)
def _convert_array(array, dtype):
"""
Converts an array to a new dtype--if the itemsize of the new dtype is
the same as the old dtype and both types are not numeric, a view is
returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif (array.dtype.itemsize == dtype.itemsize and not
(np.issubdtype(array.dtype, np.number) and
np.issubdtype(dtype, np.number))):
# Includes a special case when both dtypes are at least numeric to
# account for ticket #218: https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218
return array.view(dtype)
else:
return array.astype(dtype)
def _unsigned_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
assert dtype.kind == 'u'
return 1 << (dtype.itemsize * 8 - 1)
def _is_pseudo_unsigned(dtype):
return dtype.kind == 'u' and dtype.itemsize >= 2
def _is_int(val):
return isinstance(val, all_integer_types)
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(input, strlen):
"""
Split a long string into parts where each part is no longer
than ``strlen`` and no word is cut into two pieces. But if
there is one single word which is longer than ``strlen``, then
it will be split in the middle of the word.
"""
words = []
nblanks = input.count(' ')
nmax = max(nblanks, len(input) // strlen + 1)
arr = np.frombuffer((input + ' ').encode('utf8'), dtype='S1')
# locations of the blanks
blank_loc = np.nonzero(arr == b' ')[0]
offset = 0
xoffset = 0
for idx in range(nmax):
try:
loc = np.nonzero(blank_loc >= strlen + offset)[0][0]
offset = blank_loc[loc - 1] + 1
if loc == 0:
offset = -1
except Exception:
offset = len(input)
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = xoffset + strlen
# collect the pieces in a list
words.append(input[xoffset:offset])
if len(input) == offset:
break
xoffset = offset
return words
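# Illustrative example of `_words_group` splitting a long string (the input
# string is made up):
#
#     _words_group('The quick brown fox jumps over the lazy dog', 20)
#     # -> ['The quick brown fox ', 'jumps over the lazy ', 'dog']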
def _tmp_name(input):
"""
Create a temporary file name which should not already exist. Use the
directory of the input file as the base name of the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
If the array has an mmap.mmap at base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, 'base') and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
try:
yield
except OSError as exc:
error_message = ''
if not isinstance(hdulist, list):
hdulist = [hdulist, ]
if dirname is None:
dirname = os.path.dirname(hdulist._file.name)
if os.path.isdir(dirname):
free_space = data.get_free_space_in_dir(dirname)
hdulist_size = sum(hdu.size for hdu in hdulist)
if free_space < hdulist_size:
error_message = ("Not enough space on disk: requested {}, "
"available {}. ".format(hdulist_size, free_space))
for hdu in hdulist:
hdu._close()
raise OSError(error_message + str(exc))
def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
"""
try:
# The _str_to_num method converts the value to string/float
# so we need to perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default
def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(
'io/fits/tests/data/{}'.format(filename), 'astropy')
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
# The following implementation convert the string to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in 'SU':
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == 'S' else 4
dt_int = "({0},){1}u{2}".format(dt.itemsize // bpc, dt.byteorder, bpc)
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j:j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
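# Illustrative example of `_rstrip_inplace` (array contents made up); trailing
# blanks are replaced in place by nulls, so the strings read back stripped:
#
#     a = np.array([b'abc  ', b'de   '], dtype='S5')
#     _rstrip_inplace(a)
#     list(a)  # -> [b'abc', b'de']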
|
2224e90bab2ce0b04b92216ebbc98e1edef0a983d70c68063c86d04b86f372d7 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import re
import warnings
import numpy as np
from .util import _str_to_num, _is_int, translate, _words_group
from .verify import _Verify, _ErrList, VerifyError, VerifyWarning
from . import conf
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['Card', 'Undefined']
FIX_FP_TABLE = str.maketrans('de', 'DE')
FIX_FP_TABLE2 = str.maketrans('dD', 'eE')
CARD_LENGTH = 80
BLANK_CARD = ' ' * CARD_LENGTH
KEYWORD_LENGTH = 8 # The max length for FITS-standard keywords
VALUE_INDICATOR = '= ' # The standard FITS value indicator
VALUE_INDICATOR_LEN = len(VALUE_INDICATOR)
HIERARCH_VALUE_INDICATOR = '=' # HIERARCH cards may use a shortened indicator
class Undefined:
"""Undefined value."""
def __init__(self):
# This __init__ is required to be here for Sphinx documentation
pass
UNDEFINED = Undefined()
class Card(_Verify):
length = CARD_LENGTH
"""The length of a Card image; should always be 80 for valid FITS files."""
# String for a FITS standard compliant (FSC) keyword.
_keywd_FSC_RE = re.compile(r'^[A-Z0-9_-]{0,%d}$' % KEYWORD_LENGTH)
# This will match any printable ASCII character excluding '='
_keywd_hierarch_RE = re.compile(r'^(?:HIERARCH +)?(?:^[ -<>-~]+ ?)+$',
re.I)
# A number sub-string, either an integer or a float in fixed or
# scientific notation. One for FSC and one for non-FSC (NFSC) format:
# NFSC allows lower case of DE for exponent, allows space between sign,
# digits, exponent sign, and exponents
_digits_FSC = r'(\.\d+|\d+(\.\d*)?)([DE][+-]?\d+)?'
_digits_NFSC = r'(\.\d+|\d+(\.\d*)?) *([deDE] *[+-]? *\d+)?'
_numr_FSC = r'[+-]?' + _digits_FSC
_numr_NFSC = r'[+-]? *' + _digits_NFSC
# This regex helps delete leading zeros from numbers, otherwise
# Python might evaluate them as octal values (this is not-greedy, however,
# so it may not strip leading zeros from a float, which is fine)
_number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*?(?P<digt>{})'.format(
_digits_FSC))
_number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*?(?P<digt>{})'.format(
_digits_NFSC))
# FSC commentary card string which must contain printable ASCII characters.
# Note: \Z matches the end of the string without allowing newlines
_ascii_text_re = re.compile(r'[ -~]*\Z')
# Checks for a valid value/comment string. It returns a match object
# for a valid value/comment string.
# The valu group will return a match if a FITS string, boolean,
# number, or complex value is found, otherwise it will return
# None, meaning the keyword is undefined. The comment field will
# return a match if the comment separator is found, though the
# comment may be an empty string.
_value_FSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
# The <strg> regex is not correct for all cases, but
# it comes pretty darn close. It appears to find the
# end of a string rather well, but will accept
# strings with an odd number of single quotes,
# instead of issuing an error. The FITS standard
# appears vague on this issue and only states that a
# string should not end with two single quotes,
# whereas it should not end with an even number of
# quotes to be precise.
#
# Note that a non-greedy match is done for a string,
# since a greedy match will find a single-quote after
# the comment separator resulting in an incorrect
# match.
r'\'(?P<strg>([ -~]+?|\'\'|)) *?\'(?=$|/| )|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_FSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_FSC + r') *, *'
r'(?P<imag>' + _numr_FSC + r') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>[!-~][ -~]*)?'
r')?$')
_value_NFSC_RE = re.compile(
r'(?P<valu_field> *'
r'(?P<valu>'
r'\'(?P<strg>([ -~]+?|\'\'|) *?)\'(?=$|/| )|'
r'(?P<bool>[FT])|'
r'(?P<numr>' + _numr_NFSC + r')|'
r'(?P<cplx>\( *'
r'(?P<real>' + _numr_NFSC + r') *, *'
r'(?P<imag>' + _numr_NFSC + r') *\))'
r')? *)'
r'(?P<comm_field>'
r'(?P<sepr>/ *)'
r'(?P<comm>(.|\n)*)'
r')?$')
_rvkc_identifier = r'[a-zA-Z_]\w*'
_rvkc_field = _rvkc_identifier + r'(\.\d+)?'
_rvkc_field_specifier_s = r'{}(\.{})*'.format(_rvkc_field, _rvkc_field)
_rvkc_field_specifier_val = (r'(?P<keyword>{}): (?P<val>{})'.format(
_rvkc_field_specifier_s, _numr_FSC))
_rvkc_keyword_val = r'\'(?P<rawval>{})\''.format(_rvkc_field_specifier_val)
_rvkc_keyword_val_comm = (r' *{} *(/ *(?P<comm>[ -~]*))?$'.format(
_rvkc_keyword_val))
_rvkc_field_specifier_val_RE = re.compile(_rvkc_field_specifier_val + '$')
# regular expression to extract the key and the field specifier from a
# string that is being used to index into a card list that contains
# record value keyword cards (ex. 'DP1.AXIS.1')
_rvkc_keyword_name_RE = (
re.compile(r'(?P<keyword>{})\.(?P<field_specifier>{})$'.format(
_rvkc_identifier, _rvkc_field_specifier_s)))
# regular expression to extract the field specifier and value and comment
# from the string value of a record value keyword card
# (ex "'AXIS.1: 1' / a comment")
_rvkc_keyword_val_comm_RE = re.compile(_rvkc_keyword_val_comm)
_commentary_keywords = {'', 'COMMENT', 'HISTORY', 'END'}
_special_keywords = _commentary_keywords.union(['CONTINUE'])
# The default value indicator; may be changed if required by a convention
# (namely HIERARCH cards)
_value_indicator = VALUE_INDICATOR
def __init__(self, keyword=None, value=None, comment=None, **kwargs):
# For backwards compatibility, support the 'key' keyword argument:
if keyword is None and 'key' in kwargs:
keyword = kwargs['key']
self._keyword = None
self._value = None
self._comment = None
self._valuestring = None
self._image = None
# This attribute is set to False when creating the card from a card
# image to ensure that the contents of the image get verified at some
# point
self._verified = True
# A flag to conveniently mark whether or not this was a valid HIERARCH
# card
self._hierarch = False
# If the card could not be parsed according to the FITS standard or
# any recognized non-standard conventions, this will be True
self._invalid = False
self._field_specifier = None
# These are used primarily only by RVKCs
self._rawkeyword = None
self._rawvalue = None
if not (keyword is not None and value is not None and
self._check_if_rvkc(keyword, value)):
# If _check_if_rvkc passes, it will handle setting the keyword and
# value
if keyword is not None:
self.keyword = keyword
if value is not None:
self.value = value
if comment is not None:
self.comment = comment
self._modified = False
self._valuemodified = False
def __repr__(self):
return repr((self.keyword, self.value, self.comment))
def __str__(self):
return self.image
def __len__(self):
return 3
def __getitem__(self, index):
return (self.keyword, self.value, self.comment)[index]
@property
def keyword(self):
"""Returns the keyword name parsed from the card image."""
if self._keyword is not None:
return self._keyword
elif self._image:
self._keyword = self._parse_keyword()
return self._keyword
else:
self.keyword = ''
return ''
@keyword.setter
def keyword(self, keyword):
"""Set the key attribute; once set it cannot be modified."""
if self._keyword is not None:
raise AttributeError(
'Once set, the Card keyword may not be modified')
elif isinstance(keyword, str):
# Be nice and remove trailing whitespace--some FITS code always
# pads keywords out with spaces; leading whitespace, however,
# should be strictly disallowed.
keyword = keyword.rstrip()
keyword_upper = keyword.upper()
if (len(keyword) <= KEYWORD_LENGTH and
self._keywd_FSC_RE.match(keyword_upper)):
# For keywords with length > 8 they will be HIERARCH cards,
# and can have arbitrary case keywords
if keyword_upper == 'END':
raise ValueError("Keyword 'END' not allowed.")
keyword = keyword_upper
elif self._keywd_hierarch_RE.match(keyword):
# In prior versions of PyFITS (*) HIERARCH cards would only be
# created if the user-supplied keyword explicitly started with
# 'HIERARCH '. Now we will create them automatically for long
# keywords, but we still want to support the old behavior too;
# the old behavior makes it possible to create HIERARCH cards
# that would otherwise be recognized as RVKCs
# (*) This has never affected Astropy, because it was changed
# before PyFITS was merged into Astropy!
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
if keyword_upper[:9] == 'HIERARCH ':
# The user explicitly asked for a HIERARCH card, so don't
# bug them about it...
keyword = keyword[9:].strip()
else:
# We'll gladly create a HIERARCH card, but a warning is
# also displayed
warnings.warn(
'Keyword name {!r} is greater than 8 characters or '
'contains characters not allowed by the FITS '
'standard; a HIERARCH card will be created.'.format(
keyword), VerifyWarning)
else:
raise ValueError('Illegal keyword name: {!r}.'.format(keyword))
self._keyword = keyword
self._modified = True
else:
raise ValueError('Keyword name {!r} is not a string.'.format(keyword))
@property
def value(self):
"""The value associated with the keyword stored in this card."""
if self.field_specifier:
return float(self._value)
if self._value is not None:
value = self._value
elif self._valuestring is not None or self._image:
value = self._value = self._parse_value()
else:
if self._keyword == '':
self._value = value = ''
else:
self._value = value = UNDEFINED
if conf.strip_header_whitespace and isinstance(value, str):
value = value.rstrip()
return value
@value.setter
def value(self, value):
if self._invalid:
raise ValueError(
'The value of invalid/unparseable cards cannot be set. Either '
'delete this card from the header or replace it.')
if value is None:
value = UNDEFINED
try:
oldvalue = self.value
except VerifyError:
# probably a parsing error, falling back to the internal _value
# which should be None. This may happen while calling _fix_value.
oldvalue = self._value
if oldvalue is None:
oldvalue = UNDEFINED
if not isinstance(value,
(str, int, float, complex, bool, Undefined,
np.floating, np.integer, np.complexfloating,
np.bool_)):
raise ValueError('Illegal value: {!r}.'.format(value))
if isinstance(value, float) and (np.isnan(value) or np.isinf(value)):
raise ValueError("Floating point {!r} values are not allowed "
"in FITS headers.".format(value))
elif isinstance(value, str):
m = self._ascii_text_re.match(value)
if not m:
raise ValueError(
'FITS header values must contain standard printable ASCII '
'characters; {!r} contains characters not representable in '
'ASCII or non-printable characters.'.format(value))
elif isinstance(value, bytes):
            # Allow bytes only if they can be decoded to ASCII text.  In
            # practice this branch is effectively unreachable, because `bytes`
            # is not in the list of allowed types checked above; presently we
            # simply don't allow bytes to be assigned to headers, as doing so
            # would too easily mask potential user error
valid = True
try:
text_value = value.decode('ascii')
except UnicodeDecodeError:
valid = False
else:
# Check against the printable characters regexp as well
m = self._ascii_text_re.match(text_value)
valid = m is not None
if not valid:
raise ValueError(
'FITS header values must contain standard printable ASCII '
'characters; {!r} contains characters/bytes that do not '
'represent printable characters in ASCII.'.format(value))
elif isinstance(value, np.bool_):
value = bool(value)
if (conf.strip_header_whitespace and
(isinstance(oldvalue, str) and isinstance(value, str))):
# Ignore extra whitespace when comparing the new value to the old
different = oldvalue.rstrip() != value.rstrip()
elif isinstance(oldvalue, bool) or isinstance(value, bool):
different = oldvalue is not value
else:
different = (oldvalue != value or
not isinstance(value, type(oldvalue)))
if different:
self._value = value
self._rawvalue = None
self._modified = True
self._valuestring = None
self._valuemodified = True
if self.field_specifier:
try:
self._value = _int_or_float(self._value)
except ValueError:
raise ValueError('value {} is not a float'.format(
self._value))
@value.deleter
def value(self):
if self._invalid:
raise ValueError(
                'The value of invalid/unparseable cards cannot be deleted. '
'Either delete this card from the header or replace it.')
if not self.field_specifier:
self.value = ''
else:
raise AttributeError('Values cannot be deleted from record-valued '
'keyword cards')
@property
def rawkeyword(self):
"""On record-valued keyword cards this is the name of the standard <= 8
character FITS keyword that this RVKC is stored in. Otherwise it is
the card's normal keyword.
"""
if self._rawkeyword is not None:
return self._rawkeyword
elif self.field_specifier is not None:
self._rawkeyword = self.keyword.split('.', 1)[0]
return self._rawkeyword
else:
return self.keyword
@property
def rawvalue(self):
"""On record-valued keyword cards this is the raw string value in
the ``<field-specifier>: <value>`` format stored in the card in order
to represent a RVKC. Otherwise it is the card's normal value.
"""
if self._rawvalue is not None:
return self._rawvalue
elif self.field_specifier is not None:
self._rawvalue = '{}: {}'.format(self.field_specifier, self.value)
return self._rawvalue
else:
return self.value
@property
def comment(self):
"""Get the comment attribute from the card image if not already set."""
if self._comment is not None:
return self._comment
elif self._image:
self._comment = self._parse_comment()
return self._comment
else:
self._comment = ''
return ''
@comment.setter
def comment(self, comment):
if self._invalid:
raise ValueError(
                'The comment of invalid/unparseable cards cannot be set. Either '
'delete this card from the header or replace it.')
if comment is None:
comment = ''
if isinstance(comment, str):
m = self._ascii_text_re.match(comment)
if not m:
raise ValueError(
'FITS header comments must contain standard printable '
'ASCII characters; {!r} contains characters not '
'representable in ASCII or non-printable characters.'
.format(comment))
try:
oldcomment = self.comment
except VerifyError:
# probably a parsing error, falling back to the internal _comment
# which should be None.
oldcomment = self._comment
if oldcomment is None:
oldcomment = ''
if comment != oldcomment:
self._comment = comment
self._modified = True
@comment.deleter
def comment(self):
if self._invalid:
raise ValueError(
                'The comment of invalid/unparseable cards cannot be deleted. '
'Either delete this card from the header or replace it.')
self.comment = ''
@property
def field_specifier(self):
"""
The field-specifier of record-valued keyword cards; always `None` on
normal cards.
"""
        # Ensure that the keyword exists and has been parsed--this will set the
# internal _field_specifier attribute if this is a RVKC.
if self.keyword:
return self._field_specifier
else:
return None
@field_specifier.setter
def field_specifier(self, field_specifier):
if not field_specifier:
raise ValueError('The field-specifier may not be blank in '
'record-valued keyword cards.')
elif not self.field_specifier:
raise AttributeError('Cannot coerce cards to be record-valued '
'keyword cards by setting the '
'field_specifier attribute')
elif field_specifier != self.field_specifier:
self._field_specifier = field_specifier
            # The keyword also needs to be updated
keyword = self._keyword.split('.', 1)[0]
self._keyword = '.'.join([keyword, field_specifier])
self._modified = True
@field_specifier.deleter
def field_specifier(self):
raise AttributeError('The field_specifier attribute may not be '
'deleted from record-valued keyword cards.')
@property
def image(self):
"""
The card "image", that is, the 80 byte character string that represents
this card in an actual FITS header.
"""
if self._image and not self._verified:
self.verify('fix+warn')
if self._image is None or self._modified:
self._image = self._format_image()
return self._image
@property
def is_blank(self):
"""
`True` if the card is completely blank--that is, it has no keyword,
value, or comment. It appears in the header as 80 spaces.
Returns `False` otherwise.
"""
if not self._verified:
# The card image has not been parsed yet; compare directly with the
# string representation of a blank card
return self._image == BLANK_CARD
# If the keyword, value, and comment are all empty (for self.value
# explicitly check that it is a string value, since a blank value is
# returned as '')
return (not self.keyword and
(isinstance(self.value, str) and not self.value) and
not self.comment)
@classmethod
def fromstring(cls, image):
"""
Construct a `Card` object from a (raw) string. It will pad the string
if it is not the length of a card image (80 columns). If the card
image is longer than 80 columns, assume it contains ``CONTINUE``
card(s).
"""
card = cls()
if isinstance(image, bytes):
# FITS supports only ASCII, but decode as latin1 and just take all
# bytes for now; if it results in mojibake due to e.g. UTF-8
# encoded data in a FITS header that's OK because it shouldn't be
# there in the first place
image = image.decode('latin1')
card._image = _pad(image)
card._verified = False
return card
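    # A brief usage sketch for ``fromstring`` (the card image below is an
    # illustrative assumption; spacing follows the standard layout with the
    # value indicator in column 8):
    #
    #     >>> c = Card.fromstring("SIMPLE  =                    T / conforms to FITS")
    #     >>> (c.keyword, c.value, c.comment)
    #     ('SIMPLE', True, 'conforms to FITS')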
@classmethod
def normalize_keyword(cls, keyword):
"""
`classmethod` to convert a keyword value that may contain a
field-specifier to uppercase. The effect is to raise the key to
uppercase and leave the field specifier in its original case.
Parameters
----------
        keyword : str
A keyword value or a ``keyword.field-specifier`` value
"""
# Test first for the most common case: a standard FITS keyword provided
# in standard all-caps
if (len(keyword) <= KEYWORD_LENGTH and
cls._keywd_FSC_RE.match(keyword)):
return keyword
# Test if this is a record-valued keyword
match = cls._rvkc_keyword_name_RE.match(keyword)
if match:
return '.'.join((match.group('keyword').strip().upper(),
match.group('field_specifier')))
elif len(keyword) > 9 and keyword[:9].upper() == 'HIERARCH ':
# Remove 'HIERARCH' from HIERARCH keywords; this could lead to
# ambiguity if there is actually a keyword card containing
# "HIERARCH HIERARCH", but shame on you if you do that.
return keyword[9:].strip().upper()
else:
# A normal FITS keyword, but provided in non-standard case
return keyword.strip().upper()
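    # A brief sketch of the normalization rules above (the keyword strings are
    # illustrative assumptions):
    #
    #     >>> Card.normalize_keyword('naxis')
    #     'NAXIS'
    #     >>> Card.normalize_keyword('HIERARCH Long Keyword')
    #     'LONG KEYWORD'
    #     >>> Card.normalize_keyword('DP1.AXIS.1')  # field-specifier case is kept
    #     'DP1.AXIS.1'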
def _check_if_rvkc(self, *args):
"""
Determine whether or not the card is a record-valued keyword card.
        If one argument is given, that argument is treated as a full card image
        and parsed as such. If two arguments are given, the first is treated
        as the card keyword (including the field-specifier if the card is
        intended as a RVKC) and the second as the card value; alternatively,
        the first argument may be the base keyword and the second the
        'field-specifier: value' string.
        If the check passes, the ._keyword, ._value, and .field_specifier
        attributes are set.
Examples
--------
::
self._check_if_rvkc('DP1', 'AXIS.1: 2')
self._check_if_rvkc('DP1.AXIS.1', 2)
self._check_if_rvkc('DP1 = AXIS.1: 2')
"""
if not conf.enable_record_valued_keyword_cards:
return False
if len(args) == 1:
return self._check_if_rvkc_image(*args)
elif len(args) == 2:
keyword, value = args
if not isinstance(keyword, str):
return False
if keyword in self._commentary_keywords:
return False
match = self._rvkc_keyword_name_RE.match(keyword)
if match and isinstance(value, (int, float)):
self._init_rvkc(match.group('keyword'),
match.group('field_specifier'), None, value)
return True
# Testing for ': ' is a quick way to avoid running the full regular
# expression, speeding this up for the majority of cases
if isinstance(value, str) and value.find(': ') > 0:
match = self._rvkc_field_specifier_val_RE.match(value)
if match and self._keywd_FSC_RE.match(keyword):
self._init_rvkc(keyword, match.group('keyword'), value,
match.group('val'))
return True
def _check_if_rvkc_image(self, *args):
"""
Implements `Card._check_if_rvkc` for the case of an unparsed card
image. If given one argument this is the full intact image. If given
two arguments the card has already been split between keyword and
value+comment at the standard value indicator '= '.
"""
if len(args) == 1:
image = args[0]
eq_idx = image.find(VALUE_INDICATOR)
if eq_idx < 0 or eq_idx > 9:
return False
keyword = image[:eq_idx]
rest = image[eq_idx + VALUE_INDICATOR_LEN:]
else:
keyword, rest = args
rest = rest.lstrip()
# This test allows us to skip running the full regular expression for
# the majority of cards that do not contain strings or that definitely
# do not contain RVKC field-specifiers; it's very much a
# micro-optimization but it does make a measurable difference
if not rest or rest[0] != "'" or rest.find(': ') < 2:
return False
match = self._rvkc_keyword_val_comm_RE.match(rest)
if match:
self._init_rvkc(keyword, match.group('keyword'),
match.group('rawval'), match.group('val'))
return True
def _init_rvkc(self, keyword, field_specifier, field, value):
"""
Sort of addendum to Card.__init__ to set the appropriate internal
attributes if the card was determined to be a RVKC.
"""
keyword_upper = keyword.upper()
self._keyword = '.'.join((keyword_upper, field_specifier))
self._rawkeyword = keyword_upper
self._field_specifier = field_specifier
self._value = _int_or_float(value)
self._rawvalue = field
def _parse_keyword(self):
keyword = self._image[:KEYWORD_LENGTH].strip()
keyword_upper = keyword.upper()
if keyword_upper in self._special_keywords:
return keyword_upper
elif (keyword_upper == 'HIERARCH' and self._image[8] == ' ' and
HIERARCH_VALUE_INDICATOR in self._image):
            # This is a valid HIERARCH card as described by the HIERARCH keyword
# convention:
# http://fits.gsfc.nasa.gov/registry/hierarch_keyword.html
self._hierarch = True
self._value_indicator = HIERARCH_VALUE_INDICATOR
keyword = self._image.split(HIERARCH_VALUE_INDICATOR, 1)[0][9:]
return keyword.strip()
else:
val_ind_idx = self._image.find(VALUE_INDICATOR)
if 0 <= val_ind_idx <= KEYWORD_LENGTH:
# The value indicator should appear in byte 8, but we are
# flexible and allow this to be fixed
if val_ind_idx < KEYWORD_LENGTH:
keyword = keyword[:val_ind_idx]
keyword_upper = keyword_upper[:val_ind_idx]
rest = self._image[val_ind_idx + VALUE_INDICATOR_LEN:]
# So far this looks like a standard FITS keyword; check whether
# the value represents a RVKC; if so then we pass things off to
# the RVKC parser
if self._check_if_rvkc_image(keyword, rest):
return self._keyword
return keyword_upper
else:
warnings.warn(
'The following header keyword is invalid or follows an '
'unrecognized non-standard convention:\n{}'
.format(self._image), AstropyUserWarning)
self._invalid = True
return keyword
def _parse_value(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# Likewise for invalid cards
if self.keyword.upper() in self._commentary_keywords or self._invalid:
return self._image[KEYWORD_LENGTH:].rstrip()
if self._check_if_rvkc(self._image):
return self._value
if len(self._image) > self.length:
values = []
for card in self._itersubcards():
value = card.value.rstrip().replace("''", "'")
if value and value[-1] == '&':
value = value[:-1]
values.append(value)
value = ''.join(values)
self._valuestring = value
return value
m = self._value_NFSC_RE.match(self._split()[1])
if m is None:
raise VerifyError("Unparsable card ({}), fix it first with "
".verify('fix').".format(self.keyword))
if m.group('bool') is not None:
value = m.group('bool') == 'T'
elif m.group('strg') is not None:
value = re.sub("''", "'", m.group('strg'))
elif m.group('numr') is not None:
# Check for numbers with leading 0s.
numr = self._number_NFSC_RE.match(m.group('numr'))
digt = translate(numr.group('digt'), FIX_FP_TABLE2, ' ')
if numr.group('sign') is None:
sign = ''
else:
sign = numr.group('sign')
value = _str_to_num(sign + digt)
elif m.group('cplx') is not None:
# Check for numbers with leading 0s.
real = self._number_NFSC_RE.match(m.group('real'))
rdigt = translate(real.group('digt'), FIX_FP_TABLE2, ' ')
if real.group('sign') is None:
rsign = ''
else:
rsign = real.group('sign')
value = _str_to_num(rsign + rdigt)
imag = self._number_NFSC_RE.match(m.group('imag'))
idigt = translate(imag.group('digt'), FIX_FP_TABLE2, ' ')
if imag.group('sign') is None:
isign = ''
else:
isign = imag.group('sign')
value += _str_to_num(isign + idigt) * 1j
else:
value = UNDEFINED
if not self._valuestring:
self._valuestring = m.group('valu')
return value
def _parse_comment(self):
"""Extract the keyword value from the card image."""
# for commentary cards, no need to parse further
# likewise for invalid/unparseable cards
if self.keyword in Card._commentary_keywords or self._invalid:
return ''
if len(self._image) > self.length:
comments = []
for card in self._itersubcards():
if card.comment:
comments.append(card.comment)
comment = '/ ' + ' '.join(comments).rstrip()
m = self._value_NFSC_RE.match(comment)
else:
m = self._value_NFSC_RE.match(self._split()[1])
if m is not None:
comment = m.group('comm')
if comment:
return comment.rstrip()
return ''
def _split(self):
"""
Split the card image between the keyword and the rest of the card.
"""
if self._image is not None:
# If we already have a card image, don't try to rebuild a new card
# image, which self.image would do
image = self._image
else:
image = self.image
if self.keyword in self._special_keywords:
keyword, valuecomment = image.split(' ', 1)
else:
try:
delim_index = image.index(self._value_indicator)
except ValueError:
delim_index = None
# The equal sign may not be any higher than column 10; anything
# past that must be considered part of the card value
if delim_index is None:
keyword = image[:KEYWORD_LENGTH]
valuecomment = image[KEYWORD_LENGTH:]
elif delim_index > 10 and image[:9] != 'HIERARCH ':
keyword = image[:8]
valuecomment = image[8:]
else:
keyword, valuecomment = image.split(self._value_indicator, 1)
return keyword.strip(), valuecomment.strip()
def _fix_keyword(self):
if self.field_specifier:
keyword, field_specifier = self._keyword.split('.', 1)
self._keyword = '.'.join([keyword.upper(), field_specifier])
else:
self._keyword = self._keyword.upper()
self._modified = True
def _fix_value(self):
"""Fix the card image for fixable non-standard compliance."""
value = None
keyword, valuecomment = self._split()
m = self._value_NFSC_RE.match(valuecomment)
# for the unparsable case
if m is None:
try:
value, comment = valuecomment.split('/', 1)
self.value = value.strip()
self.comment = comment.strip()
except (ValueError, IndexError):
self.value = valuecomment
self._valuestring = self._value
return
elif m.group('numr') is not None:
numr = self._number_NFSC_RE.match(m.group('numr'))
value = translate(numr.group('digt'), FIX_FP_TABLE, ' ')
if numr.group('sign') is not None:
value = numr.group('sign') + value
elif m.group('cplx') is not None:
real = self._number_NFSC_RE.match(m.group('real'))
rdigt = translate(real.group('digt'), FIX_FP_TABLE, ' ')
if real.group('sign') is not None:
rdigt = real.group('sign') + rdigt
imag = self._number_NFSC_RE.match(m.group('imag'))
idigt = translate(imag.group('digt'), FIX_FP_TABLE, ' ')
if imag.group('sign') is not None:
idigt = imag.group('sign') + idigt
value = '({}, {})'.format(rdigt, idigt)
self._valuestring = value
# The value itself has not been modified, but its serialized
# representation (as stored in self._valuestring) has been changed, so
# still set this card as having been modified (see ticket #137)
self._modified = True
def _format_keyword(self):
if self.keyword:
if self.field_specifier:
return '{:{len}}'.format(self.keyword.split('.', 1)[0],
len=KEYWORD_LENGTH)
elif self._hierarch:
return 'HIERARCH {} '.format(self.keyword)
else:
return '{:{len}}'.format(self.keyword, len=KEYWORD_LENGTH)
else:
return ' ' * KEYWORD_LENGTH
def _format_value(self):
# value string
float_types = (float, np.floating, complex, np.complexfloating)
# Force the value to be parsed out first
value = self.value
# But work with the underlying raw value instead (to preserve
# whitespace, for now...)
value = self._value
if self.keyword in self._commentary_keywords:
# The value of a commentary card must be just a raw unprocessed
# string
value = str(value)
elif (self._valuestring and not self._valuemodified and
isinstance(self.value, float_types)):
# Keep the existing formatting for float/complex numbers
value = '{:>20}'.format(self._valuestring)
elif self.field_specifier:
value = _format_value(self._value).strip()
value = "'{}: {}'".format(self.field_specifier, value)
else:
value = _format_value(value)
# For HIERARCH cards the value should be shortened to conserve space
if not self.field_specifier and len(self.keyword) > KEYWORD_LENGTH:
value = value.strip()
return value
def _format_comment(self):
if not self.comment:
return ''
else:
return ' / {}'.format(self._comment)
def _format_image(self):
keyword = self._format_keyword()
value = self._format_value()
is_commentary = keyword.strip() in self._commentary_keywords
if is_commentary:
comment = ''
else:
comment = self._format_comment()
# equal sign string
# by default use the standard value indicator even for HIERARCH cards;
# later we may abbreviate it if necessary
delimiter = VALUE_INDICATOR
if is_commentary:
delimiter = ''
# put all parts together
output = ''.join([keyword, delimiter, value, comment])
# For HIERARCH cards we can save a bit of space if necessary by
# removing the space between the keyword and the equals sign; I'm
        # guessing this is part of the HIERARCH card specification
keywordvalue_length = len(keyword) + len(delimiter) + len(value)
if (keywordvalue_length > self.length and
keyword.startswith('HIERARCH')):
if (keywordvalue_length == self.length + 1 and keyword[-1] == ' '):
output = ''.join([keyword[:-1], delimiter, value, comment])
else:
# I guess the HIERARCH card spec is incompatible with CONTINUE
# cards
raise ValueError('The header keyword {!r} with its value is '
'too long'.format(self.keyword))
if len(output) <= self.length:
output = '{:80}'.format(output)
else:
# longstring case (CONTINUE card)
# try not to use CONTINUE if the string value can fit in one line.
# Instead, just truncate the comment
if (isinstance(self.value, str) and
len(value) > (self.length - 10)):
output = self._format_long_image()
else:
warnings.warn('Card is too long, comment will be truncated.',
VerifyWarning)
output = output[:Card.length]
return output
def _format_long_image(self):
"""
Break up long string value/comment into ``CONTINUE`` cards.
This is a primitive implementation: it will put the value
string in one block and the comment string in another. Also,
it does not break at the blank space between words. So it may
not look pretty.
"""
if self.keyword in Card._commentary_keywords:
return self._format_long_commentary_image()
value_length = 67
comment_length = 64
output = []
# do the value string
value = self._value.replace("'", "''")
words = _words_group(value, value_length)
for idx, word in enumerate(words):
if idx == 0:
headstr = '{:{len}}= '.format(self.keyword, len=KEYWORD_LENGTH)
else:
headstr = 'CONTINUE '
# If this is the final CONTINUE remove the '&'
if not self.comment and idx == len(words) - 1:
value_format = "'{}'"
else:
value_format = "'{}&'"
value = value_format.format(word)
output.append('{:80}'.format(headstr + value))
# do the comment string
comment_format = "{}"
if self.comment:
words = _words_group(self.comment, comment_length)
for idx, word in enumerate(words):
# If this is the final CONTINUE remove the '&'
if idx == len(words) - 1:
headstr = "CONTINUE '' / "
else:
headstr = "CONTINUE '&' / "
comment = headstr + comment_format.format(word)
output.append('{:80}'.format(comment))
return ''.join(output)
def _format_long_commentary_image(self):
"""
If a commentary card's value is too long to fit on a single card, this
        will render the card as multiple consecutive commentary cards of the
        same type.
"""
maxlen = Card.length - KEYWORD_LENGTH
value = self._format_value()
output = []
idx = 0
while idx < len(value):
output.append(str(Card(self.keyword, value[idx:idx + maxlen])))
idx += maxlen
return ''.join(output)
def _verify(self, option='warn'):
self._verified = True
errs = _ErrList([])
fix_text = ('Fixed {!r} card to meet the FITS '
'standard.'.format(self.keyword))
# Don't try to verify cards that already don't meet any recognizable
# standard
if self._invalid:
return errs
# verify the equal sign position
if (self.keyword not in self._commentary_keywords and
(self._image and self._image[:9].upper() != 'HIERARCH ' and
self._image.find('=') != 8)):
errs.append(self.run_option(
option,
err_text='Card {!r} is not FITS standard (equal sign not '
'at column 8).'.format(self.keyword),
fix_text=fix_text,
fix=self._fix_value))
# verify the key, it is never fixable
# always fix silently the case where "=" is before column 9,
# since there is no way to communicate back to the _keys.
if ((self._image and self._image[:8].upper() == 'HIERARCH') or
self._hierarch):
pass
else:
if self._image:
# PyFITS will auto-uppercase any standard keyword, so lowercase
# keywords can only occur if they came from the wild
keyword = self._split()[0]
if keyword != keyword.upper():
# Keyword should be uppercase unless it's a HIERARCH card
errs.append(self.run_option(
option,
err_text='Card keyword {!r} is not upper case.'.format(
keyword),
fix_text=fix_text,
fix=self._fix_keyword))
keyword = self.keyword
if self.field_specifier:
keyword = keyword.split('.', 1)[0]
if not self._keywd_FSC_RE.match(keyword):
errs.append(self.run_option(
option,
err_text='Illegal keyword name {!r}'.format(keyword),
fixable=False))
# verify the value, it may be fixable
keyword, valuecomment = self._split()
if self.keyword in self._commentary_keywords:
# For commentary keywords all that needs to be ensured is that it
# contains only printable ASCII characters
if not self._ascii_text_re.match(valuecomment):
errs.append(self.run_option(
option,
err_text='Unprintable string {!r}; commentary cards may '
'only contain printable ASCII characters'.format(
valuecomment),
fixable=False))
else:
m = self._value_FSC_RE.match(valuecomment)
if not m:
errs.append(self.run_option(
option,
err_text='Card {!r} is not FITS standard (invalid value '
'string: {!r}).'.format(self.keyword, valuecomment),
fix_text=fix_text,
fix=self._fix_value))
# verify the comment (string), it is never fixable
m = self._value_NFSC_RE.match(valuecomment)
if m is not None:
comment = m.group('comm')
if comment is not None:
if not self._ascii_text_re.match(comment):
errs.append(self.run_option(
option,
err_text=('Unprintable string {!r}; header comments '
'may only contain printable ASCII '
'characters'.format(comment)),
fixable=False))
return errs
def _itersubcards(self):
"""
If the card image is greater than 80 characters, it should consist of a
        normal card followed by one or more CONTINUE cards. This method returns
the subcards that make up this logical card.
"""
ncards = len(self._image) // Card.length
for idx in range(0, Card.length * ncards, Card.length):
card = Card.fromstring(self._image[idx:idx + Card.length])
if idx > 0 and card.keyword.upper() != 'CONTINUE':
raise VerifyError(
'Long card images must have CONTINUE cards after '
'the first card.')
if not isinstance(card.value, str):
raise VerifyError('CONTINUE cards must have string values.')
yield card
def _int_or_float(s):
"""
    Converts a string to an int if possible, or to a float.
    If the string is neither an int nor a float, a ValueError is raised.
"""
if isinstance(s, float):
# Already a float so just pass through
return s
try:
return int(s)
except (ValueError, TypeError):
try:
return float(s)
except (ValueError, TypeError) as e:
raise ValueError(str(e))
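# A brief sketch of _int_or_float (inputs are illustrative):
#
#     >>> _int_or_float('42')
#     42
#     >>> _int_or_float('4.5E1')
#     45.0
#     >>> _int_or_float('not-a-number')  # raises ValueError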
def _format_value(value):
"""
Converts a card value to its appropriate string representation as
defined by the FITS format.
"""
    # string value should occupy at least 8 columns, unless it is
# a null string
if isinstance(value, str):
if value == '':
return "''"
else:
exp_val_str = value.replace("'", "''")
val_str = "'{:8}'".format(exp_val_str)
return '{:20}'.format(val_str)
# must be before int checking since bool is also int
elif isinstance(value, (bool, np.bool_)):
return '{:>20}'.format(repr(value)[0]) # T or F
elif _is_int(value):
return '{:>20d}'.format(value)
elif isinstance(value, (float, np.floating)):
return '{:>20}'.format(_format_float(value))
elif isinstance(value, (complex, np.complexfloating)):
val_str = '({}, {})'.format(_format_float(value.real),
_format_float(value.imag))
return '{:>20}'.format(val_str)
elif isinstance(value, Undefined):
return ''
else:
return ''
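# A brief sketch of the fixed-width formatting above (values are
# illustrative): strings are quoted and padded to at least 8 characters
# inside the quotes, booleans become a right-justified 'T' or 'F', and
# numbers are right-justified in 20 columns:
#
#     >>> _format_value('OBJECT')
#     "'OBJECT  '          "
#     >>> _format_value(True).lstrip()
#     'T'
#     >>> len(_format_value(3))
#     20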
def _format_float(value):
"""Format a floating number to make sure it gets the decimal point."""
value_str = '{:.16G}'.format(value)
if '.' not in value_str and 'E' not in value_str:
value_str += '.0'
elif 'E' in value_str:
# On some Windows builds of Python (and possibly other platforms?) the
# exponent is zero-padded out to, it seems, three digits. Normalize
# the format to pad only to two digits.
significand, exponent = value_str.split('E')
if exponent[0] in ('+', '-'):
sign = exponent[0]
exponent = exponent[1:]
else:
sign = ''
value_str = '{}E{}{:02d}'.format(significand, sign, int(exponent))
# Limit the value string to at most 20 characters.
str_len = len(value_str)
if str_len > 20:
idx = value_str.find('E')
if idx < 0:
value_str = value_str[:20]
else:
value_str = value_str[:20 - (str_len - idx)] + value_str[idx:]
return value_str
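# A brief sketch of _format_float (values are illustrative): a decimal point
# is forced and exponents are normalized to two digits:
#
#     >>> _format_float(1.0)
#     '1.0'
#     >>> _format_float(1e-10)
#     '1E-10'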
def _pad(input):
"""Pad blank space to the input string to be multiple of 80."""
_len = len(input)
if _len == Card.length:
return input
elif _len > Card.length:
strlen = _len % Card.length
if strlen == 0:
return input
else:
return input + ' ' * (Card.length - strlen)
# minimum length is 80
else:
strlen = _len % Card.length
return input + ' ' * (Card.length - strlen)
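# A brief sketch of _pad (inputs are illustrative): strings are padded with
# spaces up to the next multiple of Card.length (80):
#
#     >>> len(_pad('END'))
#     80
#     >>> len(_pad('X' * 100))
#     160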
|
18965e4b231adee9211af0babc5e2fc9ff50ac7fd0fc2bfb39950f3a5f260a05 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module handles the conversion of various VOTABLE datatypes
to/from TABLEDATA_ and BINARY_ formats.
"""
# STDLIB
import re
import sys
from struct import unpack as _struct_unpack
from struct import pack as _struct_pack
# THIRD-PARTY
import numpy as np
from numpy import ma
# ASTROPY
from astropy.utils.xml.writer import xml_escape_cdata
# LOCAL
from .exceptions import (vo_raise, vo_warn, warn_or_raise, W01,
W30, W31, W39, W46, W47, W49, W51, E01, E02, E03, E04, E05, E06)
__all__ = ['get_converter', 'Converter', 'table_column_to_votable_datatype']
pedantic_array_splitter = re.compile(r" +")
array_splitter = re.compile(r"\s+|(?:\s*,\s*)")
"""
A regex to handle splitting values on either whitespace or commas.
SPEC: Usage of commas is not actually allowed by the spec, but many
files in the wild use them.
"""
_zero_int = b'\0\0\0\0'
_empty_bytes = b''
_zero_byte = b'\0'
struct_unpack = _struct_unpack
struct_pack = _struct_pack
if sys.byteorder == 'little':
def _ensure_bigendian(x):
if x.dtype.byteorder != '>':
return x.byteswap()
return x
else:
def _ensure_bigendian(x):
if x.dtype.byteorder == '<':
return x.byteswap()
return x
def _make_masked_array(data, mask):
"""
Masked arrays of zero length that also have a mask of zero length
cause problems in Numpy (at least in 1.6.2). This function
creates a masked array from data and a mask, unless it is zero
length.
"""
# np.ma doesn't like setting mask to []
if len(data):
return ma.array(
np.array(data),
mask=np.array(mask, dtype='bool'))
else:
return ma.array(np.array(data))
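# A brief sketch of _make_masked_array (values are illustrative):
#
#     >>> arr = _make_masked_array([1.0, 2.0], [False, True])
#     >>> bool(arr.mask[1])
#     True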
def bitarray_to_bool(data, length):
"""
Converts a bit array (a string of bits in a bytes object) to a
boolean Numpy array.
Parameters
----------
data : bytes
The bit array. The most significant byte is read first.
length : int
The number of bits to read. The least significant bits in the
data bytes beyond length will be ignored.
Returns
-------
array : numpy bool array
"""
results = []
for byte in data:
for bit_no in range(7, -1, -1):
bit = byte & (1 << bit_no)
bit = (bit != 0)
results.append(bit)
if len(results) == length:
break
if len(results) == length:
break
return np.array(results, dtype='b1')
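# A brief sketch of bitarray_to_bool (the input bytes are illustrative); bits
# are read from the most significant bit of each byte first:
#
#     >>> [bool(b) for b in bitarray_to_bool(b'\xa0', 3)]
#     [True, False, True]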
def bool_to_bitarray(value):
"""
Converts a numpy boolean array to a bit array (a string of bits in
a bytes object).
Parameters
----------
value : numpy bool array
Returns
-------
bit_array : bytes
The first value in the input array will be the most
significant bit in the result. The length will be `floor((N +
7) / 8)` where `N` is the length of `value`.
"""
value = value.flat
bit_no = 7
byte = 0
bytes = []
for v in value:
if v:
byte |= 1 << bit_no
if bit_no == 0:
bytes.append(byte)
bit_no = 7
byte = 0
else:
bit_no -= 1
if bit_no != 7:
bytes.append(byte)
return struct_pack("{}B".format(len(bytes)), *bytes)
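# A brief sketch of the inverse conversion, bool_to_bitarray (input is
# illustrative):
#
#     >>> bool_to_bitarray(np.array([True, False, True]))
#     b'\xa0'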
class Converter:
"""
The base class for all converters. Each subclass handles
converting a specific VOTABLE data type to/from the TABLEDATA_ and
BINARY_ on-disk representations.
Parameters
----------
field : `~astropy.io.votable.tree.Field`
object describing the datatype
config : dict
The parser configuration dictionary
pos : tuple
The position in the XML file where the FIELD object was
found. Used for error messages.
"""
def __init__(self, field, config=None, pos=None):
pass
@staticmethod
def _parse_length(read):
return struct_unpack(">I", read(4))[0]
@staticmethod
def _write_length(length):
return struct_pack(">I", int(length))
def supports_empty_values(self, config):
"""
Returns True when the field can be completely empty.
"""
return config.get('version_1_3_or_later')
def parse(self, value, config=None, pos=None):
"""
Convert the string *value* from the TABLEDATA_ format into an
object with the correct native in-memory datatype and mask flag.
Parameters
----------
value : str
value in TABLEDATA format
Returns
-------
native : tuple (value, mask)
The value as a Numpy array or scalar, and *mask* is True
if the value is missing.
"""
raise NotImplementedError(
"This datatype must implement a 'parse' method.")
def parse_scalar(self, value, config=None, pos=None):
"""
Parse a single scalar of the underlying type of the converter.
For non-array converters, this is equivalent to parse. For
array converters, this is used to parse a single
element of the array.
Parameters
----------
value : str
value in TABLEDATA format
Returns
-------
native : tuple (value, mask)
The value as a Numpy array or scalar, and *mask* is True
if the value is missing.
"""
return self.parse(value, config, pos)
def output(self, value, mask):
"""
Convert the object *value* (in the native in-memory datatype)
to a unicode string suitable for serializing in the TABLEDATA_
format.
Parameters
----------
value : native type corresponding to this converter
The value
mask : bool
If `True`, will return the string representation of a
masked value.
Returns
-------
tabledata_repr : unicode
"""
raise NotImplementedError(
"This datatype must implement a 'output' method.")
def binparse(self, read):
"""
Reads some number of bytes from the BINARY_ format
representation by calling the function *read*, and returns the
native in-memory object representation for the datatype
handled by *self*.
Parameters
----------
read : function
A function that given a number of bytes, returns a byte
string.
Returns
-------
native : tuple (value, mask)
The value as a Numpy array or scalar, and *mask* is True
if the value is missing.
"""
raise NotImplementedError(
"This datatype must implement a 'binparse' method.")
def binoutput(self, value, mask):
"""
Convert the object *value* in the native in-memory datatype to
a string of bytes suitable for serialization in the BINARY_
format.
Parameters
----------
value : native type corresponding to this converter
The value
mask : bool
If `True`, will return the string representation of a
masked value.
Returns
-------
bytes : byte string
The binary representation of the value, suitable for
serialization in the BINARY_ format.
"""
raise NotImplementedError(
"This datatype must implement a 'binoutput' method.")
class Char(Converter):
"""
Handles the char datatype. (7-bit unsigned characters)
Missing values are not handled for string or unicode types.
"""
default = _empty_bytes
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Converter.__init__(self, field, config, pos)
if field.arraysize is None:
vo_warn(W47, (), config, pos)
field.arraysize = '1'
if field.arraysize == '*':
self.format = 'O'
self.binparse = self._binparse_var
self.binoutput = self._binoutput_var
self.arraysize = '*'
else:
if field.arraysize.endswith('*'):
field.arraysize = field.arraysize[:-1]
try:
self.arraysize = int(field.arraysize)
except ValueError:
vo_raise(E01, (field.arraysize, 'char', field.ID), config)
self.format = 'S{:d}'.format(self.arraysize)
self.binparse = self._binparse_fixed
self.binoutput = self._binoutput_fixed
self._struct_format = ">{:d}s".format(self.arraysize)
if config.get('verify', 'ignore') == 'exception':
self.parse = self._ascii_parse
else:
self.parse = self._str_parse
def supports_empty_values(self, config):
return True
def _ascii_parse(self, value, config=None, pos=None):
if self.arraysize != '*' and len(value) > self.arraysize:
vo_warn(W46, ('char', self.arraysize), config, pos)
return value.encode('ascii'), False
def _str_parse(self, value, config=None, pos=None):
if self.arraysize != '*' and len(value) > self.arraysize:
vo_warn(W46, ('char', self.arraysize), config, pos)
return value.encode('utf-8'), False
def output(self, value, mask):
if mask:
return ''
if not isinstance(value, str):
value = value.decode('ascii')
return xml_escape_cdata(value)
def _binparse_var(self, read):
length = self._parse_length(read)
return read(length), False
def _binparse_fixed(self, read):
s = struct_unpack(self._struct_format, read(self.arraysize))[0]
end = s.find(_zero_byte)
if end != -1:
return s[:end], False
return s, False
def _binoutput_var(self, value, mask):
if mask or value is None or value == '':
return _zero_int
return self._write_length(len(value)) + value
def _binoutput_fixed(self, value, mask):
if mask:
value = _empty_bytes
return struct_pack(self._struct_format, value)
class UnicodeChar(Converter):
"""
Handles the unicodeChar data type. UTF-16-BE.
Missing values are not handled for string or unicode types.
"""
default = ''
def __init__(self, field, config=None, pos=None):
Converter.__init__(self, field, config, pos)
if field.arraysize is None:
vo_warn(W47, (), config, pos)
field.arraysize = '1'
if field.arraysize == '*':
self.format = 'O'
self.binparse = self._binparse_var
self.binoutput = self._binoutput_var
self.arraysize = '*'
else:
try:
self.arraysize = int(field.arraysize)
except ValueError:
vo_raise(E01, (field.arraysize, 'unicode', field.ID), config)
self.format = 'U{:d}'.format(self.arraysize)
self.binparse = self._binparse_fixed
self.binoutput = self._binoutput_fixed
self._struct_format = ">{:d}s".format(self.arraysize * 2)
def parse(self, value, config=None, pos=None):
if self.arraysize != '*' and len(value) > self.arraysize:
vo_warn(W46, ('unicodeChar', self.arraysize), config, pos)
return value, False
def output(self, value, mask):
if mask:
return ''
return xml_escape_cdata(str(value))
def _binparse_var(self, read):
length = self._parse_length(read)
return read(length * 2).decode('utf_16_be'), False
def _binparse_fixed(self, read):
s = struct_unpack(self._struct_format, read(self.arraysize * 2))[0]
s = s.decode('utf_16_be')
end = s.find('\0')
if end != -1:
return s[:end], False
return s, False
def _binoutput_var(self, value, mask):
if mask or value is None or value == '':
return _zero_int
encoded = value.encode('utf_16_be')
return self._write_length(len(encoded) / 2) + encoded
def _binoutput_fixed(self, value, mask):
if mask:
value = ''
return struct_pack(self._struct_format, value.encode('utf_16_be'))
class Array(Converter):
"""
Handles both fixed and variable-lengths arrays.
"""
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Converter.__init__(self, field, config, pos)
if config.get('verify', 'ignore') == 'exception':
self._splitter = self._splitter_pedantic
else:
self._splitter = self._splitter_lax
def parse_scalar(self, value, config=None, pos=0):
return self._base.parse_scalar(value, config, pos)
@staticmethod
def _splitter_pedantic(value, config=None, pos=None):
return pedantic_array_splitter.split(value)
@staticmethod
def _splitter_lax(value, config=None, pos=None):
if ',' in value:
vo_warn(W01, (), config, pos)
return array_splitter.split(value)
class VarArray(Array):
"""
    Handles variable-length arrays (i.e. where *arraysize* is '*').
"""
format = 'O'
def __init__(self, field, base, arraysize, config=None, pos=None):
Array.__init__(self, field, config)
self._base = base
self.default = np.array([], dtype=self._base.format)
def output(self, value, mask):
output = self._base.output
result = [output(x, m) for x, m in np.broadcast(value, mask)]
return ' '.join(result)
def binparse(self, read):
length = self._parse_length(read)
result = []
result_mask = []
binparse = self._base.binparse
for i in range(length):
val, mask = binparse(read)
result.append(val)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
def binoutput(self, value, mask):
if value is None or len(value) == 0:
return _zero_int
length = len(value)
result = [self._write_length(length)]
binoutput = self._base.binoutput
for x, m in zip(value, value.mask):
result.append(binoutput(x, m))
return _empty_bytes.join(result)
class ArrayVarArray(VarArray):
"""
Handles an array of variable-length arrays, i.e. where *arraysize*
ends in '*'.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == '':
return ma.array([]), False
parts = self._splitter(value, config, pos)
items = self._base._items
parse_parts = self._base.parse_parts
if len(parts) % items != 0:
vo_raise(E02, (items, len(parts)), config, pos)
result = []
result_mask = []
for i in range(0, len(parts), items):
value, mask = parse_parts(parts[i:i+items], config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
class ScalarVarArray(VarArray):
"""
Handles a variable-length array of numeric scalars.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == '':
return ma.array([]), False
parts = self._splitter(value, config, pos)
parse = self._base.parse
result = []
result_mask = []
for x in parts:
value, mask = parse(x, config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
class NumericArray(Array):
"""
Handles a fixed-length array of numeric scalars.
"""
vararray_type = ArrayVarArray
def __init__(self, field, base, arraysize, config=None, pos=None):
Array.__init__(self, field, config, pos)
self._base = base
self._arraysize = arraysize
self.format = "{}{}".format(tuple(arraysize), base.format)
self._items = 1
for dim in arraysize:
self._items *= dim
self._memsize = np.dtype(self.format).itemsize
self._bigendian_format = '>' + self.format
self.default = np.empty(arraysize, dtype=self._base.format)
self.default[...] = self._base.default
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
elif config['version_1_3_or_later'] and value == '':
return np.zeros(self._arraysize, dtype=self._base.format), True
parts = self._splitter(value, config, pos)
if len(parts) != self._items:
warn_or_raise(E02, E02, (self._items, len(parts)), config, pos)
if config.get('verify', 'ignore') == 'exception':
return self.parse_parts(parts, config, pos)
else:
if len(parts) == self._items:
pass
elif len(parts) > self._items:
parts = parts[:self._items]
else:
parts = (parts +
([self._base.default] * (self._items - len(parts))))
return self.parse_parts(parts, config, pos)
def parse_parts(self, parts, config=None, pos=None):
base_parse = self._base.parse
result = []
result_mask = []
for x in parts:
value, mask = base_parse(x, config, pos)
result.append(value)
result_mask.append(mask)
result = np.array(result, dtype=self._base.format).reshape(
self._arraysize)
result_mask = np.array(result_mask, dtype='bool').reshape(
self._arraysize)
return result, result_mask
def output(self, value, mask):
base_output = self._base.output
value = np.asarray(value)
mask = np.asarray(mask)
return ' '.join(base_output(x, m) for x, m in
zip(value.flat, mask.flat))
def binparse(self, read):
result = np.frombuffer(read(self._memsize),
dtype=self._bigendian_format)[0]
result_mask = self._base.is_null(result)
return result, result_mask
def binoutput(self, value, mask):
filtered = self._base.filter_array(value, mask)
filtered = _ensure_bigendian(filtered)
return filtered.tostring()
class Numeric(Converter):
"""
The base class for all numeric data types.
"""
array_type = NumericArray
vararray_type = ScalarVarArray
null = None
def __init__(self, field, config=None, pos=None):
Converter.__init__(self, field, config, pos)
self._memsize = np.dtype(self.format).itemsize
self._bigendian_format = '>' + self.format
if field.values.null is not None:
self.null = np.asarray(field.values.null, dtype=self.format)
self.default = self.null
self.is_null = self._is_null
else:
self.is_null = np.isnan
def binparse(self, read):
result = np.frombuffer(read(self._memsize),
dtype=self._bigendian_format)
return result[0], self.is_null(result[0])
def _is_null(self, value):
return value == self.null
class FloatingPoint(Numeric):
"""
The base class for floating-point datatypes.
"""
default = np.nan
def __init__(self, field, config=None, pos=None):
if config is None:
config = {}
Numeric.__init__(self, field, config, pos)
precision = field.precision
width = field.width
if precision is None:
format_parts = ['{!r:>']
else:
format_parts = ['{:']
if width is not None:
format_parts.append(str(width))
if precision is not None:
if precision.startswith("E"):
format_parts.append('.{:d}g'.format(int(precision[1:])))
elif precision.startswith("F"):
format_parts.append('.{:d}f'.format(int(precision[1:])))
else:
format_parts.append('.{:d}f'.format(int(precision)))
format_parts.append('}')
self._output_format = ''.join(format_parts)
self.nan = np.array(np.nan, self.format)
if self.null is None:
self._null_output = 'NaN'
self._null_binoutput = self.binoutput(self.nan, False)
self.filter_array = self._filter_nan
else:
self._null_output = self.output(np.asarray(self.null), False)
self._null_binoutput = self.binoutput(np.asarray(self.null), False)
self.filter_array = self._filter_null
if config.get('verify', 'ignore') == 'exception':
self.parse = self._parse_pedantic
else:
self.parse = self._parse_permissive
def supports_empty_values(self, config):
return True
def _parse_pedantic(self, value, config=None, pos=None):
if value.strip() == '':
return self.null, True
f = float(value)
return f, self.is_null(f)
def _parse_permissive(self, value, config=None, pos=None):
try:
f = float(value)
return f, self.is_null(f)
except ValueError:
# IRSA VOTables use the word 'null' to specify empty values,
# but this is not defined in the VOTable spec.
if value.strip() != '':
vo_warn(W30, value, config, pos)
return self.null, True
@property
def output_format(self):
return self._output_format
def output(self, value, mask):
if mask:
return self._null_output
if np.isfinite(value):
if not np.isscalar(value):
value = value.dtype.type(value)
result = self._output_format.format(value)
if result.startswith('array'):
raise RuntimeError()
if (self._output_format[2] == 'r' and
result.endswith('.0')):
result = result[:-2]
return result
elif np.isnan(value):
return 'NaN'
elif np.isposinf(value):
return '+InF'
elif np.isneginf(value):
return '-InF'
        # Should never be reached
vo_raise("Invalid floating point value '{}'".format(value))
def binoutput(self, value, mask):
if mask:
return self._null_binoutput
value = _ensure_bigendian(value)
return value.tostring()
def _filter_nan(self, value, mask):
return np.where(mask, np.nan, value)
def _filter_null(self, value, mask):
return np.where(mask, self.null, value)
class Double(FloatingPoint):
"""
Handles the double datatype. Double-precision IEEE
floating-point.
"""
format = 'f8'
class Float(FloatingPoint):
"""
Handles the float datatype. Single-precision IEEE floating-point.
"""
format = 'f4'
class Integer(Numeric):
"""
The base class for all the integral datatypes.
"""
default = 0
def __init__(self, field, config=None, pos=None):
Numeric.__init__(self, field, config, pos)
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
mask = False
if isinstance(value, str):
value = value.lower()
if value == '':
if config['version_1_3_or_later']:
mask = True
else:
warn_or_raise(W49, W49, (), config, pos)
if self.null is not None:
value = self.null
else:
value = self.default
elif value == 'nan':
mask = True
if self.null is None:
warn_or_raise(W31, W31, (), config, pos)
value = self.default
else:
value = self.null
elif value.startswith('0x'):
value = int(value[2:], 16)
else:
value = int(value, 10)
else:
value = int(value)
if self.null is not None and value == self.null:
mask = True
if value < self.val_range[0]:
warn_or_raise(W51, W51, (value, self.bit_size), config, pos)
value = self.val_range[0]
elif value > self.val_range[1]:
warn_or_raise(W51, W51, (value, self.bit_size), config, pos)
value = self.val_range[1]
return value, mask
def output(self, value, mask):
if mask:
if self.null is None:
warn_or_raise(W31, W31)
return 'NaN'
return str(self.null)
return str(value)
def binoutput(self, value, mask):
if mask:
if self.null is None:
vo_raise(W31)
else:
value = self.null
value = _ensure_bigendian(value)
return value.tostring()
def filter_array(self, value, mask):
if np.any(mask):
if self.null is not None:
return np.where(mask, self.null, value)
else:
vo_raise(W31)
return value
class UnsignedByte(Integer):
"""
Handles the unsignedByte datatype. Unsigned 8-bit integer.
"""
format = 'u1'
val_range = (0, 255)
bit_size = '8-bit unsigned'
class Short(Integer):
"""
Handles the short datatype. Signed 16-bit integer.
"""
format = 'i2'
val_range = (-32768, 32767)
bit_size = '16-bit'
class Int(Integer):
"""
Handles the int datatype. Signed 32-bit integer.
"""
format = 'i4'
val_range = (-2147483648, 2147483647)
bit_size = '32-bit'
class Long(Integer):
"""
Handles the long datatype. Signed 64-bit integer.
"""
format = 'i8'
val_range = (-9223372036854775808, 9223372036854775807)
bit_size = '64-bit'
class ComplexArrayVarArray(VarArray):
"""
Handles an array of variable-length arrays of complex numbers.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == '':
return ma.array([]), True
parts = self._splitter(value, config, pos)
items = self._base._items
parse_parts = self._base.parse_parts
if len(parts) % items != 0:
vo_raise(E02, (items, len(parts)), config, pos)
result = []
result_mask = []
for i in range(0, len(parts), items):
value, mask = parse_parts(parts[i:i + items], config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
class ComplexVarArray(VarArray):
"""
Handles a variable-length array of complex numbers.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == '':
return ma.array([]), True
parts = self._splitter(value, config, pos)
parse_parts = self._base.parse_parts
result = []
result_mask = []
for i in range(0, len(parts), 2):
value = [float(x) for x in parts[i:i + 2]]
value, mask = parse_parts(value, config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(
np.array(result, dtype=self._base.format), result_mask), False
class ComplexArray(NumericArray):
"""
Handles a fixed-size array of complex numbers.
"""
vararray_type = ComplexArrayVarArray
def __init__(self, field, base, arraysize, config=None, pos=None):
NumericArray.__init__(self, field, base, arraysize, config, pos)
self._items *= 2
def parse(self, value, config=None, pos=None):
parts = self._splitter(value, config, pos)
if parts == ['']:
parts = []
return self.parse_parts(parts, config, pos)
def parse_parts(self, parts, config=None, pos=None):
if len(parts) != self._items:
vo_raise(E02, (self._items, len(parts)), config, pos)
base_parse = self._base.parse_parts
result = []
result_mask = []
for i in range(0, self._items, 2):
value = [float(x) for x in parts[i:i + 2]]
value, mask = base_parse(value, config, pos)
result.append(value)
result_mask.append(mask)
result = np.array(
result, dtype=self._base.format).reshape(self._arraysize)
result_mask = np.array(
result_mask, dtype='bool').reshape(self._arraysize)
return result, result_mask
class Complex(FloatingPoint, Array):
"""
The base class for complex numbers.
"""
array_type = ComplexArray
vararray_type = ComplexVarArray
default = np.nan
def __init__(self, field, config=None, pos=None):
FloatingPoint.__init__(self, field, config, pos)
Array.__init__(self, field, config, pos)
def parse(self, value, config=None, pos=None):
stripped = value.strip()
if stripped == '' or stripped.lower() == 'nan':
return np.nan, True
splitter = self._splitter
parts = [float(x) for x in splitter(value, config, pos)]
if len(parts) != 2:
vo_raise(E03, (value,), config, pos)
return self.parse_parts(parts, config, pos)
_parse_permissive = parse
_parse_pedantic = parse
def parse_parts(self, parts, config=None, pos=None):
value = complex(*parts)
return value, self.is_null(value)
def output(self, value, mask):
if mask:
if self.null is None:
return 'NaN'
else:
value = self.null
real = self._output_format.format(float(value.real))
imag = self._output_format.format(float(value.imag))
if self._output_format[2] == 'r':
if real.endswith('.0'):
real = real[:-2]
if imag.endswith('.0'):
imag = imag[:-2]
return real + ' ' + imag
class FloatComplex(Complex):
"""
Handle floatComplex datatype. Pair of single-precision IEEE
floating-point numbers.
"""
format = 'c8'
class DoubleComplex(Complex):
"""
Handle doubleComplex datatype. Pair of double-precision IEEE
floating-point numbers.
"""
format = 'c16'
class BitArray(NumericArray):
"""
Handles an array of bits.
"""
vararray_type = ArrayVarArray
def __init__(self, field, base, arraysize, config=None, pos=None):
NumericArray.__init__(self, field, base, arraysize, config, pos)
self._bytes = ((self._items - 1) // 8) + 1
@staticmethod
def _splitter_pedantic(value, config=None, pos=None):
return list(re.sub(r'\s', '', value))
@staticmethod
def _splitter_lax(value, config=None, pos=None):
if ',' in value:
vo_warn(W01, (), config, pos)
return list(re.sub(r'\s|,', '', value))
def output(self, value, mask):
if np.any(mask):
vo_warn(W39)
value = np.asarray(value)
mapping = {False: '0', True: '1'}
return ''.join(mapping[x] for x in value.flat)
def binparse(self, read):
data = read(self._bytes)
result = bitarray_to_bool(data, self._items)
result = result.reshape(self._arraysize)
result_mask = np.zeros(self._arraysize, dtype='b1')
return result, result_mask
def binoutput(self, value, mask):
if np.any(mask):
vo_warn(W39)
return bool_to_bitarray(value)
class Bit(Converter):
"""
Handles the bit datatype.
"""
format = 'b1'
array_type = BitArray
vararray_type = ScalarVarArray
default = False
binary_one = b'\x08'
binary_zero = b'\0'
def parse(self, value, config=None, pos=None):
if config is None:
config = {}
mapping = {'1': True, '0': False}
if value is False or value.strip() == '':
if not config['version_1_3_or_later']:
warn_or_raise(W49, W49, (), config, pos)
return False, True
else:
try:
return mapping[value], False
except KeyError:
vo_raise(E04, (value,), config, pos)
def output(self, value, mask):
if mask:
vo_warn(W39)
if value:
return '1'
else:
return '0'
def binparse(self, read):
data = read(1)
return (ord(data) & 0x8) != 0, False
def binoutput(self, value, mask):
if mask:
vo_warn(W39)
if value:
return self.binary_one
return self.binary_zero
class BooleanArray(NumericArray):
"""
Handles an array of boolean values.
"""
vararray_type = ArrayVarArray
def binparse(self, read):
data = read(self._items)
binparse = self._base.binparse_value
result = []
result_mask = []
for char in data:
value, mask = binparse(char)
result.append(value)
result_mask.append(mask)
result = np.array(result, dtype='b1').reshape(
self._arraysize)
result_mask = np.array(result_mask, dtype='b1').reshape(
self._arraysize)
return result, result_mask
def binoutput(self, value, mask):
binoutput = self._base.binoutput
value = np.asarray(value)
mask = np.asarray(mask)
result = [binoutput(x, m)
for x, m in np.broadcast(value.flat, mask.flat)]
return _empty_bytes.join(result)
class Boolean(Converter):
"""
Handles the boolean datatype.
"""
format = 'b1'
array_type = BooleanArray
vararray_type = ScalarVarArray
default = False
binary_question_mark = b'?'
binary_true = b'T'
binary_false = b'F'
def parse(self, value, config=None, pos=None):
if value == '':
return False, True
if value is False:
return False, True
mapping = {'TRUE': (True, False),
'FALSE': (False, False),
'1': (True, False),
'0': (False, False),
'T': (True, False),
'F': (False, False),
'\0': (False, True),
' ': (False, True),
'?': (False, True),
'': (False, True)}
try:
return mapping[value.upper()]
except KeyError:
vo_raise(E05, (value,), config, pos)
def output(self, value, mask):
if mask:
return '?'
if value:
return 'T'
return 'F'
def binparse(self, read):
value = ord(read(1))
return self.binparse_value(value)
_binparse_mapping = {
ord('T'): (True, False),
ord('t'): (True, False),
ord('1'): (True, False),
ord('F'): (False, False),
ord('f'): (False, False),
ord('0'): (False, False),
ord('\0'): (False, True),
ord(' '): (False, True),
ord('?'): (False, True)}
def binparse_value(self, value):
try:
return self._binparse_mapping[value]
except KeyError:
vo_raise(E05, (value,))
def binoutput(self, value, mask):
if mask:
return self.binary_question_mark
if value:
return self.binary_true
return self.binary_false
converter_mapping = {
'double': Double,
'float': Float,
'bit': Bit,
'boolean': Boolean,
'unsignedByte': UnsignedByte,
'short': Short,
'int': Int,
'long': Long,
'floatComplex': FloatComplex,
'doubleComplex': DoubleComplex,
'char': Char,
'unicodeChar': UnicodeChar}
def get_converter(field, config=None, pos=None):
"""
Get an appropriate converter instance for a given field.
Parameters
----------
field : astropy.io.votable.tree.Field
config : dict, optional
Parser configuration dictionary
pos : tuple
Position in the input XML file. Used for error messages.
Returns
-------
converter : astropy.io.votable.converters.Converter
"""
if config is None:
config = {}
if field.datatype not in converter_mapping:
vo_raise(E06, (field.datatype, field.ID), config)
cls = converter_mapping[field.datatype]
converter = cls(field, config, pos)
arraysize = field.arraysize
# With numeric datatypes, special things need to happen for
# arrays.
if (field.datatype not in ('char', 'unicodeChar') and
arraysize is not None):
if arraysize[-1] == '*':
arraysize = arraysize[:-1]
last_x = arraysize.rfind('x')
if last_x == -1:
arraysize = ''
else:
arraysize = arraysize[:last_x]
fixed = False
else:
fixed = True
if arraysize != '':
arraysize = [int(x) for x in arraysize.split("x")]
arraysize.reverse()
else:
arraysize = []
if arraysize != []:
converter = converter.array_type(
field, converter, arraysize, config)
if not fixed:
converter = converter.vararray_type(
field, converter, arraysize, config)
return converter
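

# Added illustrative sketch (not part of the upstream astropy module): shows
# `get_converter` picking a scalar Boolean converter and a fixed-size
# BooleanArray converter from a FIELD's datatype/arraysize, then round-trips
# a 2x2 array through the BINARY encoding.  It assumes, as the package's own
# test suite does, that ``tree.Field`` tolerates ``None`` for the parent
# votable; the field names used here are arbitrary.  Never called at import.
def _example_get_converter():
    import io

    from astropy.io.votable import tree

    scalar_field = tree.Field(None, name='flag', datatype='boolean')
    assert isinstance(get_converter(scalar_field), Boolean)

    array_field = tree.Field(None, name='flags', datatype='boolean',
                             arraysize='2x2')
    array_conv = get_converter(array_field)

    value = np.array([[True, False], [False, True]])
    mask = np.zeros((2, 2), dtype=bool)
    encoded = array_conv.binoutput(value, mask)   # b'TFFT', one byte per cell
    decoded, decoded_mask = array_conv.binparse(io.BytesIO(encoded).read)
    assert np.array_equal(decoded, value)
    assert not decoded_mask.any()

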
numpy_dtype_to_field_mapping = {
np.float64().dtype.num: 'double',
np.float32().dtype.num: 'float',
np.bool_().dtype.num: 'bit',
np.uint8().dtype.num: 'unsignedByte',
np.int16().dtype.num: 'short',
np.int32().dtype.num: 'int',
np.int64().dtype.num: 'long',
np.complex64().dtype.num: 'floatComplex',
np.complex128().dtype.num: 'doubleComplex',
np.unicode_().dtype.num: 'unicodeChar'
}


numpy_dtype_to_field_mapping[np.bytes_().dtype.num] = 'char'
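

# Added illustrative note (not part of the upstream astropy module): the
# mapping above is keyed on numpy's internal dtype *type numbers*, so lookups
# must go through ``.num`` rather than the dtype object itself.  Never called
# at import time.
def _example_dtype_mapping():
    assert numpy_dtype_to_field_mapping[np.dtype(np.float64).num] == 'double'
    assert numpy_dtype_to_field_mapping[np.dtype(np.int32).num] == 'int'
    assert numpy_dtype_to_field_mapping[np.dtype('S1').num] == 'char'

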
def _all_bytes(column):
for x in column:
if not isinstance(x, bytes):
return False
return True


def _all_unicode(column):
for x in column:
if not isinstance(x, str):
return False
return True


def _all_matching_dtype(column):
first_dtype = False
first_shape = ()
for x in column:
if not isinstance(x, np.ndarray) or len(x) == 0:
continue
if first_dtype is False:
first_dtype = x.dtype
first_shape = x.shape[1:]
elif first_dtype != x.dtype:
return False, ()
elif first_shape != x.shape[1:]:
first_shape = ()
return first_dtype, first_shape
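

# Added illustrative sketch (not part of the upstream astropy module): the
# helpers above inspect object-dtype columns element by element.
# `_all_matching_dtype` reports a common dtype (and trailing shape) for a
# ragged collection of arrays, which `table_column_to_votable_datatype`
# below turns into a variable-length FIELD.  Never called at import time.
def _example_object_column_heuristics():
    ragged = [np.array([1.0, 2.0]), np.array([3.0, 4.0, 5.0])]
    dtype, shape = _all_matching_dtype(ragged)
    assert dtype == np.dtype('float64')
    assert shape == ()

    assert _all_bytes([b'a', b'bc'])
    assert not _all_unicode(['a', b'bc'])

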
def numpy_to_votable_dtype(dtype, shape):
"""
Converts a numpy dtype and shape to a dictionary of attributes for
a VOTable FIELD element and correspond to that type.
Parameters
----------
dtype : Numpy dtype instance
shape : tuple
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element.
"""
if dtype.num not in numpy_dtype_to_field_mapping:
raise TypeError(
"{0!r} can not be represented in VOTable".format(dtype))
if dtype.char == 'S':
return {'datatype': 'char',
'arraysize': str(dtype.itemsize)}
elif dtype.char == 'U':
return {'datatype': 'unicodeChar',
'arraysize': str(dtype.itemsize // 4)}
else:
result = {
'datatype': numpy_dtype_to_field_mapping[dtype.num]}
if len(shape):
result['arraysize'] = 'x'.join(str(x) for x in shape)
return result
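

# Added illustrative sketch (not part of the upstream astropy module):
# fixed-width byte and unicode dtypes encode their width in ``arraysize``,
# while numeric dtypes only get an ``arraysize`` when the cell itself is
# multidimensional.  Never called at import time.
def _example_numpy_to_votable_dtype():
    assert numpy_to_votable_dtype(np.dtype('S10'), ()) == {
        'datatype': 'char', 'arraysize': '10'}
    assert numpy_to_votable_dtype(np.dtype('U10'), ()) == {
        'datatype': 'unicodeChar', 'arraysize': '10'}
    assert numpy_to_votable_dtype(np.dtype('float64'), (2, 3)) == {
        'datatype': 'double', 'arraysize': '2x3'}

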
def table_column_to_votable_datatype(column):
"""
Given a `astropy.table.Column` instance, returns the attributes
necessary to create a VOTable FIELD element that corresponds to
the type of the column.
This necessarily must perform some heuristics to determine the
type of variable length arrays fields, since they are not directly
supported by Numpy.
If the column has dtype of "object", it performs the following
tests:
- If all elements are byte or unicode strings, it creates a
variable-length byte or unicode field, respectively.
- If all elements are numpy arrays of the same dtype and with a
consistent shape in all but the first dimension, it creates a
variable length array of fixed sized arrays. If the dtypes
match, but the shapes do not, a variable length array is
created.
If the dtype of the input is not understood, it sets the data type
to the most inclusive: a variable length unicodeChar array.
Parameters
----------
column : `astropy.table.Column` instance
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element.
"""
if column.dtype.char == 'O':
if isinstance(column[0], bytes):
if _all_bytes(column[1:]):
return {'datatype': 'char', 'arraysize': '*'}
elif isinstance(column[0], str):
if _all_unicode(column[1:]):
return {'datatype': 'unicodeChar', 'arraysize': '*'}
elif isinstance(column[0], np.ndarray):
dtype, shape = _all_matching_dtype(column)
if dtype is not False:
result = numpy_to_votable_dtype(dtype, shape)
if 'arraysize' not in result:
result['arraysize'] = '*'
else:
result['arraysize'] += '*'
return result
# All bets are off, do the most generic thing
return {'datatype': 'unicodeChar', 'arraysize': '*'}
return numpy_to_votable_dtype(column.dtype, column.shape[1:])
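

# Added illustrative sketch (not part of the upstream astropy module): an
# object-dtype Column of Python strings becomes a variable-length
# unicodeChar field, while a plain floating-point Column maps straight
# through its dtype.  Assumes `astropy.table.Column` accepts object-dtype
# data, as it does in current releases.  Never called at import time.
def _example_table_column_to_votable_datatype():
    from astropy.table import Column

    strings = Column(name='name',
                     data=np.array(['a', 'bc', 'def'], dtype=object))
    assert table_column_to_votable_datatype(strings) == {
        'datatype': 'unicodeChar', 'arraysize': '*'}

    floats = Column(name='flux', data=[1.0, 2.0, 3.0])
    assert table_column_to_votable_datatype(floats) == {'datatype': 'double'}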