# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is the APE5 coordinates API document re-written to work as a series of test
functions.
Note that new tests for coordinates functionality should generally *not* be
added to this file - instead, add them to other appropriate test modules in
this package, like ``test_sky_coord.py``, ``test_frames.py``, or
``test_representation.py``. This file is instead meant mainly to keep track of
deviations from the original APE5 plan.
"""
import pytest
import numpy as np
from numpy.random import randn
from numpy import testing as npt
from ...tests.helper import raises, assert_quantity_allclose as assert_allclose
from ... import units as u
from ... import time
from ... import coordinates as coords
from ...units import allclose
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
def test_representations_api():
from ..representation import SphericalRepresentation, \
UnitSphericalRepresentation, PhysicsSphericalRepresentation, \
CartesianRepresentation
from ... coordinates import Angle, Longitude, Latitude, Distance
# <-----------------Classes for representation of coordinate data-------------->
# These classes inherit from a common base class and internally contain Quantity
# objects, which are arrays (although they may act as scalars, like numpy's
# length-0 "arrays")
# They can be initialized in a variety of ways that make intuitive sense.
# Distance is optional.
UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)
UnitSphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg)
SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc)
# In the initial implementation, the lat/lon/distance arguments to the
# initializer must be in order. A *possible* future change will be to allow
# smarter guessing of the order. E.g. `Latitude` and `Longitude` objects can be
# given in any order.
UnitSphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg))
SphericalRepresentation(Longitude(8, u.hour), Latitude(5, u.deg), Distance(10, u.kpc))
# Arrays of any of the inputs are fine
UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg)
# Default is to copy arrays, but optionally, it can be a reference
UnitSphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, copy=False)
# strings are parsed by `Latitude` and `Longitude` constructors, so no need to
# implement parsing in the Representation classes
UnitSphericalRepresentation(lon=Angle('2h6m3.3s'), lat=Angle('0.1rad'))
# Or, you can give `Quantity`s with keywords, and they will be internally
# converted to Angle/Distance
c1 = SphericalRepresentation(lon=8*u.hourangle, lat=5*u.deg, distance=10*u.kpc)
# Can also give another representation object with the `reprobj` keyword.
c2 = SphericalRepresentation.from_representation(c1)
# distance, lat, and lon typically will just match in shape
SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, distance=[10, 11]*u.kpc)
# if the input shapes do not match, they will be broadcast, where possible,
# following numpy's standard broadcasting rules.
c2 = SphericalRepresentation(lon=[8, 9]*u.hourangle, lat=[5, 6]*u.deg, distance=10*u.kpc)
assert len(c2.distance) == 2
# when they can't be broadcast, it is a ValueError (same as Numpy)
with raises(ValueError):
c2 = UnitSphericalRepresentation(lon=[8, 9, 10]*u.hourangle, lat=[5, 6]*u.deg)
# It's also possible to pass in scalar quantity lists with mixed units. These
# are converted to array quantities following the same rule as `Quantity`: all
# elements are converted to match the first element's units.
c2 = UnitSphericalRepresentation(lon=Angle([8*u.hourangle, 135*u.deg]),
lat=Angle([5*u.deg, (6*np.pi/180)*u.rad]))
assert c2.lat.unit == u.deg and c2.lon.unit == u.hourangle
npt.assert_almost_equal(c2.lon[1].value, 9)
# The Quantity initializer itself can also be used to force the unit even if the
# first element doesn't have the right unit
lon = u.Quantity([120*u.deg, 135*u.deg], u.hourangle)
lat = u.Quantity([(5*np.pi/180)*u.rad, 0.4*u.hourangle], u.deg)
c2 = UnitSphericalRepresentation(lon, lat)
# regardless of how input, the `lat` and `lon` come out as angle/distance
assert isinstance(c1.lat, Angle)
assert isinstance(c1.lat, Latitude) # `Latitude` is an `Angle` subclass
assert isinstance(c1.distance, Distance)
# but they are read-only, as representations are immutable once created
with raises(AttributeError):
c1.lat = Latitude(5, u.deg)
# Note that it is still possible to modify the array in-place, but this is not
# sanctioned by the API, as this would prevent things like caching.
c2.lat[:] = [0] * u.deg # possible, but NOT SUPPORTED
# To address the fact that there are various other conventions for how spherical
# coordinates are defined, other conventions can be included as new classes.
# Later there may be other conventions that we implement - for now just the
# physics convention, as it is one of the most common cases.
c3 = PhysicsSphericalRepresentation(phi=120*u.deg, theta=85*u.deg, r=3*u.kpc)
# first dimension must be length-3 if a lone `Quantity` is passed in.
c1 = CartesianRepresentation(randn(3, 100) * u.kpc)
assert c1.xyz.shape[0] == 3
assert c1.xyz.unit == u.kpc
assert c1.x.shape[0] == 100
assert c1.y.shape[0] == 100
assert c1.z.shape[0] == 100
# can also give each as separate keywords
CartesianRepresentation(x=randn(100)*u.kpc, y=randn(100)*u.kpc, z=randn(100)*u.kpc)
# if the units don't match but are all distances, they will automatically be
# converted to match `x`
xarr, yarr, zarr = randn(3, 100)
c1 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.kpc)
c2 = CartesianRepresentation(x=xarr*u.kpc, y=yarr*u.kpc, z=zarr*u.pc)
assert c1.xyz.unit == c2.xyz.unit == u.kpc
assert_allclose((c1.z / 1000) - c2.z, 0*u.kpc, atol=1e-10*u.kpc)
# representations convert into other representations via `represent_as`
srep = SphericalRepresentation(lon=90*u.deg, lat=0*u.deg, distance=1*u.pc)
crep = srep.represent_as(CartesianRepresentation)
assert_allclose(crep.x, 0*u.pc, atol=1e-10*u.pc)
assert_allclose(crep.y, 1*u.pc, atol=1e-10*u.pc)
assert_allclose(crep.z, 0*u.pc, atol=1e-10*u.pc)
# The functions that actually do the conversion are defined via methods on the
# representation classes. This may later be expanded into a full registerable
# transform graph like the coordinate frames, but initially it will be a simpler
# method system
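# For instance, a quick sketch of the method-based conversion through the
# public `represent_as` API, re-expressing `srep` in the physics convention:
prep = srep.represent_as(PhysicsSphericalRepresentation)
assert_allclose(prep.phi, 90*u.deg)
assert_allclose(prep.theta, 90*u.deg)  # theta is measured from the pole, so lat=0 deg -> theta=90 deg
assert_allclose(prep.r, 1*u.pc)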
def test_frame_api():
from ..representation import SphericalRepresentation, \
UnitSphericalRepresentation
from ..builtin_frames import ICRS, FK5
# <--------------------Reference Frame/"Low-level" classes--------------------->
# The low-level classes have a dual role: they act as specifiers of coordinate
# frames and they *may* also contain data as one of the representation objects,
# in which case they are the actual coordinate objects themselves.
# They can always accept a representation as a first argument
icrs = ICRS(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg))
# which is stored as the `data` attribute
assert icrs.data.lat == 5*u.deg
assert icrs.data.lon == 8*u.hourangle
# Frames that require additional information like equinoxes or obstimes get
# them as keyword parameters to the frame constructor. Where sensible,
# defaults are used. E.g., FK5 is almost always J2000 equinox
fk5 = FK5(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg))
J2000 = time.Time('J2000', scale='utc')
fk5_2000 = FK5(UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg), equinox=J2000)
assert fk5.equinox == fk5_2000.equinox
# the information required to specify the frame is immutable
J2001 = time.Time('J2001', scale='utc')
with raises(AttributeError):
fk5.equinox = J2001
# Similar for the representation data.
with raises(AttributeError):
fk5.data = UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)
# There is also a class-level attribute that lists the attributes needed to
# identify the frame. These include attributes like `equinox` shown above.
assert all(nm in ('equinox', 'obstime') for nm in fk5.get_frame_attr_names())
# the result of `get_frame_attr_names` is used in particular by the
# high-level class (discussed below) to allow round-tripping between various
# frames. It is also part of the public API for developers and advanced
# users.
# The actual position information is accessed via the representation objects
assert_allclose(icrs.represent_as(SphericalRepresentation).lat, 5*u.deg)
# shorthand for the above
assert_allclose(icrs.spherical.lat, 5*u.deg)
assert icrs.cartesian.z.value > 0
# Many frames have a "default" representation, the one in which they are
# conventionally described, often with a special name for some of the
# coordinates. E.g., most equatorial coordinate systems are spherical with RA and
# Dec. This works simply as a shorthand for the longer form above
assert_allclose(icrs.dec, 5*u.deg)
assert_allclose(fk5.ra, 8*u.hourangle)
assert icrs.representation == SphericalRepresentation
# low-level classes can also be initialized with names valid for that representation
# and frame:
icrs_2 = ICRS(ra=8*u.hour, dec=5*u.deg, distance=1*u.kpc)
assert_allclose(icrs.ra, icrs_2.ra)
# and these are taken as the default if keywords are not given:
# icrs_nokwarg = ICRS(8*u.hour, 5*u.deg, distance=1*u.kpc)
# assert icrs_nokwarg.ra == icrs_2.ra and icrs_nokwarg.dec == icrs_2.dec
# they also are capable of computing on-sky or 3d separations from each other,
# which will be a direct port of the existing methods:
coo1 = ICRS(ra=0*u.hour, dec=0*u.deg)
coo2 = ICRS(ra=0*u.hour, dec=1*u.deg)
# `separation` is the on-sky separation
assert coo1.separation(coo2).degree == 1.0
# while `separation_3d` includes the 3D distance information
coo3 = ICRS(ra=0*u.hour, dec=0*u.deg, distance=1*u.kpc)
coo4 = ICRS(ra=0*u.hour, dec=0*u.deg, distance=2*u.kpc)
assert coo3.separation_3d(coo4).kpc == 1.0
# The next example fails because `coo1` and `coo2` don't have distances
with raises(ValueError):
assert coo1.separation_3d(coo2).kpc == 1.0
# repr/str also shows info, with frame and data
# assert repr(fk5) == ''
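# a loosely-pinned sketch of such a check (exact repr formatting varies
# across versions and architectures, so only a stable substring is asserted):
assert 'FK5' in repr(fk5)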
def test_transform_api():
from ..representation import UnitSphericalRepresentation
from ..builtin_frames import ICRS, FK5
from ..baseframe import frame_transform_graph, BaseCoordinateFrame
from ..transformations import DynamicMatrixTransform
# <------------------------Transformations------------------------------------->
# Transformation functionality is the key to the whole scheme: it transforms
# low-level objects from one frame to another.
# (used below but defined above in the API)
fk5 = FK5(ra=8*u.hour, dec=5*u.deg)
# If no data (or `None`) is given, the class acts as a specifier of a frame, but
# without any stored data.
J2001 = time.Time('J2001', scale='utc')
fk5_J2001_frame = FK5(equinox=J2001)
# if a frame does not have data, the repr is instead the frame specification
assert repr(fk5_J2001_frame) == "<FK5 Frame (equinox=J2001.000)>"
# Note that, although a frame object is immutable and can't have data added, it
# can be used to create a new object that does have data by giving the
# `realize_frame` method a representation:
srep = UnitSphericalRepresentation(lon=8*u.hour, lat=5*u.deg)
fk5_j2001_with_data = fk5_J2001_frame.realize_frame(srep)
assert fk5_j2001_with_data.data is not None
# Now `fk5_j2001_with_data` is in the same frame as `fk5_J2001_frame`, but it
# is an actual low-level coordinate, rather than a frame without data.
# These frames are primarily useful for specifying what a coordinate should be
# transformed *into*, as they are used by the `transform_to` method
# E.g., this snippet precesses the point to the new equinox
newfk5 = fk5.transform_to(fk5_J2001_frame)
assert newfk5.equinox == J2001
# classes can also be given to `transform_to`, which then uses the defaults for
# the frame information:
samefk5 = fk5.transform_to(FK5)
# `fk5` was initialized using default `obstime` and `equinox`, so:
assert_allclose(samefk5.ra, fk5.ra, atol=1e-10*u.deg)
assert_allclose(samefk5.dec, fk5.dec, atol=1e-10*u.deg)
# transforming to a new frame necessarily loses framespec information if that
# information is not applicable to the new frame. This means transforms are not
# always round-trippable:
fk5_2 = FK5(ra=8*u.hour, dec=5*u.deg, equinox=J2001)
ic_trans = fk5_2.transform_to(ICRS)
# `ic_trans` does not have an `equinox`, so now when we transform back to FK5,
# it's a *different* RA and Dec
fk5_trans = ic_trans.transform_to(FK5)
assert not allclose(fk5_2.ra, fk5_trans.ra, rtol=0, atol=1e-10*u.deg)
# But if you explicitly give the right equinox, all is fine
fk5_trans_2 = fk5_2.transform_to(FK5(equinox=J2001))
assert_allclose(fk5_2.ra, fk5_trans_2.ra, rtol=0, atol=1e-10*u.deg)
# Trying to transform a frame with no data is of course an error:
with raises(ValueError):
FK5(equinox=J2001).transform_to(ICRS)
# To actually define a new transformation, the same scheme as in the
# 0.2/0.3 coordinates framework can be re-used - a graph of transform functions
# connecting various coordinate classes together. The main changes are:
# 1) The transform functions now get the frame object they are transforming the
# current data into.
# 2) Frames with additional information need to have a way to transform between
# objects of the same class, but with different framespecinfo values
# An example transform function:
class SomeNewSystem(BaseCoordinateFrame):
pass
@frame_transform_graph.transform(DynamicMatrixTransform, SomeNewSystem, FK5)
def new_to_fk5(newobj, fk5frame):
ot = newobj.obstime
eq = fk5frame.equinox
# ... build a *cartesian* transform matrix using `eq` that transforms from
# the `newobj` frame as observed at `ot` to FK5 at equinox `eq`
matrix = np.eye(3)
return matrix
# Other options for transform functions include one that simply returns the new
# coordinate object, and one that returns a cartesian matrix but does *not*
# require `newobj` or `fk5frame` - this allows optimization of the transform.
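# A sketch of that last variant, using `StaticMatrixTransform` from the same
# `..transformations` module (the identity matrix is purely illustrative):
from ..transformations import StaticMatrixTransform
class AnotherNewSystem(BaseCoordinateFrame):
pass
@frame_transform_graph.transform(StaticMatrixTransform, AnotherNewSystem, FK5)
def another_to_fk5():
# a constant cartesian matrix, needing neither frame object, so the graph
# is free to compose it with adjacent static transforms ahead of time
return np.eye(3)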
def test_highlevel_api():
J2001 = time.Time('J2001', scale='utc')
# <--------------------------"High-level" class-------------------------------->
# The "high-level" class is intended to wrap the lower-level classes in such a
# way that they can be round-tripped, as well as providing a variety of
# convenience functionality. This document is not intended to show *all* of the
# possible high-level functionality, rather how the high-level classes are
# initialized and interact with the low-level classes
# this creates an object that contains an `ICRS` low-level class, initialized
# identically to the first ICRS example further up.
sc = coords.SkyCoord(coords.SphericalRepresentation(lon=8 * u.hour,
lat=5 * u.deg, distance=1 * u.kpc), frame='icrs')
# Other representations and the `frame` keyword delegate to the appropriate
# low-level class. The already-existing registry for user-defined coordinates
# will be used by `SkyCoord` to figure out what the various values of the
# `frame` keyword actually mean.
sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame='icrs')
sc = coords.SkyCoord(l=120 * u.deg, b=5 * u.deg, frame='galactic')
# High-level classes can also be initialized directly from low-level objects
sc = coords.SkyCoord(coords.ICRS(ra=8 * u.hour, dec=5 * u.deg))
# The next example raises an error because the high-level class must always
# have position data.
with pytest.raises(ValueError):
sc = coords.SkyCoord(coords.FK5(equinox=J2001)) # raises ValueError
# similarly, the low-level object can always be accessed
# this is how it's supposed to look, but sometimes the numbers get rounded in
# funny ways
# assert repr(sc.frame) == '<ICRS Coordinate: ra=120.0 deg, dec=5.0 deg>'
rscf = repr(sc.frame)
assert rscf.startswith('<ICRS Coordinate: (ra, dec) in deg')
# and the string representation will be inherited from the low-level class.
# same deal, should look like this, but different architectures/python
# versions may round the numbers differently
# assert repr(sc) == '<SkyCoord (ICRS): ra=120.0 deg, dec=5.0 deg>'
rsc = repr(sc)
assert rsc.startswith('<SkyCoord (ICRS): (ra, dec) in deg')
# Supports a variety of possible complex string formats
sc = coords.SkyCoord('8h00m00s +5d00m00.0s', frame='icrs')
# In the next example, the unit is only needed b/c units are ambiguous. In
# general, we *never* accept ambiguity
sc = coords.SkyCoord('8:00:00 +5:00:00.0', unit=(u.hour, u.deg), frame='icrs')
# The next one yields length-2 array coordinates, because the input is a
# two-element list of strings
sc = coords.SkyCoord(['8h 5d', '2°2′3″ 0.3rad'], frame='icrs')
# It should also interpret common designation styles as a coordinate
# NOT YET
# sc = coords.SkyCoord('SDSS J123456.89-012345.6', frame='icrs')
# but it should also be possible to provide formats for outputting to strings,
# similar to `Time`. This can be added right away or at a later date.
# transformation is done the same as for low-level classes, which it delegates to
sc_fk5_j2001 = sc.transform_to(coords.FK5(equinox=J2001))
assert sc_fk5_j2001.equinox == J2001
# The key difference is that the high-level class remembers frame information
# necessary for round-tripping, unlike the low-level classes:
sc1 = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, equinox=J2001, frame='fk5')
sc2 = sc1.transform_to('icrs')
# The next assertion succeeds, but it doesn't mean anything for ICRS, as ICRS
# isn't defined in terms of an equinox
assert sc2.equinox == J2001
# But it *is* necessary once we transform to FK5
sc3 = sc2.transform_to('fk5')
assert sc3.equinox == J2001
assert_allclose(sc1.ra, sc3.ra)
# `SkyCoord` will also include the attribute-style access that is in the
# v0.2/0.3 coordinate objects. This will *not* be in the low-level classes
sc = coords.SkyCoord(ra=8 * u.hour, dec=5 * u.deg, frame='icrs')
scgal = sc.galactic
assert str(scgal).startswith('<SkyCoord (Galactic): (l, b)')
# the existing `from_name` and `match_to_catalog_*` methods will be moved to the
# high-level class as convenience functionality.
# in remote-data test below!
# m31icrs = coords.SkyCoord.from_name('M31', frame='icrs')
# assert str(m31icrs) == '<SkyCoord (ICRS) RA=10.68471 deg, Dec=41.26875 deg>'
if HAS_SCIPY:
cat1 = coords.SkyCoord(ra=[1, 2]*u.hr, dec=[3, 4.01]*u.deg, distance=[5, 6]*u.kpc, frame='icrs')
cat2 = coords.SkyCoord(ra=[1, 2, 2.01]*u.hr, dec=[3, 4, 5]*u.deg, distance=[5, 200, 6]*u.kpc, frame='icrs')
idx1, sep2d1, dist3d1 = cat1.match_to_catalog_sky(cat2)
idx2, sep2d2, dist3d2 = cat1.match_to_catalog_3d(cat2)
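# the sky match and the 3d match should disagree for at least one point,
# because the 200 kpc entry in cat2 is close on the sky but far away in 3d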
assert np.any(idx1 != idx2)
# additional convenience functionality for the future should be added as methods
# on `SkyCoord`, *not* the low-level classes.
@pytest.mark.remote_data
def test_highlevel_api_remote():
m31icrs = coords.SkyCoord.from_name('M31', frame='icrs')
m31str = str(m31icrs)
assert m31str.startswith('<SkyCoord (ICRS): (ra, dec) in deg\n (')
assert m31str.endswith(')>')
assert '10.68' in m31str
assert '41.26' in m31str
# The above is essentially a replacement of the below, but tweaked so that
# small/moderate changes in what `from_name` returns don't cause the tests
# to fail
# assert str(m31icrs) == '<SkyCoord (ICRS): (ra, dec) in deg\n (10.6847083, 41.26875)>'
m31fk4 = coords.SkyCoord.from_name('M31', frame='fk4')
assert m31icrs.frame != m31fk4.frame
assert np.abs(m31icrs.ra - m31fk4.ra) > .5*u.deg

# =============================================================================

# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for putting velocity differentials into SkyCoord objects.
Note: the skyoffset velocity tests are in a different file, in
test_skyoffset_transformations.py
"""
import pytest
import numpy as np
from ... import units as u
from ...tests.helper import assert_quantity_allclose
from .. import (SkyCoord, ICRS, SphericalRepresentation, SphericalDifferential,
SphericalCosLatDifferential, CartesianRepresentation,
CartesianDifferential, Galactic, PrecessedGeocentric)
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
def test_creation_frameobjs():
i = ICRS(1*u.deg, 2*u.deg, pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr)
sc = SkyCoord(i)
for attrnm in ['ra', 'dec', 'pm_ra_cosdec', 'pm_dec']:
assert_quantity_allclose(getattr(i, attrnm), getattr(sc, attrnm))
sc_nod = SkyCoord(ICRS(1*u.deg, 2*u.deg))
for attrnm in ['ra', 'dec']:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_nod, attrnm))
def test_creation_attrs():
sc1 = SkyCoord(1*u.deg, 2*u.deg,
pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
frame='fk5')
assert_quantity_allclose(sc1.ra, 1*u.deg)
assert_quantity_allclose(sc1.dec, 2*u.deg)
assert_quantity_allclose(sc1.pm_ra_cosdec, .2*u.arcsec/u.kyr)
assert_quantity_allclose(sc1.pm_dec, .1*u.arcsec/u.kyr)
sc2 = SkyCoord(1*u.deg, 2*u.deg,
pm_ra=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
differential_type=SphericalDifferential)
assert_quantity_allclose(sc2.ra, 1*u.deg)
assert_quantity_allclose(sc2.dec, 2*u.deg)
assert_quantity_allclose(sc2.pm_ra, .2*u.arcsec/u.kyr)
assert_quantity_allclose(sc2.pm_dec, .1*u.arcsec/u.kyr)
sc3 = SkyCoord('1:2:3 4:5:6',
pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
unit=(u.hour, u.deg))
assert_quantity_allclose(sc3.ra, 1*u.hourangle + 2*u.arcmin*15 + 3*u.arcsec*15)
assert_quantity_allclose(sc3.dec, 4*u.deg + 5*u.arcmin + 6*u.arcsec)
# might as well check with sillier units?
assert_quantity_allclose(sc3.pm_ra_cosdec, 1.2776637006616473e-07 * u.arcmin / u.fortnight)
assert_quantity_allclose(sc3.pm_dec, 6.388318503308237e-08 * u.arcmin / u.fortnight)
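# sanity arithmetic for the fortnight numbers: 0.2 mas/yr
# = (0.2/60000) arcmin / (365.25/14 fortnight)
# ~= 3.333e-6 / 26.089 ~= 1.2777e-7 arcmin/fortnight, as asserted above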
def test_creation_copy_basic():
i = ICRS(1*u.deg, 2*u.deg, pm_ra_cosdec=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr)
sc = SkyCoord(i)
sc_cpy = SkyCoord(sc)
for attrnm in ['ra', 'dec', 'pm_ra_cosdec', 'pm_dec']:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
def test_creation_copy_rediff():
sc = SkyCoord(1*u.deg, 2*u.deg,
pm_ra=.2*u.mas/u.yr, pm_dec=.1*u.mas/u.yr,
differential_type=SphericalDifferential)
sc_cpy = SkyCoord(sc)
for attrnm in ['ra', 'dec', 'pm_ra', 'pm_dec']:
assert_quantity_allclose(getattr(sc, attrnm), getattr(sc_cpy, attrnm))
sc_newdiff = SkyCoord(sc, differential_type=SphericalCosLatDifferential)
reprepr = sc.represent_as(SphericalRepresentation, SphericalCosLatDifferential)
assert_quantity_allclose(sc_newdiff.pm_ra_cosdec,
reprepr.differentials['s'].d_lon_coslat)
def test_creation_cartesian():
rep = CartesianRepresentation([10, 0., 0.]*u.pc)
dif = CartesianDifferential([0, 100, 0.]*u.pc/u.Myr)
rep = rep.with_differentials(dif)
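# geometry note: for a point on the +x axis (ra=0, dec=0), +y is exactly the
# direction of increasing RA, so this velocity is pure proper motion in RA
# with no radial or dec component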
c = SkyCoord(rep)
sdif = dif.represent_as(SphericalCosLatDifferential, rep)
assert_quantity_allclose(c.pm_ra_cosdec, sdif.d_lon_coslat)
def test_useful_error_missing():
sc_nod = SkyCoord(ICRS(1*u.deg, 2*u.deg))
try:
sc_nod.l
except AttributeError as e:
# this is double-checking the *normal* behavior
msg_l = e.args[0]
try:
sc_nod.pm_dec
except Exception as e:
msg_pm_dec = e.args[0]
assert "has no attribute" in msg_l
assert "has no associated differentials" in msg_pm_dec
# ----------------------Operations on SkyCoords w/ velocities-------------------
# define some fixtures to get baseline coordinates to try operations with
@pytest.fixture(scope="module", params=[(False, False),
(True, False),
(False, True),
(True, True)])
def sc(request):
incldist, inclrv = request.param
args = [1*u.deg, 2*u.deg]
kwargs = dict(pm_dec=1*u.mas/u.yr, pm_ra_cosdec=2*u.mas/u.yr)
if incldist:
kwargs['distance'] = 213.4*u.pc
if inclrv:
kwargs['radial_velocity'] = 61*u.km/u.s
return SkyCoord(*args, **kwargs)
@pytest.fixture(scope="module")
def scmany():
return SkyCoord(ICRS(ra=[1]*100*u.deg, dec=[2]*100*u.deg,
pm_ra_cosdec=np.random.randn(100)*u.mas/u.yr,
pm_dec=np.random.randn(100)*u.mas/u.yr,))
@pytest.fixture(scope="module")
def sc_for_sep():
return SkyCoord(1*u.deg, 2*u.deg,
pm_dec=1*u.mas/u.yr, pm_ra_cosdec=2*u.mas/u.yr)
def test_separation(sc, sc_for_sep):
sc.separation(sc_for_sep)
def test_accessors(sc, scmany):
sc.data.differentials['s']
sph = sc.spherical
gal = sc.galactic
if (sc.data.get_name().startswith('unit') and not
sc.data.differentials['s'].get_name().startswith('unit')):
# this xfail can be eliminated when issue #7028 is resolved
pytest.xfail('.velocity fails if there is an RV but not distance')
sc.velocity
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
scmany[0]
sph = scmany.spherical
gal = scmany.galactic
assert isinstance(sph, SphericalRepresentation)
assert gal.data.differentials is not None
def test_transforms(sc):
trans = sc.transform_to('galactic')
assert isinstance(trans.frame, Galactic)
def test_transforms_diff(sc):
# note that arguably this *should* fail for the no-distance cases: 3D
# information is necessary to truly solve this, hence the xfail
if not sc.distance.unit.is_equivalent(u.m):
pytest.xfail('Should fail for no-distance cases')
else:
trans = sc.transform_to(PrecessedGeocentric(equinox='B1975'))
assert isinstance(trans.frame, PrecessedGeocentric)
@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_matching(sc, scmany):
# just check that it works and yields something
idx, d2d, d3d = sc.match_to_catalog_sky(scmany)
def test_position_angle(sc, sc_for_sep):
sc.position_angle(sc_for_sep)
def test_constellations(sc):
const = sc.get_constellation()
assert const == 'Pisces'
def test_separation_3d_with_differentials():
c1 = SkyCoord(ra=138*u.deg, dec=-17*u.deg, distance=100*u.pc,
pm_ra_cosdec=5*u.mas/u.yr,
pm_dec=-7*u.mas/u.yr,
radial_velocity=160*u.km/u.s)
c2 = SkyCoord(ra=138*u.deg, dec=-17*u.deg, distance=105*u.pc,
pm_ra_cosdec=15*u.mas/u.yr,
pm_dec=-74*u.mas/u.yr,
radial_velocity=-60*u.km/u.s)
sep = c1.separation_3d(c2)
assert_quantity_allclose(sep, 5*u.pc)

# =============================================================================

# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy import testing as npt
from ... import units as u
from ...time import Time
from ..builtin_frames import ICRS, AltAz
from ..builtin_frames.utils import get_jd12
from .. import EarthLocation
from .. import SkyCoord
from ...tests.helper import catch_warnings
from ... import _erfa as erfa
from ...utils import iers
from .utils import randomly_sample_sphere
# These fixtures are used in test_iau_fullstack
@pytest.fixture(scope="function")
def fullstack_icrs():
ra, dec, _ = randomly_sample_sphere(1000)
return ICRS(ra=ra, dec=dec)
@pytest.fixture(scope="function")
def fullstack_fiducial_altaz(fullstack_icrs):
altazframe = AltAz(location=EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m),
obstime=Time('J2000'))
return fullstack_icrs.transform_to(altazframe)
@pytest.fixture(scope="function", params=['J2000.1', 'J2010'])
def fullstack_times(request):
return Time(request.param)
@pytest.fixture(scope="function", params=[(0, 0, 0), (23, 0, 0), (-70, 0, 0), (0, 100, 0), (23, 0, 3000)])
def fullstack_locations(request):
return EarthLocation(lat=request.param[0]*u.deg, lon=request.param[0]*u.deg,
height=request.param[0]*u.m)
@pytest.fixture(scope="function",
params=[(0*u.bar, 0*u.deg_C, 0, 1*u.micron),
(1*u.bar, 0*u.deg_C, 0*u.one, 1*u.micron),
(1*u.bar, 10*u.deg_C, 0, 1*u.micron),
(1*u.bar, 0*u.deg_C, 50*u.percent, 1*u.micron),
(1*u.bar, 0*u.deg_C, 0, 21*u.cm)])
def fullstack_obsconditions(request):
return request.param
def _erfa_check(ira, idec, astrom):
"""
This function does the same thing the astropy layer is supposed to do, but
all in erfa
"""
cra, cdec = erfa.atciq(ira, idec, 0, 0, 0, 0, astrom)  # ICRS -> CIRS
az, zen, ha, odec, ora = erfa.atioq(cra, cdec, astrom)  # CIRS -> observed
alt = np.pi/2-zen
cra2, cdec2 = erfa.atoiq('A', az, zen, astrom)  # observed (az/zen) -> CIRS
ira2, idec2 = erfa.aticq(cra2, cdec2, astrom)  # CIRS -> ICRS (round trip)
dct = locals()
del dct['astrom']  # return every intermediate value except the astrom context
return dct
def test_iau_fullstack(fullstack_icrs, fullstack_fiducial_altaz,
fullstack_times, fullstack_locations,
fullstack_obsconditions):
"""
Test the full transform from ICRS <-> AltAz
"""
# create the altaz frame
altazframe = AltAz(obstime=fullstack_times, location=fullstack_locations,
pressure=fullstack_obsconditions[0],
temperature=fullstack_obsconditions[1],
relative_humidity=fullstack_obsconditions[2],
obswl=fullstack_obsconditions[3])
aacoo = fullstack_icrs.transform_to(altazframe)
# compare aacoo to the fiducial AltAz - should always be different
assert np.all(np.abs(aacoo.alt - fullstack_fiducial_altaz.alt) > 50*u.milliarcsecond)
assert np.all(np.abs(aacoo.az - fullstack_fiducial_altaz.az) > 50*u.milliarcsecond)
# if the refraction correction is included, we *only* do the comparisons
# where altitude > 5 degrees. The SOFA guides imply that below 5 degrees is
# where accuracy gets more problematic, and testing reveals that alt < ~0
# gives garbage round-tripping, and < 10 can give ~1 arcsec uncertainty
if fullstack_obsconditions[0].value == 0:
# but if there is no refraction correction, check everything
msk = slice(None)
tol = 5*u.microarcsecond
else:
msk = aacoo.alt > 5*u.deg
# most of them aren't this bad, but some of those at low alt are offset
# this much. For alt > 10 deg, this is always better than 100 mas
tol = 750*u.milliarcsecond
# now make sure the full stack round-tripping works
icrs2 = aacoo.transform_to(ICRS)
adras = np.abs(fullstack_icrs.ra - icrs2.ra)[msk]
addecs = np.abs(fullstack_icrs.dec - icrs2.dec)[msk]
assert np.all(adras < tol), 'largest RA change is {0} mas, > {1}'.format(np.max(adras.arcsec*1000), tol)
assert np.all(addecs < tol), 'largest Dec change is {0} mas, > {1}'.format(np.max(addecs.arcsec*1000), tol)
# check that we're consistent with the ERFA alt/az result
xp, yp = u.Quantity(iers.IERS_Auto.open().pm_xy(fullstack_times)).to_value(u.radian)
lon = fullstack_locations.geodetic[0].to_value(u.radian)
lat = fullstack_locations.geodetic[1].to_value(u.radian)
height = fullstack_locations.geodetic[2].to_value(u.m)
jd1, jd2 = get_jd12(fullstack_times, 'utc')
pressure = fullstack_obsconditions[0].to_value(u.hPa)
temperature = fullstack_obsconditions[1].to_value(u.deg_C)
# Relative humidity can be a quantity or a number.
relative_humidity = u.Quantity(fullstack_obsconditions[2], u.one).value
obswl = fullstack_obsconditions[3].to_value(u.micron)
astrom, eo = erfa.apco13(jd1, jd2,
fullstack_times.delta_ut1_utc,
lon, lat, height,
xp, yp,
pressure, temperature, relative_humidity,
obswl)
erfadct = _erfa_check(fullstack_icrs.ra.rad, fullstack_icrs.dec.rad, astrom)
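# the astropy layer should agree with the direct ERFA computation to within
# atol=1e-7 rad (about 20 mas)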
npt.assert_allclose(erfadct['alt'], aacoo.alt.radian, atol=1e-7)
npt.assert_allclose(erfadct['az'], aacoo.az.radian, atol=1e-7)
def test_fiducial_roundtrip(fullstack_icrs, fullstack_fiducial_altaz):
"""
Test the full transform from ICRS <-> AltAz
"""
aacoo = fullstack_icrs.transform_to(fullstack_fiducial_altaz)
# make sure the round-tripping works
icrs2 = aacoo.transform_to(ICRS)
npt.assert_allclose(fullstack_icrs.ra.deg, icrs2.ra.deg)
npt.assert_allclose(fullstack_icrs.dec.deg, icrs2.dec.deg)
def test_future_altaz():
"""
While this does test the full stack, it is mostly meant to check that a
warning is raised when attempting to get to AltAz in the future (beyond
IERS tables)
"""
from ...utils.exceptions import AstropyWarning
# this is an ugly hack to get the warning to show up even if it has already
# appeared
from ..builtin_frames import utils
if hasattr(utils, '__warningregistry__'):
utils.__warningregistry__.clear()
with catch_warnings() as found_warnings:
location = EarthLocation(lat=0*u.deg, lon=0*u.deg)
t = Time('J2161')
SkyCoord(1*u.deg, 2*u.deg).transform_to(AltAz(location=location, obstime=t))
# check that these message(s) appear among any other warnings. If tests are run with
# --remote-data then the IERS table will be an instance of IERS_Auto which is
# assured of being "fresh". In this case getting times outside the range of the
# table does not raise an exception. Only if using IERS_B (which happens without
# --remote-data, i.e. for all CI testing) do we expect another warning.
messages_to_find = ["Tried to get polar motions for times after IERS data is valid."]
if isinstance(iers.IERS_Auto.iers_table, iers.IERS_B):
messages_to_find.append("(some) times are outside of range covered by IERS table.")
messages_found = [False for _ in messages_to_find]
for w in found_warnings:
if issubclass(w.category, AstropyWarning):
for i, message_to_find in enumerate(messages_to_find):
if message_to_find in str(w.message):
messages_found[i] = True
assert all(messages_found)

# =============================================================================

# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Regression tests for coordinates-related bugs that don't have an obvious other
place to live
"""
import io
import pytest
import numpy as np
from ... import units as u
from .. import (AltAz, EarthLocation, SkyCoord, get_sun, ICRS, CIRS, ITRS,
GeocentricTrueEcliptic, Longitude, Latitude, GCRS, HCRS,
get_moon, FK4, FK4NoETerms, BaseCoordinateFrame,
QuantityAttribute, SphericalRepresentation,
UnitSphericalRepresentation, CartesianRepresentation)
from ..sites import get_builtin_sites
from ...time import Time
from ...utils import iers
from ...table import Table
from ...tests.helper import assert_quantity_allclose, catch_warnings
from .test_matching import HAS_SCIPY, OLDER_SCIPY
from ...units import allclose as quantity_allclose
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
def test_regression_5085():
"""
PR #5085 was put in place to fix the following issue.
Issue: https://github.com/astropy/astropy/issues/5069
At root was the transformation of Ecliptic coordinates with
non-scalar times.
"""
times = Time(["2015-08-28 03:30", "2015-09-05 10:30", "2015-09-15 18:35"])
latitudes = Latitude([3.9807075, -5.00733806, 1.69539491]*u.deg)
longitudes = Longitude([311.79678613, 72.86626741, 199.58698226]*u.deg)
distances = u.Quantity([0.00243266, 0.0025424, 0.00271296]*u.au)
coo = GeocentricTrueEcliptic(lat=latitudes,
lon=longitudes,
distance=distances, equinox=times)
# expected result
ras = Longitude([310.50095400, 314.67109920, 319.56507428]*u.deg)
decs = Latitude([-18.25190443, -17.1556676, -15.71616522]*u.deg)
distances = u.Quantity([1.78309901, 1.710874, 1.61326649]*u.au)
expected_result = GCRS(ra=ras, dec=decs,
distance=distances, obstime="J2000").cartesian.xyz
actual_result = coo.transform_to(GCRS(obstime="J2000")).cartesian.xyz
assert_quantity_allclose(expected_result, actual_result)
def test_regression_3920():
"""
Issue: https://github.com/astropy/astropy/issues/3920
"""
loc = EarthLocation.from_geodetic(0*u.deg, 0*u.deg, 0)
time = Time('2010-1-1')
aa = AltAz(location=loc, obstime=time)
sc = SkyCoord(10*u.deg, 3*u.deg)
assert sc.transform_to(aa).shape == tuple()
# That part makes sense: the input is a scalar so the output is too
sc2 = SkyCoord(10*u.deg, 3*u.deg, 1*u.AU)
assert sc2.transform_to(aa).shape == tuple()
# in 3920 that assert fails, because the shape is (1,)
# check that the same behavior occurs even if transform is from low-level classes
icoo = ICRS(sc.data)
icoo2 = ICRS(sc2.data)
assert icoo.transform_to(aa).shape == tuple()
assert icoo2.transform_to(aa).shape == tuple()
def test_regression_3938():
"""
Issue: https://github.com/astropy/astropy/issues/3938
"""
# Set up list of targets - we don't use `from_name` here to avoid
# remote_data requirements, but it does the same thing
# vega = SkyCoord.from_name('Vega')
vega = SkyCoord(279.23473479*u.deg, 38.78368896*u.deg)
# capella = SkyCoord.from_name('Capella')
capella = SkyCoord(79.17232794*u.deg, 45.99799147*u.deg)
# sirius = SkyCoord.from_name('Sirius')
sirius = SkyCoord(101.28715533*u.deg, -16.71611586*u.deg)
targets = [vega, capella, sirius]
# Feed list of targets into SkyCoord
combined_coords = SkyCoord(targets)
# Set up AltAz frame
time = Time('2012-01-01 00:00:00')
location = EarthLocation('10d', '45d', 0)
aa = AltAz(location=location, obstime=time)
combined_coords.transform_to(aa)
# in 3938 the above yields ``UnitConversionError: '' (dimensionless) and 'pc' (length) are not convertible``
def test_regression_3998():
"""
Issue: https://github.com/astropy/astropy/issues/3998
"""
time = Time('2012-01-01 00:00:00')
assert time.isscalar
sun = get_sun(time)
assert sun.isscalar
# in 3998, the above yields False - `sun` is a length-1 vector
assert sun.obstime is time
def test_regression_4033():
"""
Issue: https://github.com/astropy/astropy/issues/4033
"""
# alb = SkyCoord.from_name('Albireo')
alb = SkyCoord(292.68033548*u.deg, 27.95968007*u.deg)
alb_wdist = SkyCoord(alb, distance=133*u.pc)
# de = SkyCoord.from_name('Deneb')
de = SkyCoord(310.35797975*u.deg, 45.28033881*u.deg)
de_wdist = SkyCoord(de, distance=802*u.pc)
aa = AltAz(location=EarthLocation(lat=45*u.deg, lon=0*u.deg), obstime='2010-1-1')
deaa = de.transform_to(aa)
albaa = alb.transform_to(aa)
alb_wdistaa = alb_wdist.transform_to(aa)
de_wdistaa = de_wdist.transform_to(aa)
# these work fine
sepnod = deaa.separation(albaa)
sepwd = deaa.separation(alb_wdistaa)
assert_quantity_allclose(sepnod, 22.2862*u.deg, rtol=1e-6)
assert_quantity_allclose(sepwd, 22.2862*u.deg, rtol=1e-6)
# parallax should be present when distance added
assert np.abs(sepnod - sepwd) > 1*u.marcsec
# in 4033, the following fail with a recursion error
assert_quantity_allclose(de_wdistaa.separation(alb_wdistaa), 22.2862*u.deg, rtol=1e-3)
assert_quantity_allclose(alb_wdistaa.separation(deaa), 22.2862*u.deg, rtol=1e-3)
@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
@pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old')
def test_regression_4082():
"""
Issue: https://github.com/astropy/astropy/issues/4082
"""
from .. import search_around_sky, search_around_3d
cat = SkyCoord([10.076, 10.00455], [18.54746, 18.54896], unit='deg')
search_around_sky(cat[0:1], cat, seplimit=u.arcsec * 60, storekdtree=False)
# in the issue, this raises a TypeError
# also check 3d for good measure, although it's not really affected by this bug directly
cat3d = SkyCoord([10.076, 10.00455]*u.deg, [18.54746, 18.54896]*u.deg, distance=[0.1, 1.5]*u.kpc)
search_around_3d(cat3d[0:1], cat3d, 1*u.kpc, storekdtree=False)
def test_regression_4210():
"""
Issue: https://github.com/astropy/astropy/issues/4210
Related PR with actual change: https://github.com/astropy/astropy/pull/4211
"""
crd = SkyCoord(0*u.deg, 0*u.deg, distance=1*u.AU)
ecl = crd.geocentrictrueecliptic
# bug was that "lambda", which at the time was the name of the geocentric
# ecliptic longitude, is a reserved keyword. So this just makes sure the
# new name is are all valid
ecl.lon
# and for good measure, check that the other ecliptic systems all use the
# same names for their attributes
from ..builtin_frames import ecliptic
for frame_name in ecliptic.__all__:
eclcls = getattr(ecliptic, frame_name)
eclobj = eclcls(1*u.deg, 2*u.deg, 3*u.AU)
eclobj.lat
eclobj.lon
eclobj.distance
def test_regression_futuretimes_4302():
"""
Checks that an error is not raised for future times not covered by IERS
tables (at least in a simple transform like CIRS->ITRS that simply requires
the UTC<->UT1 conversion).
Relevant comment: https://github.com/astropy/astropy/pull/4302#discussion_r44836531
"""
from ...utils.exceptions import AstropyWarning
# this is an ugly hack to get the warning to show up even if it has already
# appeared
from ..builtin_frames import utils
if hasattr(utils, '__warningregistry__'):
utils.__warningregistry__.clear()
with catch_warnings() as found_warnings:
future_time = Time('2511-5-1')
c = CIRS(1*u.deg, 2*u.deg, obstime=future_time)
c.transform_to(ITRS(obstime=future_time))
if not isinstance(iers.IERS_Auto.iers_table, iers.IERS_Auto):
saw_iers_warnings = False
for w in found_warnings:
if issubclass(w.category, AstropyWarning):
if '(some) times are outside of range covered by IERS table' in str(w.message):
saw_iers_warnings = True
break
assert saw_iers_warnings, 'Never saw IERS warning'
def test_regression_4996():
# this part is the actual regression test
deltat = np.linspace(-12, 12, 1000)*u.hour
times = Time('2012-7-13 00:00:00') + deltat
suncoo = get_sun(times)
assert suncoo.shape == (len(times),)
# and this is an additional test to make sure more complex arrays work
times2 = Time('2012-7-13 00:00:00') + deltat.reshape(10, 20, 5)
suncoo2 = get_sun(times2)
assert suncoo2.shape == times2.shape
# this is intentionally not allclose - they should be *exactly* the same
assert np.all(suncoo.ra.ravel() == suncoo2.ra.ravel())
def test_regression_4293():
"""Really just an extra test on FK4 no e, after finding that the units
were not always taken correctly. This test is against explicitly doing
the transformations on pp170 of Explanatory Supplement to the Astronomical
Almanac (Seidelmann, 2005).
See https://github.com/astropy/astropy/pull/4293#issuecomment-234973086
"""
# Check all over sky, but avoiding poles (note that FK4 did not ignore
# e terms within 10° of the poles... see p170 of explan.supp.).
ra, dec = np.meshgrid(np.arange(0, 359, 45), np.arange(-80, 81, 40))
fk4 = FK4(ra.ravel() * u.deg, dec.ravel() * u.deg)
Dc = -0.065838*u.arcsec
Dd = +0.335299*u.arcsec
# Dc * tan(obliquity), as given on p.170
Dctano = -0.028553*u.arcsec
fk4noe_dec = (fk4.dec - (Dd*np.cos(fk4.ra) -
Dc*np.sin(fk4.ra))*np.sin(fk4.dec) -
Dctano*np.cos(fk4.dec))
fk4noe_ra = fk4.ra - (Dc*np.cos(fk4.ra) +
Dd*np.sin(fk4.ra)) / np.cos(fk4.dec)
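# (fk4noe_dec and fk4noe_ra above apply the first-order e-term corrections
# from p. 170 of the Explanatory Supplement by hand, for comparison with the
# built-in transform below)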
fk4noe = fk4.transform_to(FK4NoETerms)
# Tolerance here just set to how well the coordinates match, which is much
# better than the claimed accuracy of <1 mas for this first-order in
# v_earth/c approximation.
# Interestingly, if one divides by np.cos(fk4noe_dec) in the ra correction,
# the match becomes good to 2 μas.
assert_quantity_allclose(fk4noe.ra, fk4noe_ra, atol=11.*u.uas, rtol=0)
assert_quantity_allclose(fk4noe.dec, fk4noe_dec, atol=3.*u.uas, rtol=0)
def test_regression_4926():
times = Time('2010-01-1') + np.arange(20)*u.day
green = get_builtin_sites()['greenwich']
# this is the regression test
moon = get_moon(times, green)
# this is an additional test to make sure the GCRS->ICRS transform works for complex shapes
moon.transform_to(ICRS())
# and some others to increase coverage of transforms
moon.transform_to(HCRS(obstime="J2000"))
moon.transform_to(HCRS(obstime=times))
def test_regression_5209():
"check that distances are not lost on SkyCoord init"
time = Time('2015-01-01')
moon = get_moon(time)
new_coord = SkyCoord([moon])
assert_quantity_allclose(new_coord[0].distance, moon.distance)
def test_regression_5133():
N = 1000
np.random.seed(12345)
lon = np.random.uniform(-10, 10, N) * u.deg
lat = np.random.uniform(50, 52, N) * u.deg
alt = np.random.uniform(0, 10., N) * u.km
time = Time('2010-1-1')
objects = EarthLocation.from_geodetic(lon, lat, height=alt)
itrs_coo = objects.get_itrs(time)
homes = [EarthLocation.from_geodetic(lon=-1 * u.deg, lat=52 * u.deg, height=h)
for h in (0, 1000, 10000)*u.km]
altaz_frames = [AltAz(obstime=time, location=h) for h in homes]
altaz_coos = [itrs_coo.transform_to(f) for f in altaz_frames]
# they should all be different
for coo in altaz_coos[1:]:
assert not quantity_allclose(coo.az, coo.az[0])
assert not quantity_allclose(coo.alt, coo.alt[0])
def test_itrs_vals_5133():
time = Time('2010-1-1')
el = EarthLocation.from_geodetic(lon=20*u.deg, lat=45*u.deg, height=0*u.km)
lons = [20, 30, 20]*u.deg
lats = [44, 45, 45]*u.deg
alts = [0, 0, 10]*u.km
coos = [EarthLocation.from_geodetic(lon, lat, height=alt).get_itrs(time)
for lon, lat, alt in zip(lons, lats, alts)]
aaf = AltAz(obstime=time, location=el)
aacs = [coo.transform_to(aaf) for coo in coos]
assert all([coo.isscalar for coo in aacs])
# the ~1 arcsec tolerance is b/c aberration makes it not exact
assert_quantity_allclose(aacs[0].az, 180*u.deg, atol=1*u.arcsec)
assert aacs[0].alt < 0*u.deg
assert aacs[0].distance > 50*u.km
# it should *not* actually be 90 degrees, b/c constant latitude is not
# straight east anywhere except the equator... but should be close-ish
assert_quantity_allclose(aacs[1].az, 90*u.deg, atol=5*u.deg)
assert aacs[1].alt < 0*u.deg
assert aacs[1].distance > 50*u.km
assert_quantity_allclose(aacs[2].alt, 90*u.deg, atol=1*u.arcsec)
assert_quantity_allclose(aacs[2].distance, 10*u.km)
def test_regression_simple_5133():
t = Time('J2010')
obj = EarthLocation(-1*u.deg, 52*u.deg, height=[100., 0.]*u.km)
home = EarthLocation(-1*u.deg, 52*u.deg, height=10.*u.km)
aa = obj.get_itrs(t).transform_to(AltAz(obstime=t, location=home))
# az is more-or-less undefined for straight up or down
assert_quantity_allclose(aa.alt, [90, -90]*u.deg, rtol=1e-5)
assert_quantity_allclose(aa.distance, [90, 10]*u.km)
def test_regression_5743():
sc = SkyCoord([5, 10], [20, 30], unit=u.deg,
obstime=['2017-01-01T00:00', '2017-01-01T00:10'])
assert sc[0].obstime.shape == tuple()
def test_regression_5889_5890():
# ensure we can represent all Representations and transform to ND frames
greenwich = EarthLocation(
*u.Quantity([3980608.90246817, -102.47522911, 4966861.27310067],
unit=u.m))
times = Time("2017-03-20T12:00:00") + np.linspace(-2, 2, 3)*u.hour
moon = get_moon(times, location=greenwich)
targets = SkyCoord([350.7*u.deg, 260.7*u.deg], [18.4*u.deg, 22.4*u.deg])
targs2d = targets[:, np.newaxis]
targs2d.transform_to(moon)
def test_regression_6236():
# sunpy changes its representation upon initialisation of a frame,
# including via `realize_frame`. Ensure this works.
class MyFrame(BaseCoordinateFrame):
default_representation = CartesianRepresentation
my_attr = QuantityAttribute(default=0, unit=u.m)
class MySpecialFrame(MyFrame):
def __init__(self, *args, **kwargs):
_rep_kwarg = kwargs.get('representation', None)
super().__init__(*args, **kwargs)
if not _rep_kwarg:
self.representation = self.default_representation
self._data = self.data.represent_as(self.representation)
rep1 = UnitSphericalRepresentation([0., 1]*u.deg, [2., 3.]*u.deg)
rep2 = SphericalRepresentation([10., 11]*u.deg, [12., 13.]*u.deg,
[14., 15.]*u.kpc)
mf1 = MyFrame(rep1, my_attr=1.*u.km)
mf2 = mf1.realize_frame(rep2)
# Normally, data is stored as is, but the representation gets set to a
# default, even if a different representation instance was passed in.
# realize_frame should do the same. Just in case, check attrs are passed.
assert mf1.data is rep1
assert mf2.data is rep2
assert mf1.representation is CartesianRepresentation
assert mf2.representation is CartesianRepresentation
assert mf2.my_attr == mf1.my_attr
# It should be independent of whether I set the representation explicitly
mf3 = MyFrame(rep1, my_attr=1.*u.km, representation='unitspherical')
mf4 = mf3.realize_frame(rep2)
assert mf3.data is rep1
assert mf4.data is rep2
assert mf3.representation is UnitSphericalRepresentation
assert mf4.representation is CartesianRepresentation
assert mf4.my_attr == mf3.my_attr
# This should be enough to help sunpy, but just to be sure, a test
# even closer to what is done there, i.e., transform the representation.
msf1 = MySpecialFrame(rep1, my_attr=1.*u.km)
msf2 = msf1.realize_frame(rep2)
assert msf1.data is not rep1 # Gets transformed to Cartesian.
assert msf2.data is not rep2
assert type(msf1.data) is CartesianRepresentation
assert type(msf2.data) is CartesianRepresentation
assert msf1.representation is CartesianRepresentation
assert msf2.representation is CartesianRepresentation
assert msf2.my_attr == msf1.my_attr
# And finally a test where the input is not transformed.
msf3 = MySpecialFrame(rep1, my_attr=1.*u.km,
representation='unitspherical')
msf4 = msf3.realize_frame(rep2)
assert msf3.data is rep1
assert msf4.data is not rep2
assert msf3.representation is UnitSphericalRepresentation
assert msf4.representation is CartesianRepresentation
assert msf4.my_attr == msf3.my_attr
@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
@pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old')
def test_regression_6347():
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg)
sc2 = SkyCoord([1.1, 2.1]*u.deg, [3.1, 4.1]*u.deg)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_sky(sc2, 10*u.arcmin)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_sky(sc2, 1*u.arcmin)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_sky(sc2, 10*u.arcmin)
assert len(d2d_10) == 2
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
@pytest.mark.skipif(not HAS_SCIPY, reason='No Scipy')
@pytest.mark.skipif(OLDER_SCIPY, reason='Scipy too old')
def test_regression_6347_3d():
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, [5, 6]*u.kpc)
sc2 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, [5.1, 6.1]*u.kpc)
sc0 = sc1[:0]
idx1_10, idx2_10, d2d_10, d3d_10 = sc1.search_around_3d(sc2, 500*u.pc)
idx1_1, idx2_1, d2d_1, d3d_1 = sc1.search_around_3d(sc2, 50*u.pc)
idx1_0, idx2_0, d2d_0, d3d_0 = sc0.search_around_3d(sc2, 500*u.pc)
assert len(d2d_10) > 0
assert len(d2d_0) == 0
assert type(d2d_0) is type(d2d_10)
assert len(d2d_1) == 0
assert type(d2d_1) is type(d2d_10)
def test_regression_6300():
"""Check that importing old frame attribute names from astropy.coordinates
still works. See comments at end of #6300
"""
from ...utils.exceptions import AstropyDeprecationWarning
from .. import CartesianRepresentation
from .. import (TimeFrameAttribute, QuantityFrameAttribute,
CartesianRepresentationFrameAttribute)
with catch_warnings() as found_warnings:
attr = TimeFrameAttribute(default=Time("J2000"))
for w in found_warnings:
if issubclass(w.category, AstropyDeprecationWarning):
break
else:
assert False, "Deprecation warning not raised"
with catch_warnings() as found_warnings:
attr = QuantityFrameAttribute(default=5*u.km)
for w in found_warnings:
if issubclass(w.category, AstropyDeprecationWarning):
break
else:
assert False, "Deprecation warning not raised"
with catch_warnings() as found_warnings:
attr = CartesianRepresentationFrameAttribute(
default=CartesianRepresentation([5,6,7]*u.kpc))
for w in found_warnings:
if issubclass(w.category, AstropyDeprecationWarning):
break
else:
assert False, "Deprecation warning not raised"
def test_gcrs_itrs_cartesian_repr():
# issue 6436: transformation failed if coordinate representation was
# Cartesian
gcrs = GCRS(CartesianRepresentation((859.07256, -4137.20368, 5295.56871),
unit='km'), representation='cartesian')
gcrs.transform_to(ITRS)
@pytest.mark.skipif('not HAS_YAML')
def test_regression_6446():
# this succeeds even before 6446:
sc1 = SkyCoord([1, 2], [3, 4], unit='deg')
t1 = Table([sc1])
sio1 = io.StringIO()
t1.write(sio1, format='ascii.ecsv')
# but this fails due to the 6446 bug
c1 = SkyCoord(1, 3, unit='deg')
c2 = SkyCoord(2, 4, unit='deg')
sc2 = SkyCoord([c1, c2])
t2 = Table([sc2])
sio2 = io.StringIO()
t2.write(sio2, format='ascii.ecsv')
assert sio1.getvalue() == sio2.getvalue()
def test_regression_6448():
"""
This tests the more narrow problem reported in 6446 that 6448 is meant to
fix. `test_regression_6446` also covers this, but this test is provided
so that this is still tested even if YAML isn't installed.
"""
sc1 = SkyCoord([1, 2], [3, 4], unit='deg')
# this should always succeed even prior to 6448
assert sc1.galcen_v_sun is None
c1 = SkyCoord(1, 3, unit='deg')
c2 = SkyCoord(2, 4, unit='deg')
sc2 = SkyCoord([c1, c2])
# without 6448 this fails
assert sc2.galcen_v_sun is None
def test_regression_6597():
frame_name = 'galactic'
c1 = SkyCoord(1, 3, unit='deg', frame=frame_name)
c2 = SkyCoord(2, 4, unit='deg', frame=frame_name)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame_name
def test_regression_6597_2():
"""
This tests the more subtle flaw that #6597 indirectly uncovered: that even
in the case that the frames are ra/dec, they still might be the wrong *kind*
"""
frame = FK4(equinox='J1949')
c1 = SkyCoord(1, 3, unit='deg', frame=frame)
c2 = SkyCoord(2, 4, unit='deg', frame=frame)
sc1 = SkyCoord([c1, c2])
assert sc1.frame.name == frame.name
def test_regression_6697():
"""
Test for regression of a bug in get_gcrs_posvel that introduced errors at the 1m/s level.
Comparison data is derived from calculation in PINT
https://github.com/nanograv/PINT/blob/master/pint/erfautils.py
"""
pint_vels = CartesianRepresentation(*(348.63632871, -212.31704928, -0.60154936), unit=u.m/u.s)
location = EarthLocation(*(5327448.9957829, -1718665.73869569, 3051566.90295403), unit=u.m)
t = Time(2458036.161966612, format='jd', scale='utc')
obsgeopos, obsgeovel = location.get_gcrs_posvel(t)
delta = (obsgeovel-pint_vels).norm()
assert delta < 1*u.cm/u.s

# =============================================================================

# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy import testing as npt
from ...tests.helper import assert_quantity_allclose as assert_allclose
from ... import units as u
from ...utils import minversion
from .. import matching
"""
These are the tests for coordinate matching.
Note that this requires scipy.
"""
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False):
OLDER_SCIPY = False
else:
OLDER_SCIPY = True
@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_matching_function():
from .. import ICRS
from ..matching import match_coordinates_3d
# this only uses match_coordinates_3d because that's the actual implementation
cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree)
ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree)
idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx, [3, 1])
npt.assert_array_almost_equal(d2d.degree, [0, 0.1])
assert d3d.value[0] == 0
idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog, nthneighbor=2)
assert np.all(idx == 2)
npt.assert_array_almost_equal(d2d.degree, [1, 0.9])
npt.assert_array_less(d3d.value, 0.02)
@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_matching_function_3d_and_sky():
from .. import ICRS
from ..matching import match_coordinates_3d, match_coordinates_sky
cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc)
ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc)
idx, d2d, d3d = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx, [2, 3])
assert_allclose(d2d, [1, 1.9] * u.deg)
assert np.abs(d3d[0].to_value(u.kpc) - np.radians(1)) < 1e-6
assert np.abs(d3d[1].to_value(u.kpc) - 5*np.radians(1.9)) < 1e-5
idx, d2d, d3d = match_coordinates_sky(cmatch, ccatalog)
npt.assert_array_equal(idx, [3, 1])
assert_allclose(d2d, [0, 0.1] * u.deg)
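# (the 3d distances follow from the law of cosines; e.g. for the second
# pair: sqrt(1**2 + 5**2 - 2*1*5*cos(0.1 deg)) kpc ~= 4.0000019 kpc)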
assert_allclose(d3d, [4, 4.0000019] * u.kpc)
@pytest.mark.parametrize('functocheck, args, defaultkdtname, bothsaved',
[(matching.match_coordinates_3d, [], 'kdtree_3d', False),
(matching.match_coordinates_sky, [], 'kdtree_sky', False),
(matching.search_around_3d, [1*u.kpc], 'kdtree_3d', True),
(matching.search_around_sky, [1*u.deg], 'kdtree_sky', False)
])
@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_kdtree_storage(functocheck, args, defaultkdtname, bothsaved):
from .. import ICRS
def make_scs():
cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 2]*u.kpc)
ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 2, 3, 4]*u.kpc)
return cmatch, ccatalog
cmatch, ccatalog = make_scs()
functocheck(cmatch, ccatalog, *args, storekdtree=False)
assert 'kdtree' not in ccatalog.cache
assert defaultkdtname not in ccatalog.cache
cmatch, ccatalog = make_scs()
functocheck(cmatch, ccatalog, *args)
assert defaultkdtname in ccatalog.cache
assert 'kdtree' not in ccatalog.cache
cmatch, ccatalog = make_scs()
functocheck(cmatch, ccatalog, *args, storekdtree=True)
assert 'kdtree' in ccatalog.cache
assert defaultkdtname not in ccatalog.cache
cmatch, ccatalog = make_scs()
assert 'tislit_cheese' not in ccatalog.cache
functocheck(cmatch, ccatalog, *args, storekdtree='tislit_cheese')
assert 'tislit_cheese' in ccatalog.cache
assert defaultkdtname not in ccatalog.cache
assert 'kdtree' not in ccatalog.cache
if bothsaved:
assert 'tislit_cheese' in cmatch.cache
assert defaultkdtname not in cmatch.cache
assert 'kdtree' not in cmatch.cache
else:
assert 'tislit_cheese' not in cmatch.cache
# now a bit of a hacky trick to make sure it at least tries to *use* it
ccatalog.cache['tislit_cheese'] = 1
cmatch.cache['tislit_cheese'] = 1
with pytest.raises(TypeError) as e:
functocheck(cmatch, ccatalog, *args, storekdtree='tislit_cheese')
assert 'KD' in e.value.args[0]
@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_python_kdtree(monkeypatch):
from .. import ICRS
cmatch = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 2]*u.kpc)
ccatalog = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 2, 3, 4]*u.kpc)
monkeypatch.delattr("scipy.spatial.cKDTree")
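# deleting the compiled cKDTree forces the fall-back to the pure-python
# scipy.spatial.KDTree; this just checks that matching still runs that way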
matching.match_coordinates_sky(cmatch, ccatalog)
@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_matching_method():
from .. import ICRS, SkyCoord
from ...utils import NumpyRNGContext
from ..matching import match_coordinates_3d, match_coordinates_sky
with NumpyRNGContext(987654321):
cmatch = ICRS(np.random.rand(20) * 360.*u.degree,
(np.random.rand(20) * 180. - 90.)*u.degree)
ccatalog = ICRS(np.random.rand(100) * 360. * u.degree,
(np.random.rand(100) * 180. - 90.)*u.degree)
idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_3d(ccatalog)
idx2, d2d2, d3d2 = match_coordinates_3d(cmatch, ccatalog)
npt.assert_array_equal(idx1, idx2)
assert_allclose(d2d1, d2d2)
assert_allclose(d3d1, d3d2)
# should be the same as above because there's no distance, but just make sure this method works
idx1, d2d1, d3d1 = SkyCoord(cmatch).match_to_catalog_sky(ccatalog)
idx2, d2d2, d3d2 = match_coordinates_sky(cmatch, ccatalog)
npt.assert_array_equal(idx1, idx2)
assert_allclose(d2d1, d2d2)
assert_allclose(d3d1, d3d2)
assert len(idx1) == len(d2d1) == len(d3d1) == 20
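# A minimal usage sketch of the user-facing API exercised above (illustrative
# values only, not part of the test suite):
#
#     from astropy.coordinates import SkyCoord
#     import astropy.units as u
#     sc = SkyCoord(ra=[10., 11.]*u.deg, dec=[-5., -6.]*u.deg)
#     cat = SkyCoord(ra=[10.1, 45.]*u.deg, dec=[-5.1, 0.]*u.deg)
#     idx, sep2d, dist3d = sc.match_to_catalog_sky(cat)
#     # idx indexes into ``cat``; sep2d/dist3d are the on-sky and 3D offsets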
@pytest.mark.skipif(str('not HAS_SCIPY'))
@pytest.mark.skipif(str('OLDER_SCIPY'))
def test_search_around():
from .. import ICRS, SkyCoord
from ..matching import search_around_sky, search_around_3d
coo1 = ICRS([4, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc)
coo2 = ICRS([1, 2, 3, 4]*u.degree, [0, 0, 0, 0]*u.degree, distance=[1, 1, 1, 5] * u.kpc)
idx1_1deg, idx2_1deg, d2d_1deg, d3d_1deg = search_around_sky(coo1, coo2, 1.01*u.deg)
idx1_0p05deg, idx2_0p05deg, d2d_0p05deg, d3d_0p05deg = search_around_sky(coo1, coo2, 0.05*u.deg)
assert list(zip(idx1_1deg, idx2_1deg)) == [(0, 2), (0, 3), (1, 1), (1, 2)]
assert d2d_1deg[0] == 1.0*u.deg
assert_allclose(d2d_1deg, [1, 0, .1, .9]*u.deg)
assert list(zip(idx1_0p05deg, idx2_0p05deg)) == [(0, 3)]
idx1_1kpc, idx2_1kpc, d2d_1kpc, d3d_1kpc = search_around_3d(coo1, coo2, 1*u.kpc)
idx1_sm, idx2_sm, d2d_sm, d3d_sm = search_around_3d(coo1, coo2, 0.05*u.kpc)
assert list(zip(idx1_1kpc, idx2_1kpc)) == [(0, 0), (0, 1), (0, 2), (1, 3)]
assert list(zip(idx1_sm, idx2_sm)) == [(0, 1), (0, 2)]
assert_allclose(d2d_sm, [2, 1]*u.deg)
# Test for the non-matches, #4877
coo1 = ICRS([4.1, 2.1]*u.degree, [0, 0]*u.degree, distance=[1, 5] * u.kpc)
idx1, idx2, d2d, d3d = search_around_sky(coo1, coo2, 1*u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
    assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(coo1, coo2, 1*u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
    assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
# Test when one or both of the coordinate arrays is empty, #4875
empty = ICRS(ra=[] * u.degree, dec=[] * u.degree, distance=[] * u.kpc)
idx1, idx2, d2d, d3d = search_around_sky(empty, coo2, 1*u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
    assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_sky(coo1, empty, 1*u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
    assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
empty = ICRS(ra=[] * u.degree, dec=[] * u.degree, distance=[] * u.kpc)
idx1, idx2, d2d, d3d = search_around_sky(empty, empty[:], 1*u.arcsec)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
    assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(empty, coo2, 1*u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
    assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(coo1, empty, 1*u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
    assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
idx1, idx2, d2d, d3d = search_around_3d(empty, empty[:], 1*u.m)
assert idx1.size == idx2.size == d2d.size == d3d.size == 0
    assert idx1.dtype == idx2.dtype == int
assert d2d.unit == u.deg
assert d3d.unit == u.kpc
# Test that input without distance units results in a
# 'dimensionless_unscaled' unit
cempty = SkyCoord(ra=[], dec=[], unit=u.deg)
idx1, idx2, d2d, d3d = search_around_3d(cempty, cempty[:], 1*u.m)
assert d2d.unit == u.deg
assert d3d.unit == u.dimensionless_unscaled
idx1, idx2, d2d, d3d = search_around_sky(cempty, cempty[:], 1*u.m)
assert d2d.unit == u.deg
assert d3d.unit == u.dimensionless_unscaled
@pytest.mark.skipif(str('not HAS_SCIPY'))
@pytest.mark.skipif(str('OLDER_SCIPY'))
def test_search_around_scalar():
from astropy.coordinates import SkyCoord, Angle
cat = SkyCoord([1, 2, 3], [-30, 45, 8], unit="deg")
target = SkyCoord('1.1 -30.1', unit="deg")
with pytest.raises(ValueError) as excinfo:
cat.search_around_sky(target, Angle('2d'))
# make sure the error message is *specific* to search_around_sky rather than
# generic as reported in #3359
assert 'search_around_sky' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
cat.search_around_3d(target, Angle('2d'))
assert 'search_around_3d' in str(excinfo.value)
@pytest.mark.skipif(str('not HAS_SCIPY'))
@pytest.mark.skipif(str('OLDER_SCIPY'))
def test_match_catalog_empty():
from astropy.coordinates import SkyCoord
sc1 = SkyCoord(1, 2, unit="deg")
cat0 = SkyCoord([], [], unit="deg")
cat1 = SkyCoord([1.1], [2.1], unit="deg")
cat2 = SkyCoord([1.1, 3], [2.1, 5], unit="deg")
sc1.match_to_catalog_sky(cat2)
sc1.match_to_catalog_3d(cat2)
sc1.match_to_catalog_sky(cat1)
sc1.match_to_catalog_3d(cat1)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_sky(cat1[0])
assert 'catalog' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_3d(cat1[0])
assert 'catalog' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_sky(cat0)
assert 'catalog' in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
sc1.match_to_catalog_3d(cat0)
assert 'catalog' in str(excinfo.value)
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from copy import deepcopy
from collections import OrderedDict
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ... import units as u
from ...tests.helper import (assert_quantity_allclose as
assert_allclose_quantity, catch_warnings)
from ...utils import isiterable
from ...utils.compat import NUMPY_LT_1_14
from ...utils.exceptions import AstropyDeprecationWarning
from ..angles import Longitude, Latitude, Angle
from ..distances import Distance
from ..representation import (REPRESENTATION_CLASSES,
DIFFERENTIAL_CLASSES,
BaseRepresentation,
SphericalRepresentation,
UnitSphericalRepresentation,
SphericalCosLatDifferential,
CartesianRepresentation,
CylindricalRepresentation,
PhysicsSphericalRepresentation,
CartesianDifferential,
SphericalDifferential,
_combine_xyz)
# Preserve the original REPRESENTATION_CLASSES dict so that importing
# the test file doesn't add a persistent test subclass (LogDRepresentation)
def setup_function(func):
func.REPRESENTATION_CLASSES_ORIG = deepcopy(REPRESENTATION_CLASSES)
def teardown_function(func):
REPRESENTATION_CLASSES.clear()
REPRESENTATION_CLASSES.update(func.REPRESENTATION_CLASSES_ORIG)
class TestSphericalRepresentation:
def test_name(self):
assert SphericalRepresentation.get_name() == 'spherical'
assert SphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = SphericalRepresentation()
def test_init_quantity(self):
s3 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
assert s3.lon == 8. * u.hourangle
assert s3.lat == 5. * u.deg
assert s3.distance == 10 * u.kpc
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
assert isinstance(s3.distance, Distance)
def test_init_lonlat(self):
s2 = SphericalRepresentation(Longitude(8, u.hour),
Latitude(5, u.deg),
Distance(10, u.kpc))
assert s2.lon == 8. * u.hourangle
assert s2.lat == 5. * u.deg
assert s2.distance == 10. * u.kpc
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
assert isinstance(s2.distance, Distance)
# also test that wrap_angle is preserved
s3 = SphericalRepresentation(Longitude(-90, u.degree,
wrap_angle=180*u.degree),
Latitude(-45, u.degree),
Distance(1., u.Rsun))
assert s3.lon == -90. * u.degree
assert s3.lon.wrap_angle == 180 * u.degree
def test_init_array(self):
s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert_allclose(s1.distance.kpc, [1, 2])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
assert isinstance(s1.distance, Distance)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
distance = Distance([1, 2] * u.kpc)
s1 = SphericalRepresentation(lon=lon, lat=lat, distance=distance, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
distance[:] = [8, 9] * u.Mpc
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
assert_allclose_quantity(distance, s1.distance)
def test_init_float32_array(self):
"""Regression test against #2983"""
lon = Longitude(np.float32([1., 2.]), u.degree)
lat = Latitude(np.float32([3., 4.]), u.degree)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
assert s1.lon.dtype == np.float32
assert s1.lat.dtype == np.float32
assert s1._values['lon'].dtype == np.float32
assert s1._values['lat'].dtype == np.float32
def test_reprobj(self):
s1 = SphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg, distance=10 * u.kpc)
s2 = SphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8. * u.hourangle)
assert_allclose_quantity(s2.lat, 5. * u.deg)
assert_allclose_quantity(s2.distance, 10 * u.kpc)
def test_broadcasting(self):
s1 = SphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg,
distance=10 * u.kpc)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
assert_allclose_quantity(s1.distance, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = SphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg,
distance=[1, 2] * u.kpc)
assert exc.value.args[0] == "Input parameters lon, lat, and distance cannot be broadcast"
def test_readonly(self):
s1 = SphericalRepresentation(lon=8 * u.hourangle,
lat=5 * u.deg,
distance=1. * u.kpc)
with pytest.raises(AttributeError):
s1.lon = 1. * u.deg
with pytest.raises(AttributeError):
s1.lat = 1. * u.deg
with pytest.raises(AttributeError):
s1.distance = 1. * u.kpc
def test_getitem_len_iterable(self):
s = SphericalRepresentation(lon=np.arange(10) * u.deg,
lat=-np.arange(10) * u.deg,
distance=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.distance, [1, 1, 1] * u.kpc)
assert len(s) == 10
assert isiterable(s)
def test_getitem_len_iterable_scalar(self):
s = SphericalRepresentation(lon=1 * u.deg,
lat=-2 * u.deg,
distance=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
with pytest.raises(TypeError):
len(s)
assert not isiterable(s)
class TestUnitSphericalRepresentation:
def test_name(self):
assert UnitSphericalRepresentation.get_name() == 'unitspherical'
assert UnitSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = UnitSphericalRepresentation()
def test_init_quantity(self):
s3 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
assert s3.lon == 8. * u.hourangle
assert s3.lat == 5. * u.deg
assert isinstance(s3.lon, Longitude)
assert isinstance(s3.lat, Latitude)
def test_init_lonlat(self):
s2 = UnitSphericalRepresentation(Longitude(8, u.hour),
Latitude(5, u.deg))
assert s2.lon == 8. * u.hourangle
assert s2.lat == 5. * u.deg
assert isinstance(s2.lon, Longitude)
assert isinstance(s2.lat, Latitude)
def test_init_array(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg)
assert_allclose(s1.lon.degree, [120, 135])
assert_allclose(s1.lat.degree, [5, 6])
assert isinstance(s1.lon, Longitude)
assert isinstance(s1.lat, Latitude)
def test_init_array_nocopy(self):
lon = Longitude([8, 9] * u.hourangle)
lat = Latitude([5, 6] * u.deg)
s1 = UnitSphericalRepresentation(lon=lon, lat=lat, copy=False)
lon[:] = [1, 2] * u.rad
lat[:] = [3, 4] * u.arcmin
assert_allclose_quantity(lon, s1.lon)
assert_allclose_quantity(lat, s1.lat)
def test_reprobj(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle, lat=5 * u.deg)
s2 = UnitSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.lon, 8. * u.hourangle)
assert_allclose_quantity(s2.lat, 5. * u.deg)
def test_broadcasting(self):
s1 = UnitSphericalRepresentation(lon=[8, 9] * u.hourangle,
lat=[5, 6] * u.deg)
assert_allclose_quantity(s1.lon, [120, 135] * u.degree)
assert_allclose_quantity(s1.lat, [5, 6] * u.degree)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = UnitSphericalRepresentation(lon=[8, 9, 10] * u.hourangle,
lat=[5, 6] * u.deg)
assert exc.value.args[0] == "Input parameters lon and lat cannot be broadcast"
def test_readonly(self):
s1 = UnitSphericalRepresentation(lon=8 * u.hourangle,
lat=5 * u.deg)
with pytest.raises(AttributeError):
s1.lon = 1. * u.deg
with pytest.raises(AttributeError):
s1.lat = 1. * u.deg
def test_getitem(self):
s = UnitSphericalRepresentation(lon=np.arange(10) * u.deg,
lat=-np.arange(10) * u.deg)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.lon, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.lat, [-2, -4, -6] * u.deg)
def test_getitem_scalar(self):
s = UnitSphericalRepresentation(lon=1 * u.deg,
lat=-2 * u.deg)
with pytest.raises(TypeError):
s_slc = s[0]
class TestPhysicsSphericalRepresentation:
def test_name(self):
assert PhysicsSphericalRepresentation.get_name() == 'physicsspherical'
assert PhysicsSphericalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = PhysicsSphericalRepresentation()
def test_init_quantity(self):
s3 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc)
assert s3.phi == 8. * u.hourangle
assert s3.theta == 5. * u.deg
assert s3.r == 10 * u.kpc
assert isinstance(s3.phi, Angle)
assert isinstance(s3.theta, Angle)
assert isinstance(s3.r, Distance)
def test_init_phitheta(self):
s2 = PhysicsSphericalRepresentation(Angle(8, u.hour),
Angle(5, u.deg),
Distance(10, u.kpc))
assert s2.phi == 8. * u.hourangle
assert s2.theta == 5. * u.deg
assert s2.r == 10. * u.kpc
assert isinstance(s2.phi, Angle)
assert isinstance(s2.theta, Angle)
assert isinstance(s2.r, Distance)
def test_init_array(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=[1, 2] * u.kpc)
assert_allclose(s1.phi.degree, [120, 135])
assert_allclose(s1.theta.degree, [5, 6])
assert_allclose(s1.r.kpc, [1, 2])
assert isinstance(s1.phi, Angle)
assert isinstance(s1.theta, Angle)
assert isinstance(s1.r, Distance)
def test_init_array_nocopy(self):
phi = Angle([8, 9] * u.hourangle)
theta = Angle([5, 6] * u.deg)
r = Distance([1, 2] * u.kpc)
s1 = PhysicsSphericalRepresentation(phi=phi, theta=theta, r=r, copy=False)
phi[:] = [1, 2] * u.rad
theta[:] = [3, 4] * u.arcmin
r[:] = [8, 9] * u.Mpc
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(theta, s1.theta)
assert_allclose_quantity(r, s1.r)
def test_reprobj(self):
s1 = PhysicsSphericalRepresentation(phi=8 * u.hourangle, theta=5 * u.deg, r=10 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
assert_allclose_quantity(s2.phi, 8. * u.hourangle)
assert_allclose_quantity(s2.theta, 5. * u.deg)
assert_allclose_quantity(s2.r, 10 * u.kpc)
def test_broadcasting(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=10 * u.kpc)
assert_allclose_quantity(s1.phi, [120, 135] * u.degree)
assert_allclose_quantity(s1.theta, [5, 6] * u.degree)
assert_allclose_quantity(s1.r, [10, 10] * u.kpc)
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = PhysicsSphericalRepresentation(phi=[8, 9, 10] * u.hourangle,
theta=[5, 6] * u.deg,
r=[1, 2] * u.kpc)
assert exc.value.args[0] == "Input parameters phi, theta, and r cannot be broadcast"
def test_readonly(self):
s1 = PhysicsSphericalRepresentation(phi=[8, 9] * u.hourangle,
theta=[5, 6] * u.deg,
r=[10, 20] * u.kpc)
with pytest.raises(AttributeError):
s1.phi = 1. * u.deg
with pytest.raises(AttributeError):
s1.theta = 1. * u.deg
with pytest.raises(AttributeError):
s1.r = 1. * u.kpc
def test_getitem(self):
s = PhysicsSphericalRepresentation(phi=np.arange(10) * u.deg,
theta=np.arange(5, 15) * u.deg,
r=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.phi, [2, 4, 6] * u.deg)
assert_allclose_quantity(s_slc.theta, [7, 9, 11] * u.deg)
assert_allclose_quantity(s_slc.r, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = PhysicsSphericalRepresentation(phi=1 * u.deg,
theta=2 * u.deg,
r=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
class TestCartesianRepresentation:
def test_name(self):
assert CartesianRepresentation.get_name() == 'cartesian'
assert CartesianRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CartesianRepresentation()
def test_init_quantity(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_singleunit(self):
s1 = CartesianRepresentation(x=1, y=2, z=3, unit=u.kpc)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc,
y=[2, 3, 4] * u.Mpc,
z=[3, 4, 5] * u.kpc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.Mpc
assert s1.z.unit is u.kpc
assert_allclose(s1.x.value, [1, 2, 3])
assert_allclose(s1.y.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_one_array(self):
s1 = CartesianRepresentation(x=[1, 2, 3] * u.pc)
assert s1.x.unit is u.pc
assert s1.y.unit is u.pc
assert s1.z.unit is u.pc
assert_allclose(s1.x.value, 1)
assert_allclose(s1.y.value, 2)
assert_allclose(s1.z.value, 3)
r = np.arange(27.).reshape(3, 3, 3) * u.kpc
s2 = CartesianRepresentation(r, xyz_axis=0)
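        # xyz_axis=0 means the leading axis of ``r`` holds the (x, y, z)
        # components, so the remaining axes become the representation's shape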
assert s2.shape == (3, 3)
assert s2.x.unit == u.kpc
assert np.all(s2.x == r[0])
assert np.all(s2.xyz == r)
assert np.all(s2.get_xyz(xyz_axis=0) == r)
s3 = CartesianRepresentation(r, xyz_axis=1)
assert s3.shape == (3, 3)
assert np.all(s3.x == r[:, 0])
assert np.all(s3.y == r[:, 1])
assert np.all(s3.z == r[:, 2])
assert np.all(s3.get_xyz(xyz_axis=1) == r)
s4 = CartesianRepresentation(r, xyz_axis=2)
assert s4.shape == (3, 3)
assert np.all(s4.x == r[:, :, 0])
assert np.all(s4.get_xyz(xyz_axis=2) == r)
s5 = CartesianRepresentation(r, unit=u.pc)
assert s5.x.unit == u.pc
assert np.all(s5.xyz == r)
s6 = CartesianRepresentation(r.value, unit=u.pc, xyz_axis=2)
assert s6.x.unit == u.pc
assert np.all(s6.get_xyz(xyz_axis=2).value == r.value)
def test_init_one_array_size_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc)
assert exc.value.args[0].startswith("too many values to unpack")
def test_init_xyz_but_more_than_one_array_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3] * u.pc, y=[2, 3, 4] * u.pc,
z=[3, 4, 5] * u.pc, xyz_axis=0)
assert 'xyz_axis should only be set' in str(exc)
def test_init_one_array_yz_fail(self):
with pytest.raises(ValueError) as exc:
CartesianRepresentation(x=[1, 2, 3, 4] * u.pc, y=[1, 2] * u.pc)
assert exc.value.args[0] == ("x, y, and z are required to instantiate "
"CartesianRepresentation")
def test_init_array_nocopy(self):
x = [8, 9, 10] * u.pc
y = [5, 6, 7] * u.Mpc
z = [2, 3, 4] * u.kpc
s1 = CartesianRepresentation(x=x, y=y, z=z, copy=False)
x[:] = [1, 2, 3] * u.kpc
y[:] = [9, 9, 8] * u.kpc
z[:] = [1, 2, 1] * u.kpc
assert_allclose_quantity(x, s1.x)
assert_allclose_quantity(y, s1.y)
assert_allclose_quantity(z, s1.z)
def test_xyz_is_view_if_possible(self):
xyz = np.arange(1., 10.).reshape(3, 3)
s1 = CartesianRepresentation(xyz, unit=u.kpc, copy=False)
s1_xyz = s1.xyz
assert s1_xyz.value[0, 0] == 1.
xyz[0, 0] = 0.
assert s1.x[0] == 0.
assert s1_xyz.value[0, 0] == 0.
# Not possible: we don't check that tuples are from the same array
xyz = np.arange(1., 10.).reshape(3, 3)
s2 = CartesianRepresentation(*xyz, unit=u.kpc, copy=False)
s2_xyz = s2.xyz
assert s2_xyz.value[0, 0] == 1.
xyz[0, 0] = 0.
assert s2.x[0] == 0.
assert s2_xyz.value[0, 0] == 1.
def test_reprobj(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
s2 = CartesianRepresentation.from_representation(s1)
assert s2.x == 1 * u.kpc
assert s2.y == 2 * u.kpc
assert s2.z == 3 * u.kpc
def test_broadcasting(self):
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=5 * u.kpc)
assert s1.x.unit == u.kpc
assert s1.y.unit == u.kpc
assert s1.z.unit == u.kpc
assert_allclose(s1.x.value, [1, 2])
assert_allclose(s1.y.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6, 7] * u.kpc)
assert exc.value.args[0] == "Input parameters x, y, and z cannot be broadcast"
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.x = 1. * u.kpc
with pytest.raises(AttributeError):
s1.y = 1. * u.kpc
with pytest.raises(AttributeError):
s1.z = 1. * u.kpc
def test_xyz(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert isinstance(s1.xyz, u.Quantity)
assert s1.xyz.unit is u.kpc
assert_allclose(s1.xyz.value, [1, 2, 3])
def test_unit_mismatch(self):
q_len = u.Quantity([1], u.km)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_nonlen, y=q_len, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_nonlen, z=q_len)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CartesianRepresentation(x=q_len, y=q_len, z=q_nonlen)
assert exc.value.args[0] == "x, y, and z should have matching physical types"
def test_unit_non_length(self):
s1 = CartesianRepresentation(x=1 * u.kg, y=2 * u.kg, z=3 * u.kg)
s2 = CartesianRepresentation(x=1 * u.km / u.s, y=2 * u.km / u.s, z=3 * u.km / u.s)
banana = u.def_unit('banana')
s3 = CartesianRepresentation(x=1 * banana, y=2 * banana, z=3 * banana)
def test_getitem(self):
s = CartesianRepresentation(x=np.arange(10) * u.m,
y=-np.arange(10) * u.m,
z=3 * u.km)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
def test_getitem_scalar(self):
s = CartesianRepresentation(x=1 * u.m,
y=-2 * u.m,
z=3 * u.km)
with pytest.raises(TypeError):
s_slc = s[0]
def test_transform(self):
s1 = CartesianRepresentation(x=[1, 2] * u.kpc, y=[3, 4] * u.kpc, z=[5, 6] * u.kpc)
matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
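        # transform() applies the matrix to each (x, y, z) vector, i.e.
        # s2.xyz = matrix @ s1.xyz column-wise, as the element-wise sums in
        # the asserts below spell out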
s2 = s1.transform(matrix)
assert_allclose(s2.x.value, [1 * 1 + 2 * 3 + 3 * 5, 1 * 2 + 2 * 4 + 3 * 6])
assert_allclose(s2.y.value, [4 * 1 + 5 * 3 + 6 * 5, 4 * 2 + 5 * 4 + 6 * 6])
assert_allclose(s2.z.value, [7 * 1 + 8 * 3 + 9 * 5, 7 * 2 + 8 * 4 + 9 * 6])
assert s2.x.unit is u.kpc
assert s2.y.unit is u.kpc
assert s2.z.unit is u.kpc
class TestCylindricalRepresentation:
def test_name(self):
assert CylindricalRepresentation.get_name() == 'cylindrical'
assert CylindricalRepresentation.get_name() in REPRESENTATION_CLASSES
def test_empty_init(self):
with pytest.raises(TypeError) as exc:
s = CylindricalRepresentation()
def test_init_quantity(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
assert s1.rho.unit is u.kpc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, 1)
assert_allclose(s1.phi.value, 2)
assert_allclose(s1.z.value, 3)
def test_init_array(self):
s1 = CylindricalRepresentation(rho=[1, 2, 3] * u.pc,
phi=[2, 3, 4] * u.deg,
z=[3, 4, 5] * u.kpc)
assert s1.rho.unit is u.pc
assert s1.phi.unit is u.deg
assert s1.z.unit is u.kpc
assert_allclose(s1.rho.value, [1, 2, 3])
assert_allclose(s1.phi.value, [2, 3, 4])
assert_allclose(s1.z.value, [3, 4, 5])
def test_init_array_nocopy(self):
rho = [8, 9, 10] * u.pc
phi = [5, 6, 7] * u.deg
z = [2, 3, 4] * u.kpc
s1 = CylindricalRepresentation(rho=rho, phi=phi, z=z, copy=False)
rho[:] = [9, 2, 3] * u.kpc
phi[:] = [1, 2, 3] * u.arcmin
z[:] = [-2, 3, 8] * u.kpc
assert_allclose_quantity(rho, s1.rho)
assert_allclose_quantity(phi, s1.phi)
assert_allclose_quantity(z, s1.z)
def test_reprobj(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc, phi=2 * u.deg, z=3 * u.kpc)
s2 = CylindricalRepresentation.from_representation(s1)
assert s2.rho == 1 * u.kpc
assert s2.phi == 2 * u.deg
assert s2.z == 3 * u.kpc
def test_broadcasting(self):
s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=5 * u.kpc)
assert s1.rho.unit == u.kpc
assert s1.phi.unit == u.deg
assert s1.z.unit == u.kpc
assert_allclose(s1.rho.value, [1, 2])
assert_allclose(s1.phi.value, [3, 4])
assert_allclose(s1.z.value, [5, 5])
def test_broadcasting_mismatch(self):
with pytest.raises(ValueError) as exc:
s1 = CylindricalRepresentation(rho=[1, 2] * u.kpc, phi=[3, 4] * u.deg, z=[5, 6, 7] * u.kpc)
assert exc.value.args[0] == "Input parameters rho, phi, and z cannot be broadcast"
def test_readonly(self):
s1 = CylindricalRepresentation(rho=1 * u.kpc,
phi=20 * u.deg,
z=3 * u.kpc)
with pytest.raises(AttributeError):
s1.rho = 1. * u.kpc
with pytest.raises(AttributeError):
s1.phi = 20 * u.deg
with pytest.raises(AttributeError):
s1.z = 1. * u.kpc
def unit_mismatch(self):
q_len = u.Quantity([1], u.kpc)
q_nonlen = u.Quantity([1], u.kg)
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_nonlen, phi=10 * u.deg, z=q_len)
assert exc.value.args[0] == "rho and z should have matching physical types"
with pytest.raises(u.UnitsError) as exc:
s1 = CylindricalRepresentation(rho=q_len, phi=10 * u.deg, z=q_nonlen)
assert exc.value.args[0] == "rho and z should have matching physical types"
def test_getitem(self):
s = CylindricalRepresentation(rho=np.arange(10) * u.pc,
phi=-np.arange(10) * u.deg,
z=1 * u.kpc)
s_slc = s[2:8:2]
assert_allclose_quantity(s_slc.rho, [2, 4, 6] * u.pc)
assert_allclose_quantity(s_slc.phi, [-2, -4, -6] * u.deg)
assert_allclose_quantity(s_slc.z, [1, 1, 1] * u.kpc)
def test_getitem_scalar(self):
s = CylindricalRepresentation(rho=1 * u.pc,
phi=-2 * u.deg,
z=3 * u.kpc)
with pytest.raises(TypeError):
s_slc = s[0]
def test_cartesian_spherical_roundtrip():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s2 = SphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = SphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.lon, s4.lon)
assert_allclose_quantity(s2.lat, s4.lat)
assert_allclose_quantity(s2.distance, s4.distance)
def test_cartesian_physics_spherical_roundtrip():
s1 = CartesianRepresentation(x=[1, 2000.] * u.kpc,
y=[3000., 4.] * u.pc,
z=[5., 6000.] * u.pc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
def test_spherical_physics_spherical_roundtrip():
s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc)
s2 = PhysicsSphericalRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = PhysicsSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s3.lon)
assert_allclose_quantity(s1.lat, s3.lat)
assert_allclose_quantity(s1.distance, s3.distance)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.theta, s4.theta)
assert_allclose_quantity(s2.r, s4.r)
assert_allclose_quantity(s1.lon, s4.phi)
assert_allclose_quantity(s1.lat, 90. * u.deg - s4.theta)
assert_allclose_quantity(s1.distance, s4.r)
def test_cartesian_cylindrical_roundtrip():
s1 = CartesianRepresentation(x=np.array([1., 2000.]) * u.kpc,
y=np.array([3000., 4.]) * u.pc,
z=np.array([5., 600.]) * u.cm)
s2 = CylindricalRepresentation.from_representation(s1)
s3 = CartesianRepresentation.from_representation(s2)
s4 = CylindricalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.x, s3.x)
assert_allclose_quantity(s1.y, s3.y)
assert_allclose_quantity(s1.z, s3.z)
assert_allclose_quantity(s2.rho, s4.rho)
assert_allclose_quantity(s2.phi, s4.phi)
assert_allclose_quantity(s2.z, s4.z)
def test_unit_spherical_roundtrip():
s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg,
lat=[5., 6.] * u.arcmin)
s2 = CartesianRepresentation.from_representation(s1)
s3 = SphericalRepresentation.from_representation(s2)
s4 = UnitSphericalRepresentation.from_representation(s3)
assert_allclose_quantity(s1.lon, s4.lon)
assert_allclose_quantity(s1.lat, s4.lat)
def test_no_unnecessary_copies():
s1 = UnitSphericalRepresentation(lon=[10., 30.] * u.deg,
lat=[5., 6.] * u.arcmin)
s2 = s1.represent_as(UnitSphericalRepresentation)
assert s2 is s1
assert np.may_share_memory(s1.lon, s2.lon)
assert np.may_share_memory(s1.lat, s2.lat)
s3 = s1.represent_as(SphericalRepresentation)
assert np.may_share_memory(s1.lon, s3.lon)
assert np.may_share_memory(s1.lat, s3.lat)
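    # promoting unit-spherical to spherical only adds a unit distance, so the
    # lon/lat arrays can be reused rather than copied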
s4 = s1.represent_as(CartesianRepresentation)
s5 = s4.represent_as(CylindricalRepresentation)
assert np.may_share_memory(s5.z, s4.z)
def test_representation_repr():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert repr(r1) == ('<SphericalRepresentation (lon, lat, distance) in (deg, deg, kpc)\n'
' ({})>').format(' 1., 2.5, 1.' if NUMPY_LT_1_14
else '1., 2.5, 1.')
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert repr(r2) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' ({})>').format(' 1., 2., 3.' if NUMPY_LT_1_14
else '1., 2., 3.')
r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc)
if NUMPY_LT_1_14:
assert repr(r3) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' [( 1., 4., 9.), ( 2., 4., 10.), ( 3., 4., 11.)]>')
else:
assert repr(r3) == ('<CartesianRepresentation (x, y, z) in kpc\n'
' [(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)]>')
def test_representation_repr_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m')
if NUMPY_LT_1_14:
assert repr(cr) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[( 0., 9., 18.), ( 1., 10., 19.), ( 2., 11., 20.)],\n'
' [( 3., 12., 21.), ( 4., 13., 22.), ( 5., 14., 23.)],\n'
' [( 6., 15., 24.), ( 7., 16., 25.), ( 8., 17., 26.)]]>')
else:
assert repr(cr) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n'
' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n'
' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]]>')
# This was broken before.
if NUMPY_LT_1_14:
assert repr(cr.T) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[( 0., 9., 18.), ( 3., 12., 21.), ( 6., 15., 24.)],\n'
' [( 1., 10., 19.), ( 4., 13., 22.), ( 7., 16., 25.)],\n'
' [( 2., 11., 20.), ( 5., 14., 23.), ( 8., 17., 26.)]]>')
else:
assert repr(cr.T) == (
'<CartesianRepresentation (x, y, z) in m\n'
' [[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n'
' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n'
' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]]>')
def test_representation_str():
r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc)
assert str(r1) == ('( 1., 2.5, 1.) (deg, deg, kpc)' if NUMPY_LT_1_14 else
'(1., 2.5, 1.) (deg, deg, kpc)')
r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
assert str(r2) == ('( 1., 2., 3.) kpc' if NUMPY_LT_1_14 else
'(1., 2., 3.) kpc')
r3 = CartesianRepresentation(x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc)
assert str(r3) == ('[( 1., 4., 9.), ( 2., 4., 10.), ( 3., 4., 11.)] kpc'
if NUMPY_LT_1_14 else
'[(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)] kpc')
def test_representation_str_multi_d():
"""Regression test for #5889."""
cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit='m')
if NUMPY_LT_1_14:
assert str(cr) == (
'[[( 0., 9., 18.), ( 1., 10., 19.), ( 2., 11., 20.)],\n'
' [( 3., 12., 21.), ( 4., 13., 22.), ( 5., 14., 23.)],\n'
' [( 6., 15., 24.), ( 7., 16., 25.), ( 8., 17., 26.)]] m')
else:
assert str(cr) == (
'[[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n'
' [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n'
' [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]] m')
# This was broken before.
if NUMPY_LT_1_14:
assert str(cr.T) == (
'[[( 0., 9., 18.), ( 3., 12., 21.), ( 6., 15., 24.)],\n'
' [( 1., 10., 19.), ( 4., 13., 22.), ( 7., 16., 25.)],\n'
' [( 2., 11., 20.), ( 5., 14., 23.), ( 8., 17., 26.)]] m')
else:
assert str(cr.T) == (
'[[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n'
' [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n'
' [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]] m')
def test_subclass_representation():
from ..builtin_frames import ICRS
class Longitude180(Longitude):
def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs):
self = super().__new__(cls, angle, unit=unit, wrap_angle=wrap_angle,
**kwargs)
return self
class SphericalWrap180Representation(SphericalRepresentation):
attr_classes = OrderedDict([('lon', Longitude180),
('lat', Latitude),
('distance', u.Quantity)])
recommended_units = {'lon': u.deg, 'lat': u.deg}
class ICRSWrap180(ICRS):
frame_specific_representation_info = ICRS._frame_specific_representation_info.copy()
frame_specific_representation_info[SphericalWrap180Representation] = \
frame_specific_representation_info[SphericalRepresentation]
default_representation = SphericalWrap180Representation
c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m)
assert c.ra.value == -1
assert c.ra.unit is u.deg
assert c.dec.value == -2
assert c.dec.unit is u.deg
def test_minimal_subclass():
    # Basically a check that what we document actually works;
    # see doc/coordinates/representations.rst
class LogDRepresentation(BaseRepresentation):
attr_classes = OrderedDict([('lon', Longitude),
('lat', Latitude),
('logd', u.Dex)])
def to_cartesian(self):
d = self.logd.physical
x = d * np.cos(self.lat) * np.cos(self.lon)
y = d * np.cos(self.lat) * np.sin(self.lon)
z = d * np.sin(self.lat)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
lon = np.arctan2(cart.y, cart.x)
lat = np.arctan2(cart.z, s)
return cls(lon=lon, lat=lat, logd=u.Dex(r), copy=False)
ld1 = LogDRepresentation(90.*u.deg, 0.*u.deg, 1.*u.dex(u.kpc))
ld2 = LogDRepresentation(lon=90.*u.deg, lat=0.*u.deg, logd=1.*u.dex(u.kpc))
assert np.all(ld1.lon == ld2.lon)
assert np.all(ld1.lat == ld2.lat)
assert np.all(ld1.logd == ld2.logd)
c = ld1.to_cartesian()
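    # lon=90 deg, lat=0, logd=1 dex(kpc) -> d=10 kpc along +y, so the
    # Cartesian result should be (0, 10, 0) kpc up to trig rounding (hence
    # the nanoparsec atol below)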
assert_allclose_quantity(c.xyz, [0., 10., 0.] * u.kpc, atol=1.*u.npc)
ld3 = LogDRepresentation.from_cartesian(c)
assert np.all(ld3.lon == ld2.lon)
assert np.all(ld3.lat == ld2.lat)
assert np.all(ld3.logd == ld2.logd)
s = ld1.represent_as(SphericalRepresentation)
assert_allclose_quantity(s.lon, ld1.lon)
assert_allclose_quantity(s.distance, 10.*u.kpc)
assert_allclose_quantity(s.lat, ld1.lat)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), lon=1.*u.deg)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), True, False)
with pytest.raises(TypeError):
LogDRepresentation(0.*u.deg, 1.*u.deg, 1.*u.dex(u.kpc), foo='bar')
with pytest.raises(ValueError):
# check we cannot redefine an existing class.
class LogDRepresentation(BaseRepresentation):
attr_classes = OrderedDict([('lon', Longitude),
('lat', Latitude),
('logr', u.Dex)])
def test_combine_xyz():
x, y, z = np.arange(27).reshape(3, 9) * u.kpc
xyz = _combine_xyz(x, y, z, xyz_axis=0)
assert xyz.shape == (3, 9)
assert np.all(xyz[0] == x)
assert np.all(xyz[1] == y)
assert np.all(xyz[2] == z)
x, y, z = np.arange(27).reshape(3, 3, 3) * u.kpc
xyz = _combine_xyz(x, y, z, xyz_axis=0)
assert xyz.ndim == 3
assert np.all(xyz[0] == x)
assert np.all(xyz[1] == y)
assert np.all(xyz[2] == z)
xyz = _combine_xyz(x, y, z, xyz_axis=1)
assert xyz.ndim == 3
assert np.all(xyz[:, 0] == x)
assert np.all(xyz[:, 1] == y)
assert np.all(xyz[:, 2] == z)
xyz = _combine_xyz(x, y, z, xyz_axis=-1)
assert xyz.ndim == 3
assert np.all(xyz[..., 0] == x)
assert np.all(xyz[..., 1] == y)
assert np.all(xyz[..., 2] == z)
class TestCartesianRepresentationWithDifferential:
def test_init_differential(self):
diff = CartesianDifferential(d_x=1 * u.km/u.s,
d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s)
# Check that a single differential gets turned into a 1-item dict.
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
assert s1.x.unit is u.kpc
assert s1.y.unit is u.kpc
assert s1.z.unit is u.kpc
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
# can also pass in an explicit dictionary
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'s': diff})
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
# using the wrong key will cause it to fail
with pytest.raises(ValueError):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'1 / s2': diff})
# make sure other kwargs are handled properly
s1 = CartesianRepresentation(x=1, y=2, z=3,
differentials=diff, copy=False, unit=u.kpc)
assert len(s1.differentials) == 1
assert s1.differentials['s'] is diff
with pytest.raises(TypeError): # invalid type passed to differentials
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials='garmonbozia')
# make sure differentials can't accept differentials
with pytest.raises(TypeError):
CartesianDifferential(d_x=1 * u.km/u.s, d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s, differentials=diff)
def test_init_differential_compatible(self):
# TODO: more extensive checking of this
# should fail - representation and differential not compatible
diff = SphericalDifferential(d_lon=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
with pytest.raises(TypeError):
CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg,
distance=1*u.pc,
differentials=diff)
def test_init_differential_multiple_equivalent_keys(self):
d1 = CartesianDifferential(*[1, 2, 3] * u.km/u.s)
d2 = CartesianDifferential(*[4, 5, 6] * u.km/u.s)
# verify that the check against expected_unit validates against passing
# in two different but equivalent keys
with pytest.raises(ValueError):
r1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials={'s': d1, 'yr': d2})
def test_init_array_broadcasting(self):
arr1 = np.arange(8).reshape(4, 2) * u.km/u.s
diff = CartesianDifferential(d_x=arr1, d_y=arr1, d_z=arr1)
# shapes aren't compatible
arr2 = np.arange(27).reshape(3, 9) * u.kpc
with pytest.raises(ValueError):
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2,
differentials=diff)
arr2 = np.arange(8).reshape(4, 2) * u.kpc
rep = CartesianRepresentation(x=arr2, y=arr2, z=arr2,
differentials=diff)
assert rep.x.unit is u.kpc
assert rep.y.unit is u.kpc
assert rep.z.unit is u.kpc
assert len(rep.differentials) == 1
assert rep.differentials['s'] is diff
assert rep.xyz.shape == rep.differentials['s'].d_xyz.shape
def test_reprobj(self):
# should succeed - representation and differential are compatible
diff = SphericalCosLatDifferential(d_lon_coslat=1 * u.mas/u.yr,
d_lat=2 * u.mas/u.yr,
d_distance=3 * u.km/u.s)
r1 = SphericalRepresentation(lon=15*u.deg, lat=21*u.deg,
distance=1*u.pc,
differentials=diff)
r2 = CartesianRepresentation.from_representation(r1)
assert r2.get_name() == 'cartesian'
assert not r2.differentials
def test_readonly(self):
s1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc)
with pytest.raises(AttributeError): # attribute is not settable
s1.differentials = 'thing'
def test_represent_as(self):
diff = CartesianDifferential(d_x=1 * u.km/u.s,
d_y=2 * u.km/u.s,
d_z=3 * u.km/u.s)
rep1 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc,
differentials=diff)
# Only change the representation, drop the differential
new_rep = rep1.represent_as(SphericalRepresentation)
assert new_rep.get_name() == 'spherical'
assert not new_rep.differentials # dropped
# Pass in separate classes for representation, differential
new_rep = rep1.represent_as(SphericalRepresentation,
SphericalCosLatDifferential)
assert new_rep.get_name() == 'spherical'
assert new_rep.differentials['s'].get_name() == 'sphericalcoslat'
# Pass in a dictionary for the differential classes
new_rep = rep1.represent_as(SphericalRepresentation,
{'s': SphericalCosLatDifferential})
assert new_rep.get_name() == 'spherical'
assert new_rep.differentials['s'].get_name() == 'sphericalcoslat'
# make sure represent_as() passes through the differentials
for name in REPRESENTATION_CLASSES:
if name == 'radial':
# TODO: Converting a CartesianDifferential to a
# RadialDifferential fails, even on `master`
continue
new_rep = rep1.represent_as(REPRESENTATION_CLASSES[name],
DIFFERENTIAL_CLASSES[name])
assert new_rep.get_name() == name
assert len(new_rep.differentials) == 1
assert new_rep.differentials['s'].get_name() == name
with pytest.raises(ValueError) as excinfo:
rep1.represent_as('name')
assert 'use frame object' in str(excinfo.value)
def test_getitem(self):
d = CartesianDifferential(d_x=np.arange(10) * u.m/u.s,
d_y=-np.arange(10) * u.m/u.s,
d_z=1. * u.m/u.s)
s = CartesianRepresentation(x=np.arange(10) * u.m,
y=-np.arange(10) * u.m,
z=3 * u.km,
differentials=d)
s_slc = s[2:8:2]
s_dif = s_slc.differentials['s']
assert_allclose_quantity(s_slc.x, [2, 4, 6] * u.m)
assert_allclose_quantity(s_slc.y, [-2, -4, -6] * u.m)
assert_allclose_quantity(s_slc.z, [3, 3, 3] * u.km)
assert_allclose_quantity(s_dif.d_x, [2, 4, 6] * u.m/u.s)
assert_allclose_quantity(s_dif.d_y, [-2, -4, -6] * u.m/u.s)
assert_allclose_quantity(s_dif.d_z, [1, 1, 1] * u.m/u.s)
def test_transform(self):
d1 = CartesianDifferential(d_x=[1, 2] * u.km/u.s,
d_y=[3, 4] * u.km/u.s,
d_z=[5, 6] * u.km/u.s)
r1 = CartesianRepresentation(x=[1, 2] * u.kpc,
y=[3, 4] * u.kpc,
z=[5, 6] * u.kpc,
differentials=d1)
matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
r2 = r1.transform(matrix)
d2 = r2.differentials['s']
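        # the attached differential transforms with the same matrix, e.g.
        # matrix @ (1, 3, 5) km/s = (22, 49, 76) km/s for the first vector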
assert_allclose_quantity(d2.d_x, [22., 28]*u.km/u.s)
assert_allclose_quantity(d2.d_y, [49, 64]*u.km/u.s)
assert_allclose_quantity(d2.d_z, [76, 100.]*u.km/u.s)
def test_with_differentials(self):
        # make sure with_differentials() correctly creates a new copy with the
        # same differential
cr = CartesianRepresentation([1, 2, 3]*u.kpc)
diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
cr2 = cr.with_differentials(diff)
assert cr.differentials != cr2.differentials
assert cr2.differentials['s'] is diff
# make sure it works even if a differential is present already
diff2 = CartesianDifferential([.1, .2, .3]*u.m/u.s)
cr3 = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
cr4 = cr3.with_differentials(diff2)
assert cr4.differentials['s'] != cr3.differentials['s']
assert cr4.differentials['s'] == diff2
        # also ensure a *scalar* differential works
cr5 = cr.with_differentials(diff)
assert len(cr5.differentials) == 1
assert cr5.differentials['s'] == diff
# make sure we don't update the original representation's dict
d1 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s)
d2 = CartesianDifferential(*np.random.random((3, 5)), unit=u.km/u.s**2)
r1 = CartesianRepresentation(*np.random.random((3, 5)), unit=u.pc,
differentials=d1)
r2 = r1.with_differentials(d2)
assert r1.differentials['s'] is r2.differentials['s']
assert 's2' not in r1.differentials
assert 's2' in r2.differentials
def test_repr_with_differentials():
diff = CartesianDifferential([.1, .2, .3]*u.km/u.s)
cr = CartesianRepresentation([1, 2, 3]*u.kpc, differentials=diff)
assert "has differentials w.r.t.: 's'" in repr(cr)
def test_to_cartesian():
"""
Test that to_cartesian drops the differential.
"""
sd = SphericalDifferential(d_lat=1*u.deg, d_lon=2*u.deg, d_distance=10*u.m)
sr = SphericalRepresentation(lat=1*u.deg, lon=2*u.deg, distance=10*u.m,
differentials=sd)
cart = sr.to_cartesian()
assert cart.get_name() == 'cartesian'
assert not cart.differentials
def test_recommended_units_deprecation():
sr = SphericalRepresentation(lat=1*u.deg, lon=2*u.deg, distance=10*u.m)
with catch_warnings(AstropyDeprecationWarning) as w:
sr.recommended_units
assert 'recommended_units' in str(w[0].message)
with catch_warnings(AstropyDeprecationWarning) as w:
class MyClass(SphericalRepresentation):
attr_classes = SphericalRepresentation.attr_classes
recommended_units = {}
assert 'recommended_units' in str(w[0].message)
@pytest.fixture
def unitphysics():
"""
This fixture is used
"""
had_unit = False
if hasattr(PhysicsSphericalRepresentation, '_unit_representation'):
orig = PhysicsSphericalRepresentation._unit_representation
had_unit = True
class UnitPhysicsSphericalRepresentation(BaseRepresentation):
attr_classes = OrderedDict([('phi', Angle),
('theta', Angle)])
def __init__(self, phi, theta, differentials=None, copy=True):
super().__init__(phi, theta, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
if copy:
self._phi = self._phi.wrap_at(360 * u.deg)
else:
# necessary because the above version of `wrap_at` has to be a copy
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
raise ValueError('Inclination angle(s) must be within '
'0 deg <= angle <= 180 deg, '
'got {0}'.format(theta.to(u.degree)))
@property
def phi(self):
return self._phi
@property
def theta(self):
return self._theta
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return OrderedDict(
(('phi', CartesianRepresentation(-sinphi, cosphi, 0., copy=False)),
('theta', CartesianRepresentation(costheta*cosphi,
costheta*sinphi,
-sintheta, copy=False))))
def scale_factors(self):
sintheta = np.sin(self.theta)
            one = np.broadcast_to(1.*u.one, self.shape, subok=True)
            return OrderedDict((('phi', sintheta),
                                ('theta', one)))
def to_cartesian(self):
x = np.sin(self.theta) * np.cos(self.phi)
y = np.sin(self.theta) * np.sin(self.phi)
z = np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
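            # arctan2(s, z) measures the polar angle from the +z axis and
            # naturally lands in [0, 180] deg, matching the __init__ check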
return cls(phi=phi, theta=theta, copy=False)
def norm(self):
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled,
copy=False)
PhysicsSphericalRepresentation._unit_representation = UnitPhysicsSphericalRepresentation
yield UnitPhysicsSphericalRepresentation
if had_unit:
PhysicsSphericalRepresentation._unit_representation = orig
else:
del PhysicsSphericalRepresentation._unit_representation
# remove from the module-level representations, if present
REPRESENTATION_CLASSES.pop(UnitPhysicsSphericalRepresentation.get_name(), None)
def test_unitphysics(unitphysics):
obj = unitphysics(phi=0*u.deg, theta=10*u.deg)
objkw = unitphysics(phi=0*u.deg, theta=10*u.deg)
assert objkw.phi == obj.phi
assert objkw.theta == obj.theta
asphys = obj.represent_as(PhysicsSphericalRepresentation)
assert asphys.phi == obj.phi
assert asphys.theta == obj.theta
assert_allclose_quantity(asphys.r, 1*u.dimensionless_unscaled)
assph = obj.represent_as(SphericalRepresentation)
assert assph.lon == obj.phi
assert assph.lat == 80*u.deg
assert_allclose_quantity(assph.distance, 1*u.dimensionless_unscaled)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for GCRS coordinate transformations, primarily to/from AltAz.
"""
import pytest
import numpy as np
from ... import units as u
from ...tests.helper import (assert_quantity_allclose as assert_allclose)
from ...time import Time
from .. import (EarthLocation, get_sun, ICRS, GCRS, CIRS, ITRS, AltAz,
PrecessedGeocentric, CartesianRepresentation, SkyCoord,
SphericalRepresentation, UnitSphericalRepresentation,
HCRS, HeliocentricTrueEcliptic)
from ..._erfa import epv00
from .utils import randomly_sample_sphere
from ..builtin_frames.utils import get_jd12
from .. import solar_system_ephemeris
from ...units import allclose
try:
import jplephem # pylint: disable=W0611
except ImportError:
HAS_JPLEPHEM = False
else:
HAS_JPLEPHEM = True
def test_icrs_cirs():
"""
Check a few cases of ICRS<->CIRS for consistency.
Also includes the CIRS<->CIRS transforms at different times, as those go
through ICRS
"""
ra, dec, dist = randomly_sample_sphere(200)
inod = ICRS(ra=ra, dec=dec)
iwd = ICRS(ra=ra, dec=dec, distance=dist*u.pc)
cframe1 = CIRS()
cirsnod = inod.transform_to(cframe1) # uses the default time
# first do a round-tripping test
inod2 = cirsnod.transform_to(ICRS)
assert_allclose(inod.ra, inod2.ra)
assert_allclose(inod.dec, inod2.dec)
# now check that a different time yields different answers
cframe2 = CIRS(obstime=Time('J2005', scale='utc'))
cirsnod2 = inod.transform_to(cframe2)
assert not allclose(cirsnod.ra, cirsnod2.ra, rtol=1e-8)
assert not allclose(cirsnod.dec, cirsnod2.dec, rtol=1e-8)
# parallax effects should be included, so with and w/o distance should be different
cirswd = iwd.transform_to(cframe1)
assert not allclose(cirswd.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirswd.dec, cirsnod.dec, rtol=1e-8)
# and the distance should transform at least somehow
assert not allclose(cirswd.distance, iwd.distance, rtol=1e-8)
# now check that the cirs self-transform works as expected
cirsnod3 = cirsnod.transform_to(cframe1) # should be a no-op
assert_allclose(cirsnod.ra, cirsnod3.ra)
assert_allclose(cirsnod.dec, cirsnod3.dec)
cirsnod4 = cirsnod.transform_to(cframe2) # should be different
assert not allclose(cirsnod4.ra, cirsnod.ra, rtol=1e-8)
assert not allclose(cirsnod4.dec, cirsnod.dec, rtol=1e-8)
cirsnod5 = cirsnod4.transform_to(cframe1) # should be back to the same
assert_allclose(cirsnod.ra, cirsnod5.ra)
assert_allclose(cirsnod.dec, cirsnod5.dec)
ra, dec, dist = randomly_sample_sphere(200)
icrs_coords = [ICRS(ra=ra, dec=dec), ICRS(ra=ra, dec=dec, distance=dist*u.pc)]
gcrs_frames = [GCRS(), GCRS(obstime=Time('J2005', scale='utc'))]
@pytest.mark.parametrize('icoo', icrs_coords)
def test_icrs_gcrs(icoo):
"""
Check ICRS<->GCRS for consistency
"""
gcrscoo = icoo.transform_to(gcrs_frames[0]) # uses the default time
# first do a round-tripping test
icoo2 = gcrscoo.transform_to(ICRS)
assert_allclose(icoo.distance, icoo2.distance)
assert_allclose(icoo.ra, icoo2.ra)
assert_allclose(icoo.dec, icoo2.dec)
assert isinstance(icoo2.data, icoo.data.__class__)
# now check that a different time yields different answers
gcrscoo2 = icoo.transform_to(gcrs_frames[1])
assert not allclose(gcrscoo.ra, gcrscoo2.ra, rtol=1e-8, atol=1e-10*u.deg)
assert not allclose(gcrscoo.dec, gcrscoo2.dec, rtol=1e-8, atol=1e-10*u.deg)
    # now check that the gcrs self-transform works as expected
gcrscoo3 = gcrscoo.transform_to(gcrs_frames[0]) # should be a no-op
assert_allclose(gcrscoo.ra, gcrscoo3.ra)
assert_allclose(gcrscoo.dec, gcrscoo3.dec)
gcrscoo4 = gcrscoo.transform_to(gcrs_frames[1]) # should be different
assert not allclose(gcrscoo4.ra, gcrscoo.ra, rtol=1e-8, atol=1e-10*u.deg)
assert not allclose(gcrscoo4.dec, gcrscoo.dec, rtol=1e-8, atol=1e-10*u.deg)
gcrscoo5 = gcrscoo4.transform_to(gcrs_frames[0]) # should be back to the same
assert_allclose(gcrscoo.ra, gcrscoo5.ra, rtol=1e-8, atol=1e-10*u.deg)
assert_allclose(gcrscoo.dec, gcrscoo5.dec, rtol=1e-8, atol=1e-10*u.deg)
# also make sure that a GCRS with a different geoloc/geovel gets a different answer
# roughly a moon-like frame
gframe3 = GCRS(obsgeoloc=[385000., 0, 0]*u.km, obsgeovel=[1, 0, 0]*u.km/u.s)
gcrscoo6 = icoo.transform_to(gframe3) # should be different
assert not allclose(gcrscoo.ra, gcrscoo6.ra, rtol=1e-8, atol=1e-10*u.deg)
assert not allclose(gcrscoo.dec, gcrscoo6.dec, rtol=1e-8, atol=1e-10*u.deg)
icooviag3 = gcrscoo6.transform_to(ICRS) # and now back to the original
assert_allclose(icoo.ra, icooviag3.ra)
assert_allclose(icoo.dec, icooviag3.dec)
@pytest.mark.parametrize('gframe', gcrs_frames)
def test_icrs_gcrs_dist_diff(gframe):
"""
Check that with and without distance give different ICRS<->GCRS answers
"""
gcrsnod = icrs_coords[0].transform_to(gframe)
gcrswd = icrs_coords[1].transform_to(gframe)
# parallax effects should be included, so with and w/o distance should be different
assert not allclose(gcrswd.ra, gcrsnod.ra, rtol=1e-8, atol=1e-10*u.deg)
assert not allclose(gcrswd.dec, gcrsnod.dec, rtol=1e-8, atol=1e-10*u.deg)
# and the distance should transform at least somehow
assert not allclose(gcrswd.distance, icrs_coords[1].distance, rtol=1e-8,
atol=1e-10*u.pc)
def test_cirs_to_altaz():
"""
Check the basic CIRS<->AltAz transforms. More thorough checks implicitly
happen in `test_iau_fullstack`
"""
from .. import EarthLocation
ra, dec, dist = randomly_sample_sphere(200)
cirs = CIRS(ra=ra, dec=dec, obstime='J2000')
crepr = SphericalRepresentation(lon=ra, lat=dec, distance=dist)
cirscart = CIRS(crepr, obstime=cirs.obstime, representation=CartesianRepresentation)
loc = EarthLocation(lat=0*u.deg, lon=0*u.deg, height=0*u.m)
altazframe = AltAz(location=loc, obstime=Time('J2005'))
cirs2 = cirs.transform_to(altazframe).transform_to(cirs)
cirs3 = cirscart.transform_to(altazframe).transform_to(cirs)
# check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert_allclose(cirs.ra, cirs3.ra)
assert_allclose(cirs.dec, cirs3.dec)
def test_gcrs_itrs():
"""
Check basic GCRS<->ITRS transforms for round-tripping.
"""
ra, dec, _ = randomly_sample_sphere(200)
gcrs = GCRS(ra=ra, dec=dec, obstime='J2000')
gcrs6 = GCRS(ra=ra, dec=dec, obstime='J2006')
gcrs2 = gcrs.transform_to(ITRS).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(ITRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
assert not allclose(gcrs.ra, gcrs6_2.ra)
assert not allclose(gcrs.dec, gcrs6_2.dec)
# also try with the cartesian representation
gcrsc = gcrs.realize_frame(gcrs.data)
gcrsc.representation = CartesianRepresentation
gcrsc2 = gcrsc.transform_to(ITRS).transform_to(gcrsc)
assert_allclose(gcrsc.spherical.lon.deg, gcrsc2.ra.deg)
assert_allclose(gcrsc.spherical.lat, gcrsc2.dec)
def test_cirs_itrs():
"""
Check basic CIRS<->ITRS transforms for round-tripping.
"""
ra, dec, _ = randomly_sample_sphere(200)
cirs = CIRS(ra=ra, dec=dec, obstime='J2000')
cirs6 = CIRS(ra=ra, dec=dec, obstime='J2006')
cirs2 = cirs.transform_to(ITRS).transform_to(cirs)
cirs6_2 = cirs6.transform_to(ITRS).transform_to(cirs) # different obstime
# just check round-tripping
assert_allclose(cirs.ra, cirs2.ra)
assert_allclose(cirs.dec, cirs2.dec)
assert not allclose(cirs.ra, cirs6_2.ra)
assert not allclose(cirs.dec, cirs6_2.dec)
def test_gcrs_cirs():
"""
Check GCRS<->CIRS transforms for round-tripping. More complicated than the
above two because it's multi-hop
"""
ra, dec, _ = randomly_sample_sphere(200)
gcrs = GCRS(ra=ra, dec=dec, obstime='J2000')
gcrs6 = GCRS(ra=ra, dec=dec, obstime='J2006')
gcrs2 = gcrs.transform_to(CIRS).transform_to(gcrs)
gcrs6_2 = gcrs6.transform_to(CIRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs2.ra)
assert_allclose(gcrs.dec, gcrs2.dec)
assert not allclose(gcrs.ra, gcrs6_2.ra)
assert not allclose(gcrs.dec, gcrs6_2.dec)
# now try explicit intermediate pathways and ensure they're all consistent
gcrs3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(ITRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs3.ra)
assert_allclose(gcrs.dec, gcrs3.dec)
gcrs4 = gcrs.transform_to(ICRS).transform_to(CIRS).transform_to(ICRS).transform_to(gcrs)
assert_allclose(gcrs.ra, gcrs4.ra)
assert_allclose(gcrs.dec, gcrs4.dec)
def test_gcrs_altaz():
"""
Check GCRS<->AltAz transforms for round-tripping. Has multiple paths
"""
from .. import EarthLocation
ra, dec, _ = randomly_sample_sphere(1)
gcrs = GCRS(ra=ra[0], dec=dec[0], obstime='J2000')
    # check with an array of times to make sure N-d arrays work
times = Time(np.linspace(2456293.25, 2456657.25, 51) * u.day,
format='jd', scale='utc')
loc = EarthLocation(lon=10 * u.deg, lat=80. * u.deg)
aaframe = AltAz(obstime=times, location=loc)
aa1 = gcrs.transform_to(aaframe)
aa2 = gcrs.transform_to(ICRS).transform_to(CIRS).transform_to(aaframe)
aa3 = gcrs.transform_to(ITRS).transform_to(CIRS).transform_to(aaframe)
# make sure they're all consistent
assert_allclose(aa1.alt, aa2.alt)
assert_allclose(aa1.az, aa2.az)
assert_allclose(aa1.alt, aa3.alt)
assert_allclose(aa1.az, aa3.az)
def test_precessed_geocentric():
assert PrecessedGeocentric().equinox.jd == Time('J2000', scale='utc').jd
gcrs_coo = GCRS(180*u.deg, 2*u.deg, distance=10000*u.km)
pgeo_coo = gcrs_coo.transform_to(PrecessedGeocentric)
assert np.abs(gcrs_coo.ra - pgeo_coo.ra) > 10*u.marcsec
assert np.abs(gcrs_coo.dec - pgeo_coo.dec) > 10*u.marcsec
assert_allclose(gcrs_coo.distance, pgeo_coo.distance)
gcrs_roundtrip = pgeo_coo.transform_to(GCRS)
assert_allclose(gcrs_coo.ra, gcrs_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs_roundtrip.distance)
pgeo_coo2 = gcrs_coo.transform_to(PrecessedGeocentric(equinox='B1850'))
assert np.abs(gcrs_coo.ra - pgeo_coo2.ra) > 1.5*u.deg
assert np.abs(gcrs_coo.dec - pgeo_coo2.dec) > 0.5*u.deg
assert_allclose(gcrs_coo.distance, pgeo_coo2.distance)
gcrs2_roundtrip = pgeo_coo2.transform_to(GCRS)
assert_allclose(gcrs_coo.ra, gcrs2_roundtrip.ra)
assert_allclose(gcrs_coo.dec, gcrs2_roundtrip.dec)
assert_allclose(gcrs_coo.distance, gcrs2_roundtrip.distance)
# shared by parametrized tests below. Some use the whole AltAz, others use just obstime
totest_frames = [AltAz(location=EarthLocation(-90*u.deg, 65*u.deg),
obstime=Time('J2000')), # J2000 is often a default so this might work when others don't
AltAz(location=EarthLocation(120*u.deg, -35*u.deg),
obstime=Time('J2000')),
AltAz(location=EarthLocation(-90*u.deg, 65*u.deg),
obstime=Time('2014-01-01 00:00:00')),
AltAz(location=EarthLocation(-90*u.deg, 65*u.deg),
obstime=Time('2014-08-01 08:00:00')),
AltAz(location=EarthLocation(120*u.deg, -35*u.deg),
obstime=Time('2014-01-01 00:00:00'))
]
MOONDIST = 385000*u.km  # approximate semi-major axis of the moon's orbit
MOONDIST_CART = CartesianRepresentation(3**-0.5*MOONDIST, 3**-0.5*MOONDIST, 3**-0.5*MOONDIST)
EARTHECC = 0.017 + 0.005 # roughly earth orbital eccentricity, but with an added tolerance
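# Note that |MOONDIST_CART| == MOONDIST, since 3 * (3**-0.5)**2 == 1, so this
# is a moon-like point along the (1, 1, 1) direction at lunar distance.
# The distance windows below are meant to bracket ~1 au: the geocentric solar
# distance ranges over a*(1 - e) to a*(1 + e) with a = 1 au and e ~ 0.0167,
# and the extra 0.005 in EARTHECC is slack (e.g. for the barycenter offset).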
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_altaz_sunish(testframe):
"""
Sanity-check that the sun is at a reasonable distance from any altaz
"""
sun = get_sun(testframe.obstime)
assert sun.frame.name == 'gcrs'
# the .to(u.au) is not necessary, it just makes the asserts on failure more readable
    assert (1 - EARTHECC)*u.au < sun.distance.to(u.au) < (1 + EARTHECC)*u.au
sunaa = sun.transform_to(testframe)
    assert (1 - EARTHECC)*u.au < sunaa.distance.to(u.au) < (1 + EARTHECC)*u.au
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a GCRS->AltAz transformation
"""
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
# now check that the distance change is similar to earth radius
    assert 1000*u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000*u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
# also should add checks that the alt/az are different for different earth locations
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_altaz_bothroutes(testframe):
"""
Repeat of both the moonish and sunish tests above to make sure the two
routes through the coordinate graph are consistent with each other
"""
sun = get_sun(testframe.obstime)
sunaa_viaicrs = sun.transform_to(ICRS).transform_to(testframe)
sunaa_viaitrs = sun.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe)
moon = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa_viaicrs = moon.transform_to(ICRS).transform_to(testframe)
moonaa_viaitrs = moon.transform_to(ITRS(obstime=testframe.obstime)).transform_to(testframe)
assert_allclose(sunaa_viaicrs.cartesian.xyz, sunaa_viaitrs.cartesian.xyz)
assert_allclose(moonaa_viaicrs.cartesian.xyz, moonaa_viaitrs.cartesian.xyz)
@pytest.mark.parametrize('testframe', totest_frames)
def test_cirs_altaz_moonish(testframe):
"""
Sanity-check that an object resembling the moon goes to the right place with
a CIRS<->AltAz transformation
"""
moon = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonaa = moon.transform_to(testframe)
assert 1000*u.km < np.abs(moonaa.distance - moon.distance).to(u.km) < 7000*u.km
# now check that it round-trips
moon2 = moonaa.transform_to(moon)
assert_allclose(moon.cartesian.xyz, moon2.cartesian.xyz)
@pytest.mark.parametrize('testframe', totest_frames)
def test_cirs_altaz_nodist(testframe):
"""
Check that a UnitSphericalRepresentation coordinate round-trips for the
CIRS<->AltAz transformation.
"""
coo0 = CIRS(UnitSphericalRepresentation(10*u.deg, 20*u.deg), obstime=testframe.obstime)
# check that it round-trips
coo1 = coo0.transform_to(testframe).transform_to(coo0)
assert_allclose(coo0.cartesian.xyz, coo1.cartesian.xyz)
@pytest.mark.parametrize('testframe', totest_frames)
def test_cirs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from CIRS
"""
moonish = CIRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS)
assert 0.97*u.au < moonicrs.distance < 1.03*u.au
@pytest.mark.parametrize('testframe', totest_frames)
def test_gcrs_icrs_moonish(testframe):
"""
check that something like the moon goes to about the right distance from the
ICRS origin when starting from GCRS
"""
moonish = GCRS(MOONDIST_CART, obstime=testframe.obstime)
moonicrs = moonish.transform_to(ICRS)
assert 0.97*u.au < moonicrs.distance < 1.03*u.au
@pytest.mark.parametrize('testframe', totest_frames)
def test_icrs_gcrscirs_sunish(testframe):
"""
check that the ICRS barycenter goes to about the right distance from various
~geocentric frames (other than testframe)
"""
# slight offset to avoid divide-by-zero errors
icrs = ICRS(0*u.deg, 0*u.deg, distance=10*u.km)
gcrs = icrs.transform_to(GCRS(obstime=testframe.obstime))
    assert (1 - EARTHECC)*u.au < gcrs.distance.to(u.au) < (1 + EARTHECC)*u.au
cirs = icrs.transform_to(CIRS(obstime=testframe.obstime))
    assert (1 - EARTHECC)*u.au < cirs.distance.to(u.au) < (1 + EARTHECC)*u.au
itrs = icrs.transform_to(ITRS(obstime=testframe.obstime))
    assert (1 - EARTHECC)*u.au < itrs.spherical.distance.to(u.au) < (1 + EARTHECC)*u.au
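    # (ITRS has no .distance attribute -- presumably because its default
    # representation is Cartesian -- hence the .spherical above)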
@pytest.mark.parametrize('testframe', totest_frames)
def test_icrs_altaz_moonish(testframe):
"""
Check that something expressed in *ICRS* as being moon-like goes to the
right AltAz distance
"""
# we use epv00 instead of get_sun because get_sun includes aberration
earth_pv_helio, earth_pv_bary = epv00(*get_jd12(testframe.obstime, 'tdb'))
earth_icrs_xyz = earth_pv_bary[0]*u.au
moonoffset = [0, 0, MOONDIST.value]*MOONDIST.unit
moonish_icrs = ICRS(CartesianRepresentation(earth_icrs_xyz + moonoffset))
moonaa = moonish_icrs.transform_to(testframe)
# now check that the distance change is similar to earth radius
    assert 1000*u.km < np.abs(moonaa.distance - MOONDIST).to(u.km) < 7000*u.km
def test_gcrs_self_transform_closeby():
"""
Tests GCRS self transform for objects which are nearby and thus
have reasonable parallax.
Moon positions were originally created using JPL DE432s ephemeris.
The two lunar positions (one geocentric, one at a defined location)
are created via a transformation from ICRS to two different GCRS frames.
We test that the GCRS-GCRS self transform can correctly map one GCRS
frame onto the other.
"""
t = Time("2014-12-25T07:00")
moon_geocentric = SkyCoord(GCRS(318.10579159*u.deg,
-11.65281165*u.deg,
365042.64880308*u.km, obstime=t))
# this is the location of the Moon as seen from La Palma
obsgeoloc = [-5592982.59658935, -63054.1948592, 3059763.90102216]*u.m
obsgeovel = [4.59798494, -407.84677071, 0.]*u.m/u.s
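    # For reference (not used below): geocentric position/velocity vectors like
    # these can in principle be regenerated from an EarthLocation via something
    # like ``EarthLocation.of_site('lapalma').get_gcrs_posvel(t)`` -- the site
    # name and exact agreement with these hard-coded values are assumptions.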
moon_lapalma = SkyCoord(GCRS(318.7048445*u.deg,
-11.98761996*u.deg,
369722.8231031*u.km,
obstime=t,
obsgeoloc=obsgeoloc,
obsgeovel=obsgeovel))
transformed = moon_geocentric.transform_to(moon_lapalma.frame)
delta = transformed.separation_3d(moon_lapalma)
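    # 1 m at ~370000 km is roughly half a milliarcsecond, so this is a fairly
    # stringent consistency requirement on the self-transform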
assert_allclose(delta, 0.0*u.m, atol=1*u.m)
@pytest.mark.remote_data
@pytest.mark.skipif('not HAS_JPLEPHEM')
def test_ephemerides():
"""
We test that using different ephemerides gives very similar results
for transformations
"""
t = Time("2014-12-25T07:00")
moon = SkyCoord(GCRS(318.10579159*u.deg,
-11.65281165*u.deg,
365042.64880308*u.km, obstime=t))
icrs_frame = ICRS()
hcrs_frame = HCRS(obstime=t)
ecl_frame = HeliocentricTrueEcliptic(equinox=t)
cirs_frame = CIRS(obstime=t)
moon_icrs_builtin = moon.transform_to(icrs_frame)
moon_hcrs_builtin = moon.transform_to(hcrs_frame)
moon_helioecl_builtin = moon.transform_to(ecl_frame)
moon_cirs_builtin = moon.transform_to(cirs_frame)
with solar_system_ephemeris.set('jpl'):
moon_icrs_jpl = moon.transform_to(icrs_frame)
moon_hcrs_jpl = moon.transform_to(hcrs_frame)
moon_helioecl_jpl = moon.transform_to(ecl_frame)
moon_cirs_jpl = moon.transform_to(cirs_frame)
# most transformations should differ by an amount which is
# non-zero but of order milliarcsecs
sep_icrs = moon_icrs_builtin.separation(moon_icrs_jpl)
sep_hcrs = moon_hcrs_builtin.separation(moon_hcrs_jpl)
sep_helioecl = moon_helioecl_builtin.separation(moon_helioecl_jpl)
sep_cirs = moon_cirs_builtin.separation(moon_cirs_jpl)
assert_allclose([sep_icrs, sep_hcrs, sep_helioecl], 0.0*u.deg, atol=10*u.mas)
assert all(sep > 10*u.microarcsecond for sep in (sep_icrs, sep_hcrs, sep_helioecl))
# CIRS should be the same
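    # (presumably because GCRS and CIRS are both geocentric, so the
    # ephemeris-dependent translation cancels and only a rotation remains)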
assert_allclose(sep_cirs, 0.0*u.deg, atol=1*u.microarcsecond)
import pytest
from ...tests.helper import assert_quantity_allclose
from ...units import allclose as quantity_allclose
from ... import units as u
from .. import Longitude, Latitude, EarthLocation
from ..sites import get_builtin_sites, get_downloaded_sites, SiteRegistry
def test_builtin_sites():
reg = get_builtin_sites()
greenwich = reg['greenwich']
lon, lat, el = greenwich.to_geodetic()
assert_quantity_allclose(lon, Longitude('0:0:0', unit=u.deg),
atol=10*u.arcsec)
assert_quantity_allclose(lat, Latitude('51:28:40', unit=u.deg),
atol=1*u.arcsec)
assert_quantity_allclose(el, 46*u.m, atol=1*u.m)
names = reg.names
assert 'greenwich' in names
assert 'example_site' in names
with pytest.raises(KeyError) as exc:
reg['nonexistent site']
assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use the 'names' attribute to see available sites."
@pytest.mark.remote_data(source='astropy')
def test_online_sites():
reg = get_downloaded_sites()
keck = reg['keck']
lon, lat, el = keck.to_geodetic()
assert_quantity_allclose(lon, -Longitude('155:28.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(lat, Latitude('19:49.7', unit=u.deg),
atol=0.001*u.deg)
assert_quantity_allclose(el, 4160*u.m, atol=1*u.m)
names = reg.names
assert 'keck' in names
assert 'ctio' in names
with pytest.raises(KeyError) as exc:
reg['nonexistent site']
assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use the 'names' attribute to see available sites."
with pytest.raises(KeyError) as exc:
reg['kec']
assert exc.value.args[0] == "Site 'kec' not in database. Use the 'names' attribute to see available sites. Did you mean one of: 'keck'?'"
@pytest.mark.remote_data(source='astropy')
# this will *try* the online version, so we have to make it remote_data, even
# though it could fall back on the non-remote version
def test_EarthLocation_basic():
greenwichel = EarthLocation.of_site('greenwich')
lon, lat, el = greenwichel.to_geodetic()
assert_quantity_allclose(lon, Longitude('0:0:0', unit=u.deg),
atol=10*u.arcsec)
assert_quantity_allclose(lat, Latitude('51:28:40', unit=u.deg),
atol=1*u.arcsec)
assert_quantity_allclose(el, 46*u.m, atol=1*u.m)
names = EarthLocation.get_site_names()
assert 'greenwich' in names
assert 'example_site' in names
with pytest.raises(KeyError) as exc:
EarthLocation.of_site('nonexistent site')
assert exc.value.args[0] == "Site 'nonexistent site' not in database. Use EarthLocation.get_site_names to see available sites."
def test_EarthLocation_state_offline():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_builtin=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_builtin=True)
assert oldreg is not newreg
@pytest.mark.remote_data(source='astropy')
def test_EarthLocation_state_online():
EarthLocation._site_registry = None
EarthLocation._get_site_registry(force_download=True)
assert EarthLocation._site_registry is not None
oldreg = EarthLocation._site_registry
newreg = EarthLocation._get_site_registry()
assert oldreg is newreg
newreg = EarthLocation._get_site_registry(force_download=True)
assert oldreg is not newreg
def test_registry():
reg = SiteRegistry()
assert len(reg.names) == 0
names = ['sitea', 'site A']
loc = EarthLocation.from_geodetic(lat=1*u.deg, lon=2*u.deg, height=3*u.km)
reg.add_site(names, loc)
assert len(reg.names) == 2
loc1 = reg['SIteA']
assert loc1 is loc
loc2 = reg['sIte a']
assert loc2 is loc
def test_non_EarthLocation():
"""
A regression test for a typo bug pointed out at the bottom of
https://github.com/astropy/astropy/pull/4042
"""
class EarthLocation2(EarthLocation):
pass
    # This keeps us from needing to use remote_data
    # note that this does *not* mess up the registry for EarthLocation because
    # the registry is cached on a per-class basis
EarthLocation2._get_site_registry(force_builtin=True)
el2 = EarthLocation2.of_site('greenwich')
assert type(el2) is EarthLocation2
assert el2.info.name == 'Royal Observatory Greenwich'
def check_builtin_matches_remote(download_url=True):
"""
This function checks that the builtin sites registry is consistent with the
remote registry (or a registry at some other location).
    Note that currently this is *not* run by the testing suite (because it
doesn't start with "test", and is instead meant to be used as a check
before merging changes in astropy-data)
"""
builtin_registry = EarthLocation._get_site_registry(force_builtin=True)
dl_registry = EarthLocation._get_site_registry(force_download=download_url)
in_dl = {}
matches = {}
for name in builtin_registry.names:
in_dl[name] = name in dl_registry
if in_dl[name]:
matches[name] = quantity_allclose(builtin_registry[name], dl_registry[name])
else:
matches[name] = False
if not all(matches.values()):
# this makes sure we actually see which don't match
print("In builtin registry but not in download:")
for name in in_dl:
if not in_dl[name]:
print(' ', name)
print("In both but not the same value:")
for name in matches:
if not matches[name] and in_dl[name]:
print(' ', name, 'builtin:', builtin_registry[name], 'download:', dl_registry[name])
assert False, "Builtin and download registry aren't consistent - failures printed to stdout"
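# A minimal usage sketch (not collected by pytest, and assuming network
# access; the URL argument is illustrative only):
#
#     check_builtin_matches_remote()          # compare against default download
#     check_builtin_matches_remote(some_url)  # or a candidate registry URL,
#                                             # assuming force_download accepts one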
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the SkyCoord class. Note that there are also SkyCoord tests in
test_api_ape5.py
"""
import copy
import pytest
import numpy as np
import numpy.testing as npt
from ... import units as u
from ...tests.helper import (catch_warnings,
assert_quantity_allclose as assert_allclose)
from ..representation import REPRESENTATION_CLASSES
from ...coordinates import (ICRS, FK4, FK5, Galactic, SkyCoord, Angle,
SphericalRepresentation, CartesianRepresentation,
UnitSphericalRepresentation, AltAz,
BaseCoordinateFrame, Attribute,
frame_transform_graph, RepresentationMapping)
from ...coordinates import Latitude, EarthLocation
from ...time import Time
from ...utils import minversion, isiterable
from ...utils.compat import NUMPY_LT_1_14
from ...utils.exceptions import AstropyDeprecationWarning
from ...units import allclose as quantity_allclose
RA = 1.0 * u.deg
DEC = 2.0 * u.deg
C_ICRS = ICRS(RA, DEC)
C_FK5 = C_ICRS.transform_to(FK5)
J2001 = Time('J2001', scale='utc')
def allclose(a, b, rtol=0.0, atol=None):
if atol is None:
atol = 1.e-8 * getattr(a, 'unit', 1.)
return quantity_allclose(a, b, rtol, atol)
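# Note: the default atol above picks up the unit of ``a`` (or 1. for plain
# floats), so this helper works for Quantity and dimensionless inputs alike.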
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False):
OLDER_SCIPY = False
else:
OLDER_SCIPY = True
def test_transform_to():
for frame in (FK5, FK5(equinox=Time('J1975.0')),
FK4, FK4(equinox=Time('J1975.0')),
SkyCoord(RA, DEC, 'fk4', equinox='J1980')):
c_frame = C_ICRS.transform_to(frame)
s_icrs = SkyCoord(RA, DEC, frame='icrs')
s_frame = s_icrs.transform_to(frame)
assert allclose(c_frame.ra, s_frame.ra)
assert allclose(c_frame.dec, s_frame.dec)
assert allclose(c_frame.distance, s_frame.distance)
# set up for parametrized test
rt_sets = []
rt_frames = [ICRS, FK4, FK5, Galactic]
for rt_frame0 in rt_frames:
for rt_frame1 in rt_frames:
for equinox0 in (None, 'J1975.0'):
for obstime0 in (None, 'J1980.0'):
for equinox1 in (None, 'J1975.0'):
for obstime1 in (None, 'J1980.0'):
rt_sets.append((rt_frame0, rt_frame1,
equinox0, equinox1,
obstime0, obstime1))
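# 4 frames x 4 frames x 2 choices each for equinox0/obstime0/equinox1/obstime1
# gives 4 * 4 * 2**4 == 256 parameter combinations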
rt_args = ('frame0', 'frame1', 'equinox0', 'equinox1', 'obstime0', 'obstime1')
@pytest.mark.parametrize(rt_args, rt_sets)
def test_round_tripping(frame0, frame1, equinox0, equinox1, obstime0, obstime1):
"""
Test round tripping out and back using transform_to in every combination.
"""
attrs0 = {'equinox': equinox0, 'obstime': obstime0}
attrs1 = {'equinox': equinox1, 'obstime': obstime1}
# Remove None values
attrs0 = dict((k, v) for k, v in attrs0.items() if v is not None)
attrs1 = dict((k, v) for k, v in attrs1.items() if v is not None)
# Go out and back
sc = SkyCoord(frame0, RA, DEC, **attrs0)
# Keep only frame attributes for frame1
attrs1 = dict((attr, val) for attr, val in attrs1.items()
if attr in frame1.get_frame_attr_names())
sc2 = sc.transform_to(frame1(**attrs1))
# When coming back only keep frame0 attributes for transform_to
attrs0 = dict((attr, val) for attr, val in attrs0.items()
if attr in frame0.get_frame_attr_names())
# also, if any are None, fill in with defaults
for attrnm in frame0.get_frame_attr_names():
if attrs0.get(attrnm, None) is None:
if attrnm == 'obstime' and frame0.get_frame_attr_names()[attrnm] is None:
if 'equinox' in attrs0:
attrs0[attrnm] = attrs0['equinox']
else:
attrs0[attrnm] = frame0.get_frame_attr_names()[attrnm]
sc_rt = sc2.transform_to(frame0(**attrs0))
if frame0 is Galactic:
assert allclose(sc.l, sc_rt.l)
assert allclose(sc.b, sc_rt.b)
else:
assert allclose(sc.ra, sc_rt.ra)
assert allclose(sc.dec, sc_rt.dec)
if equinox0:
assert type(sc.equinox) is Time and sc.equinox == sc_rt.equinox
if obstime0:
assert type(sc.obstime) is Time and sc.obstime == sc_rt.obstime
def test_coord_init_string():
"""
    Spherical coordinate input given as strings.
"""
sc = SkyCoord('1d 2d')
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord('1d', '2d')
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord('1°2′3″', '2°3′4″')
assert allclose(sc.ra, Angle('1°2′3″'))
assert allclose(sc.dec, Angle('2°3′4″'))
sc = SkyCoord('1°2′3″ 2°3′4″')
assert allclose(sc.ra, Angle('1°2′3″'))
assert allclose(sc.dec, Angle('2°3′4″'))
with pytest.raises(ValueError) as err:
SkyCoord('1d 2d 3d')
assert "Cannot parse first argument data" in str(err)
sc1 = SkyCoord('8 00 00 +5 00 00.0', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc1, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc11 = SkyCoord('8h00m00s+5d00m00.0s', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc11, SkyCoord)
    assert allclose(sc11.ra, Angle(120 * u.deg))
    assert allclose(sc11.dec, Angle(5 * u.deg))
sc2 = SkyCoord('8 00 -5 00 00.0', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc2, SkyCoord)
assert allclose(sc2.ra, Angle(120 * u.deg))
assert allclose(sc2.dec, Angle(-5 * u.deg))
sc3 = SkyCoord('8 00 -5 00.6', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc3, SkyCoord)
assert allclose(sc3.ra, Angle(120 * u.deg))
assert allclose(sc3.dec, Angle(-5.01 * u.deg))
sc4 = SkyCoord('J080000.00-050036.00', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc4, SkyCoord)
assert allclose(sc4.ra, Angle(120 * u.deg))
assert allclose(sc4.dec, Angle(-5.01 * u.deg))
sc41 = SkyCoord('J080000+050036', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc41, SkyCoord)
assert allclose(sc41.ra, Angle(120 * u.deg))
assert allclose(sc41.dec, Angle(+5.01 * u.deg))
sc5 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc5, SkyCoord)
assert allclose(sc5.ra, Angle(120.15 * u.deg))
assert allclose(sc5.dec, Angle(-5.01 * u.deg))
sc6 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc6, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord('8h00.6m-5d00.6m', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc61, SkyCoord)
    assert allclose(sc61.ra, Angle(120.15 * u.deg))
    assert allclose(sc61.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord('8h00.6-5d00.6', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc61, SkyCoord)
    assert allclose(sc61.ra, Angle(120.15 * u.deg))
    assert allclose(sc61.dec, Angle(-5.01 * u.deg))
sc7 = SkyCoord("J1874221.60+122421.6", unit=u.deg)
assert isinstance(sc7, SkyCoord)
assert allclose(sc7.ra, Angle(187.706 * u.deg))
assert allclose(sc7.dec, Angle(12.406 * u.deg))
with pytest.raises(ValueError):
SkyCoord('8 00 -5 00.6', unit=(u.deg, u.deg), frame='galactic')
def test_coord_init_unit():
"""
Test variations of the unit keyword.
"""
for unit in ('deg', 'deg,deg', ' deg , deg ', u.deg, (u.deg, u.deg),
np.array(['deg', 'deg'])):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(1 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ('hourangle', 'hourangle,hourangle', ' hourangle , hourangle ',
u.hourangle, [u.hourangle, u.hourangle]):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(30 * u.deg))
for unit in ('hourangle,deg', (u.hourangle, u.deg)):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ('deg,deg,deg,deg', [u.deg, u.deg, u.deg, u.deg], None):
with pytest.raises(ValueError) as err:
SkyCoord(1, 2, unit=unit)
assert 'Unit keyword must have one to three unit values' in str(err)
for unit in ('m', (u.m, u.deg), ''):
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, unit=unit)
def test_coord_init_list():
"""
    Input coordinates given as lists of strings, tuples, or quantities.
"""
sc = SkyCoord([('1d', '2d'),
(1 * u.deg, 2 * u.deg),
'1d 2d',
('1°', '2°'),
'1° 2°'], unit='deg')
assert allclose(sc.ra, Angle('1d'))
assert allclose(sc.dec, Angle('2d'))
with pytest.raises(ValueError) as err:
SkyCoord(['1d 2d 3d'])
assert "Cannot parse first argument data" in str(err)
with pytest.raises(ValueError) as err:
SkyCoord([('1d', '2d', '3d')])
assert "Cannot parse first argument data" in str(err)
sc = SkyCoord([1 * u.deg, 1 * u.deg], [2 * u.deg, 2 * u.deg])
assert allclose(sc.ra, Angle('1d'))
assert allclose(sc.dec, Angle('2d'))
with pytest.raises(ValueError) as err:
SkyCoord([1 * u.deg, 2 * u.deg]) # this list is taken as RA w/ missing dec
assert "One or more elements of input sequence does not have a length" in str(err)
def test_coord_init_array():
"""
    Input in the form of a list, nested list, or numpy array
"""
for a in (['1 2', '3 4'],
[['1', '2'], ['3', '4']],
[[1, 2], [3, 4]]):
sc = SkyCoord(a, unit='deg')
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
sc = SkyCoord(np.array(a), unit='deg')
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
def test_coord_init_representation():
"""
    Spherical or Cartesian representation input coordinates.
"""
coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc)
sc = SkyCoord(coord, 'icrs')
assert allclose(sc.ra, coord.lon)
assert allclose(sc.dec, coord.lat)
assert allclose(sc.distance, coord.distance)
with pytest.raises(ValueError) as err:
SkyCoord(coord, 'icrs', ra='1d')
assert "conflicts with keyword argument 'ra'" in str(err)
coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one)
sc = SkyCoord(coord, 'icrs')
sc_cart = sc.represent_as(CartesianRepresentation)
assert allclose(sc_cart.x, 1.0)
assert allclose(sc_cart.y, 2.0)
assert allclose(sc_cart.z, 3.0)
FRAME_DEPRECATION_WARNING = ("Passing a frame as a positional argument is now "
"deprecated, use the frame= keyword argument "
"instead.")
def test_frame_init():
"""
Different ways of providing the frame.
"""
sc = SkyCoord(RA, DEC, frame='icrs')
assert sc.frame.name == 'icrs'
sc = SkyCoord(RA, DEC, frame=ICRS)
assert sc.frame.name == 'icrs'
with catch_warnings(AstropyDeprecationWarning) as w:
sc = SkyCoord(RA, DEC, 'icrs')
assert sc.frame.name == 'icrs'
assert len(w) == 1
assert str(w[0].message) == FRAME_DEPRECATION_WARNING
with catch_warnings(AstropyDeprecationWarning) as w:
sc = SkyCoord(RA, DEC, ICRS)
assert sc.frame.name == 'icrs'
assert len(w) == 1
assert str(w[0].message) == FRAME_DEPRECATION_WARNING
with catch_warnings(AstropyDeprecationWarning) as w:
sc = SkyCoord('icrs', RA, DEC)
assert sc.frame.name == 'icrs'
assert len(w) == 1
assert str(w[0].message) == FRAME_DEPRECATION_WARNING
with catch_warnings(AstropyDeprecationWarning) as w:
sc = SkyCoord(ICRS, RA, DEC)
assert sc.frame.name == 'icrs'
assert len(w) == 1
assert str(w[0].message) == FRAME_DEPRECATION_WARNING
sc = SkyCoord(sc)
assert sc.frame.name == 'icrs'
sc = SkyCoord(C_ICRS)
assert sc.frame.name == 'icrs'
    sc = SkyCoord(C_ICRS, frame='icrs')
assert sc.frame.name == 'icrs'
with pytest.raises(ValueError) as err:
SkyCoord(C_ICRS, frame='galactic')
assert 'Cannot override frame=' in str(err)
def test_attr_inheritance():
"""
    When initializing from an existing coord the frame attrs like
    equinox should be inherited by the new SkyCoord. If there is a conflict
then raise an exception.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # Doesn't have equinox there so we get FK4 defaults
assert sc2.equinox != sc.equinox
assert sc2.obstime != sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # sc.frame has equinox, obstime
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
def test_attr_conflicts():
"""
Check conflicts resolution between coordinate attributes and init kwargs.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox='J1999', obstime='J2001')
# OK because sc.frame doesn't have obstime
SkyCoord(sc.frame, equinox='J1999', obstime='J2100')
# Not OK if attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox='J1999', obstime='J2002')
assert "Coordinate attribute 'obstime'=" in str(err)
# Same game but with fk4 which has equinox and obstime frame attrs
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox='J1999', obstime='J2001')
# Not OK if SkyCoord attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox='J1999', obstime='J2002')
assert "Coordinate attribute 'obstime'=" in str(err)
# Not OK because sc.frame has different attrs
with pytest.raises(ValueError) as err:
SkyCoord(sc.frame, equinox='J1999', obstime='J2002')
assert "Coordinate attribute 'obstime'=" in str(err)
def test_frame_attr_getattr():
"""
When accessing frame attributes like equinox, the value should come
from self.frame when that object has the relevant attribute, otherwise
from self.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
assert sc.equinox == 'J1999' # Just the raw value (not validated)
assert sc.obstime == 'J2001'
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
assert sc.equinox == Time('J1999') # Coming from the self.frame object
assert sc.obstime == Time('J2001')
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999')
assert sc.equinox == Time('J1999')
assert sc.obstime == Time('J1999')
def test_to_string():
"""
Basic testing of converting SkyCoord to strings. This just tests
    for a single input coordinate and a 1-element list. It does not
test the underlying `Angle.to_string` method itself.
"""
coord = '1h2m3s 1d2m3s'
for wrap in (lambda x: x, lambda x: [x]):
sc = SkyCoord(wrap(coord))
assert sc.to_string() == wrap('15.5125 1.03417')
assert sc.to_string('dms') == wrap('15d30m45s 1d02m03s')
assert sc.to_string('hmsdms') == wrap('01h02m03s +01d02m03s')
with_kwargs = sc.to_string('hmsdms', precision=3, pad=True, alwayssign=True)
assert with_kwargs == wrap('+01h02m03.000s +01d02m03.000s')
def test_seps():
sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs')
sc2 = SkyCoord(0 * u.deg, 2 * u.deg, frame='icrs')
sep = sc1.separation(sc2)
assert (sep - 1 * u.deg)/u.deg < 1e-10
with pytest.raises(ValueError):
sc1.separation_3d(sc2)
sc3 = SkyCoord(1 * u.deg, 1 * u.deg, distance=1 * u.kpc, frame='icrs')
sc4 = SkyCoord(1 * u.deg, 1 * u.deg, distance=2 * u.kpc, frame='icrs')
sep3d = sc3.separation_3d(sc4)
assert sep3d == 1 * u.kpc
def test_repr():
sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs')
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc)
assert repr(sc1) == ('<SkyCoord (ICRS): (ra, dec) in deg\n'
' ({})>').format(' 0., 1.' if NUMPY_LT_1_14 else
'0., 1.')
assert repr(sc2) == ('<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)\n'
' ({})>').format(' 1., 1., 1.' if NUMPY_LT_1_14
else '1., 1., 1.')
sc3 = SkyCoord(0.25 * u.deg, [1, 2.5] * u.deg, frame='icrs')
assert repr(sc3).startswith('<SkyCoord (ICRS): (ra, dec) in deg\n')
sc_default = SkyCoord(0 * u.deg, 1 * u.deg)
assert repr(sc_default) == ('<SkyCoord (ICRS): (ra, dec) in deg\n'
' ({})>').format(' 0., 1.' if NUMPY_LT_1_14
else '0., 1.')
def test_repr_altaz():
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc)
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time('2005-03-21 00:00:00')
sc4 = sc2.transform_to(AltAz(location=loc, obstime=time))
assert repr(sc4).startswith("<SkyCoord (AltAz: obstime=2005-03-21 00:00:00.000, "
"location=(-2309223., -3695529., "
"-4641767.) m, pressure=0.0 hPa, "
"temperature=0.0 deg_C, relative_humidity=0.0, "
"obswl=1.0 micron): (az, alt, distance) in "
"(deg, deg, m)\n")
def test_ops():
"""
Tests miscellaneous operations like `len`
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs')
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg, frame='icrs')
sc_empty = SkyCoord([] * u.deg, [] * u.deg, frame='icrs')
assert sc.isscalar
assert not sc_arr.isscalar
assert not sc_empty.isscalar
with pytest.raises(TypeError):
len(sc)
assert len(sc_arr) == 2
assert len(sc_empty) == 0
assert bool(sc)
assert bool(sc_arr)
assert not bool(sc_empty)
assert sc_arr[0].isscalar
assert len(sc_arr[:1]) == 1
# A scalar shouldn't be indexable
with pytest.raises(TypeError):
sc[0:]
# but it should be possible to just get an item
sc_item = sc[()]
assert sc_item.shape == ()
# and to turn it into an array
sc_1d = sc[np.newaxis]
assert sc_1d.shape == (1,)
with pytest.raises(TypeError):
iter(sc)
assert not isiterable(sc)
assert isiterable(sc_arr)
assert isiterable(sc_empty)
it = iter(sc_arr)
assert next(it).dec == sc_arr[0].dec
assert next(it).dec == sc_arr[1].dec
with pytest.raises(StopIteration):
next(it)
def test_none_transform():
"""
Ensure that transforming from a SkyCoord with no frame provided works like
ICRS
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg)
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg)
sc2 = sc.transform_to(ICRS)
assert sc.ra == sc2.ra and sc.dec == sc2.dec
sc5 = sc.transform_to('fk5')
assert sc5.ra == sc2.transform_to('fk5').ra
sc_arr2 = sc_arr.transform_to(ICRS)
sc_arr5 = sc_arr.transform_to('fk5')
npt.assert_array_equal(sc_arr5.ra, sc_arr2.transform_to('fk5').ra)
def test_position_angle():
c1 = SkyCoord(0*u.deg, 0*u.deg)
c2 = SkyCoord(1*u.deg, 0*u.deg)
assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0*u.deg)
c3 = SkyCoord(1*u.deg, 0.1*u.deg)
assert c1.position_angle(c3) < 90*u.deg
c4 = SkyCoord(0*u.deg, 1*u.deg)
assert_allclose(c1.position_angle(c4), 0*u.deg)
carr1 = SkyCoord(0*u.deg, [0, 1, 2]*u.deg)
carr2 = SkyCoord([-1, -2, -3]*u.deg, [0.1, 1.1, 2.1]*u.deg)
res = carr1.position_angle(carr2)
assert res.shape == (3,)
assert np.all(res < 360*u.degree)
assert np.all(res > 270*u.degree)
cicrs = SkyCoord(0*u.deg, 0*u.deg, frame='icrs')
cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5')
# because of the frame transform, it's just a *bit* more than 90 degrees
assert cicrs.position_angle(cfk5) > 90.0 * u.deg
assert cicrs.position_angle(cfk5) < 91.0 * u.deg
def test_position_angle_directly():
"""Regression check for #3800: position_angle should accept floats."""
from ..angle_utilities import position_angle
result = position_angle(10., 20., 10., 20.)
assert result.unit is u.radian
assert result.value == 0.
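# For reference, a minimal sketch of the standard spherical position-angle
# formula (measured East of North) that ``position_angle`` presumably
# implements -- stated as an assumption, not a quote of the implementation:
def _position_angle_sketch(lon1, lat1, lon2, lat2):
    """Position angle from point 1 toward point 2; all angles in radians."""
    dlon = lon2 - lon1
    x = np.sin(lat2)*np.cos(lat1) - np.cos(lat2)*np.sin(lat1)*np.cos(dlon)
    y = np.sin(dlon)*np.cos(lat2)
    # e.g. _position_angle_sketch(0., 0., 0.01, 0.) is ~pi/2 (due East)
    return np.arctan2(y, x)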
def test_sep_pa_equivalence():
"""Regression check for bug in #5702.
PA and separation from object 1 to 2 should be consistent with those
from 2 to 1
"""
cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5')
cfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950')
# test with both default and explicit equinox #5722 and #3106
sep_forward = cfk5.separation(cfk5B1950)
sep_backward = cfk5B1950.separation(cfk5)
assert sep_forward != 0 and sep_backward != 0
assert_allclose(sep_forward, sep_backward)
posang_forward = cfk5.position_angle(cfk5B1950)
posang_backward = cfk5B1950.position_angle(cfk5)
assert posang_forward != 0 and posang_backward != 0
assert 179 < (posang_forward - posang_backward).wrap_at(360*u.deg).degree < 181
dcfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', distance=1*u.pc)
dcfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950',
distance=1.*u.pc)
sep3d_forward = dcfk5.separation_3d(dcfk5B1950)
sep3d_backward = dcfk5B1950.separation_3d(dcfk5)
assert sep3d_forward != 0 and sep3d_backward != 0
assert_allclose(sep3d_forward, sep3d_backward)
def test_table_to_coord():
"""
Checks "end-to-end" use of `Table` with `SkyCoord` - the `Quantity`
    initializer is the intermediary that translates the table columns into
    something coordinates understands.
    (Regression test for #1762)
"""
from ...table import Table, Column
t = Table()
t.add_column(Column(data=[1, 2, 3], name='ra', unit=u.deg))
t.add_column(Column(data=[4, 5, 6], name='dec', unit=u.deg))
c = SkyCoord(t['ra'], t['dec'])
assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg)
assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg)
def assert_quantities_allclose(coord, q1s, attrs):
"""
    Compare two tuples of quantities. This assumes that the values in q1s are
    of order 1 and uses atol=1e-13, rtol=0. It also asserts that the units of the
two quantities are the *same*, in order to check that the representation
output has the expected units.
"""
q2s = [getattr(coord, attr) for attr in attrs]
assert len(q1s) == len(q2s)
for q1, q2 in zip(q1s, q2s):
assert q1.shape == q2.shape
assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit)
# Sets of inputs corresponding to Galactic frame
base_unit_attr_sets = [
('spherical', u.karcsec, u.karcsec, u.kpc, Latitude, 'l', 'b', 'distance'),
('unitspherical', u.karcsec, u.karcsec, None, Latitude, 'l', 'b', None),
('physicsspherical', u.karcsec, u.karcsec, u.kpc, Angle, 'phi', 'theta', 'r'),
('cartesian', u.km, u.km, u.km, u.Quantity, 'u', 'v', 'w'),
('cylindrical', u.km, u.karcsec, u.km, Angle, 'rho', 'phi', 'z')
]
units_attr_sets = []
for base_unit_attr_set in base_unit_attr_sets:
repr_name = base_unit_attr_set[0]
for representation in (repr_name, REPRESENTATION_CLASSES[repr_name]):
for c1, c2, c3 in ((1, 2, 3), ([1], [2], [3])):
for arrayify in True, False:
if arrayify:
c1 = np.array(c1)
c2 = np.array(c2)
c3 = np.array(c3)
units_attr_sets.append(base_unit_attr_set + (representation, c1, c2, c3))
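# 5 base sets x 2 representation specs (name or class) x 2 value shapes
# (scalars or length-1 lists) x 2 arrayify choices = 40 parameter sets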
units_attr_args = ('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3', 'representation', 'c1', 'c2', 'c3')
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets if x[0] != 'unitspherical'])
def test_skycoord_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3,
representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(Galactic, c1, c2, c3, unit=(unit1, unit2, unit3),
representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
1000*c3*u.Unit(unit3/1000), Galactic,
unit=(unit1, unit2, unit3), representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr3: c3}
sc = SkyCoord(Galactic, c1, c2, unit=(unit1, unit2, unit3),
representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr1: c1, attr2: c2, attr3: c3}
sc = SkyCoord(Galactic, unit=(unit1, unit2, unit3),
representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets
if x[0] in ('spherical', 'unitspherical')])
def test_skycoord_spherical_two_components(repr_name, unit1, unit2, unit3, cls2,
attr1, attr2, attr3, representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(Galactic, c1, c2, unit=(unit1, unit2),
representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
Galactic,
unit=(unit1, unit2, unit3), representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
kwargs = {attr1: c1, attr2: c2}
sc = SkyCoord(Galactic, unit=(unit1, unit2),
representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets if x[0] != 'unitspherical'])
def test_galactic_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3,
representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
1000*c3*u.Unit(unit3/1000), representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr3: c3*unit3}
sc = Galactic(c1*unit1, c2*unit2,
representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr1: c1*unit1, attr2: c2*unit2, attr3: c3*unit3}
sc = Galactic(representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets
if x[0] in ('spherical', 'unitspherical')])
def test_galactic_spherical_two_components(repr_name, unit1, unit2, unit3, cls2,
attr1, attr2, attr3, representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
sc = Galactic(c1*unit1, c2*unit2, representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
kwargs = {attr1: c1*unit1, attr2: c2*unit2}
sc = Galactic(representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
@pytest.mark.parametrize(('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3'),
[x for x in base_unit_attr_sets if x[0] != 'unitspherical'])
def test_skycoord_coordinate_input(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3):
c1, c2, c3 = 1, 2, 3
sc = SkyCoord([(c1, c2, c3)], unit=(unit1, unit2, unit3), representation=repr_name,
frame='galactic')
assert_quantities_allclose(sc, ([c1]*unit1, [c2]*unit2, [c3]*unit3), (attr1, attr2, attr3))
c1, c2, c3 = 1*unit1, 2*unit2, 3*unit3
sc = SkyCoord([(c1, c2, c3)], representation=repr_name, frame='galactic')
assert_quantities_allclose(sc, ([1]*unit1, [2]*unit2, [3]*unit3), (attr1, attr2, attr3))
def test_skycoord_string_coordinate_input():
sc = SkyCoord('01 02 03 +02 03 04', unit='deg', representation='unitspherical')
assert_quantities_allclose(sc, (Angle('01:02:03', unit='deg'),
Angle('02:03:04', unit='deg')),
('ra', 'dec'))
sc = SkyCoord(['01 02 03 +02 03 04'], unit='deg', representation='unitspherical')
assert_quantities_allclose(sc, (Angle(['01:02:03'], unit='deg'),
Angle(['02:03:04'], unit='deg')),
('ra', 'dec'))
def test_units():
sc = SkyCoord(1, 2, 3, unit='m', representation='cartesian') # All get meters
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2*u.km, 3, unit='m', representation='cartesian') # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit=u.m, representation='cartesian') # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit='m, km, pc', representation='cartesian')
assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z'))
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, 3, unit=(u.m, u.m), representation='cartesian')
assert 'should have matching physical types' in str(err)
    sc = SkyCoord(1, 2, 3, unit=(u.m, u.km, u.pc), representation='cartesian')
assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z'))
@pytest.mark.xfail
def test_units_known_fail():
# should fail but doesn't => corner case oddity
with pytest.raises(u.UnitsError):
SkyCoord(1, 2, 3, unit=u.deg, representation='spherical')
def test_nodata_failure():
with pytest.raises(ValueError):
SkyCoord()
@pytest.mark.parametrize(('mode', 'origin'), [('wcs', 0),
('all', 0),
('all', 1)])
def test_wcs_methods(mode, origin):
from ...wcs import WCS
from ...utils.data import get_pkg_data_contents
from ...wcs.utils import pixel_to_skycoord
header = get_pkg_data_contents('../../wcs/tests/maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp, yp = ref.to_pixel(wcs, mode=mode, origin=origin)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# also try to round-trip with `from_pixel`
scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs')
assert_allclose(scnew.ra.degree, ref.ra.degree)
assert_allclose(scnew.dec.degree, ref.dec.degree)
# Also make sure the right type comes out
class SkyCoord2(SkyCoord):
pass
scnew2 = SkyCoord2.from_pixel(xp, yp, wcs, mode=mode, origin=origin)
assert scnew.__class__ is SkyCoord
assert scnew2.__class__ is SkyCoord2
def test_frame_attr_transform_inherit():
"""
Test that frame attributes get inherited as expected during transform.
Driven by #3106.
"""
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK5)
c2 = c.transform_to(FK4)
assert c2.equinox.value == 'B1950.000'
assert c2.obstime.value == 'B1950.000'
c2 = c.transform_to(FK4(equinox='J1975', obstime='J1980'))
assert c2.equinox.value == 'J1975.000'
assert c2.obstime.value == 'J1980.000'
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4)
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J2000.000'
assert c2.obstime is None
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, obstime='J1980')
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J2000.000'
assert c2.obstime.value == 'J1980.000'
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, equinox='J1975', obstime='J1980')
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J1975.000'
assert c2.obstime.value == 'J1980.000'
c2 = c.transform_to(FK5(equinox='J1990'))
assert c2.equinox.value == 'J1990.000'
assert c2.obstime.value == 'J1980.000'
# The work-around for #5722
c = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5')
c1 = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5', equinox='B1950.000')
c2 = c1.transform_to(c)
assert not c2.is_equivalent_frame(c) # counterintuitive, but documented
assert c2.equinox.value == 'B1950.000'
c3 = c1.transform_to(c, merge_attributes=False)
assert c3.equinox.value == 'J2000.000'
assert c3.is_equivalent_frame(c)
def test_deepcopy():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
c2 = copy.copy(c1)
c3 = copy.deepcopy(c1)
c4 = SkyCoord([1, 2] * u.m, [2, 3] * u.m, [3, 4] * u.m, representation='cartesian', frame='fk5',
obstime='J1999.9', equinox='J1988.8')
c5 = copy.deepcopy(c4)
assert np.all(c5.x == c4.x) # and y and z
assert c5.frame.name == c4.frame.name
assert c5.obstime == c4.obstime
assert c5.equinox == c4.equinox
assert c5.representation == c4.representation
def test_no_copy():
c1 = SkyCoord(np.arange(10.) * u.hourangle, np.arange(20., 30.) * u.deg)
c2 = SkyCoord(c1, copy=False)
# Note: c1.ra and c2.ra will *not* share memory, as these are recalculated
# to be in "preferred" units. See discussion in #4883.
assert np.may_share_memory(c1.data.lon, c2.data.lon)
c3 = SkyCoord(c1, copy=True)
assert not np.may_share_memory(c1.data.lon, c3.data.lon)
def test_immutable():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(AttributeError):
c1.ra = 3.0
c1.foo = 42
assert c1.foo == 42
@pytest.mark.skipif(str('not HAS_SCIPY'))
@pytest.mark.skipif(str('OLDER_SCIPY'))
def test_search_around():
"""
Test the search_around_* methods
Here we don't actually test the values are right, just that the methods of
SkyCoord work. The accuracy tests are in ``test_matching.py``
"""
from ...utils import NumpyRNGContext
with NumpyRNGContext(987654321):
sc1 = SkyCoord(np.random.rand(20) * 360.*u.degree,
(np.random.rand(20) * 180. - 90.)*u.degree)
sc2 = SkyCoord(np.random.rand(100) * 360. * u.degree,
(np.random.rand(100) * 180. - 90.)*u.degree)
sc1ds = SkyCoord(ra=sc1.ra, dec=sc1.dec, distance=np.random.rand(20)*u.kpc)
sc2ds = SkyCoord(ra=sc2.ra, dec=sc2.dec, distance=np.random.rand(100)*u.kpc)
idx1_sky, idx2_sky, d2d_sky, d3d_sky = sc1.search_around_sky(sc2, 10*u.deg)
idx1_3d, idx2_3d, d2d_3d, d3d_3d = sc1ds.search_around_3d(sc2ds, 250*u.pc)
def test_init_with_frame_instance_keyword():
# Frame instance
c1 = SkyCoord(3 * u.deg, 4 * u.deg,
frame=FK5(equinox='J2010'))
assert c1.equinox == Time('J2010')
# Frame instance with data (data gets ignored)
c2 = SkyCoord(3 * u.deg, 4 * u.deg,
frame=FK5(1. * u.deg, 2 * u.deg,
equinox='J2010'))
assert c2.equinox == Time('J2010')
assert allclose(c2.ra.degree, 3)
assert allclose(c2.dec.degree, 4)
# SkyCoord instance
c3 = SkyCoord(3 * u.deg, 4 * u.deg, frame=c1)
assert c3.equinox == Time('J2010')
# Check duplicate arguments
with pytest.raises(ValueError) as exc:
c = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox='J2010'), equinox='J2001')
assert exc.value.args[0] == ("cannot specify frame attribute "
"'equinox' directly in SkyCoord "
"since a frame instance was passed in")
def test_init_with_frame_instance_positional():
# Frame instance
with pytest.raises(ValueError) as exc:
c1 = SkyCoord(3 * u.deg, 4 * u.deg, FK5(equinox='J2010'))
assert exc.value.args[0] == ("FK5 instance cannot be passed as a "
"positional argument for the frame, "
"pass it using the frame= keyword "
"instead.")
# Positional frame instance with data raises exception
with pytest.raises(ValueError) as exc:
SkyCoord(3 * u.deg, 4 * u.deg, FK5(1. * u.deg, 2 * u.deg, equinox='J2010'))
assert exc.value.args[0] == ("FK5 instance cannot be passed as a "
"positional argument for the frame, "
"pass it using the frame= keyword "
"instead.")
# Positional SkyCoord instance (for frame) raises exception
with pytest.raises(ValueError) as exc:
SkyCoord(3 * u.deg, 4 * u.deg, SkyCoord(1. * u.deg, 2 * u.deg, equinox='J2010'))
assert exc.value.args[0] == ("SkyCoord instance cannot be passed as a "
"positional argument for the frame, "
"pass it using the frame= keyword "
"instead.")
def test_guess_from_table():
from ...table import Table, Column
from ...utils import NumpyRNGContext
tab = Table()
with NumpyRNGContext(987654321):
tab.add_column(Column(data=np.random.rand(1000), unit='deg', name='RA[J2000]'))
tab.add_column(Column(data=np.random.rand(1000), unit='deg', name='DEC[J2000]'))
sc = SkyCoord.guess_from_table(tab)
npt.assert_array_equal(sc.ra.deg, tab['RA[J2000]'])
npt.assert_array_equal(sc.dec.deg, tab['DEC[J2000]'])
# try without units in the table
tab['RA[J2000]'].unit = None
tab['DEC[J2000]'].unit = None
# should fail if not given explicitly
with pytest.raises(u.UnitsError):
sc2 = SkyCoord.guess_from_table(tab)
# but should work if provided
sc2 = SkyCoord.guess_from_table(tab, unit=u.deg)
    npt.assert_array_equal(sc2.ra.deg, tab['RA[J2000]'])
    npt.assert_array_equal(sc2.dec.deg, tab['DEC[J2000]'])
# should fail if two options are available - ambiguity bad!
tab.add_column(Column(data=np.random.rand(1000), name='RA_J1900'))
with pytest.raises(ValueError) as excinfo:
sc3 = SkyCoord.guess_from_table(tab, unit=u.deg)
assert 'J1900' in excinfo.value.args[0] and 'J2000' in excinfo.value.args[0]
# should also fail if user specifies something already in the table, but
# should succeed even if the user has to give one of the components
tab.remove_column('RA_J1900')
with pytest.raises(ValueError):
sc3 = SkyCoord.guess_from_table(tab, ra=tab['RA[J2000]'], unit=u.deg)
oldra = tab['RA[J2000]']
tab.remove_column('RA[J2000]')
sc3 = SkyCoord.guess_from_table(tab, ra=oldra, unit=u.deg)
npt.assert_array_equal(sc3.ra.deg, oldra)
npt.assert_array_equal(sc3.dec.deg, tab['DEC[J2000]'])
# check a few non-ICRS/spherical systems
x, y, z = np.arange(3).reshape(3, 1) * u.pc
l, b = np.arange(2).reshape(2, 1) * u.deg
tabcart = Table([x, y, z], names=('x', 'y', 'z'))
tabgal = Table([b, l], names=('b', 'l'))
sc_cart = SkyCoord.guess_from_table(tabcart, representation='cartesian')
npt.assert_array_equal(sc_cart.x, x)
npt.assert_array_equal(sc_cart.y, y)
npt.assert_array_equal(sc_cart.z, z)
sc_gal = SkyCoord.guess_from_table(tabgal, frame='galactic')
npt.assert_array_equal(sc_gal.l, l)
npt.assert_array_equal(sc_gal.b, b)
# also try some column names that *end* with the attribute name
tabgal['b'].name = 'gal_b'
tabgal['l'].name = 'gal_l'
SkyCoord.guess_from_table(tabgal, frame='galactic')
tabgal['gal_b'].name = 'blob'
tabgal['gal_l'].name = 'central'
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tabgal, frame='galactic')
def test_skycoord_list_creation():
"""
Test that SkyCoord can be created in a reasonable way with lists of SkyCoords
(regression for #2702)
"""
sc = SkyCoord(ra=[1, 2, 3]*u.deg, dec=[4, 5, 6]*u.deg)
sc0 = sc[0]
sc2 = sc[2]
scnew = SkyCoord([sc0, sc2])
assert np.all(scnew.ra == [1, 3]*u.deg)
assert np.all(scnew.dec == [4, 6]*u.deg)
# also check ranges
sc01 = sc[:2]
scnew2 = SkyCoord([sc01, sc2])
assert np.all(scnew2.ra == sc.ra)
assert np.all(scnew2.dec == sc.dec)
# now try with a mix of skycoord, frame, and repr objects
frobj = ICRS(2*u.deg, 5*u.deg)
reprobj = UnitSphericalRepresentation(3*u.deg, 6*u.deg)
scnew3 = SkyCoord([sc0, frobj, reprobj])
assert np.all(scnew3.ra == sc.ra)
assert np.all(scnew3.dec == sc.dec)
# should *fail* if different frame attributes or types are passed in
scfk5_j2000 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5')
with pytest.raises(ValueError):
SkyCoord([sc0, scfk5_j2000])
scfk5_j2010 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5', equinox='J2010')
with pytest.raises(ValueError):
SkyCoord([scfk5_j2000, scfk5_j2010])
# but they should inherit if they're all consistent
scfk5_2_j2010 = SkyCoord(2*u.deg, 5*u.deg, frame='fk5', equinox='J2010')
scfk5_3_j2010 = SkyCoord(3*u.deg, 6*u.deg, frame='fk5', equinox='J2010')
scnew4 = SkyCoord([scfk5_j2010, scfk5_2_j2010, scfk5_3_j2010])
assert np.all(scnew4.ra == sc.ra)
assert np.all(scnew4.dec == sc.dec)
assert scnew4.equinox == Time('J2010')
def test_nd_skycoord_to_string():
c = SkyCoord(np.ones((2, 2)), 1, unit=('deg', 'deg'))
ts = c.to_string()
assert np.all(ts.shape == c.shape)
assert np.all(ts == u'1 1')
def test_equiv_skycoord():
sci1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
sci2 = SkyCoord(1*u.deg, 3*u.deg, frame='icrs')
assert sci1.is_equivalent_frame(sci1)
assert sci1.is_equivalent_frame(sci2)
assert sci1.is_equivalent_frame(ICRS())
assert not sci1.is_equivalent_frame(FK5())
with pytest.raises(TypeError):
sci1.is_equivalent_frame(10)
scf1 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5')
scf2 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', equinox='J2005')
    # obstime is *not* an FK5 attribute, but we still want scf1 and scf3 to
    # come out different because they're part of SkyCoord
scf3 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', obstime='J2005')
assert scf1.is_equivalent_frame(scf1)
assert not scf1.is_equivalent_frame(sci1)
assert scf1.is_equivalent_frame(FK5())
assert not scf1.is_equivalent_frame(scf2)
assert scf2.is_equivalent_frame(FK5(equinox='J2005'))
assert not scf3.is_equivalent_frame(scf1)
assert not scf3.is_equivalent_frame(FK5(equinox='J2005'))
def test_constellations():
# the actual test for accuracy is in test_funcs - this is just meant to make
# sure we get sensible answers
sc = SkyCoord(135*u.deg, 65*u.deg)
assert sc.get_constellation() == 'Ursa Major'
assert sc.get_constellation(short_name=True) == 'UMa'
scs = SkyCoord([135]*2*u.deg, [65]*2*u.deg)
npt.assert_equal(scs.get_constellation(), ['Ursa Major']*2)
npt.assert_equal(scs.get_constellation(short_name=True), ['UMa']*2)
@pytest.mark.remote_data
def test_constellations_with_nameresolve():
assert SkyCoord.from_name('And I').get_constellation(short_name=True) == 'And'
# you'd think "And ..." should be in Andromeda. But you'd be wrong.
assert SkyCoord.from_name('And VI').get_constellation() == 'Pegasus'
# maybe it's because And VI isn't really a galaxy?
assert SkyCoord.from_name('And XXII').get_constellation() == 'Pisces'
assert SkyCoord.from_name('And XXX').get_constellation() == 'Cassiopeia'
# ok maybe not
# ok, but at least some of the others do make sense...
assert SkyCoord.from_name('Coma Cluster').get_constellation(short_name=True) == 'Com'
assert SkyCoord.from_name('UMa II').get_constellation() == 'Ursa Major'
assert SkyCoord.from_name('Triangulum Galaxy').get_constellation() == 'Triangulum'
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
sc = SkyCoord([1, 1] * u.deg, [2, 2] * u.deg)
sc.representation = 'cartesian'
assert sc[0].representation is CartesianRepresentation
def test_spherical_offsets():
i00 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='icrs')
i01 = SkyCoord(0*u.arcmin, 1*u.arcmin, frame='icrs')
i10 = SkyCoord(1*u.arcmin, 0*u.arcmin, frame='icrs')
i11 = SkyCoord(1*u.arcmin, 1*u.arcmin, frame='icrs')
i22 = SkyCoord(2*u.arcmin, 2*u.arcmin, frame='icrs')
dra, ddec = i00.spherical_offsets_to(i01)
assert_allclose(dra, 0*u.arcmin)
assert_allclose(ddec, 1*u.arcmin)
dra, ddec = i00.spherical_offsets_to(i10)
assert_allclose(dra, 1*u.arcmin)
assert_allclose(ddec, 0*u.arcmin)
dra, ddec = i10.spherical_offsets_to(i01)
assert_allclose(dra, -1*u.arcmin)
assert_allclose(ddec, 1*u.arcmin)
dra, ddec = i11.spherical_offsets_to(i22)
assert_allclose(ddec, 1*u.arcmin)
assert 0*u.arcmin < dra < 1*u.arcmin
fk5 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='fk5')
with pytest.raises(ValueError):
# different frames should fail
i00.spherical_offsets_to(fk5)
i1deg = ICRS(1*u.deg, 1*u.deg)
dra, ddec = i00.spherical_offsets_to(i1deg)
assert_allclose(dra, 1*u.deg)
assert_allclose(ddec, 1*u.deg)
# make sure an abbreviated array-based version of the above also works
i00s = SkyCoord([0]*4*u.arcmin, [0]*4*u.arcmin, frame='icrs')
i01s = SkyCoord([0]*4*u.arcmin, np.arange(4)*u.arcmin, frame='icrs')
dra, ddec = i00s.spherical_offsets_to(i01s)
assert_allclose(dra, 0*u.arcmin)
assert_allclose(ddec, np.arange(4)*u.arcmin)
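# A minimal sketch (not part of the original suite) of the small-angle
# relation the offsets above rely on: for nearby points, the quadrature sum
# of the two spherical offsets should approximately equal the on-sky
# separation.
def test_spherical_offsets_separation_sketch():
    c1 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='icrs')
    c2 = SkyCoord(1*u.arcmin, 1*u.arcmin, frame='icrs')
    dra, ddec = c1.spherical_offsets_to(c2)
    # at arcminute scales the flat-sky approximation is good to well below
    # a milliarcsecond
    assert_allclose(np.hypot(dra, ddec), c1.separation(c2),
                    atol=1e-4*u.arcmin)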
def test_frame_attr_changes():
"""
This tests the case where a frame is added with a new frame attribute after
a SkyCoord has been created. This is necessary because SkyCoords get the
attributes set at creation time, but the set of attributes can change as
frames are added or removed from the transform graph. This makes sure that
everything continues to work consistently.
"""
sc_before = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
assert 'fakeattr' not in dir(sc_before)
class FakeFrame(BaseCoordinateFrame):
fakeattr = Attribute()
# doesn't matter what this does as long as it just puts the frame in the
# transform graph
transset = (ICRS, FakeFrame, lambda c, f: c)
frame_transform_graph.add_transform(*transset)
try:
assert 'fakeattr' in dir(sc_before)
assert sc_before.fakeattr is None
sc_after1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
assert 'fakeattr' in dir(sc_after1)
assert sc_after1.fakeattr is None
sc_after2 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs', fakeattr=1)
assert sc_after2.fakeattr == 1
finally:
frame_transform_graph.remove_transform(*transset)
assert 'fakeattr' not in dir(sc_before)
assert 'fakeattr' not in dir(sc_after1)
assert 'fakeattr' not in dir(sc_after2)
def test_cache_clear_sc():
from .. import SkyCoord
i = SkyCoord(1*u.deg, 2*u.deg)
    # Add an in-frame-units version of the representation to the cache.
repr(i)
assert len(i.cache['representation']) == 2
i.cache.clear()
assert len(i.cache['representation']) == 0
def test_set_attribute_exceptions():
"""Ensure no attrbute for any frame can be set directly.
Though it is fine if the current frame does not have it."""
sc = SkyCoord(1.*u.deg, 2.*u.deg, frame='fk5')
assert hasattr(sc.frame, 'equinox')
with pytest.raises(AttributeError):
sc.equinox = 'B1950'
assert sc.relative_humidity is None
sc.relative_humidity = 0.5
assert sc.relative_humidity == 0.5
assert not hasattr(sc.frame, 'relative_humidity')
def test_extra_attributes():
"""Ensure any extra attributes are dealt with correctly.
Regression test against #5743.
"""
obstime_string = ['2017-01-01T00:00', '2017-01-01T00:10']
obstime = Time(obstime_string)
sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=obstime_string)
assert not hasattr(sc.frame, 'obstime')
assert type(sc.obstime) is Time
assert sc.obstime.shape == (2,)
assert np.all(sc.obstime == obstime)
# ensure equivalency still works for more than one obstime.
assert sc.is_equivalent_frame(sc)
sc_1 = sc[1]
assert sc_1.obstime == obstime[1]
# Transforming to FK4 should use sc.obstime.
sc_fk4 = sc.transform_to('fk4')
assert np.all(sc_fk4.frame.obstime == obstime)
    # And transforming back should not lose it.
sc2 = sc_fk4.transform_to('icrs')
assert not hasattr(sc2.frame, 'obstime')
assert np.all(sc2.obstime == obstime)
    # Ensure obstime gets taken from the SkyCoord if passed in directly.
# (regression test for #5749).
sc3 = SkyCoord([0., 1.], [2., 3.], unit='deg', frame=sc)
assert np.all(sc3.obstime == obstime)
# Finally, check that we can delete such attributes.
del sc3.obstime
assert sc3.obstime is None
def test_apply_space_motion():
# use this 12 year period because it's a multiple of 4 to avoid the quirks
# of leap years while having 2 leap seconds in it
t1 = Time('2000-01-01T00:00')
t2 = Time('2012-01-01T00:00')
# Check a very simple case first:
frame = ICRS(ra=10.*u.deg, dec=0*u.deg,
distance=10.*u.pc,
pm_ra_cosdec=0.1*u.deg/u.yr,
pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
# Cases that should work (just testing input for now):
c1 = SkyCoord(frame, obstime=t1, pressure=101*u.kPa)
applied1 = c1.apply_space_motion(new_obstime=t2)
applied2 = c1.apply_space_motion(dt=12*u.year)
assert isinstance(applied1.frame, c1.frame.__class__)
assert isinstance(applied2.frame, c1.frame.__class__)
assert_allclose(applied1.ra, applied2.ra)
assert_allclose(applied1.pm_ra, applied2.pm_ra)
assert_allclose(applied1.dec, applied2.dec)
assert_allclose(applied1.distance, applied2.distance)
# ensure any frame attributes that were there before get passed through
assert applied1.pressure == c1.pressure
    # there were 2 leap seconds between 2000 and 2012, so the difference in
    # the two forms of time evolution should be ~2 sec
adt = np.abs(applied2.obstime - applied1.obstime)
assert 1.9*u.second < adt.to(u.second) < 2.1*u.second
c2 = SkyCoord(frame)
applied3 = c2.apply_space_motion(dt=6*u.year)
assert isinstance(applied3.frame, c1.frame.__class__)
assert applied3.obstime is None
# this should *not* be .6 deg due to space-motion on a sphere, but it
# should be fairly close
assert 0.5*u.deg < applied3.ra-c1.ra < .7*u.deg
# the two cases should only match somewhat due to it being space motion, but
# they should be at least this close
assert quantity_allclose(applied1.ra-c1.ra, (applied3.ra-c1.ra)*2, atol=1e-3*u.deg)
# but *not* this close
assert not quantity_allclose(applied1.ra-c1.ra, (applied3.ra-c1.ra)*2, atol=1e-4*u.deg)
with pytest.raises(ValueError):
c2.apply_space_motion(new_obstime=t2)
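# A minimal sketch (not in the original suite) of the expected scale of
# proper-motion propagation: 100 mas/yr over 10 years should displace a
# coordinate by roughly 1 arcsec.
def test_apply_space_motion_scale_sketch():
    c = SkyCoord(10*u.deg, 20*u.deg, distance=10*u.pc,
                 pm_ra_cosdec=100*u.mas/u.yr, pm_dec=0*u.mas/u.yr,
                 radial_velocity=0*u.km/u.s, obstime=Time('2000-01-01'))
    moved = c.apply_space_motion(dt=10*u.yr)
    assert 0.9*u.arcsec < c.separation(moved) < 1.1*u.arcsec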
def test_custom_frame_skycoord():
# also regression check for the case from #7069
class BlahBleeBlopFrame(BaseCoordinateFrame):
default_representation = SphericalRepresentation
# without a differential, SkyCoord creation fails
# default_differential = SphericalDifferential
_frame_specific_representation_info = {
'spherical': [
RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')
]
}
SkyCoord(lat=1*u.deg, lon=2*u.deg, frame=BlahBleeBlopFrame)
def test_user_friendly_pm_error():
"""
This checks that a more user-friendly error message is raised for the user
if they pass, e.g., pm_ra instead of pm_ra_cosdec
"""
with pytest.raises(ValueError) as e:
SkyCoord(ra=150*u.deg, dec=-11*u.deg,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr)
assert 'pm_ra_cosdec' in str(e.value)
with pytest.raises(ValueError) as e:
SkyCoord(l=150*u.deg, b=-11*u.deg,
pm_l=100*u.mas/u.yr, pm_b=10*u.mas/u.yr,
frame='galactic')
assert 'pm_l_cosb' in str(e.value)
# The special error should not turn on here:
with pytest.raises(ValueError) as e:
SkyCoord(x=1*u.pc, y=2*u.pc, z=3*u.pc,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr,
representation_type='cartesian')
assert 'pm_ra_cosdec' not in str(e.value)
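# A minimal sketch (not in the original suite): with the properly-named
# components, the same construction succeeds and round-trips the inputs.
def test_pm_correct_names_sketch():
    sc = SkyCoord(ra=150*u.deg, dec=-11*u.deg,
                  pm_ra_cosdec=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr)
    assert_allclose(sc.pm_ra_cosdec, 100*u.mas/u.yr)
    assert_allclose(sc.pm_dec, 10*u.mas/u.yr)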
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This includes tests for the Distance class and related calculations
"""
import pytest
import numpy as np
from numpy import testing as npt
from ... import units as u
from ...units import allclose as quantity_allclose
from .. import Longitude, Latitude, Distance, CartesianRepresentation
from ..builtin_frames import ICRS, Galactic
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
def test_distances():
"""
Tests functionality for Coordinate class distances and cartesian
transformations.
"""
'''
Distances can also be specified, and allow for a full 3D definition of a
coordinate.
'''
# try all the different ways to initialize a Distance
distance = Distance(12, u.parsec)
Distance(40, unit=u.au)
Distance(value=5, unit=u.kpc)
# need to provide a unit
with pytest.raises(u.UnitsError):
Distance(12)
# standard units are pre-defined
npt.assert_allclose(distance.lyr, 39.138765325702551)
npt.assert_allclose(distance.km, 370281309776063.0)
# Coordinate objects can be assigned a distance object, giving them a full
# 3D position
c = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree,
distance=Distance(12, u.parsec))
# or initialize distances via redshifts - this is actually tested in the
# function below that checks for scipy. This is kept here as an example
# c.distance = Distance(z=0.2) # uses current cosmology
# with whatever your preferred cosmology may be
# c.distance = Distance(z=0.2, cosmology=WMAP5)
# Coordinate objects can be initialized with a distance using special
# syntax
c1 = Galactic(l=158.558650*u.deg, b=-43.350066*u.deg, distance=12 * u.kpc)
# Coordinate objects can be instantiated with cartesian coordinates
# Internally they will immediately be converted to two angles + a distance
cart = CartesianRepresentation(x=2 * u.pc, y=4 * u.pc, z=8 * u.pc)
c2 = Galactic(cart)
sep12 = c1.separation_3d(c2)
    # returns a *3d* distance between the c1 and c2 coordinates; note that
    # the result is a Distance object, not a plain Quantity
assert isinstance(sep12, Distance)
npt.assert_allclose(sep12.pc, 12005.784163916317, 10)
'''
All spherical coordinate systems with distances can be converted to
cartesian coordinates.
'''
cartrep2 = c2.cartesian
assert isinstance(cartrep2.x, u.Quantity)
npt.assert_allclose(cartrep2.x.value, 2)
npt.assert_allclose(cartrep2.y.value, 4)
npt.assert_allclose(cartrep2.z.value, 8)
# with no distance, the unit sphere is assumed when converting to cartesian
c3 = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree, distance=None)
unitcart = c3.cartesian
npt.assert_allclose(((unitcart.x**2 + unitcart.y**2 +
unitcart.z**2)**0.5).value, 1.0)
# TODO: choose between these when CartesianRepresentation gets a definite
# decision on whether or not it gets __add__
#
# CartesianRepresentation objects can be added and subtracted, which are
# vector/elementwise they can also be given as arguments to a coordinate
# system
# csum = ICRS(c1.cartesian + c2.cartesian)
csumrep = CartesianRepresentation(c1.cartesian.xyz + c2.cartesian.xyz)
csum = ICRS(csumrep)
npt.assert_allclose(csumrep.x.value, -8.12016610185)
npt.assert_allclose(csumrep.y.value, 3.19380597435)
npt.assert_allclose(csumrep.z.value, -8.2294483707)
npt.assert_allclose(csum.ra.degree, 158.529401774)
npt.assert_allclose(csum.dec.degree, -43.3235825777)
npt.assert_allclose(csum.distance.kpc, 11.9942200501)
@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_distances_scipy():
"""
The distance-related tests that require scipy due to the cosmology
module needing scipy integration routines
"""
from ...cosmology import WMAP5
# try different ways to initialize a Distance
d4 = Distance(z=0.23) # uses default cosmology - as of writing, WMAP7
npt.assert_allclose(d4.z, 0.23, rtol=1e-8)
d5 = Distance(z=0.23, cosmology=WMAP5)
npt.assert_allclose(d5.compute_z(WMAP5), 0.23, rtol=1e-8)
d6 = Distance(z=0.23, cosmology=WMAP5, unit=u.km)
npt.assert_allclose(d6.value, 3.5417046898762366e+22)
with pytest.raises(ValueError):
d7 = Distance(cosmology=WMAP5, unit=u.km)
with pytest.raises(ValueError):
d8 = Distance()
def test_distance_change():
ra = Longitude("4:08:15.162342", unit=u.hour)
dec = Latitude("-41:08:15.162342", unit=u.degree)
c1 = ICRS(ra, dec, Distance(1, unit=u.kpc))
oldx = c1.cartesian.x.value
    assert abs(oldx - 0.35284083171901953) < 1e-10
    # first make sure distances are immutable
with pytest.raises(AttributeError):
c1.distance = Distance(2, unit=u.kpc)
    # x should scale with distance: doubling the distance doubles x
c2 = ICRS(ra, dec, Distance(2, unit=u.kpc))
assert c2.cartesian.x.value == oldx * 2
def test_distance_is_quantity():
"""
test that distance behaves like a proper quantity
"""
Distance(2 * u.kpc)
d = Distance([2, 3.1], u.kpc)
assert d.shape == (2,)
a = d.view(np.ndarray)
q = d.view(u.Quantity)
a[0] = 1.2
q.value[1] = 5.4
assert d[0].value == 1.2
assert d[1].value == 5.4
q = u.Quantity(d, copy=True)
q.value[1] = 0
assert q.value[1] == 0
assert d.value[1] != 0
# regression test against #2261
d = Distance([2 * u.kpc, 250. * u.pc])
assert d.unit is u.kpc
assert np.all(d.value == np.array([2., 0.25]))
def test_distmod():
d = Distance(10, u.pc)
assert d.distmod.value == 0
d = Distance(distmod=20)
assert d.distmod.value == 20
assert d.kpc == 100
d = Distance(distmod=-1., unit=u.au)
npt.assert_allclose(d.value, 1301442.9440836983)
with pytest.raises(ValueError):
d = Distance(value=d, distmod=20)
with pytest.raises(ValueError):
d = Distance(z=.23, distmod=20)
# check the Mpc/kpc/pc behavior
assert Distance(distmod=1).unit == u.pc
assert Distance(distmod=11).unit == u.kpc
assert Distance(distmod=26).unit == u.Mpc
assert Distance(distmod=-21).unit == u.AU
# if an array, uses the mean of the log of the distances
assert Distance(distmod=[1, 11, 26]).unit == u.kpc
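# A minimal sketch (not in the original suite) of the distance-modulus
# relation the asserts above rely on: mu = 5*log10(d / 10 pc), so mu = 5
# corresponds to exactly 100 pc.
def test_distmod_formula_sketch():
    npt.assert_allclose(Distance(distmod=5).pc, 100.)
    npt.assert_allclose(Distance(100*u.pc).distmod.value, 5.)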
def test_parallax():
d = Distance(parallax=1*u.arcsecond)
assert d.pc == 1.
with pytest.raises(ValueError):
d = Distance(15*u.pc, parallax=20*u.milliarcsecond)
with pytest.raises(ValueError):
d = Distance(parallax=20*u.milliarcsecond, distmod=20)
# array
plx = [1, 10, 100.]*u.mas
d = Distance(parallax=plx)
assert quantity_allclose(d.pc, [1000., 100., 10.])
assert quantity_allclose(plx, d.parallax)
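# A minimal sketch (not in the original suite) of the reciprocal relation
# used above, d[pc] = 1 / parallax[arcsec], including the round trip.
def test_parallax_reciprocal_sketch():
    d = Distance(100*u.pc)
    assert quantity_allclose(d.parallax, 10*u.mas)
    assert quantity_allclose(Distance(parallax=d.parallax), 100*u.pc)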
def test_distance_in_coordinates():
"""
test that distances can be created from quantities and that cartesian
representations come out right
"""
ra = Longitude("4:08:15.162342", unit=u.hour)
dec = Latitude("-41:08:15.162342", unit=u.degree)
coo = ICRS(ra, dec, distance=2*u.kpc)
cart = coo.cartesian
assert isinstance(cart.xyz, u.Quantity)
def test_negative_distance():
""" Test optional kwarg allow_negative """
with pytest.raises(ValueError):
Distance([-2, 3.1], u.kpc)
with pytest.raises(ValueError):
Distance([-2, -3.1], u.kpc)
with pytest.raises(ValueError):
Distance(-2, u.kpc)
d = Distance(-2, u.kpc, allow_negative=True)
assert d.value == -2
def test_distance_comparison():
"""Ensure comparisons of distances work (#2206, #2250)"""
a = Distance(15*u.kpc)
b = Distance(15*u.kpc)
assert a == b
c = Distance(1.*u.Mpc)
assert a < c
def test_distance_to_quantity_when_not_units_of_length():
"""Any operation that leaves units other than those of length
should turn a distance into a quantity (#2206, #2250)"""
d = Distance(15*u.kpc)
twice = 2.*d
assert isinstance(twice, Distance)
area = 4.*np.pi*d**2
assert area.unit.is_equivalent(u.m**2)
assert not isinstance(area, Distance)
assert type(area) is u.Quantity
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test initialization of angles not already covered by the API tests"""
import pickle
import pytest
import numpy as np
from ..earth import EarthLocation, ELLIPSOIDS
from ..angles import Longitude, Latitude
from ...units import allclose as quantity_allclose
from ... import units as u
from ...time import Time
from ... import constants
from ..name_resolve import NameResolveError
def allclose_m14(a, b, rtol=1.e-14, atol=None):
if atol is None:
atol = 1.e-14 * getattr(a, 'unit', 1)
return quantity_allclose(a, b, rtol, atol)
def allclose_m8(a, b, rtol=1.e-8, atol=None):
if atol is None:
atol = 1.e-8 * getattr(a, 'unit', 1)
return quantity_allclose(a, b, rtol, atol)
def isclose_m14(val, ref):
return np.array([allclose_m14(v, r) for (v, r) in zip(val, ref)])
def isclose_m8(val, ref):
return np.array([allclose_m8(v, r) for (v, r) in zip(val, ref)])
def vvd(val, valok, dval, func, test, status):
"""Mimic routine of erfa/src/t_erfa_c.c (to help copy & paste)"""
assert quantity_allclose(val, valok * val.unit, atol=dval * val.unit)
def test_gc2gd():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gc2gd"""
x, y, z = (2e6, 3e6, 5.244e6)
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geocentric(x, y, z, u.m)
e, p, h = location.to_geodetic('WGS84')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic('GRS80')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e2", status)
vvd(p, 0.97160184820607853, 1e-14, "eraGc2gd", "p2", status)
vvd(h, 331.41731754844348, 1e-8, "eraGc2gd", "h2", status)
e, p, h = location.to_geodetic('WGS72')
e, p, h = e.to(u.radian), p.to(u.radian), h.to(u.m)
vvd(e, 0.98279372324732907, 1e-14, "eraGc2gd", "e3", status)
vvd(p, 0.97160181811015119, 1e-14, "eraGc2gd", "p3", status)
vvd(h, 333.27707261303181, 1e-8, "eraGc2gd", "h3", status)
def test_gd2gc():
"""Test that we reproduce erfa/src/t_erfa_c.c t_gd2gc"""
e = 3.1 * u.rad
p = -0.5 * u.rad
h = 2500.0 * u.m
status = 0 # help for copy & paste of vvd
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS84')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577049947, 1e-7, "eraGd2gc", "0/1", status)
vvd(xyz[1], 233011.67223479203, 1e-7, "eraGd2gc", "1/1", status)
vvd(xyz[2], -3040909.4706983363, 1e-7, "eraGd2gc", "2/1", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='GRS80')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5599000.5577260984, 1e-7, "eraGd2gc", "0/2", status)
vvd(xyz[1], 233011.6722356703, 1e-7, "eraGd2gc", "1/2", status)
vvd(xyz[2], -3040909.4706095476, 1e-7, "eraGd2gc", "2/2", status)
location = EarthLocation.from_geodetic(e, p, h, ellipsoid='WGS72')
xyz = tuple(v.to(u.m) for v in location.to_geocentric())
vvd(xyz[0], -5598998.7626301490, 1e-7, "eraGd2gc", "0/3", status)
vvd(xyz[1], 233011.5975297822, 1e-7, "eraGd2gc", "1/3", status)
vvd(xyz[2], -3040908.6861467111, 1e-7, "eraGd2gc", "2/3", status)
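# A minimal sketch (not part of the erfa comparisons above): the
# geodetic -> geocentric -> geodetic round trip should recover the inputs
# on the same ellipsoid.
def test_geodetic_roundtrip_sketch():
    loc = EarthLocation.from_geodetic(30.*u.deg, -45.*u.deg, 1234.*u.m)
    lon, lat, height = loc.to_geodetic()
    assert quantity_allclose(lon, 30.*u.deg)
    assert quantity_allclose(lat, -45.*u.deg)
    assert quantity_allclose(height, 1234.*u.m, atol=1e-6*u.m)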
class TestInput():
def setup(self):
self.lon = Longitude([0., 45., 90., 135., 180., -180, -90, -45], u.deg,
wrap_angle=180*u.deg)
self.lat = Latitude([+0., 30., 60., +90., -90., -60., -30., 0.], u.deg)
self.h = u.Quantity([0.1, 0.5, 1.0, -0.5, -1.0, +4.2, -11., -.1], u.m)
self.location = EarthLocation.from_geodetic(self.lon, self.lat, self.h)
self.x, self.y, self.z = self.location.to_geocentric()
def test_default_ellipsoid(self):
assert self.location.ellipsoid == EarthLocation._ellipsoid
def test_geo_attributes(self):
assert all(np.all(_1 == _2)
for _1, _2 in zip(self.location.geodetic,
self.location.to_geodetic()))
assert all(np.all(_1 == _2)
for _1, _2 in zip(self.location.geocentric,
self.location.to_geocentric()))
def test_attribute_classes(self):
"""Test that attribute classes are correct (and not EarthLocation)"""
assert type(self.location.x) is u.Quantity
assert type(self.location.y) is u.Quantity
assert type(self.location.z) is u.Quantity
assert type(self.location.lon) is Longitude
assert type(self.location.lat) is Latitude
assert type(self.location.height) is u.Quantity
def test_input(self):
"""Check input is parsed correctly"""
# units of length should be assumed geocentric
geocentric = EarthLocation(self.x, self.y, self.z)
assert np.all(geocentric == self.location)
geocentric2 = EarthLocation(self.x.value, self.y.value, self.z.value,
self.x.unit)
assert np.all(geocentric2 == self.location)
geodetic = EarthLocation(self.lon, self.lat, self.h)
assert np.all(geodetic == self.location)
geodetic2 = EarthLocation(self.lon.to_value(u.degree),
self.lat.to_value(u.degree),
self.h.to_value(u.m))
assert np.all(geodetic2 == self.location)
geodetic3 = EarthLocation(self.lon, self.lat)
assert allclose_m14(geodetic3.lon.value,
self.location.lon.value)
assert allclose_m14(geodetic3.lat.value,
self.location.lat.value)
assert not np.any(isclose_m14(geodetic3.height.value,
self.location.height.value))
geodetic4 = EarthLocation(self.lon, self.lat, self.h[-1])
assert allclose_m14(geodetic4.lon.value,
self.location.lon.value)
assert allclose_m14(geodetic4.lat.value,
self.location.lat.value)
assert allclose_m14(geodetic4.height[-1].value,
self.location.height[-1].value)
assert not np.any(isclose_m14(geodetic4.height[:-1].value,
self.location.height[:-1].value))
# check length unit preservation
geocentric5 = EarthLocation(self.x, self.y, self.z, u.pc)
assert geocentric5.unit is u.pc
assert geocentric5.x.unit is u.pc
assert geocentric5.height.unit is u.pc
assert allclose_m14(geocentric5.x.to_value(self.x.unit), self.x.value)
geodetic5 = EarthLocation(self.lon, self.lat, self.h.to(u.pc))
assert geodetic5.unit is u.pc
assert geodetic5.x.unit is u.pc
assert geodetic5.height.unit is u.pc
assert allclose_m14(geodetic5.x.to_value(self.x.unit), self.x.value)
def test_invalid_input(self):
"""Check invalid input raises exception"""
        # input incomprehensible as either geocentric or geodetic raises TypeError
with pytest.raises(TypeError):
EarthLocation(self.lon, self.y, self.z)
# wrong units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.lon, self.lat, self.lat)
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geocentric(self.h, self.lon, self.lat)
# floats without a unit
with pytest.raises(TypeError):
EarthLocation.from_geocentric(self.x.value, self.y.value,
self.z.value)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geocentric(self.x, self.y, self.z[:5])
# inconsistent units
with pytest.raises(u.UnitsError):
EarthLocation.from_geodetic(self.x, self.y, self.z)
# inconsistent shape
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h[:5])
def test_slicing(self):
# test on WGS72 location, so we can check the ellipsoid is passed on
locwgs72 = EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid='WGS72')
loc_slice1 = locwgs72[4]
assert isinstance(loc_slice1, EarthLocation)
assert loc_slice1.unit is locwgs72.unit
assert loc_slice1.ellipsoid == locwgs72.ellipsoid == 'WGS72'
assert not loc_slice1.shape
with pytest.raises(TypeError):
loc_slice1[0]
with pytest.raises(IndexError):
len(loc_slice1)
loc_slice2 = locwgs72[4:6]
assert isinstance(loc_slice2, EarthLocation)
assert len(loc_slice2) == 2
assert loc_slice2.unit is locwgs72.unit
assert loc_slice2.ellipsoid == locwgs72.ellipsoid
assert loc_slice2.shape == (2,)
loc_x = locwgs72['x']
assert type(loc_x) is u.Quantity
assert loc_x.shape == locwgs72.shape
assert loc_x.unit is locwgs72.unit
def test_invalid_ellipsoid(self):
# unknown ellipsoid
with pytest.raises(ValueError):
EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid='foo')
with pytest.raises(TypeError):
EarthLocation(self.lon, self.lat, self.h, ellipsoid='foo')
with pytest.raises(ValueError):
self.location.ellipsoid = 'foo'
with pytest.raises(ValueError):
self.location.to_geodetic('foo')
@pytest.mark.parametrize('ellipsoid', ELLIPSOIDS)
def test_ellipsoid(self, ellipsoid):
"""Test that different ellipsoids are understood, and differ"""
# check that heights differ for different ellipsoids
# need different tolerance, since heights are relative to ~6000 km
lon, lat, h = self.location.to_geodetic(ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m8(h.value, self.h.value)
else:
            # Some heights are very similar across ellipsoids; some lon, lat are identical.
assert not np.all(isclose_m8(h.value, self.h.value))
# given lon, lat, height, check that x,y,z differ
location = EarthLocation.from_geodetic(self.lon, self.lat, self.h,
ellipsoid=ellipsoid)
if ellipsoid == self.location.ellipsoid:
assert allclose_m14(location.z.value, self.z.value)
else:
assert not np.all(isclose_m14(location.z.value, self.z.value))
def test_to_value(self):
loc = self.location
loc_ndarray = loc.view(np.ndarray)
assert np.all(loc.value == loc_ndarray)
loc2 = self.location.to(u.km)
loc2_ndarray = np.empty_like(loc_ndarray)
for coo in 'x', 'y', 'z':
loc2_ndarray[coo] = loc_ndarray[coo] / 1000.
assert np.all(loc2.value == loc2_ndarray)
loc2_value = self.location.to_value(u.km)
assert np.all(loc2_value == loc2_ndarray)
def test_pickling():
"""Regression test against #4304."""
el = EarthLocation(0.*u.m, 6000*u.km, 6000*u.km)
s = pickle.dumps(el)
el2 = pickle.loads(s)
assert el == el2
def test_repr_latex():
"""
Regression test for issue #4542
"""
somelocation = EarthLocation(lon='149:3:57.9', lat='-31:16:37.3')
somelocation._repr_latex_()
somelocation2 = EarthLocation(lon=[1., 2.]*u.deg, lat=[-1., 9.]*u.deg)
somelocation2._repr_latex_()
@pytest.mark.remote_data
def test_of_address():
# just a location
try:
loc = EarthLocation.of_address("New York, NY")
except NameResolveError as e:
# Google map API limit might surface even here in Travis CI.
pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, 40.7128*u.degree)
assert quantity_allclose(loc.lon, -74.0059*u.degree)
assert np.allclose(loc.height.value, 0.)
# Put this one here as buffer to get around Google map API limit per sec.
# no match: This always raises NameResolveError
with pytest.raises(NameResolveError):
EarthLocation.of_address("lkjasdflkja")
# a location and height
try:
loc = EarthLocation.of_address("New York, NY", get_height=True)
except NameResolveError as e:
# Buffer above sometimes insufficient to get around API limit but
# we also do not want to drag things out with time.sleep(0.195),
# where 0.195 was empirically determined on some physical machine.
pytest.xfail(str(e))
else:
assert quantity_allclose(loc.lat, 40.7128*u.degree)
assert quantity_allclose(loc.lon, -74.0059*u.degree)
assert quantity_allclose(loc.height, 10.438659669*u.meter, atol=1.*u.cm)
def test_geodetic_tuple():
lat = 2*u.deg
lon = 10*u.deg
height = 100*u.m
el = EarthLocation.from_geodetic(lat=lat, lon=lon, height=height)
res1 = el.to_geodetic()
res2 = el.geodetic
assert res1.lat == res2.lat and quantity_allclose(res1.lat, lat)
assert res1.lon == res2.lon and quantity_allclose(res1.lon, lon)
assert res1.height == res2.height and quantity_allclose(res1.height, height)
def test_gravitational_redshift():
someloc = EarthLocation(lon=-87.7*u.deg, lat=37*u.deg)
sometime = Time('2017-8-21 18:26:40')
zg0 = someloc.gravitational_redshift(sometime)
# should be of order ~few mm/s change per week
zg_week = someloc.gravitational_redshift(sometime + 7 * u.day)
assert 1.*u.mm/u.s < abs(zg_week - zg0) < 1*u.cm/u.s
# ~cm/s over a half-year
zg_halfyear = someloc.gravitational_redshift(sometime + 0.5 * u.yr)
assert 1*u.cm/u.s < abs(zg_halfyear - zg0) < 1*u.dm/u.s
    # but back at the same time of year, the change should be tenths of a
    # mm/s even over decades
zg_year = someloc.gravitational_redshift(sometime - 20 * u.year)
assert .1*u.mm/u.s < abs(zg_year - zg0) < 1*u.mm/u.s
# Check mass adjustments.
# If Jupiter and the moon are ignored, effect should be off by ~ .5 mm/s
masses = {'sun': constants.G*constants.M_sun,
'jupiter': 0*constants.G*u.kg,
'moon': 0*constants.G*u.kg}
zg_moonjup = someloc.gravitational_redshift(sometime, masses=masses)
assert .1*u.mm/u.s < abs(zg_moonjup - zg0) < 1*u.mm/u.s
# Check that simply not including the bodies gives the same result.
assert zg_moonjup == someloc.gravitational_redshift(sometime,
bodies=('sun',))
# And that earth can be given, even not as last argument
assert zg_moonjup == someloc.gravitational_redshift(
sometime, bodies=('earth', 'sun',))
# If the earth is also ignored, effect should be off by ~ 20 cm/s
# This also tests the conversion of kg to gravitational units.
masses['earth'] = 0*u.kg
zg_moonjupearth = someloc.gravitational_redshift(sometime, masses=masses)
assert 1*u.dm/u.s < abs(zg_moonjupearth - zg0) < 1*u.m/u.s
# If all masses are zero, redshift should be 0 as well.
masses['sun'] = 0*u.kg
assert someloc.gravitational_redshift(sometime, masses=masses) == 0
with pytest.raises(KeyError):
someloc.gravitational_redshift(sometime, bodies=('saturn',))
with pytest.raises(u.UnitsError):
masses = {'sun': constants.G*constants.M_sun,
'jupiter': constants.G*constants.M_jup,
'moon': 1*u.km, # wrong units!
'earth': constants.G*constants.M_earth}
someloc.gravitational_redshift(sometime, masses=masses)
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from ...units import allclose as quantity_allclose
from ... import units as u
from ... import constants
from ...time import Time
from ..builtin_frames import ICRS, AltAz, LSR, GCRS, Galactic, FK5
from ..baseframe import frame_transform_graph
from ..sites import get_builtin_sites
from .. import (TimeAttribute,
FunctionTransformWithFiniteDifference, get_sun,
CartesianRepresentation, SphericalRepresentation,
CartesianDifferential, SphericalDifferential,
DynamicMatrixTransform)
J2000 = Time('J2000')
@pytest.mark.parametrize("dt, symmetric", [(1*u.second, True),
(1*u.year, True),
(1*u.second, False),
(1*u.year, False)])
def test_faux_lsr(dt, symmetric):
class LSR2(LSR):
obstime = TimeAttribute(default=J2000)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
ICRS, LSR2, finite_difference_dt=dt,
symmetric_finite_difference=symmetric)
def icrs_to_lsr(icrs_coo, lsr_frame):
dt = lsr_frame.obstime - J2000
offset = lsr_frame.v_bary * dt.to(u.second)
return lsr_frame.realize_frame(icrs_coo.data.without_differentials() + offset)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
LSR2, ICRS, finite_difference_dt=dt,
symmetric_finite_difference=symmetric)
def lsr_to_icrs(lsr_coo, icrs_frame):
        dt = lsr_coo.obstime - J2000
        offset = lsr_coo.v_bary * dt.to(u.second)
return icrs_frame.realize_frame(lsr_coo.data - offset)
ic = ICRS(ra=12.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au,
pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
radial_velocity=0*u.km/u.s)
lsrc = ic.transform_to(LSR2())
assert quantity_allclose(ic.cartesian.xyz, lsrc.cartesian.xyz)
idiff = ic.cartesian.differentials['s']
ldiff = lsrc.cartesian.differentials['s']
change = (ldiff.d_xyz - idiff.d_xyz).to(u.km/u.s)
totchange = np.sum(change**2)**0.5
assert quantity_allclose(totchange, np.sum(lsrc.v_bary.d_xyz**2)**0.5)
ic2 = ICRS(ra=120.3*u.deg, dec=45.6*u.deg, distance=7.8*u.au,
pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=10*u.marcsec/u.yr,
radial_velocity=1000*u.km/u.s)
lsrc2 = ic2.transform_to(LSR2())
tot = np.sum(lsrc2.cartesian.differentials['s'].d_xyz**2)**0.5
assert np.abs(tot.to('km/s') - 1000*u.km/u.s) < 20*u.km/u.s
def test_faux_fk5_galactic():
from ..builtin_frames.galactic_transforms import fk5_to_gal, _gal_to_fk5
class Galactic2(Galactic):
pass
dt = 1000*u.s
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
FK5, Galactic2, finite_difference_dt=dt,
symmetric_finite_difference=True,
finite_difference_frameattr_name=None)
def fk5_to_gal2(fk5_coo, gal_frame):
trans = DynamicMatrixTransform(fk5_to_gal, FK5, Galactic2)
return trans(fk5_coo, gal_frame)
@frame_transform_graph.transform(FunctionTransformWithFiniteDifference,
                                     Galactic2, FK5, finite_difference_dt=dt,
symmetric_finite_difference=True,
finite_difference_frameattr_name=None)
def gal2_to_fk5(gal_coo, fk5_frame):
trans = DynamicMatrixTransform(_gal_to_fk5, Galactic2, FK5)
return trans(gal_coo, fk5_frame)
c1 = FK5(ra=150*u.deg, dec=-17*u.deg, radial_velocity=83*u.km/u.s,
pm_ra_cosdec=-41*u.mas/u.yr, pm_dec=16*u.mas/u.yr,
distance=150*u.pc)
c2 = c1.transform_to(Galactic2)
c3 = c1.transform_to(Galactic)
# compare the matrix and finite-difference calculations
assert quantity_allclose(c2.pm_l_cosb, c3.pm_l_cosb, rtol=1e-4)
assert quantity_allclose(c2.pm_b, c3.pm_b, rtol=1e-4)
def test_gcrs_diffs():
time = Time('J2017')
gf = GCRS(obstime=time)
sung = get_sun(time) # should have very little vhelio
# qtr-year off sun location should be the direction of ~ maximal vhelio
qtrsung = get_sun(time-.25*u.year)
# now we use those essentially as directions where the velocities should
    # be either maximal or minimal - parallel or perpendicular to Earth's orbit
msungr = CartesianRepresentation(-sung.cartesian.xyz).represent_as(SphericalRepresentation)
suni = ICRS(ra=msungr.lon, dec=msungr.lat, distance=100*u.au,
pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
radial_velocity=0*u.km/u.s)
qtrsuni = ICRS(ra=qtrsung.ra, dec=qtrsung.dec, distance=100*u.au,
pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
radial_velocity=0*u.km/u.s)
# Now we transform those parallel- and perpendicular-to Earth's orbit
# directions to GCRS, which should shift the velocity to either include
# the Earth's velocity vector, or not (for parallel and perpendicular,
# respectively).
sung = suni.transform_to(gf)
qtrsung = qtrsuni.transform_to(gf)
    # the RV should be high along the in-ecliptic axis perpendicular to the
    # sun direction, and low along the sun axis
assert np.abs(qtrsung.radial_velocity) > 30*u.km/u.s
assert np.abs(qtrsung.radial_velocity) < 40*u.km/u.s
assert np.abs(sung.radial_velocity) < 1*u.km/u.s
suni2 = sung.transform_to(ICRS)
assert np.all(np.abs(suni2.data.differentials['s'].d_xyz) < 3e-5*u.km/u.s)
qtrisun2 = qtrsung.transform_to(ICRS)
assert np.all(np.abs(qtrisun2.data.differentials['s'].d_xyz) < 3e-5*u.km/u.s)
def test_altaz_diffs():
time = Time('J2015') + np.linspace(-1, 1, 1000)*u.day
loc = get_builtin_sites()['greenwich']
aa = AltAz(obstime=time, location=loc)
icoo = ICRS(np.zeros_like(time)*u.deg, 10*u.deg, 100*u.au,
pm_ra_cosdec=np.zeros_like(time)*u.marcsec/u.yr,
pm_dec=0*u.marcsec/u.yr,
radial_velocity=0*u.km/u.s)
acoo = icoo.transform_to(aa)
# Make sure the change in radial velocity over ~2 days isn't too much
# more than the rotation speed of the Earth - some excess is expected
# because the orbit also shifts the RV, but it should be pretty small
# over this short a time.
assert np.ptp(acoo.radial_velocity)/2 < (2*np.pi*constants.R_earth/u.day)*1.2 # MAGIC NUMBER
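    # (the "MAGIC NUMBER" is just a 20% allowance on top of the equatorial
    # rotation speed, 2*pi*R_earth / 1 day ~ 0.46 km/s; noted here for
    # clarity - the exact tolerance is not critical)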
cdiff = acoo.data.differentials['s'].represent_as(CartesianDifferential,
acoo.data)
# The "total" velocity should be > c, because the *tangential* velocity
    # isn't a true velocity, but rather an induced velocity due to the Earth's
# rotation at a distance of 100 AU
assert np.all(np.sum(cdiff.d_xyz**2, axis=0)**0.5 > constants.c)
_xfail = pytest.mark.xfail
@pytest.mark.parametrize('distance', [1000*u.au,
10*u.pc,
pytest.param(10*u.kpc, marks=_xfail),
pytest.param(100*u.kpc, marks=_xfail)])
# TODO: make these not fail when the
# finite-difference numerical stability
# is improved
def test_numerical_limits(distance):
"""
Tests the numerical stability of the default settings for the finite
    difference transformation calculation. This is *known* to fail at
    distances >~1 kpc, but this may be improved in future versions.
"""
time = Time('J2017') + np.linspace(-.5, .5, 100)*u.year
icoo = ICRS(ra=0*u.deg, dec=10*u.deg, distance=distance,
pm_ra_cosdec=0*u.marcsec/u.yr, pm_dec=0*u.marcsec/u.yr,
radial_velocity=0*u.km/u.s)
gcoo = icoo.transform_to(GCRS(obstime=time))
rv = gcoo.radial_velocity.to('km/s')
    # if it's a lot bigger than this - ~the maximal velocity shift along
# the direction above with a small allowance for noise - finite-difference
# rounding errors have ruined the calculation
assert np.ptp(rv) < 65*u.km/u.s
def diff_info_plot(frame, time):
"""
Useful for plotting a frame with multiple times. *Not* used in the testing
suite per se, but extremely useful for interactive plotting of results from
tests in this module.
"""
from matplotlib import pyplot as plt
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(20, 12))
ax1.plot_date(time.plot_date, frame.data.differentials['s'].d_xyz.to(u.km/u.s).T, fmt='-')
ax1.legend(['x', 'y', 'z'])
ax2.plot_date(time.plot_date, np.sum(frame.data.differentials['s'].d_xyz.to(u.km/u.s)**2, axis=0)**0.5, fmt='-')
ax2.set_title('total')
sd = frame.data.differentials['s'].represent_as(SphericalDifferential, frame.data)
ax3.plot_date(time.plot_date, sd.d_distance.to(u.km/u.s), fmt='-')
ax3.set_title('radial')
ax4.plot_date(time.plot_date, sd.d_lat.to(u.marcsec/u.yr), fmt='-', label='lat')
ax4.plot_date(time.plot_date, sd.d_lon.to(u.marcsec/u.yr), fmt='-', label='lon')
return fig
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Accuracy tests for Ecliptic coordinate systems.
"""
import numpy as np
from ....units import allclose as quantity_allclose
from .... import units as u
from ... import SkyCoord
from ...builtin_frames import FK5, ICRS, GCRS, GeocentricTrueEcliptic, BarycentricTrueEcliptic, HeliocentricTrueEcliptic
from ....constants import R_sun, R_earth
def test_against_pytpm_doc_example():
"""
    Check that Astropy's Ecliptic systems give answers consistent with pyTPM.
    Currently this only tests against the example given in the pyTPM docs.
"""
fk5_in = SkyCoord('12h22m54.899s', '15d49m20.57s', frame=FK5(equinox='J2000'))
pytpm_out = BarycentricTrueEcliptic(lon=178.78256462*u.deg,
lat=16.7597002513*u.deg,
equinox='J2000')
astropy_out = fk5_in.transform_to(pytpm_out)
assert pytpm_out.separation(astropy_out) < (1*u.arcsec)
def test_ecliptic_heliobary():
"""
Check that the ecliptic transformations for heliocentric and barycentric
at least more or less make sense
"""
icrs = ICRS(1*u.deg, 2*u.deg, distance=1.5*R_sun)
bary = icrs.transform_to(BarycentricTrueEcliptic)
helio = icrs.transform_to(HeliocentricTrueEcliptic)
    # make sure there's a sizable distance shift - in 3D it's hundreds of km,
    # but this is 1D so we allow it to be somewhat smaller
assert np.abs(bary.distance - helio.distance) > 1*u.km
# now make something that's got the location of helio but in bary's frame.
# this is a convenience to allow `separation` to work as expected
helio_in_bary_frame = bary.realize_frame(helio.cartesian)
assert bary.separation(helio_in_bary_frame) > 1*u.arcmin
def test_ecl_geo():
"""
Check that the geocentric version at least gets well away from GCRS. For a
true "accuracy" test we need a comparison dataset that is similar to the
geocentric/GCRS comparison we want to do here. Contributions welcome!
"""
gcrs = GCRS(10*u.deg, 20*u.deg, distance=1.5*R_earth)
gecl = gcrs.transform_to(GeocentricTrueEcliptic)
assert quantity_allclose(gecl.distance, gcrs.distance)
def test_arraytransforms():
"""
Test that transforms to/from ecliptic coordinates work on array coordinates
(not testing for accuracy.)
"""
ra = np.ones((4, ), dtype=float) * u.deg
dec = 2*np.ones((4, ), dtype=float) * u.deg
distance = np.ones((4, ), dtype=float) * u.au
test_icrs = ICRS(ra=ra, dec=dec, distance=distance)
test_gcrs = GCRS(test_icrs.data)
bary_arr = test_icrs.transform_to(BarycentricTrueEcliptic)
assert bary_arr.shape == ra.shape
helio_arr = test_icrs.transform_to(HeliocentricTrueEcliptic)
assert helio_arr.shape == ra.shape
geo_arr = test_gcrs.transform_to(GeocentricTrueEcliptic)
assert geo_arr.shape == ra.shape
# now check that we also can go back the other way without shape problems
bary_icrs = bary_arr.transform_to(ICRS)
assert bary_icrs.shape == test_icrs.shape
helio_icrs = helio_arr.transform_to(ICRS)
assert helio_icrs.shape == test_icrs.shape
geo_gcrs = geo_arr.transform_to(GCRS)
assert geo_gcrs.shape == test_gcrs.shape
def test_roundtrip_scalar():
icrs = ICRS(ra=1*u.deg, dec=2*u.deg, distance=3*u.au)
gcrs = GCRS(icrs.cartesian)
bary = icrs.transform_to(BarycentricTrueEcliptic)
helio = icrs.transform_to(HeliocentricTrueEcliptic)
geo = gcrs.transform_to(GeocentricTrueEcliptic)
bary_icrs = bary.transform_to(ICRS)
helio_icrs = helio.transform_to(ICRS)
geo_gcrs = geo.transform_to(GCRS)
assert quantity_allclose(bary_icrs.cartesian.xyz, icrs.cartesian.xyz)
assert quantity_allclose(helio_icrs.cartesian.xyz, icrs.cartesian.xyz)
assert quantity_allclose(geo_gcrs.cartesian.xyz, gcrs.cartesian.xyz)
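# A minimal sketch (not an accuracy test, and not in the original suite):
# the north celestial pole should come out at an ecliptic latitude of about
# 90 - 23.4 deg, reflecting the obliquity of the ecliptic.
def test_obliquity_sketch():
    ncp = ICRS(ra=0*u.deg, dec=90*u.deg)
    ecl = ncp.transform_to(BarycentricTrueEcliptic)
    assert 66*u.deg < ecl.lat < 67*u.deg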
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import bz2
import gzip
import http.client
import mmap
import operator
import pathlib
import io
import os
import sys
import tempfile
import warnings
import zipfile
import re
from functools import reduce
import numpy as np
from .util import (isreadable, iswritable, isfile, fileobj_open, fileobj_name,
fileobj_closed, fileobj_mode, _array_from_file,
_array_to_file, _write_string)
from ...utils.data import download_file, _is_url
from ...utils.decorators import classproperty, deprecated_renamed_argument
from ...utils.exceptions import AstropyUserWarning
# Maps astropy.io.fits-specific file mode names to the appropriate file
# modes to use for the underlying raw files
IO_FITS_MODES = {
'readonly': 'rb',
'copyonwrite': 'rb',
'update': 'rb+',
'append': 'ab+',
'ostream': 'wb',
'denywrite': 'rb'}
# Maps OS-level file modes to the appropriate astropy.io.fits specific mode
# to use when given file objects but no mode specified; obviously in
# IO_FITS_MODES there are overlaps; for example 'readonly' and 'denywrite'
# both require the file to be opened in 'rb' mode. But 'readonly' is the
# default behavior for such files if not otherwise specified.
# Note: 'ab' is only supported for 'ostream' which is output-only.
FILE_MODES = {
'rb': 'readonly', 'rb+': 'update',
'wb': 'ostream', 'wb+': 'update',
'ab': 'ostream', 'ab+': 'append'}
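# For example (illustrative only): a handle opened with raw mode 'rb+' maps
# to fits mode 'update', and IO_FITS_MODES maps 'update' back to 'rb+'. The
# two tables are nearly, but not exactly, inverses: 'readonly',
# 'copyonwrite' and 'denywrite' all share the raw mode 'rb'.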
# A match indicates the file was opened in text mode, which is not allowed
TEXT_RE = re.compile(r'^[rwa]((t?\+?)|(\+?t?))$')
# readonly actually uses copyonwrite for mmap so that readonly without mmap and
# with mmap still have the same behavior with regard to updating the array; to
# get a truly readonly mmap, use denywrite
# the name 'denywrite' comes from a deprecated flag to mmap() on Linux--it
# should be clarified that 'denywrite' mode is not directly analogous to the
# use of that flag; it was just taken, for lack of anything better, as a name
# that means something like "read only" but isn't readonly.
MEMMAP_MODES = {'readonly': mmap.ACCESS_COPY,
'copyonwrite': mmap.ACCESS_COPY,
'update': mmap.ACCESS_WRITE,
'append': mmap.ACCESS_COPY,
'denywrite': mmap.ACCESS_READ}
# TODO: Eventually raise a warning, and maybe even later disable the use of
# 'copyonwrite' and 'denywrite' modes unless memmap=True. For now, however,
# that would generate too many warnings for too many users. If nothing else,
# wait until the new logging system is in place.
GZIP_MAGIC = b'\x1f\x8b\x08'
PKZIP_MAGIC = b'\x50\x4b\x03\x04'
BZIP2_MAGIC = b'\x42\x5a'
def _normalize_fits_mode(mode):
if mode is not None and mode not in IO_FITS_MODES:
if TEXT_RE.match(mode):
raise ValueError(
"Text mode '{}' not supported: "
"files must be opened in binary mode".format(mode))
new_mode = FILE_MODES.get(mode)
if new_mode not in IO_FITS_MODES:
raise ValueError("Mode '{}' not recognized".format(mode))
mode = new_mode
return mode
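# Illustrative examples of the normalization above (not exercised here):
# _normalize_fits_mode('rb') -> 'readonly'
# _normalize_fits_mode('update') -> 'update' (already a fits mode)
# _normalize_fits_mode('r') raises ValueError (text mode is rejected)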
class _File:
"""
Represents a FITS file on disk (or in some other file-like object).
"""
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def __init__(self, fileobj=None, mode=None, memmap=None, overwrite=False,
cache=True):
self.strict_memmap = bool(memmap)
memmap = True if memmap is None else memmap
if fileobj is None:
self._file = None
self.closed = False
self.binary = True
self.mode = mode
self.memmap = memmap
self.compression = None
self.readonly = False
self.writeonly = False
self.simulateonly = True
self.close_on_error = False
return
else:
self.simulateonly = False
# If fileobj is of type pathlib.Path
if isinstance(fileobj, pathlib.Path):
fileobj = str(fileobj)
elif isinstance(fileobj, bytes):
# Using bytes as filename is tricky, it's deprecated for Windows
# in Python 3.5 (because it could lead to false-positives) but
# was fixed and un-deprecated in Python 3.6.
# However it requires that the bytes object is encoded with the
# file system encoding.
# Probably better to error out and ask for a str object instead.
# TODO: This could be revised when Python 3.5 support is dropped
# See also: https://github.com/astropy/astropy/issues/6789
raise TypeError("names should be `str` not `bytes`.")
# Holds mmap instance for files that use mmap
self._mmap = None
if mode is not None and mode not in IO_FITS_MODES:
raise ValueError("Mode '{}' not recognized".format(mode))
if isfile(fileobj):
objmode = _normalize_fits_mode(fileobj_mode(fileobj))
if mode is not None and mode != objmode:
raise ValueError(
"Requested FITS mode '{}' not compatible with open file "
"handle mode '{}'".format(mode, objmode))
mode = objmode
if mode is None:
mode = 'readonly'
# Handle raw URLs
if (isinstance(fileobj, str) and
mode not in ('ostream', 'append', 'update') and _is_url(fileobj)):
self.name = download_file(fileobj, cache=cache)
# Handle responses from URL requests that have already been opened
elif isinstance(fileobj, http.client.HTTPResponse):
if mode in ('ostream', 'append', 'update'):
raise ValueError(
"Mode {} not supported for HTTPResponse".format(mode))
fileobj = io.BytesIO(fileobj.read())
else:
self.name = fileobj_name(fileobj)
self.closed = False
self.binary = True
self.mode = mode
self.memmap = memmap
# Underlying fileobj is a file-like object, but an actual file object
self.file_like = False
# Should the object be closed on error: see
# https://github.com/astropy/astropy/issues/6168
self.close_on_error = False
# More defaults to be adjusted below as necessary
self.compression = None
self.readonly = False
self.writeonly = False
# Initialize the internal self._file object
if isfile(fileobj):
self._open_fileobj(fileobj, mode, overwrite)
elif isinstance(fileobj, str):
self._open_filename(fileobj, mode, overwrite)
else:
self._open_filelike(fileobj, mode, overwrite)
self.fileobj_mode = fileobj_mode(self._file)
if isinstance(fileobj, gzip.GzipFile):
self.compression = 'gzip'
elif isinstance(fileobj, zipfile.ZipFile):
# Reading from zip files is supported but not writing (yet)
self.compression = 'zip'
elif isinstance(fileobj, bz2.BZ2File):
self.compression = 'bzip2'
if (mode in ('readonly', 'copyonwrite', 'denywrite') or
(self.compression and mode == 'update')):
self.readonly = True
elif (mode == 'ostream' or
(self.compression and mode == 'append')):
self.writeonly = True
# For 'ab+' mode, the pointer is at the end after the open in
# Linux, but is at the beginning in Solaris.
if (mode == 'ostream' or self.compression or
not hasattr(self._file, 'seek')):
# For output stream start with a truncated file.
# For compressed files we can't really guess at the size
self.size = 0
else:
pos = self._file.tell()
self._file.seek(0, 2)
self.size = self._file.tell()
self._file.seek(pos)
if self.memmap:
if not isfile(self._file):
self.memmap = False
elif not self.readonly and not self._mmap_available:
# Test mmap.flush--see
# https://github.com/astropy/astropy/issues/968
self.memmap = False
def __repr__(self):
return '<{}.{} {}>'.format(self.__module__, self.__class__.__name__,
self._file)
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def readable(self):
if self.writeonly:
return False
return isreadable(self._file)
def read(self, size=None):
if not hasattr(self._file, 'read'):
raise EOFError
try:
return self._file.read(size)
except OSError:
# On some versions of Python, it appears, GzipFile will raise an
# OSError if you try to read past its end (as opposed to just
# returning '')
if self.compression == 'gzip':
return ''
raise
def readarray(self, size=None, offset=0, dtype=np.uint8, shape=None):
"""
Similar to file.read(), but returns the contents of the underlying
file as a numpy array (or mmap'd array if memmap=True) rather than a
string.
Usually it's best not to use the `size` argument with this method, but
it's provided for compatibility.
"""
if not hasattr(self._file, 'read'):
raise EOFError
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if size and size % dtype.itemsize != 0:
raise ValueError('size {} not a multiple of {}'.format(size, dtype))
if isinstance(shape, int):
shape = (shape,)
if not (size or shape):
warnings.warn('No size or shape given to readarray(); assuming a '
'shape of (1,)', AstropyUserWarning)
shape = (1,)
if size and not shape:
shape = (size // dtype.itemsize,)
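            # e.g. size=16 with a float64 dtype (itemsize 8) implies
            # shape == (2,)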
if size and shape:
actualsize = np.prod(shape) * dtype.itemsize
if actualsize > size:
raise ValueError('size {} is too few bytes for a {} array of '
'{}'.format(size, shape, dtype))
elif actualsize < size:
raise ValueError('size {} is too many bytes for a {} array of '
'{}'.format(size, shape, dtype))
filepos = self._file.tell()
try:
if self.memmap:
if self._mmap is None:
# Instantiate Memmap array of the file offset at 0 (so we
# can return slices of it to offset anywhere else into the
# file)
access_mode = MEMMAP_MODES[self.mode]
                    # For reasons unknown, the file needs to point to (near)
                    # the beginning or end of the file. No idea how close to
                    # the beginning or end.
                    # If I had to guess, there is some bug in the mmap module
                    # of CPython, or perhaps in Microsoft's underlying code
                    # for generating the mmap.
self._file.seek(0, 0)
# This would also work:
# self._file.seek(0, 2) # moves to the end
self._mmap = mmap.mmap(self._file.fileno(), 0,
access=access_mode,
offset=0)
return np.ndarray(shape=shape, dtype=dtype, offset=offset,
buffer=self._mmap)
else:
count = reduce(operator.mul, shape)
self._file.seek(offset)
data = _array_from_file(self._file, dtype, count)
data.shape = shape
return data
finally:
# Make sure we leave the file in the position we found it; on
# some platforms (e.g. Windows) mmaping a file handle can also
# reset its file pointer
self._file.seek(filepos)
def writable(self):
if self.readonly:
return False
return iswritable(self._file)
def write(self, string):
if hasattr(self._file, 'write'):
_write_string(self._file, string)
def writearray(self, array):
"""
Similar to file.write(), but writes a numpy array instead of a string.
Also like file.write(), a flush() or close() may be needed before
the file on disk reflects the data written.
"""
if hasattr(self._file, 'write'):
_array_to_file(array, self._file)
def flush(self):
if hasattr(self._file, 'flush'):
self._file.flush()
def seek(self, offset, whence=0):
if not hasattr(self._file, 'seek'):
return
self._file.seek(offset, whence)
pos = self._file.tell()
if self.size and pos > self.size:
warnings.warn('File may have been truncated: actual file length '
'({}) is smaller than the expected size ({})'
.format(self.size, pos), AstropyUserWarning)
def tell(self):
if not hasattr(self._file, 'tell'):
raise EOFError
return self._file.tell()
def truncate(self, size=None):
if hasattr(self._file, 'truncate'):
self._file.truncate(size)
def close(self):
"""
Close the 'physical' FITS file.
"""
if hasattr(self._file, 'close'):
self._file.close()
self._maybe_close_mmap()
# Set self._memmap to None anyways since no new .data attributes can be
# loaded after the file is closed
self._mmap = None
self.closed = True
self.close_on_error = False
def _maybe_close_mmap(self, refcount_delta=0):
"""
When mmap is in use these objects hold a reference to the mmap of the
file (so there is only one, shared by all HDUs that reference this
file).
This will close the mmap if there are no arrays referencing it.
"""
if (self._mmap is not None and
sys.getrefcount(self._mmap) == 2 + refcount_delta):
self._mmap.close()
self._mmap = None
def _overwrite_existing(self, overwrite, fileobj, closed):
"""Overwrite an existing file if ``overwrite`` is ``True``, otherwise
raise an OSError. The exact behavior of this method depends on the
_File object state and is only meant for use within the ``_open_*``
internal methods.
"""
# The file will be overwritten...
if ((self.file_like and hasattr(fileobj, 'len') and fileobj.len > 0) or
(os.path.exists(self.name) and os.path.getsize(self.name) != 0)):
if overwrite:
if self.file_like and hasattr(fileobj, 'truncate'):
fileobj.truncate(0)
else:
if not closed:
fileobj.close()
os.remove(self.name)
else:
raise OSError("File {!r} already exists.".format(self.name))
def _try_read_compressed(self, obj_or_name, magic, mode, ext=''):
"""Attempt to determine if the given file is compressed"""
if ext == '.gz' or magic.startswith(GZIP_MAGIC):
if mode == 'append':
raise OSError("'append' mode is not supported with gzip files."
"Use 'update' mode instead")
# Handle gzip files
kwargs = dict(mode=IO_FITS_MODES[mode])
if isinstance(obj_or_name, str):
kwargs['filename'] = obj_or_name
else:
kwargs['fileobj'] = obj_or_name
self._file = gzip.GzipFile(**kwargs)
self.compression = 'gzip'
elif ext == '.zip' or magic.startswith(PKZIP_MAGIC):
# Handle zip files
self._open_zipfile(self.name, mode)
self.compression = 'zip'
elif ext == '.bz2' or magic.startswith(BZIP2_MAGIC):
# Handle bzip2 files
if mode in ['update', 'append']:
raise OSError("update and append modes are not supported "
"with bzip2 files")
# bzip2 only supports 'w' and 'r' modes
bzip2_mode = 'w' if mode == 'ostream' else 'r'
self._file = bz2.BZ2File(obj_or_name, mode=bzip2_mode)
self.compression = 'bzip2'
return self.compression is not None
def _open_fileobj(self, fileobj, mode, overwrite):
"""Open a FITS file from a file object (including compressed files)."""
closed = fileobj_closed(fileobj)
fmode = fileobj_mode(fileobj) or IO_FITS_MODES[mode]
if mode == 'ostream':
self._overwrite_existing(overwrite, fileobj, closed)
if not closed:
self._file = fileobj
elif isfile(fileobj):
self._file = fileobj_open(self.name, IO_FITS_MODES[mode])
# Attempt to determine if the file represented by the open file object
# is compressed
try:
# We need to account for the possibility that the underlying file
# handle may have been opened with either 'ab' or 'ab+', which
# means that the current file position is at the end of the file.
if mode in ['ostream', 'append']:
self._file.seek(0)
magic = self._file.read(4)
# No matter whether the underlying file was opened with 'ab' or
# 'ab+', we need to return to the beginning of the file in order
# to properly process the FITS header (and handle the possibility
# of a compressed file).
self._file.seek(0)
        except OSError:
return
self._try_read_compressed(fileobj, magic, mode)
def _open_filelike(self, fileobj, mode, overwrite):
"""Open a FITS file from a file-like object, i.e. one that has
read and/or write methods.
"""
self.file_like = True
self._file = fileobj
if fileobj_closed(fileobj):
raise OSError("Cannot read from/write to a closed file-like "
"object ({!r}).".format(fileobj))
if isinstance(fileobj, zipfile.ZipFile):
self._open_zipfile(fileobj, mode)
# We can bypass any additional checks at this point since now
# self._file points to the temp file extracted from the zip
return
        # If there are no seek or tell methods, then set the mode to
        # output streaming.
if (not hasattr(self._file, 'seek') or
not hasattr(self._file, 'tell')):
self.mode = mode = 'ostream'
if mode == 'ostream':
self._overwrite_existing(overwrite, fileobj, False)
# Any "writeable" mode requires a write() method on the file object
if (self.mode in ('update', 'append', 'ostream') and
not hasattr(self._file, 'write')):
raise OSError("File-like object does not have a 'write' "
"method, required for mode '{}'.".format(self.mode))
# Any mode except for 'ostream' requires readability
if self.mode != 'ostream' and not hasattr(self._file, 'read'):
raise OSError("File-like object does not have a 'read' "
"method, required for mode {!r}.".format(self.mode))
def _open_filename(self, filename, mode, overwrite):
"""Open a FITS file from a filename string."""
if mode == 'ostream':
self._overwrite_existing(overwrite, None, True)
if os.path.exists(self.name):
with fileobj_open(self.name, 'rb') as f:
magic = f.read(4)
else:
magic = b''
ext = os.path.splitext(self.name)[1]
if not self._try_read_compressed(self.name, magic, mode, ext=ext):
self._file = fileobj_open(self.name, IO_FITS_MODES[mode])
self.close_on_error = True
# Make certain we're back at the beginning of the file
# BZ2File does not support seek when the file is open for writing, but
# when opening a file for write, bz2.BZ2File always truncates anyway.
if not (isinstance(self._file, bz2.BZ2File) and mode == 'ostream'):
self._file.seek(0)
@classproperty(lazy=True)
def _mmap_available(cls):
"""Tests that mmap, and specifically mmap.flush works. This may
be the case on some uncommon platforms (see
https://github.com/astropy/astropy/issues/968).
If mmap.flush is found not to work, ``self.memmap = False`` is
set and a warning is issued.
"""
tmpfd, tmpname = tempfile.mkstemp()
try:
# Windows does not allow mappings on empty files
os.write(tmpfd, b' ')
os.fsync(tmpfd)
try:
mm = mmap.mmap(tmpfd, 1, access=mmap.ACCESS_WRITE)
except OSError as exc:
warnings.warn('Failed to create mmap: {}; mmap use will be '
'disabled'.format(str(exc)), AstropyUserWarning)
del exc
return False
try:
mm.flush()
except OSError:
warnings.warn('mmap.flush is unavailable on this platform; '
'using mmap in writeable mode will be disabled',
AstropyUserWarning)
return False
finally:
mm.close()
finally:
os.close(tmpfd)
os.remove(tmpname)
return True
def _open_zipfile(self, fileobj, mode):
"""Limited support for zipfile.ZipFile objects containing a single
a file. Allows reading only for now by extracting the file to a
tempfile.
"""
if mode in ('update', 'append'):
raise OSError(
"Writing to zipped fits files is not currently "
"supported")
if not isinstance(fileobj, zipfile.ZipFile):
zfile = zipfile.ZipFile(fileobj)
close = True
else:
zfile = fileobj
close = False
namelist = zfile.namelist()
if len(namelist) != 1:
raise OSError(
"Zip files with multiple members are not supported.")
self._file = tempfile.NamedTemporaryFile(suffix='.fits')
self._file.write(zfile.read(namelist[0]))
if close:
zfile.close()
# We just wrote the contents of the first file in the archive to a new
# temp file, which now serves as our underlying file object. So it's
# necessary to reset the position back to the beginning
self._file.seek(0)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
import warnings
from collections import OrderedDict
from .. import registry as io_registry
from ... import units as u
from ...table import Table, serialize, meta, Column, MaskedColumn
from ...table.table import has_info_class
from ...time import Time
from ...utils.exceptions import AstropyUserWarning
from ...utils.data_info import MixinInfo, serialize_context_as
from . import HDUList, TableHDU, BinTableHDU, GroupsHDU
from .column import KEYWORD_NAMES, ASCII_DEFAULT_WIDTHS, _fortran_to_python_format
from .convenience import table_to_hdu
from .hdu.hdulist import fitsopen as fits_open
from .util import first
from .verify import VerifyError, VerifyWarning
# FITS file signature as per RFC 4047
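# (the 30 signature bytes spell the ASCII card 'SIMPLE  =' followed by
# spaces, with 'T' in byte 30)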
FITS_SIGNATURE = (b"\x53\x49\x4d\x50\x4c\x45\x20\x20\x3d\x20\x20\x20\x20\x20"
b"\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20"
b"\x20\x54")
# Keywords to remove for all tables that are read in
REMOVE_KEYWORDS = ['XTENSION', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2',
'PCOUNT', 'GCOUNT', 'TFIELDS', 'THEAP']
# Column-specific keywords regex
COLUMN_KEYWORD_REGEXP = '(' + '|'.join(KEYWORD_NAMES) + ')[0-9]+'
def is_column_keyword(keyword):
return re.match(COLUMN_KEYWORD_REGEXP, keyword) is not None
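# For example (illustrative): is_column_keyword('TUNIT1') is True, while
# is_column_keyword('NAXIS1') is False, since NAXIS is not a per-column
# keyword.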
def is_fits(origin, filepath, fileobj, *args, **kwargs):
"""
Determine whether `origin` is a FITS file.
Parameters
----------
origin : str or readable file-like object
Path or file object containing a potential FITS file.
Returns
-------
is_fits : bool
Returns `True` if the given file is a FITS file.
"""
if fileobj is not None:
pos = fileobj.tell()
sig = fileobj.read(30)
fileobj.seek(pos)
return sig == FITS_SIGNATURE
elif filepath is not None:
if filepath.lower().endswith(('.fits', '.fits.gz', '.fit', '.fit.gz',
'.fts', '.fts.gz')):
return True
elif isinstance(args[0], (HDUList, TableHDU, BinTableHDU, GroupsHDU)):
return True
else:
return False
def _decode_mixins(tbl):
"""Decode a Table ``tbl`` that has astropy Columns + appropriate meta-data into
the corresponding table with mixin columns (as appropriate).
"""
# If available read in __serialized_columns__ meta info which is stored
# in FITS COMMENTS between two sentinels.
try:
i0 = tbl.meta['comments'].index('--BEGIN-ASTROPY-SERIALIZED-COLUMNS--')
i1 = tbl.meta['comments'].index('--END-ASTROPY-SERIALIZED-COLUMNS--')
except (ValueError, KeyError):
return tbl
# The YAML data are split into COMMENT cards, with lines longer than 70
# characters being split with a continuation character \ (backslash).
# Strip the backslashes and join together.
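    # For example (illustrative), a 100-character YAML line is stored as a
    # 71-character COMMENT card ending in '\' followed by a second COMMENT
    # card holding the remaining 30 characters.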
continuation_line = False
lines = []
for line in tbl.meta['comments'][i0 + 1:i1]:
if continuation_line:
lines[-1] = lines[-1] + line[:70]
else:
lines.append(line[:70])
continuation_line = len(line) == 71
del tbl.meta['comments'][i0:i1 + 1]
if not tbl.meta['comments']:
del tbl.meta['comments']
info = meta.get_header_from_yaml(lines)
# Add serialized column information to table meta for use in constructing mixins
tbl.meta['__serialized_columns__'] = info['meta']['__serialized_columns__']
# Use the `datatype` attribute info to update column attributes that are
# NOT already handled via standard FITS column keys (name, dtype, unit).
for col in info['datatype']:
for attr in ['description', 'meta']:
if attr in col:
setattr(tbl[col['name']].info, attr, col[attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
tbl = serialize._construct_mixins_from_columns(tbl)
return tbl
def read_table_fits(input, hdu=None, astropy_native=False, memmap=False,
character_as_bytes=True):
"""
    Read a Table object from a FITS file.
If the ``astropy_native`` argument is ``True``, then input FITS columns
which are representations of an astropy core object will be converted to
that class and stored in the ``Table`` as "mixin columns". Currently this
is limited to FITS columns which adhere to the FITS Time standard, in which
case they will be converted to a `~astropy.time.Time` column in the output
table.
Parameters
----------
input : str or file-like object or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. If a file object, or
a compatible HDU object, the object to extract the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.TableHDU`
- :class:`~astropy.io.fits.hdu.table.BinTableHDU`
- :class:`~astropy.io.fits.hdu.table.GroupsHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
astropy_native : bool, optional
Read in FITS columns as native astropy objects where possible instead
of standard Table Column objects. Default is False.
memmap : bool, optional
Whether to use memory mapping, which accesses data on disk as needed. If
you are only accessing part of the data, this is often more efficient.
If you want to access all the values in the table, and you are able to
fit the table in memory, you may be better off leaving memory mapping
off. However, if your table would not fit in memory, you should set this
to `True`.
character_as_bytes : bool, optional
If `True`, string columns are stored as Numpy byte arrays (dtype ``S``)
and are converted on-the-fly to unicode strings when accessing
individual elements. If you need to use Numpy unicode arrays (dtype
``U``) internally, you should set this to `False`, but note that this
will use more memory. If set to `False`, string columns will not be
memory-mapped even if ``memmap`` is `True`.
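
    Examples
    --------
    This reader is normally invoked through the unified I/O registry; the
    filename and HDU index below are illustrative::

        >>> from astropy.table import Table
        >>> t = Table.read('observations.fits', hdu=1)  # doctest: +SKIP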
"""
if isinstance(input, HDUList):
# Parse all table objects
tables = OrderedDict()
for ihdu, hdu_item in enumerate(input):
if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)):
tables[ihdu] = hdu_item
if len(tables) > 1:
if hdu is None:
warnings.warn("hdu= was not specified but multiple tables"
" are present, reading in first available"
" table (hdu={0})".format(first(tables)),
AstropyUserWarning)
hdu = first(tables)
# hdu might not be an integer, so we first need to convert it
# to the correct HDU index
hdu = input.index_of(hdu)
if hdu in tables:
table = tables[hdu]
else:
raise ValueError("No table found in hdu={0}".format(hdu))
elif len(tables) == 1:
table = tables[first(tables)]
else:
raise ValueError("No table found")
elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)):
table = input
else:
hdulist = fits_open(input, character_as_bytes=character_as_bytes,
memmap=memmap)
try:
return read_table_fits(hdulist, hdu=hdu,
astropy_native=astropy_native)
finally:
hdulist.close()
# Check if table is masked
masked = any(col.null is not None for col in table.columns)
# TODO: in future, it may make more sense to do this column-by-column,
# rather than via the structured array.
# In the loop below we access the data using data[col.name] rather than
# col.array to make sure that the data is scaled correctly if needed.
data = table.data
columns = []
for col in data.columns:
# Set column data
if masked:
column = MaskedColumn(data=data[col.name], name=col.name, copy=False)
if col.null is not None:
column.set_fill_value(col.null)
column.mask[column.data == col.null] = True
else:
column = Column(data=data[col.name], name=col.name, copy=False)
# Copy over units
if col.unit is not None:
column.unit = u.Unit(col.unit, format='fits', parse_strict='silent')
# Copy over display format
if col.disp is not None:
column.format = _fortran_to_python_format(col.disp)
columns.append(column)
# Create Table object
t = Table(columns, masked=masked, copy=False)
# TODO: deal properly with unsigned integers
hdr = table.header
if astropy_native:
# Avoid circular imports, and also only import if necessary.
from .fitstime import fits_to_time
hdr = fits_to_time(hdr, t)
for key, value, comment in hdr.cards:
if key in ['COMMENT', 'HISTORY']:
# Convert to io.ascii format
if key == 'COMMENT':
key = 'comments'
if key in t.meta:
t.meta[key].append(value)
else:
t.meta[key] = [value]
elif key in t.meta: # key is duplicate
if isinstance(t.meta[key], list):
t.meta[key].append(value)
else:
t.meta[key] = [t.meta[key], value]
elif is_column_keyword(key) or key in REMOVE_KEYWORDS:
pass
else:
t.meta[key] = value
# TODO: implement masking
# Decode any mixin columns that have been stored as standard Columns.
t = _decode_mixins(t)
return t
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
# Determine if information will be lost without serializing meta. This is hardcoded
# to the set difference between column info attributes and what FITS can store
# natively (name, dtype, unit). See _get_col_attributes() in table/meta.py for where
# this comes from.
info_lost = any(any(getattr(col.info, attr, None) not in (None, {})
for attr in ('description', 'meta'))
for col in tbl.itercols())
# If PyYAML is not available then check to see if there are any mixin cols
# that *require* YAML serialization. FITS already has support for Time,
    # Quantity, so if those are the only mixins then proceed without doing the
# YAML bit, for backward compatibility (i.e. not requiring YAML to write
# Time or Quantity). In this case other mixin column meta (e.g.
# description or meta) will be silently dropped, consistent with astropy <=
# 2.0 behavior.
try:
import yaml
except ImportError:
for col in tbl.itercols():
if (has_info_class(col, MixinInfo) and
col.__class__ not in (u.Quantity, Time)):
raise TypeError("cannot write type {} column '{}' "
"to FITS without PyYAML installed."
.format(col.__class__.__name__, col.info.name))
else:
if info_lost:
warnings.warn("table contains column(s) with defined 'format',"
" 'description', or 'meta' info attributes. These"
" will be dropped unless you install PyYAML.",
AstropyUserWarning)
return tbl
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table. This ignores
    # Time-subclass columns and leaves them in the table so that the downstream
# FITS Time handling does the right thing.
with serialize_context_as('fits'):
encode_tbl = serialize._represent_mixins_as_columns(
tbl, exclude_classes=(Time,))
# If the encoded table is unchanged then there were no mixins. But if there
# is column metadata (format, description, meta) that would be lost, then
# still go through the serialized columns machinery.
if encode_tbl is tbl and not info_lost:
return tbl
# Get the YAML serialization of information describing the table columns.
    # This is re-using ECSV code that combines the existing table.meta with
# the extra __serialized_columns__ key. For FITS the table.meta is handled
# by the native FITS connect code, so don't include that in the YAML
# output.
ser_col = '__serialized_columns__'
# encode_tbl might not have a __serialized_columns__ key if there were no mixins,
# but machinery below expects it to be available, so just make an empty dict.
encode_tbl.meta.setdefault(ser_col, {})
tbl_meta_copy = encode_tbl.meta.copy()
try:
encode_tbl.meta = {ser_col: encode_tbl.meta[ser_col]}
meta_yaml_lines = meta.get_yaml_from_table(encode_tbl)
finally:
encode_tbl.meta = tbl_meta_copy
del encode_tbl.meta[ser_col]
if 'comments' not in encode_tbl.meta:
encode_tbl.meta['comments'] = []
encode_tbl.meta['comments'].append('--BEGIN-ASTROPY-SERIALIZED-COLUMNS--')
for line in meta_yaml_lines:
if len(line) == 0:
lines = ['']
else:
# Split line into 70 character chunks for COMMENT cards
idxs = list(range(0, len(line) + 70, 70))
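            # For example (illustrative), a 100-character line gives
            # idxs == [0, 70, 140], i.e. chunks line[0:70] and line[70:140];
            # each chunk gets a trailing '\' and the final backslash is
            # stripped below.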
lines = [line[i0:i1] + '\\' for i0, i1 in zip(idxs[:-1], idxs[1:])]
lines[-1] = lines[-1][:-1]
encode_tbl.meta['comments'].extend(lines)
encode_tbl.meta['comments'].append('--END-ASTROPY-SERIALIZED-COLUMNS--')
return encode_tbl
def write_table_fits(input, output, overwrite=False):
"""
Write a Table object to a FITS file
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
overwrite : bool
Whether to overwrite any existing file without warning.
"""
# Encode any mixin columns into standard Columns.
input = _encode_mixins(input)
table_hdu = table_to_hdu(input, character_as_bytes=True)
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
else:
raise OSError("File exists: {0}".format(output))
table_hdu.writeto(output)
io_registry.register_reader('fits', Table, read_table_fits)
io_registry.register_writer('fits', Table, write_table_fits)
io_registry.register_identifier('fits', Table, is_fits)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Facilities for diffing two FITS files. Includes objects for diffing entire
FITS files, individual HDUs, FITS headers, or just FITS data.
Used to implement the fitsdiff program.
"""
import fnmatch
import glob
import io
import operator
import os.path
import textwrap
import warnings
from collections import defaultdict
from inspect import signature
from itertools import islice
import numpy as np
from ... import __version__
from .card import Card, BLANK_CARD
from .header import Header
from ...utils.decorators import deprecated_renamed_argument
# HDUList is used in one of the doctests
from .hdu.hdulist import fitsopen, HDUList # pylint: disable=W0611
from .hdu.table import _TableLikeHDU
from ...utils.exceptions import AstropyDeprecationWarning
from ...utils.diff import (report_diff_values, fixed_width_indent,
where_not_allclose, diff_values)
__all__ = ['FITSDiff', 'HDUDiff', 'HeaderDiff', 'ImageDataDiff', 'RawDataDiff',
'TableDataDiff']
# Column attributes of interest for comparison
_COL_ATTRS = [('unit', 'units'), ('null', 'null values'),
('bscale', 'bscales'), ('bzero', 'bzeros'),
('disp', 'display formats'), ('dim', 'dimensions')]
class _BaseDiff:
"""
Base class for all FITS diff objects.
When instantiating a FITS diff object, the first two arguments are always
the two objects to diff (two FITS files, two FITS headers, etc.).
Instantiating a ``_BaseDiff`` also causes the diff itself to be executed.
    The returned ``_BaseDiff`` instance has a number of attributes that describe
the results of the diff operation.
The most basic attribute, present on all ``_BaseDiff`` instances, is
``.identical`` which is `True` if the two objects being compared are
identical according to the diff method for objects of that type.
"""
def __init__(self, a, b):
"""
The ``_BaseDiff`` class does not implement a ``_diff`` method and
should not be instantiated directly. Instead instantiate the
appropriate subclass of ``_BaseDiff`` for the objects being compared
        (for example, use `HeaderDiff` to compare two `Header` objects).
"""
self.a = a
self.b = b
# For internal use in report output
self._fileobj = None
self._indent = 0
self._diff()
def __bool__(self):
"""
        A ``_BaseDiff`` object acts as `True` in a boolean context if any
        differences were found between the two objects being compared; it
        acts as `False` if the two objects are identical.
"""
return not self.identical
@classmethod
def fromdiff(cls, other, a, b):
"""
Returns a new Diff object of a specific subclass from an existing diff
object, passing on the values for any arguments they share in common
(such as ignore_keywords).
For example::
>>> from astropy.io import fits
>>> hdul1, hdul2 = fits.HDUList(), fits.HDUList()
>>> headera, headerb = fits.Header(), fits.Header()
>>> fd = fits.FITSDiff(hdul1, hdul2, ignore_keywords=['*'])
>>> hd = fits.HeaderDiff.fromdiff(fd, headera, headerb)
>>> list(hd.ignore_keywords)
['*']
"""
sig = signature(cls.__init__)
# The first 3 arguments of any Diff initializer are self, a, and b.
kwargs = {}
for arg in list(sig.parameters.keys())[3:]:
if hasattr(other, arg):
kwargs[arg] = getattr(other, arg)
return cls(a, b, **kwargs)
@property
def identical(self):
"""
`True` if all the ``.diff_*`` attributes on this diff instance are
empty, implying that no differences were found.
Any subclass of ``_BaseDiff`` must have at least one ``.diff_*``
attribute, which contains a non-empty value if and only if some
difference was found between the two objects being compared.
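
        For example::

            >>> from astropy.io import fits
            >>> fits.HeaderDiff(fits.Header(), fits.Header()).identical
            True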
"""
return not any(getattr(self, attr) for attr in self.__dict__
if attr.startswith('diff_'))
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def report(self, fileobj=None, indent=0, overwrite=False):
"""
Generates a text report on the differences (if any) between two
objects, and either returns it as a string or writes it to a file-like
object.
Parameters
----------
fileobj : file-like object, string, or None (optional)
If `None`, this method returns the report as a string. Otherwise it
returns `None` and writes the report to the given file-like object
(which must have a ``.write()`` method at a minimum), or to a new
file at the path specified.
indent : int
            The number of 4-space indentation levels to apply to the report.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
Returns
-------
report : str or None
"""
return_string = False
filepath = None
if isinstance(fileobj, str):
if os.path.exists(fileobj) and not overwrite:
raise OSError("File {0} exists, aborting (pass in "
"overwrite=True to overwrite)".format(fileobj))
else:
filepath = fileobj
fileobj = open(filepath, 'w')
elif fileobj is None:
fileobj = io.StringIO()
return_string = True
self._fileobj = fileobj
self._indent = indent # This is used internally by _writeln
try:
self._report()
finally:
if filepath:
fileobj.close()
if return_string:
return fileobj.getvalue()
def _writeln(self, text):
self._fileobj.write(fixed_width_indent(text, self._indent) + '\n')
def _diff(self):
raise NotImplementedError
def _report(self):
raise NotImplementedError
class FITSDiff(_BaseDiff):
"""Diff two FITS files by filename, or two `HDUList` objects.
`FITSDiff` objects have the following diff attributes:
- ``diff_hdu_count``: If the FITS files being compared have different
numbers of HDUs, this contains a 2-tuple of the number of HDUs in each
file.
- ``diff_hdus``: If any HDUs with the same index are different, this
contains a list of 2-tuples of the HDU index and the `HDUDiff` object
representing the differences between the two HDUs.
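
    For example, two empty HDU lists compare as identical::

        >>> from astropy.io import fits
        >>> fits.FITSDiff(fits.HDUList(), fits.HDUList()).identical
        True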
"""
def __init__(self, a, b, ignore_hdus=[], ignore_keywords=[],
ignore_comments=[], ignore_fields=[],
numdiffs=10, rtol=0.0, atol=0.0,
ignore_blanks=True, ignore_blank_cards=True, tolerance=None):
"""
Parameters
----------
a : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object.
b : str or `HDUList`
The filename of a FITS file on disk, or an `HDUList` object to
compare to the first file.
ignore_hdus : sequence, optional
HDU names to ignore when comparing two FITS files or HDU lists; the
presence of these HDUs and their contents are ignored. Wildcard
strings may also be included in the list.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionchanged:: 2.0
``rtol`` replaces the deprecated ``tolerance`` argument.
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
if isinstance(a, str):
try:
a = fitsopen(a)
except Exception as exc:
raise OSError("error opening file a ({}): {}: {}".format(
a, exc.__class__.__name__, exc.args[0]))
close_a = True
else:
close_a = False
if isinstance(b, str):
try:
b = fitsopen(b)
except Exception as exc:
raise OSError("error opening file b ({}): {}: {}".format(
b, exc.__class__.__name__, exc.args[0]))
close_b = True
else:
close_b = False
# Normalize keywords/fields to ignore to upper case
self.ignore_hdus = set(k.upper() for k in ignore_hdus)
self.ignore_keywords = set(k.upper() for k in ignore_keywords)
self.ignore_comments = set(k.upper() for k in ignore_comments)
self.ignore_fields = set(k.upper() for k in ignore_fields)
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
if tolerance is not None: # This should be removed in the next astropy version
warnings.warn(
'"tolerance" was deprecated in version 2.0 and will be removed in '
'a future version. Use argument "rtol" instead.',
AstropyDeprecationWarning)
self.rtol = tolerance # when tolerance is provided *always* ignore `rtol`
# during the transition/deprecation period
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
# Some hdu names may be pattern wildcards. Find them.
self.ignore_hdu_patterns = set()
for name in list(self.ignore_hdus):
if name != '*' and glob.has_magic(name):
self.ignore_hdus.remove(name)
self.ignore_hdu_patterns.add(name)
self.diff_hdu_count = ()
self.diff_hdus = []
try:
super().__init__(a, b)
finally:
if close_a:
a.close()
if close_b:
b.close()
def _diff(self):
if len(self.a) != len(self.b):
self.diff_hdu_count = (len(self.a), len(self.b))
if self.ignore_hdus:
self.a = HDUList([h for h in self.a if h.name not in self.ignore_hdus])
self.b = HDUList([h for h in self.b if h.name not in self.ignore_hdus])
if self.ignore_hdu_patterns:
a_names = [hdu.name for hdu in self.a]
b_names = [hdu.name for hdu in self.b]
for pattern in self.ignore_hdu_patterns:
self.a = HDUList([h for h in self.a if h.name not in fnmatch.filter(
a_names, pattern)])
self.b = HDUList([h for h in self.b if h.name not in fnmatch.filter(
b_names, pattern)])
# For now, just compare the extensions one by one in order.
# Might allow some more sophisticated types of diffing later.
# TODO: Somehow or another simplify the passing around of diff
# options--this will become important as the number of options grows
for idx in range(min(len(self.a), len(self.b))):
hdu_diff = HDUDiff.fromdiff(self, self.a[idx], self.b[idx])
if not hdu_diff.identical:
self.diff_hdus.append((idx, hdu_diff))
def _report(self):
wrapper = textwrap.TextWrapper(initial_indent=' ',
subsequent_indent=' ')
# print out heading and parameter values
filenamea = self.a.filename()
if not filenamea:
filenamea = '<{} object at {:#x}>'.format(
self.a.__class__.__name__, id(self.a))
filenameb = self.b.filename()
if not filenameb:
filenameb = '<{} object at {:#x}>'.format(
self.b.__class__.__name__, id(self.b))
self._fileobj.write('\n')
self._writeln(' fitsdiff: {}'.format(__version__))
self._writeln(' a: {}\n b: {}'.format(filenamea, filenameb))
if self.ignore_hdus:
ignore_hdus = ' '.join(sorted(self.ignore_hdus))
self._writeln(' HDU(s) not to be compared:\n{}'
.format(wrapper.fill(ignore_hdus)))
if self.ignore_hdu_patterns:
ignore_hdu_patterns = ' '.join(sorted(self.ignore_hdu_patterns))
self._writeln(' HDU(s) not to be compared:\n{}'
.format(wrapper.fill(ignore_hdu_patterns)))
if self.ignore_keywords:
ignore_keywords = ' '.join(sorted(self.ignore_keywords))
self._writeln(' Keyword(s) not to be compared:\n{}'
.format(wrapper.fill(ignore_keywords)))
if self.ignore_comments:
ignore_comments = ' '.join(sorted(self.ignore_comments))
self._writeln(' Keyword(s) whose comments are not to be compared'
':\n{}'.format(wrapper.fill(ignore_comments)))
if self.ignore_fields:
ignore_fields = ' '.join(sorted(self.ignore_fields))
self._writeln(' Table column(s) not to be compared:\n{}'
.format(wrapper.fill(ignore_fields)))
self._writeln(' Maximum number of different data values to be '
'reported: {}'.format(self.numdiffs))
self._writeln(' Relative tolerance: {}, Absolute tolerance: {}'
.format(self.rtol, self.atol))
if self.diff_hdu_count:
self._fileobj.write('\n')
self._writeln('Files contain different numbers of HDUs:')
self._writeln(' a: {}'.format(self.diff_hdu_count[0]))
self._writeln(' b: {}'.format(self.diff_hdu_count[1]))
if not self.diff_hdus:
self._writeln('No differences found between common HDUs.')
return
elif not self.diff_hdus:
self._fileobj.write('\n')
self._writeln('No differences found.')
return
for idx, hdu_diff in self.diff_hdus:
# print out the extension heading
if idx == 0:
self._fileobj.write('\n')
self._writeln('Primary HDU:')
else:
self._fileobj.write('\n')
self._writeln('Extension HDU {}:'.format(idx))
hdu_diff.report(self._fileobj, indent=self._indent + 1)
class HDUDiff(_BaseDiff):
"""
    Diff two HDU objects, including their headers and their data (but only if
    both HDUs contain the same type of data: image, table, or unknown).
`HDUDiff` objects have the following diff attributes:
- ``diff_extnames``: If the two HDUs have different EXTNAME values, this
contains a 2-tuple of the different extension names.
    - ``diff_extvers``: If the two HDUs have different EXTVER values, this
contains a 2-tuple of the different extension versions.
- ``diff_extlevels``: If the two HDUs have different EXTLEVEL values, this
contains a 2-tuple of the different extension levels.
- ``diff_extension_types``: If the two HDUs have different XTENSION values,
this contains a 2-tuple of the different extension types.
- ``diff_headers``: Contains a `HeaderDiff` object for the headers of the
two HDUs. This will always contain an object--it may be determined
whether the headers are different through ``diff_headers.identical``.
- ``diff_data``: Contains either a `ImageDataDiff`, `TableDataDiff`, or
`RawDataDiff` as appropriate for the data in the HDUs, and only if the
two HDUs have non-empty data of the same type (`RawDataDiff` is used for
HDUs containing non-empty data of an indeterminate type).
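
    For example (the extension names are illustrative)::

        >>> from astropy.io import fits
        >>> hdu_a = fits.ImageHDU(name='SCI')
        >>> hdu_b = fits.ImageHDU(name='ERR')
        >>> fits.HDUDiff(hdu_a, hdu_b).diff_extnames
        ('SCI', 'ERR')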
"""
def __init__(self, a, b, ignore_keywords=[], ignore_comments=[],
ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0,
ignore_blanks=True, ignore_blank_cards=True, tolerance=None):
"""
Parameters
----------
        a : HDU object
            An HDU object.
        b : HDU object
            An HDU object to compare to the first HDU object.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionchanged:: 2.0
``rtol`` replaces the deprecated ``tolerance`` argument.
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.ignore_fields = {k.upper() for k in ignore_fields}
self.rtol = rtol
self.atol = atol
if tolerance is not None: # This should be removed in the next astropy version
warnings.warn(
'"tolerance" was deprecated in version 2.0 and will be removed in '
'a future version. Use argument "rtol" instead.',
AstropyDeprecationWarning)
self.rtol = tolerance # when tolerance is provided *always* ignore `rtol`
# during the transition/deprecation period
self.numdiffs = numdiffs
self.ignore_blanks = ignore_blanks
self.diff_extnames = ()
self.diff_extvers = ()
self.diff_extlevels = ()
self.diff_extension_types = ()
self.diff_headers = None
self.diff_data = None
super().__init__(a, b)
def _diff(self):
if self.a.name != self.b.name:
self.diff_extnames = (self.a.name, self.b.name)
if self.a.ver != self.b.ver:
self.diff_extvers = (self.a.ver, self.b.ver)
if self.a.level != self.b.level:
self.diff_extlevels = (self.a.level, self.b.level)
if self.a.header.get('XTENSION') != self.b.header.get('XTENSION'):
self.diff_extension_types = (self.a.header.get('XTENSION'),
self.b.header.get('XTENSION'))
self.diff_headers = HeaderDiff.fromdiff(self, self.a.header.copy(),
self.b.header.copy())
if self.a.data is None or self.b.data is None:
# TODO: Perhaps have some means of marking this case
pass
elif self.a.is_image and self.b.is_image:
self.diff_data = ImageDataDiff.fromdiff(self, self.a.data,
self.b.data)
elif (isinstance(self.a, _TableLikeHDU) and
isinstance(self.b, _TableLikeHDU)):
# TODO: Replace this if/when _BaseHDU grows a .is_table property
self.diff_data = TableDataDiff.fromdiff(self, self.a.data,
self.b.data)
elif not self.diff_extension_types:
# Don't diff the data for unequal extension types that are not
# recognized image or table types
self.diff_data = RawDataDiff.fromdiff(self, self.a.data,
self.b.data)
def _report(self):
if self.identical:
self._writeln(" No differences found.")
if self.diff_extension_types:
self._writeln(" Extension types differ:\n a: {}\n "
"b: {}".format(*self.diff_extension_types))
if self.diff_extnames:
self._writeln(" Extension names differ:\n a: {}\n "
"b: {}".format(*self.diff_extnames))
if self.diff_extvers:
self._writeln(" Extension versions differ:\n a: {}\n "
"b: {}".format(*self.diff_extvers))
if self.diff_extlevels:
self._writeln(" Extension levels differ:\n a: {}\n "
"b: {}".format(*self.diff_extlevels))
if not self.diff_headers.identical:
self._fileobj.write('\n')
self._writeln(" Headers contain differences:")
self.diff_headers.report(self._fileobj, indent=self._indent + 1)
if self.diff_data is not None and not self.diff_data.identical:
self._fileobj.write('\n')
self._writeln(" Data contains differences:")
self.diff_data.report(self._fileobj, indent=self._indent + 1)
class HeaderDiff(_BaseDiff):
"""
Diff two `Header` objects.
`HeaderDiff` objects have the following diff attributes:
- ``diff_keyword_count``: If the two headers contain a different number of
keywords, this contains a 2-tuple of the keyword count for each header.
- ``diff_keywords``: If either header contains one or more keywords that
don't appear at all in the other header, this contains a 2-tuple
consisting of a list of the keywords only appearing in header a, and a
list of the keywords only appearing in header b.
- ``diff_duplicate_keywords``: If a keyword appears in both headers at
least once, but contains a different number of duplicates (for example, a
different number of HISTORY cards in each header), an item is added to
this dict with the keyword as the key, and a 2-tuple of the different
counts of that keyword as the value. For example::
{'HISTORY': (20, 19)}
means that header a contains 20 HISTORY cards, while header b contains
only 19 HISTORY cards.
    - ``diff_keyword_values``: If any of the common keywords between the two
headers have different values, they appear in this dict. It has a
structure similar to ``diff_duplicate_keywords``, with the keyword as the
key, and a 2-tuple of the different values as the value. For example::
{'NAXIS': (2, 3)}
means that the NAXIS keyword has a value of 2 in header a, and a value of
3 in header b. This excludes any keywords matched by the
``ignore_keywords`` list.
- ``diff_keyword_comments``: Like ``diff_keyword_values``, but contains
differences between keyword comments.
`HeaderDiff` objects also have a ``common_keywords`` attribute that lists
all keywords that appear in both headers.
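
    For example::

        >>> from astropy.io import fits
        >>> ha = fits.Header([('NAXIS', 2)])
        >>> hb = fits.Header([('NAXIS', 3)])
        >>> fits.HeaderDiff(ha, hb).diff_keyword_values['NAXIS']
        [(2, 3)]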
"""
def __init__(self, a, b, ignore_keywords=[], ignore_comments=[],
rtol=0.0, atol=0.0, ignore_blanks=True, ignore_blank_cards=True,
tolerance=None):
"""
Parameters
----------
        a : `Header` or string
            A `Header` object or a string containing a FITS header.
        b : `Header` or string
            A `Header` object or string to compare to the first header.
ignore_keywords : sequence, optional
Header keywords to ignore when comparing two headers; the presence
of these keywords and their values are ignored. Wildcard strings
may also be included in the list.
ignore_comments : sequence, optional
A list of header keywords whose comments should be ignored in the
comparison. May contain wildcard strings as with ignore_keywords.
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionchanged:: 2.0
``rtol`` replaces the deprecated ``tolerance`` argument.
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
ignore_blanks : bool, optional
Ignore extra whitespace at the end of string values either in
headers or data. Extra leading whitespace is not ignored
(default: True).
ignore_blank_cards : bool, optional
Ignore all cards that are blank, i.e. they only contain
whitespace (default: True).
"""
self.ignore_keywords = {k.upper() for k in ignore_keywords}
self.ignore_comments = {k.upper() for k in ignore_comments}
self.rtol = rtol
self.atol = atol
if tolerance is not None: # This should be removed in the next astropy version
warnings.warn(
'"tolerance" was deprecated in version 2.0 and will be removed in '
'a future version. Use argument "rtol" instead.',
AstropyDeprecationWarning)
self.rtol = tolerance # when tolerance is provided *always* ignore `rtol`
# during the transition/deprecation period
self.ignore_blanks = ignore_blanks
self.ignore_blank_cards = ignore_blank_cards
self.ignore_keyword_patterns = set()
self.ignore_comment_patterns = set()
for keyword in list(self.ignore_keywords):
keyword = keyword.upper()
if keyword != '*' and glob.has_magic(keyword):
self.ignore_keywords.remove(keyword)
self.ignore_keyword_patterns.add(keyword)
for keyword in list(self.ignore_comments):
keyword = keyword.upper()
if keyword != '*' and glob.has_magic(keyword):
self.ignore_comments.remove(keyword)
self.ignore_comment_patterns.add(keyword)
# Keywords appearing in each header
self.common_keywords = []
# Set to the number of keywords in each header if the counts differ
self.diff_keyword_count = ()
# Set if the keywords common to each header (excluding ignore_keywords)
# appear in different positions within the header
# TODO: Implement this
self.diff_keyword_positions = ()
# Keywords unique to each header (excluding keywords in
# ignore_keywords)
self.diff_keywords = ()
# Keywords that have different numbers of duplicates in each header
# (excluding keywords in ignore_keywords)
self.diff_duplicate_keywords = {}
# Keywords common to each header but having different values (excluding
# keywords in ignore_keywords)
self.diff_keyword_values = defaultdict(list)
# Keywords common to each header but having different comments
# (excluding keywords in ignore_keywords or in ignore_comments)
self.diff_keyword_comments = defaultdict(list)
if isinstance(a, str):
a = Header.fromstring(a)
if isinstance(b, str):
b = Header.fromstring(b)
if not (isinstance(a, Header) and isinstance(b, Header)):
raise TypeError('HeaderDiff can only diff astropy.io.fits.Header '
'objects or strings containing FITS headers.')
super().__init__(a, b)
# TODO: This doesn't pay much attention to the *order* of the keywords,
# except in the case of duplicate keywords. The order should be checked
# too, or at least it should be an option.
def _diff(self):
if self.ignore_blank_cards:
cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD]
cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD]
else:
cardsa = list(self.a.cards)
cardsb = list(self.b.cards)
# build dictionaries of keyword values and comments
def get_header_values_comments(cards):
values = {}
comments = {}
for card in cards:
value = card.value
if self.ignore_blanks and isinstance(value, str):
value = value.rstrip()
values.setdefault(card.keyword, []).append(value)
comments.setdefault(card.keyword, []).append(card.comment)
return values, comments
valuesa, commentsa = get_header_values_comments(cardsa)
valuesb, commentsb = get_header_values_comments(cardsb)
        # Normalize all keywords to upper-case for comparison's sake;
# TODO: HIERARCH keywords should be handled case-sensitively I think
keywordsa = {k.upper() for k in valuesa}
keywordsb = {k.upper() for k in valuesb}
self.common_keywords = sorted(keywordsa.intersection(keywordsb))
if len(cardsa) != len(cardsb):
self.diff_keyword_count = (len(cardsa), len(cardsb))
# Any other diff attributes should exclude ignored keywords
keywordsa = keywordsa.difference(self.ignore_keywords)
keywordsb = keywordsb.difference(self.ignore_keywords)
if self.ignore_keyword_patterns:
for pattern in self.ignore_keyword_patterns:
keywordsa = keywordsa.difference(fnmatch.filter(keywordsa,
pattern))
keywordsb = keywordsb.difference(fnmatch.filter(keywordsb,
pattern))
if '*' in self.ignore_keywords:
# Any other differences between keywords are to be ignored
return
left_only_keywords = sorted(keywordsa.difference(keywordsb))
right_only_keywords = sorted(keywordsb.difference(keywordsa))
if left_only_keywords or right_only_keywords:
self.diff_keywords = (left_only_keywords, right_only_keywords)
# Compare count of each common keyword
for keyword in self.common_keywords:
if keyword in self.ignore_keywords:
continue
if self.ignore_keyword_patterns:
skip = False
for pattern in self.ignore_keyword_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
counta = len(valuesa[keyword])
countb = len(valuesb[keyword])
if counta != countb:
self.diff_duplicate_keywords[keyword] = (counta, countb)
# Compare keywords' values and comments
for a, b in zip(valuesa[keyword], valuesb[keyword]):
if diff_values(a, b, rtol=self.rtol, atol=self.atol):
self.diff_keyword_values[keyword].append((a, b))
else:
# If there are duplicate keywords we need to be able to
# index each duplicate; if the values of a duplicate
# are identical use None here
self.diff_keyword_values[keyword].append(None)
if not any(self.diff_keyword_values[keyword]):
# No differences found; delete the array of Nones
del self.diff_keyword_values[keyword]
if '*' in self.ignore_comments or keyword in self.ignore_comments:
continue
if self.ignore_comment_patterns:
skip = False
for pattern in self.ignore_comment_patterns:
if fnmatch.fnmatch(keyword, pattern):
skip = True
break
if skip:
continue
for a, b in zip(commentsa[keyword], commentsb[keyword]):
if diff_values(a, b):
self.diff_keyword_comments[keyword].append((a, b))
else:
self.diff_keyword_comments[keyword].append(None)
if not any(self.diff_keyword_comments[keyword]):
del self.diff_keyword_comments[keyword]
def _report(self):
if self.diff_keyword_count:
self._writeln(' Headers have different number of cards:')
self._writeln(' a: {}'.format(self.diff_keyword_count[0]))
self._writeln(' b: {}'.format(self.diff_keyword_count[1]))
if self.diff_keywords:
for keyword in self.diff_keywords[0]:
if keyword in Card._commentary_keywords:
val = self.a[keyword][0]
else:
val = self.a[keyword]
self._writeln(' Extra keyword {!r:8} in a: {!r}'.format(
keyword, val))
for keyword in self.diff_keywords[1]:
if keyword in Card._commentary_keywords:
val = self.b[keyword][0]
else:
val = self.b[keyword]
self._writeln(' Extra keyword {!r:8} in b: {!r}'.format(
keyword, val))
if self.diff_duplicate_keywords:
for keyword, count in sorted(self.diff_duplicate_keywords.items()):
self._writeln(' Inconsistent duplicates of keyword {!r:8}:'
.format(keyword))
                self._writeln(' Occurs {} time(s) in a, {} times in b'
.format(*count))
if self.diff_keyword_values or self.diff_keyword_comments:
for keyword in self.common_keywords:
report_diff_keyword_attr(self._fileobj, 'values',
self.diff_keyword_values, keyword,
ind=self._indent)
report_diff_keyword_attr(self._fileobj, 'comments',
self.diff_keyword_comments, keyword,
ind=self._indent)
# TODO: It might be good if there was also a threshold option for percentage of
# different pixels: For example ignore if only 1% of the pixels are different
# within some threshold. There are lots of possibilities here, but hold off
# for now until specific cases come up.
class ImageDataDiff(_BaseDiff):
"""
Diff two image data arrays (really any array from a PRIMARY HDU or an IMAGE
extension HDU, though the data unit is assumed to be "pixels").
`ImageDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: If the two arrays contain either a different number
of dimensions or different sizes in any dimension, this contains a
2-tuple of the shapes of each array. Currently no further comparison is
performed on images that don't have the exact same dimensions.
- ``diff_pixels``: If the two images contain any different pixels, this
contains a list of 2-tuples of the array index where the difference was
found, and another 2-tuple containing the different values. For example,
if the pixel at (0, 0) contains different values this would look like::
        [((0, 0), (1.1, 2.2))]
where 1.1 and 2.2 are the values of that pixel in each array. This
array only contains up to ``self.numdiffs`` differences, for storage
efficiency.
- ``diff_total``: The total number of different pixels found between the
arrays. Although ``diff_pixels`` does not necessarily contain all the
different pixel values, this can be used to get a count of the total
number of differences found.
- ``diff_ratio``: Contains the ratio of ``diff_total`` to the total number
of pixels in the arrays.
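
    For example::

        >>> import numpy as np
        >>> from astropy.io.fits.diff import ImageDataDiff
        >>> ImageDataDiff(np.zeros(3), np.array([0., 1., 0.])).diff_total
        1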
"""
def __init__(self, a, b, numdiffs=10, rtol=0.0, atol=0.0, tolerance=None):
"""
Parameters
----------
        a : ndarray
            An image data array to compare.
        b : ndarray
            An image data array to compare to the first array.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionchanged:: 2.0
``rtol`` replaces the deprecated ``tolerance`` argument.
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
if tolerance is not None: # This should be removed in the next astropy version
warnings.warn(
'"tolerance" was deprecated in version 2.0 and will be removed in '
'a future version. Use argument "rtol" instead.',
AstropyDeprecationWarning)
self.rtol = tolerance # when tolerance is provided *always* ignore `rtol`
# during the transition/deprecation period
self.diff_dimensions = ()
self.diff_pixels = []
self.diff_ratio = 0
        # self.diff_pixels only holds up to numdiffs differing pixel values,
        # while self.diff_total stores the total count of differing pixels
        # between the images, without storing the values themselves
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
if self.a.shape != self.b.shape:
self.diff_dimensions = (self.a.shape, self.b.shape)
# Don't do any further comparison if the dimensions differ
# TODO: Perhaps we could, however, diff just the intersection
# between the two images
return
# Find the indices where the values are not equal
# If neither a nor b are floating point (or complex), ignore rtol and
# atol
if not (np.issubdtype(self.a.dtype, np.inexact) or
np.issubdtype(self.b.dtype, np.inexact)):
rtol = 0
atol = 0
else:
rtol = self.rtol
atol = self.atol
diffs = where_not_allclose(self.a, self.b, atol=atol, rtol=rtol)
self.diff_total = len(diffs[0])
if self.diff_total == 0:
# Then we're done
return
if self.numdiffs < 0:
numdiffs = self.diff_total
else:
numdiffs = self.numdiffs
self.diff_pixels = [(idx, (self.a[idx], self.b[idx]))
for idx in islice(zip(*diffs), 0, numdiffs)]
self.diff_ratio = float(self.diff_total) / float(len(self.a.flat))
def _report(self):
if self.diff_dimensions:
dimsa = ' x '.join(str(d) for d in
reversed(self.diff_dimensions[0]))
dimsb = ' x '.join(str(d) for d in
reversed(self.diff_dimensions[1]))
self._writeln(' Data dimensions differ:')
self._writeln(' a: {}'.format(dimsa))
self._writeln(' b: {}'.format(dimsb))
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(' No further data comparison performed.')
return
if not self.diff_pixels:
return
for index, values in self.diff_pixels:
index = [x + 1 for x in reversed(index)]
self._writeln(' Data differs at {}:'.format(index))
report_diff_values(values[0], values[1], fileobj=self._fileobj,
indent_width=self._indent + 1)
if self.diff_total > self.numdiffs:
self._writeln(' ...')
self._writeln(' {} different pixels found ({:.2%} different).'
.format(self.diff_total, self.diff_ratio))
class RawDataDiff(ImageDataDiff):
"""
`RawDataDiff` is just a special case of `ImageDataDiff` where the images
are one-dimensional, and the data is treated as a 1-dimensional array of
bytes instead of pixel values. This is used to compare the data of two
non-standard extension HDUs that were not recognized as containing image or
table data.
    `RawDataDiff` objects have the following diff attributes:
- ``diff_dimensions``: Same as the ``diff_dimensions`` attribute of
`ImageDataDiff` objects. Though the "dimension" of each array is just an
integer representing the number of bytes in the data.
- ``diff_bytes``: Like the ``diff_pixels`` attribute of `ImageDataDiff`
objects, but renamed to reflect the minor semantic difference that these
are raw bytes and not pixel values. Also the indices are integers
instead of tuples.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
"""
def __init__(self, a, b, numdiffs=10):
"""
Parameters
----------
        a : ndarray
            A 1-dimensional array of raw bytes to compare.
        b : ndarray
            A 1-dimensional array of raw bytes to compare to the first.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
"""
self.diff_dimensions = ()
self.diff_bytes = []
super().__init__(a, b, numdiffs=numdiffs)
def _diff(self):
super()._diff()
if self.diff_dimensions:
self.diff_dimensions = (self.diff_dimensions[0][0],
self.diff_dimensions[1][0])
self.diff_bytes = [(x[0], y) for x, y in self.diff_pixels]
del self.diff_pixels
def _report(self):
if self.diff_dimensions:
self._writeln(' Data sizes differ:')
self._writeln(' a: {} bytes'.format(self.diff_dimensions[0]))
self._writeln(' b: {} bytes'.format(self.diff_dimensions[1]))
# For now we don't do any further comparison if the dimensions
# differ; though in the future it might be nice to be able to
# compare at least where the images intersect
self._writeln(' No further data comparison performed.')
return
if not self.diff_bytes:
return
for index, values in self.diff_bytes:
self._writeln(' Data differs at byte {}:'.format(index))
report_diff_values(values[0], values[1], fileobj=self._fileobj,
indent_width=self._indent + 1)
self._writeln(' ...')
self._writeln(' {} different bytes found ({:.2%} different).'
.format(self.diff_total, self.diff_ratio))
class TableDataDiff(_BaseDiff):
"""
Diff two table data arrays. It doesn't matter whether the data originally
came from a binary or ASCII table--the data should be passed in as a
recarray.
`TableDataDiff` objects have the following diff attributes:
- ``diff_column_count``: If the tables being compared have different
numbers of columns, this contains a 2-tuple of the column count in each
table. Even if the tables have different column counts, an attempt is
still made to compare any columns they have in common.
- ``diff_columns``: If either table contains columns unique to that table,
either in name or format, this contains a 2-tuple of lists. The first
element is a list of columns (these are full `Column` objects) that
    appear only in table a. The second element is a list of columns that
appear only in table b. This only lists columns with different column
definitions, and has nothing to do with the data in those columns.
- ``diff_column_names``: This is like ``diff_columns``, but lists only the
names of columns unique to either table, rather than the full `Column`
objects.
- ``diff_column_attributes``: Lists columns that are in both tables but
have different secondary attributes, such as TUNIT or TDISP. The format
is a list of 2-tuples: The first a tuple of the column name and the
attribute, the second a tuple of the different values.
- ``diff_values``: `TableDataDiff` compares the data in each table on a
column-by-column basis. If any different data is found, it is added to
this list. The format of this list is similar to the ``diff_pixels``
attribute on `ImageDataDiff` objects, though the "index" consists of a
(column_name, row) tuple. For example::
        [(('TARGET', 0), ('NGC1001', 'NGC1002'))]
shows that the tables contain different values in the 0-th row of the
'TARGET' column.
- ``diff_total`` and ``diff_ratio``: Same as `ImageDataDiff`.
`TableDataDiff` objects also have a ``common_columns`` attribute that lists
the `Column` objects for columns that are identical in both tables, and a
``common_column_names`` attribute which contains a set of the names of
those columns.
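
    For example (a minimal sketch comparing two single-column tables)::

        >>> from astropy.io import fits
        >>> from astropy.io.fits.diff import TableDataDiff
        >>> ca = fits.Column(name='A', format='J', array=[1, 2])
        >>> cb = fits.Column(name='A', format='J', array=[1, 3])
        >>> ta = fits.BinTableHDU.from_columns([ca]).data
        >>> tb = fits.BinTableHDU.from_columns([cb]).data
        >>> TableDataDiff(ta, tb).identical
        False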
"""
def __init__(self, a, b, ignore_fields=[], numdiffs=10, rtol=0.0, atol=0.0,
tolerance=None):
"""
Parameters
----------
        a : `FITS_rec` or recarray
            A table data array to compare.
        b : `FITS_rec` or recarray
            A table data array to compare to the first.
ignore_fields : sequence, optional
The (case-insensitive) names of any table columns to ignore if any
table data is to be compared.
numdiffs : int, optional
The number of pixel/table values to output when reporting HDU data
differences. Though the count of differences is the same either
way, this allows controlling the number of different values that
are kept in memory or output. If a negative value is given, then
numdiffs is treated as unlimited (default: 10).
rtol : float, optional
The relative difference to allow when comparing two float values
either in header values, image arrays, or table columns
(default: 0.0). Values which satisfy the expression
.. math::
\\left| a - b \\right| > \\text{atol} + \\text{rtol} \\cdot \\left| b \\right|
are considered to be different.
The underlying function used for comparison is `numpy.allclose`.
.. versionchanged:: 2.0
``rtol`` replaces the deprecated ``tolerance`` argument.
atol : float, optional
The allowed absolute difference. See also ``rtol`` parameter.
.. versionadded:: 2.0
"""
self.ignore_fields = set(ignore_fields)
self.numdiffs = numdiffs
self.rtol = rtol
self.atol = atol
if tolerance is not None: # This should be removed in the next astropy version
warnings.warn(
'"tolerance" was deprecated in version 2.0 and will be removed in '
'a future version. Use argument "rtol" instead.',
AstropyDeprecationWarning)
self.rtol = tolerance # when tolerance is provided *always* ignore `rtol`
# during the transition/deprecation period
self.common_columns = []
self.common_column_names = set()
# self.diff_columns contains columns with different column definitions,
# but not different column data. Column data is only compared in
# columns that have the same definitions
self.diff_rows = ()
self.diff_column_count = ()
self.diff_columns = ()
# If two columns have the same name+format, but other attributes are
# different (such as TUNIT or such) they are listed here
self.diff_column_attributes = []
# Like self.diff_columns, but just contains a list of the column names
# unique to each table, and in the order they appear in the tables
self.diff_column_names = ()
self.diff_values = []
self.diff_ratio = 0
self.diff_total = 0
super().__init__(a, b)
def _diff(self):
# Much of the code for comparing columns is similar to the code for
# comparing headers--consider refactoring
colsa = self.a.columns
colsb = self.b.columns
if len(colsa) != len(colsb):
self.diff_column_count = (len(colsa), len(colsb))
        # Even if the numbers of columns are unequal, we still compare any
        # common columns
colsa = {c.name.lower(): c for c in colsa}
colsb = {c.name.lower(): c for c in colsb}
if '*' in self.ignore_fields:
# If all columns are to be ignored, ignore any further differences
# between the columns
return
# Keep the user's original ignore_fields list for reporting purposes,
# but internally use a case-insensitive version
ignore_fields = {f.lower() for f in self.ignore_fields}
# It might be nice if there were a cleaner way to do this, but for now
# it'll do
        for fieldname in ignore_fields:
if fieldname in colsa:
del colsa[fieldname]
if fieldname in colsb:
del colsb[fieldname]
colsa_set = set(colsa.values())
colsb_set = set(colsb.values())
self.common_columns = sorted(colsa_set.intersection(colsb_set),
key=operator.attrgetter('name'))
self.common_column_names = {col.name.lower()
for col in self.common_columns}
left_only_columns = {col.name.lower(): col
for col in colsa_set.difference(colsb_set)}
right_only_columns = {col.name.lower(): col
for col in colsb_set.difference(colsa_set)}
if left_only_columns or right_only_columns:
self.diff_columns = (left_only_columns, right_only_columns)
self.diff_column_names = ([], [])
if left_only_columns:
for col in self.a.columns:
if col.name.lower() in left_only_columns:
self.diff_column_names[0].append(col.name)
if right_only_columns:
for col in self.b.columns:
if col.name.lower() in right_only_columns:
self.diff_column_names[1].append(col.name)
# If the tables have a different number of rows, we don't compare the
# columns right now.
# TODO: It might be nice to optionally compare the first n rows where n
# is the minimum of the row counts between the two tables.
if len(self.a) != len(self.b):
self.diff_rows = (len(self.a), len(self.b))
return
# If the tables contain no rows there's no data to compare, so we're
# done at this point. (See ticket #178)
if len(self.a) == len(self.b) == 0:
return
# Like in the old fitsdiff, compare tables on a column by column basis
# The difficulty here is that, while FITS column names are meant to be
# case-insensitive, Astropy still allows, for the sake of flexibility,
# two columns with the same name but different case. When columns are
        # accessed in FITS tables, a case-sensitive match is tried first, and
        # failing that a case-insensitive match is made.
# It's conceivable that the same column could appear in both tables
# being compared, but with different case.
# Though it *may* lead to inconsistencies in these rare cases, this
# just assumes that there are no duplicated column names in either
# table, and that the column names can be treated case-insensitively.
for col in self.common_columns:
name_lower = col.name.lower()
if name_lower in ignore_fields:
continue
cola = colsa[name_lower]
colb = colsb[name_lower]
for attr, _ in _COL_ATTRS:
vala = getattr(cola, attr, None)
valb = getattr(colb, attr, None)
if diff_values(vala, valb):
self.diff_column_attributes.append(
((col.name.upper(), attr), (vala, valb)))
arra = self.a[col.name]
arrb = self.b[col.name]
if (np.issubdtype(arra.dtype, np.floating) and
np.issubdtype(arrb.dtype, np.floating)):
diffs = where_not_allclose(arra, arrb,
rtol=self.rtol,
atol=self.atol)
elif 'P' in col.format:
diffs = ([idx for idx in range(len(arra))
if not np.allclose(arra[idx], arrb[idx],
rtol=self.rtol,
atol=self.atol)],)
else:
diffs = np.where(arra != arrb)
self.diff_total += len(set(diffs[0]))
if self.numdiffs >= 0:
if len(self.diff_values) >= self.numdiffs:
# Don't save any more diff values
continue
# Add no more diff'd values than this
max_diffs = self.numdiffs - len(self.diff_values)
else:
max_diffs = len(diffs[0])
last_seen_idx = None
for idx in islice(diffs[0], 0, max_diffs):
if idx == last_seen_idx:
                    # Skip duplicate indices, which may occur when the column
# data contains multi-dimensional values; we're only
# interested in storing row-by-row differences
continue
last_seen_idx = idx
self.diff_values.append(((col.name, idx),
(arra[idx], arrb[idx])))
total_values = len(self.a) * len(self.a.dtype.fields)
self.diff_ratio = float(self.diff_total) / float(total_values)
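    # Illustrative note (not from the original source): per the criterion in
    # the __init__ docstring, two float values a and b are flagged as
    # different when |a - b| > atol + rtol * |b|, matching `numpy.allclose`
    # semantics.  For example, with rtol=1e-2 and atol=0:
    # >>> import numpy as np
    # >>> np.allclose(1.0, 1.001, rtol=1e-2, atol=0)
    # True
    # >>> np.allclose(1.0, 1.05, rtol=1e-2, atol=0)
    # False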
def _report(self):
if self.diff_column_count:
self._writeln(' Tables have different number of columns:')
self._writeln(' a: {}'.format(self.diff_column_count[0]))
self._writeln(' b: {}'.format(self.diff_column_count[1]))
if self.diff_column_names:
# Show columns with names unique to either table
for name in self.diff_column_names[0]:
format = self.diff_columns[0][name.lower()].format
self._writeln(' Extra column {} of format {} in a'.format(
name, format))
for name in self.diff_column_names[1]:
format = self.diff_columns[1][name.lower()].format
self._writeln(' Extra column {} of format {} in b'.format(
name, format))
col_attrs = dict(_COL_ATTRS)
# Now go through each table again and show columns with common
# names but other property differences...
for col_attr, vals in self.diff_column_attributes:
name, attr = col_attr
self._writeln(' Column {} has different {}:'.format(
name, col_attrs[attr]))
report_diff_values(vals[0], vals[1], fileobj=self._fileobj,
indent_width=self._indent + 1)
if self.diff_rows:
self._writeln(' Table rows differ:')
self._writeln(' a: {}'.format(self.diff_rows[0]))
self._writeln(' b: {}'.format(self.diff_rows[1]))
self._writeln(' No further data comparison performed.')
return
if not self.diff_values:
return
# Finally, let's go through and report column data differences:
for indx, values in self.diff_values:
self._writeln(' Column {} data differs in row {}:'.format(*indx))
report_diff_values(values[0], values[1], fileobj=self._fileobj,
indent_width=self._indent + 1)
if self.diff_values and self.numdiffs < self.diff_total:
self._writeln(' ...{} additional difference(s) found.'.format(
(self.diff_total - self.numdiffs)))
if self.diff_total > self.numdiffs:
self._writeln(' ...')
self._writeln(' {} different table data element(s) found '
'({:.2%} different).'
.format(self.diff_total, self.diff_ratio))
def report_diff_keyword_attr(fileobj, attr, diffs, keyword, ind=0):
"""
Write a diff between two header keyword values or comments to the specified
file-like object.
"""
if keyword in diffs:
vals = diffs[keyword]
for idx, val in enumerate(vals):
if val is None:
continue
if idx == 0:
dup = ''
else:
dup = '[{}]'.format(idx + 1)
fileobj.write(
fixed_width_indent(' Keyword {:8}{} has different {}:\n'
.format(keyword, dup, attr), ind))
report_diff_values(val[0], val[1], fileobj=fileobj,
indent_width=ind + 1)
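# A minimal usage sketch (not from the original source; ``hdua`` and ``hdub``
# are hypothetical table HDUs that have already been opened):
# >>> diff = TableDataDiff(hdua.data, hdub.data, rtol=1e-5, numdiffs=5)
# >>> if not diff.identical:
# ...     print(diff.report())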
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import copy
import operator
import re
import sys
import warnings
import weakref
import numbers
from functools import reduce
from collections import OrderedDict
from contextlib import suppress
import numpy as np
from numpy import char as chararray
from .card import Card, CARD_LENGTH
from .util import (pairwise, _is_int, _convert_array, encode_ascii, cmp,
NotifierMixin)
from .verify import VerifyError, VerifyWarning
from ...utils import lazyproperty, isiterable, indent
from ...utils.exceptions import AstropyUserWarning
__all__ = ['Column', 'ColDefs', 'Delayed']
# mapping from TFORM data type to numpy data type (code)
# L: Logical (Boolean)
# B: Unsigned Byte
# I: 16-bit Integer
# J: 32-bit Integer
# K: 64-bit Integer
# E: Single-precision Floating Point
# D: Double-precision Floating Point
# C: Single-precision Complex
# M: Double-precision Complex
# A: Character
FITS2NUMPY = {'L': 'i1', 'B': 'u1', 'I': 'i2', 'J': 'i4', 'K': 'i8', 'E': 'f4',
'D': 'f8', 'C': 'c8', 'M': 'c16', 'A': 'a'}
# the inverse dictionary of the above
NUMPY2FITS = {val: key for key, val in FITS2NUMPY.items()}
# Normally booleans are represented as ints in Astropy, but if passed in a numpy
# boolean array, that should be supported
NUMPY2FITS['b1'] = 'L'
# Add unsigned types, which will be stored as signed ints with a TZERO card.
NUMPY2FITS['u2'] = 'I'
NUMPY2FITS['u4'] = 'J'
NUMPY2FITS['u8'] = 'K'
# Add half precision floating point numbers which will be up-converted to
# single precision.
NUMPY2FITS['f2'] = 'E'
# This is the order in which values are converted to FITS types
# Note that only double precision floating point/complex are supported
FORMATORDER = ['L', 'B', 'I', 'J', 'K', 'D', 'M', 'A']
# Convert single precision floating point/complex to double precision.
FITSUPCONVERTERS = {'E': 'D', 'C': 'M'}
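# For example, the tables above translate in both directions (illustrative):
# >>> FITS2NUMPY['D'], NUMPY2FITS['f8']
# ('f8', 'D')
# >>> NUMPY2FITS['u2']  # pseudo-unsigned; stored as 'I' plus a TZERO offset
# 'I'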
# mapping from ASCII table TFORM data type to numpy data type
# A: Character
# I: Integer (32-bit)
# J: Integer (64-bit; non-standard)
# F: Float (64-bit; fixed decimal notation)
# E: Float (64-bit; exponential notation)
# D: Float (64-bit; exponential notation, always 64-bit by convention)
ASCII2NUMPY = {'A': 'a', 'I': 'i4', 'J': 'i8', 'F': 'f8', 'E': 'f8', 'D': 'f8'}
# Maps FITS ASCII column format codes to the appropriate Python string
# formatting codes for that type.
ASCII2STR = {'A': '', 'I': 'd', 'J': 'd', 'F': 'f', 'E': 'E', 'D': 'E'}
# For each ASCII table format code, provides a default width (and decimal
# precision) for when one isn't given explicitly in the column format
ASCII_DEFAULT_WIDTHS = {'A': (1, 0), 'I': (10, 0), 'J': (15, 0),
'E': (15, 7), 'F': (16, 7), 'D': (25, 17)}
# TDISPn for both ASCII and Binary tables
TDISP_RE_DICT = {}
TDISP_RE_DICT['F'] = re.compile(r'(?:(?P<formatc>[F])(?:(?P<width>[0-9]+)\.{1}'
r'(?P<precision>[0-9])+)+)|')
TDISP_RE_DICT['A'] = TDISP_RE_DICT['L'] = \
re.compile(r'(?:(?P<formatc>[AL])(?P<width>[0-9]+)+)|')
TDISP_RE_DICT['I'] = TDISP_RE_DICT['B'] = \
TDISP_RE_DICT['O'] = TDISP_RE_DICT['Z'] = \
re.compile(r'(?:(?P<formatc>[IBOZ])(?:(?P<width>[0-9]+)'
r'(?:\.{0,1}(?P<precision>[0-9]+))?))|')
TDISP_RE_DICT['E'] = TDISP_RE_DICT['G'] = \
TDISP_RE_DICT['D'] = \
re.compile(r'(?:(?P<formatc>[EGD])(?:(?P<width>[0-9]+)\.'
r'(?P<precision>[0-9]+))+)'
r'(?:E{0,1}(?P<exponential>[0-9]+)?)|')
TDISP_RE_DICT['EN'] = TDISP_RE_DICT['ES'] = \
re.compile(r'(?:(?P<formatc>E[NS])(?:(?P<width>[0-9]+)\.{1}'
r'(?P<precision>[0-9])+)+)')
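# For example, parsing a fixed-point TDISPn value with the expressions above
# (illustrative):
# >>> TDISP_RE_DICT['F'].match('F8.3').groupdict()
# {'formatc': 'F', 'width': '8', 'precision': '3'}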
# mapping from TDISP format to python format
# A: Character
# L: Logical (Boolean)
# I: 16-bit Integer
# Can't predefine zero padding and space padding beforehand without
# knowing the value being formatted, so grabbing precision and using that
# to zero pad, ignoring width. Same with B, O, and Z
# B: Binary Integer
# O: Octal Integer
# Z: Hexadecimal Integer
# F: Float (64-bit; fixed decimal notation)
# EN: Float (engineering format; the exponent is a multiple of three)
# ES: Float (scientific format; like EN but with one non-zero leading digit)
# E: Float, exponential notation
# Can't get the exponential restriction to work without knowing the value
# beforehand, so just using width and precision; same with D, G, EN, and
# ES formats
# D: Double-precision Floating Point with exponential
# (E but for double precision)
# G: Double-precision Floating Point, may or may not show exponent
TDISP_FMT_DICT = {'I' : '{{:{width}d}}',
'B' : '{{:{width}b}}',
'O' : '{{:{width}o}}',
'Z' : '{{:{width}x}}',
'F' : '{{:{width}.{precision}f}}',
'G' : '{{:{width}.{precision}g}}'}
TDISP_FMT_DICT['A'] = TDISP_FMT_DICT['L'] = '{{:>{width}}}'
TDISP_FMT_DICT['E'] = TDISP_FMT_DICT['D'] = \
    TDISP_FMT_DICT['EN'] = TDISP_FMT_DICT['ES'] = '{{:{width}.{precision}e}}'
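# For example, building and applying a Python format string from a parsed
# TDISPn width and precision (illustrative):
# >>> TDISP_FMT_DICT['F'].format(width=8, precision=3)
# '{:8.3f}'
# >>> '{:8.3f}'.format(12.3456)
# '  12.346'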
# tuple of column/field definition common names and keyword names, make
# sure to preserve the one-to-one correspondence when updating the list(s).
# Use lists, instead of dictionaries so the names can be displayed in a
# preferred order.
KEYWORD_NAMES = ('TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO',
'TDISP', 'TBCOL', 'TDIM', 'TCTYP', 'TCUNI', 'TCRPX',
'TCRVL', 'TCDLT', 'TRPOS')
KEYWORD_ATTRIBUTES = ('name', 'format', 'unit', 'null', 'bscale', 'bzero',
'disp', 'start', 'dim', 'coord_type', 'coord_unit',
'coord_ref_point', 'coord_ref_value', 'coord_inc',
'time_ref_pos')
"""This is a list of the attributes that can be set on `Column` objects."""
KEYWORD_TO_ATTRIBUTE = OrderedDict(zip(KEYWORD_NAMES, KEYWORD_ATTRIBUTES))
ATTRIBUTE_TO_KEYWORD = OrderedDict(zip(KEYWORD_ATTRIBUTES, KEYWORD_NAMES))
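# For example, the two mappings translate between header keywords and Column
# attribute names (illustrative):
# >>> KEYWORD_TO_ATTRIBUTE['TTYPE']
# 'name'
# >>> ATTRIBUTE_TO_KEYWORD['format']
# 'TFORM'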
# TODO: Define a list of default comments to associate with each table keyword
# TFORMn regular expression
TFORMAT_RE = re.compile(r'(?P<repeat>^[0-9]*)(?P<format>[LXBIJKAEDCMPQ])'
r'(?P<option>[!-~]*)', re.I)
# TFORMn for ASCII tables; two different versions depending on whether
# the format is floating-point or not; allows empty values for width
# in which case defaults are used
TFORMAT_ASCII_RE = re.compile(r'(?:(?P<format>[AIJ])(?P<width>[0-9]+)?)|'
r'(?:(?P<formatf>[FED])'
r'(?:(?P<widthf>[0-9]+)\.'
r'(?P<precision>[0-9]+))?)')
TTYPE_RE = re.compile(r'[0-9a-zA-Z_]+')
"""
Regular expression for valid table column names. See FITS Standard v3.0 section
7.2.2.
"""
# table definition keyword regular expression
TDEF_RE = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)')
# table dimension keyword regular expression (fairly flexible with whitespace)
TDIM_RE = re.compile(r'\(\s*(?P<dims>(?:\d+,\s*)+\s*\d+)\s*\)\s*')
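# For example, parsing a binary table TFORMn value and a TDIMn value with the
# expressions above (illustrative):
# >>> TFORMAT_RE.match('10E').group('repeat', 'format', 'option')
# ('10', 'E', '')
# >>> TDIM_RE.match('(100, 2)').group('dims')
# '100, 2'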
# value for ASCII table cell with value = TNULL
# this can be reset by user.
ASCIITNULL = 0
# The default placeholder to use for NULL values in ASCII tables when
# converting from binary to ASCII tables
DEFAULT_ASCII_TNULL = '---'
class Delayed:
"""Delayed file-reading data."""
def __init__(self, hdu=None, field=None):
self.hdu = weakref.proxy(hdu)
self.field = field
def __getitem__(self, key):
# This forces the data for the HDU to be read, which will replace
# the corresponding Delayed objects in the Tables Columns to be
# transformed into ndarrays. It will also return the value of the
# requested data element.
return self.hdu.data[key][self.field]
class _BaseColumnFormat(str):
"""
Base class for binary table column formats (just called _ColumnFormat)
and ASCII table column formats (_AsciiColumnFormat).
"""
def __eq__(self, other):
if not other:
return False
if isinstance(other, str):
if not isinstance(other, self.__class__):
try:
other = self.__class__(other)
except ValueError:
return False
else:
return False
return self.canonical == other.canonical
def __hash__(self):
return hash(self.canonical)
@lazyproperty
def dtype(self):
"""
The Numpy dtype object created from the format's associated recformat.
"""
return np.dtype(self.recformat)
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
regardless of their type.
That is, this can convert a _ColumnFormat to an _AsciiColumnFormat
or vice versa at least in cases where a direct translation is possible.
"""
return cls.from_recformat(format.recformat)
class _ColumnFormat(_BaseColumnFormat):
"""
Represents a FITS binary table column format.
This is an enhancement over using a normal string for the format, since the
repeat count, format code, and option are available as separate attributes,
and smart comparison is used. For example 1J == J.
"""
def __new__(cls, format):
self = super().__new__(cls, format)
self.repeat, self.format, self.option = _parse_tformat(format)
self.format = self.format.upper()
if self.format in ('P', 'Q'):
# TODO: There should be a generic factory that returns either
# _FormatP or _FormatQ as appropriate for a given TFORMn
if self.format == 'P':
recformat = _FormatP.from_tform(format)
else:
recformat = _FormatQ.from_tform(format)
# Format of variable length arrays
self.p_format = recformat.format
else:
self.p_format = None
return self
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of rTa where T is the single character data
type code, a is the optional part, and r is the repeat. If repeat == 1
(the default) it is left out of this representation.
"""
if self.repeat == 1:
repeat = ''
else:
repeat = str(self.repeat)
return '{}{}{}'.format(repeat, self.format, self.option)
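# For example, a repeat count of 1 is implied, so '1J' and 'J' compare equal
# and share a canonical form (illustrative):
# >>> _ColumnFormat('1J') == _ColumnFormat('J')
# True
# >>> _ColumnFormat('1J').canonical
# 'J'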
class _AsciiColumnFormat(_BaseColumnFormat):
"""Similar to _ColumnFormat but specifically for columns in ASCII tables.
The formats of ASCII table columns and binary table columns are inherently
incompatible in FITS. They don't support the same ranges and types of
values, and even reuse format codes in subtly different ways. For example
the format code 'Iw' in ASCII columns refers to any integer whose string
representation is at most w characters wide, so 'I' can represent
    effectively any integer that will fit in a FITS column. Whereas for
binary tables 'I' very explicitly refers to a 16-bit signed integer.
Conversions between the two column formats can be performed using the
``to/from_binary`` methods on this class, or the ``to/from_ascii``
methods on the `_ColumnFormat` class. But again, not all conversions are
possible and may result in a `ValueError`.
"""
def __new__(cls, format, strict=False):
self = super().__new__(cls, format)
self.format, self.width, self.precision = \
_parse_ascii_tformat(format, strict)
# This is to support handling logical (boolean) data from binary tables
# in an ASCII table
self._pseudo_logical = False
return self
@classmethod
def from_column_format(cls, format):
inst = cls.from_recformat(format.recformat)
# Hack
if format.format == 'L':
inst._pseudo_logical = True
return inst
@classmethod
def from_recformat(cls, recformat):
"""Creates a column format from a Numpy record dtype format."""
return cls(_convert_ascii_format(recformat, reverse=True))
@lazyproperty
def recformat(self):
"""Returns the equivalent Numpy record format string."""
return _convert_ascii_format(self)
@lazyproperty
def canonical(self):
"""
Returns a 'canonical' string representation of this format.
This is in the proper form of Tw.d where T is the single character data
type code, w is the width in characters for this field, and d is the
number of digits after the decimal place (for format codes 'E', 'F',
and 'D' only).
"""
if self.format in ('E', 'F', 'D'):
return '{}{}.{}'.format(self.format, self.width, self.precision)
return '{}{}'.format(self.format, self.width)
class _FormatX(str):
"""For X format in binary tables."""
def __new__(cls, repeat=1):
nbytes = ((repeat - 1) // 8) + 1
# use an array, even if it is only ONE u1 (i.e. use tuple always)
obj = super().__new__(cls, repr((nbytes,)) + 'u1')
obj.repeat = repeat
return obj
def __getnewargs__(self):
return (self.repeat,)
@property
def tform(self):
return '{}X'.format(self.repeat)
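# For example, a 16-element bit array packs into two unsigned bytes
# (illustrative):
# >>> x = _FormatX(16)
# >>> str(x), x.tform
# ('(2,)u1', '16X')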
# TODO: Table column formats need to be verified upon first reading the file;
# as it is, an invalid P format will raise a VerifyError from some deep,
# unexpected place
class _FormatP(str):
"""For P format in variable length table."""
# As far as I can tell from my reading of the FITS standard, a type code is
# *required* for P and Q formats; there is no default
_format_re_template = (r'(?P<repeat>\d+)?{}(?P<dtype>[LXBIJKAEDCM])'
r'(?:\((?P<max>\d*)\))?')
_format_code = 'P'
_format_re = re.compile(_format_re_template.format(_format_code))
_descriptor_format = '2i4'
def __new__(cls, dtype, repeat=None, max=None):
obj = super().__new__(cls, cls._descriptor_format)
obj.format = NUMPY2FITS[dtype]
obj.dtype = dtype
obj.repeat = repeat
obj.max = max
return obj
def __getnewargs__(self):
return (self.dtype, self.repeat, self.max)
@classmethod
def from_tform(cls, format):
m = cls._format_re.match(format)
if not m or m.group('dtype') not in FITS2NUMPY:
raise VerifyError('Invalid column format: {}'.format(format))
repeat = m.group('repeat')
array_dtype = m.group('dtype')
max = m.group('max')
if not max:
max = None
return cls(FITS2NUMPY[array_dtype], repeat=repeat, max=max)
@property
def tform(self):
repeat = '' if self.repeat is None else self.repeat
max = '' if self.max is None else self.max
return '{}{}{}({})'.format(repeat, self._format_code, self.format, max)
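# For example, parsing a variable-length 32-bit integer column format
# (illustrative):
# >>> p = _FormatP.from_tform('PJ(100)')
# >>> p.dtype, p.max, p.tform
# ('i4', '100', 'PJ(100)')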
class _FormatQ(_FormatP):
"""Carries type description of the Q format for variable length arrays.
The Q format is like the P format but uses 64-bit integers in the array
descriptors, allowing for heaps stored beyond 2GB into a file.
"""
_format_code = 'Q'
_format_re = re.compile(_FormatP._format_re_template.format(_format_code))
_descriptor_format = '2i8'
class ColumnAttribute:
"""
Descriptor for attributes of `Column` that are associated with keywords
in the FITS header and describe properties of the column as specified in
the FITS standard.
Each `ColumnAttribute` may have a ``validator`` method defined on it.
This validates values set on this attribute to ensure that they meet the
FITS standard. Invalid values will raise a warning and will not be used in
formatting the column. The validator should take two arguments--the
`Column` it is being assigned to, and the new value for the attribute, and
it must raise an `AssertionError` if the value is invalid.
The `ColumnAttribute` itself is a decorator that can be used to define the
``validator`` for each column attribute. For example::
@ColumnAttribute('TTYPE')
def name(col, name):
if not isinstance(name, str):
raise AssertionError
The actual object returned by this decorator is the `ColumnAttribute`
instance though, not the ``name`` function. As such ``name`` is not a
method of the class it is defined in.
The setter for `ColumnAttribute` also updates the header of any table
HDU this column is attached to in order to reflect the change. The
``validator`` should ensure that the value is valid for inclusion in a FITS
header.
"""
def __init__(self, keyword):
self._keyword = keyword
self._validator = None
# The name of the attribute associated with this keyword is currently
# determined from the KEYWORD_NAMES/ATTRIBUTES lists. This could be
        # made more flexible in the future, for example, to support custom
# column attributes.
self._attr = '_' + KEYWORD_TO_ATTRIBUTE[self._keyword]
def __get__(self, obj, objtype=None):
if obj is None:
return self
else:
return getattr(obj, self._attr)
def __set__(self, obj, value):
if self._validator is not None:
self._validator(obj, value)
old_value = getattr(obj, self._attr, None)
setattr(obj, self._attr, value)
obj._notify('column_attribute_changed', obj, self._attr[1:], old_value,
value)
def __call__(self, func):
"""
Set the validator for this column attribute.
Returns ``self`` so that this can be used as a decorator, as described
in the docs for this class.
"""
self._validator = func
return self
def __repr__(self):
return "{0}('{1}')".format(self.__class__.__name__, self._keyword)
class Column(NotifierMixin):
"""
Class which contains the definition of one column, e.g. ``ttype``,
``tform``, etc. and the array containing values for the column.
"""
def __init__(self, name=None, format=None, unit=None, null=None,
bscale=None, bzero=None, disp=None, start=None, dim=None,
array=None, ascii=None, coord_type=None, coord_unit=None,
coord_ref_point=None, coord_ref_value=None, coord_inc=None,
time_ref_pos=None):
"""
Construct a `Column` by specifying attributes. All attributes
        except ``format`` are optional; see :ref:`column_creation` and
:ref:`creating_ascii_table` for more information regarding
``TFORM`` keyword.
Parameters
----------
name : str, optional
column name, corresponding to ``TTYPE`` keyword
format : str
column format, corresponding to ``TFORM`` keyword
unit : str, optional
column unit, corresponding to ``TUNIT`` keyword
null : str, optional
null value, corresponding to ``TNULL`` keyword
bscale : int-like, optional
bscale value, corresponding to ``TSCAL`` keyword
bzero : int-like, optional
bzero value, corresponding to ``TZERO`` keyword
disp : str, optional
display format, corresponding to ``TDISP`` keyword
start : int, optional
column starting position (ASCII table only), corresponding
to ``TBCOL`` keyword
dim : str, optional
column dimension corresponding to ``TDIM`` keyword
array : iterable, optional
a `list`, `numpy.ndarray` (or other iterable that can be used to
initialize an ndarray) providing initial data for this column.
The array will be automatically converted, if possible, to the data
            format of the column. In the case where non-trivial ``bscale``
and/or ``bzero`` arguments are given, the values in the array must
            be the *physical* values--that is, the values of the column as if the
scaling has already been applied (the array stored on the column
object will then be converted back to its storage values).
ascii : bool, optional
set `True` if this describes a column for an ASCII table; this
may be required to disambiguate the column format
coord_type : str, optional
coordinate/axis type corresponding to ``TCTYP`` keyword
coord_unit : str, optional
coordinate/axis unit corresponding to ``TCUNI`` keyword
coord_ref_point : int-like, optional
pixel coordinate of the reference point corresponding to ``TCRPX``
keyword
coord_ref_value : int-like, optional
coordinate value at reference point corresponding to ``TCRVL``
keyword
coord_inc : int-like, optional
coordinate increment at reference point corresponding to ``TCDLT``
keyword
time_ref_pos : str, optional
reference position for a time coordinate column corresponding to
``TRPOS`` keyword
"""
if format is None:
raise ValueError('Must specify format to construct Column.')
        # any of the input arguments (except array) can be a Card or just
# a number/string
kwargs = {'ascii': ascii}
for attr in KEYWORD_ATTRIBUTES:
value = locals()[attr] # get the argument's value
if isinstance(value, Card):
value = value.value
kwargs[attr] = value
valid_kwargs, invalid_kwargs = self._verify_keywords(**kwargs)
if invalid_kwargs:
msg = ['The following keyword arguments to Column were invalid:']
for val in invalid_kwargs.values():
msg.append(indent(val[1]))
raise VerifyError('\n'.join(msg))
for attr in KEYWORD_ATTRIBUTES:
setattr(self, attr, valid_kwargs.get(attr))
# TODO: Try to eliminate the following two special cases
# for recformat and dim:
# This is not actually stored as an attribute on columns for some
# reason
recformat = valid_kwargs['recformat']
# The 'dim' keyword's original value is stored in self.dim, while
# *only* the tuple form is stored in self._dims.
self._dims = self.dim
self.dim = dim
# Awful hack to use for now to keep track of whether the column holds
# pseudo-unsigned int data
self._pseudo_unsigned_ints = False
        # If the column data is not an ndarray, make it one, i.e. input
        # arrays can be just a list or tuple, not required to be ndarray.
        # This does not include object arrays because there is no guarantee
        # that the elements in an object array are consistent.
if not isinstance(array,
(np.ndarray, chararray.chararray, Delayed)):
try: # try to convert to a ndarray first
if array is not None:
array = np.array(array)
except Exception:
try: # then try to convert it to a strings array
itemsize = int(recformat[1:])
array = chararray.array(array, itemsize=itemsize)
except ValueError:
# then try variable length array
# Note: This includes _FormatQ by inheritance
if isinstance(recformat, _FormatP):
array = _VLF(array, dtype=recformat.dtype)
else:
raise ValueError('Data is inconsistent with the '
'format `{}`.'.format(format))
array = self._convert_to_valid_data_type(array)
# We have required (through documentation) that arrays passed in to
# this constructor are already in their physical values, so we make
# note of that here
if isinstance(array, np.ndarray):
self._physical_values = True
else:
self._physical_values = False
self._parent_fits_rec = None
self.array = array
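    # A minimal construction sketch (not from the original source; the column
    # name and data here are hypothetical):
    # >>> col = Column(name='flux', format='E', unit='Jy',
    # ...              array=np.array([1.0, 2.5], dtype=np.float32))
    # >>> col.name, str(col.format)
    # ('flux', 'E')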
def __repr__(self):
text = ''
for attr in KEYWORD_ATTRIBUTES:
value = getattr(self, attr)
if value is not None:
text += attr + ' = ' + repr(value) + '; '
return text[:-2]
def __eq__(self, other):
"""
Two columns are equal if their name and format are the same. Other
attributes aren't taken into account at this time.
"""
# According to the FITS standard column names must be case-insensitive
a = (self.name.lower(), self.format)
b = (other.name.lower(), other.format)
return a == b
def __hash__(self):
"""
Like __eq__, the hash of a column should be based on the unique column
name and format, and be case-insensitive with respect to the column
name.
"""
return hash((self.name.lower(), self.format))
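    # For example, equality and hashing ignore the case of the name and the
    # implied repeat count of the format (illustrative):
    # >>> Column(name='FLUX', format='E') == Column(name='flux', format='1E')
    # True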
@property
def array(self):
"""
The Numpy `~numpy.ndarray` associated with this `Column`.
If the column was instantiated with an array passed to the ``array``
argument, this will return that array. However, if the column is
later added to a table, such as via `BinTableHDU.from_columns` as
is typically the case, this attribute will be updated to reference
the associated field in the table, which may no longer be the same
array.
"""
# Ideally the .array attribute never would have existed in the first
# place, or would have been internal-only. This is a legacy of the
# older design from Astropy that needs to have continued support, for
# now.
# One of the main problems with this design was that it created a
# reference cycle. When the .array attribute was updated after
# creating a FITS_rec from the column (as explained in the docstring) a
# reference cycle was created. This is because the code in BinTableHDU
# (and a few other places) does essentially the following:
#
# data._coldefs = columns # The ColDefs object holding this Column
# for col in columns:
# col.array = data.field(col.name)
#
# This way each columns .array attribute now points to the field in the
# table data. It's actually a pretty confusing interface (since it
# replaces the array originally pointed to by .array), but it's the way
# things have been for a long, long time.
#
# However, this results, in *many* cases, in a reference cycle.
# Because the array returned by data.field(col.name), while sometimes
# an array that owns its own data, is usually like a slice of the
# original data. It has the original FITS_rec as the array .base.
# This results in the following reference cycle (for the n-th column):
#
# data -> data._coldefs -> data._coldefs[n] ->
# data._coldefs[n].array -> data._coldefs[n].array.base -> data
#
        # Because ndarray objects are not handled by Python's garbage collector
# the reference cycle cannot be broken. Therefore the FITS_rec's
# refcount never goes to zero, its __del__ is never called, and its
# memory is never freed. This didn't occur in *all* cases, but it did
# occur in many cases.
#
# To get around this, Column.array is no longer a simple attribute
# like it was previously. Now each Column has a ._parent_fits_rec
# attribute which is a weakref to a FITS_rec object. Code that
# previously assigned each col.array to field in a FITS_rec (as in
        # the example a few paragraphs above) is still used; however, now the
        # array setter checks if a reference cycle will be created. And if
        # so, instead of saving directly to the Column's __dict__, it creates
        # the ._parent_fits_rec weakref, and all lookups of the column's .array
# go through that instead.
#
# This alone does not fully solve the problem. Because
# _parent_fits_rec is a weakref, if the user ever holds a reference to
# the Column, but deletes all references to the underlying FITS_rec,
# the .array attribute would suddenly start returning None instead of
# the array data. This problem is resolved on FITS_rec's end. See the
# note in the FITS_rec._coldefs property for the rest of the story.
        # If the Column's array is not a reference to an existing FITS_rec,
        # then it is just stored in self.__dict__; otherwise check the
        # _parent_fits_rec reference if it's still available.
if 'array' in self.__dict__:
return self.__dict__['array']
elif self._parent_fits_rec is not None:
parent = self._parent_fits_rec()
if parent is not None:
return parent[self.name]
else:
return None
@array.setter
def array(self, array):
# The following looks over the bases of the given array to check if it
# has a ._coldefs attribute (i.e. is a FITS_rec) and that that _coldefs
# contains this Column itself, and would create a reference cycle if we
# stored the array directly in self.__dict__.
# In this case it instead sets up the _parent_fits_rec weakref to the
# underlying FITS_rec, so that array.getter can return arrays through
# self._parent_fits_rec().field(self.name), rather than storing a
# hard reference to the field like it used to.
base = array
while True:
if (hasattr(base, '_coldefs') and
isinstance(base._coldefs, ColDefs)):
for col in base._coldefs:
if col is self and self._parent_fits_rec is None:
self._parent_fits_rec = weakref.ref(base)
# Just in case the user already set .array to their own
# array.
if 'array' in self.__dict__:
del self.__dict__['array']
return
if getattr(base, 'base', None) is not None:
base = base.base
else:
break
self.__dict__['array'] = array
@array.deleter
def array(self):
try:
del self.__dict__['array']
except KeyError:
pass
self._parent_fits_rec = None
@ColumnAttribute('TTYPE')
def name(col, name):
if name is None:
# Allow None to indicate deleting the name, or to just indicate an
# unspecified name (when creating a new Column).
return
# Check that the name meets the recommended standard--other column
# names are *allowed*, but will be discouraged
if isinstance(name, str) and not TTYPE_RE.match(name):
warnings.warn(
'It is strongly recommended that column names contain only '
'upper and lower-case ASCII letters, digits, or underscores '
'for maximum compatibility with other software '
'(got {0!r}).'.format(name), VerifyWarning)
# This ensures that the new name can fit into a single FITS card
# without any special extension like CONTINUE cards or the like.
if (not isinstance(name, str)
or len(str(Card('TTYPE', name))) != CARD_LENGTH):
raise AssertionError(
'Column name must be a string able to fit in a single '
'FITS card--typically this means a maximum of 68 '
'characters, though it may be fewer if the string '
'contains special characters like quotes.')
@ColumnAttribute('TCTYP')
def coord_type(col, coord_type):
if coord_type is None:
return
if (not isinstance(coord_type, str)
or len(coord_type) > 8):
raise AssertionError(
                'Coordinate/axis type must be a string of at most 8 '
'characters.')
@ColumnAttribute('TCUNI')
def coord_unit(col, coord_unit):
if (coord_unit is not None
and not isinstance(coord_unit, str)):
raise AssertionError(
'Coordinate/axis unit must be a string.')
@ColumnAttribute('TCRPX')
def coord_ref_point(col, coord_ref_point):
if (coord_ref_point is not None
and not isinstance(coord_ref_point, numbers.Real)):
raise AssertionError(
'Pixel coordinate of the reference point must be '
'real floating type.')
@ColumnAttribute('TCRVL')
def coord_ref_value(col, coord_ref_value):
if (coord_ref_value is not None
and not isinstance(coord_ref_value, numbers.Real)):
raise AssertionError(
'Coordinate value at reference point must be real '
'floating type.')
@ColumnAttribute('TCDLT')
def coord_inc(col, coord_inc):
if (coord_inc is not None
and not isinstance(coord_inc, numbers.Real)):
raise AssertionError(
'Coordinate increment must be real floating type.')
@ColumnAttribute('TRPOS')
def time_ref_pos(col, time_ref_pos):
if (time_ref_pos is not None
and not isinstance(time_ref_pos, str)):
raise AssertionError(
'Time reference position must be a string.')
format = ColumnAttribute('TFORM')
unit = ColumnAttribute('TUNIT')
null = ColumnAttribute('TNULL')
bscale = ColumnAttribute('TSCAL')
bzero = ColumnAttribute('TZERO')
disp = ColumnAttribute('TDISP')
start = ColumnAttribute('TBCOL')
dim = ColumnAttribute('TDIM')
@lazyproperty
def ascii(self):
"""Whether this `Column` represents a column in an ASCII table."""
return isinstance(self.format, _AsciiColumnFormat)
@lazyproperty
def dtype(self):
return self.format.dtype
def copy(self):
"""
Return a copy of this `Column`.
"""
tmp = Column(format='I') # just use a throw-away format
tmp.__dict__ = self.__dict__.copy()
return tmp
@staticmethod
def _convert_format(format, cls):
"""The format argument to this class's initializer may come in many
forms. This uses the given column format class ``cls`` to convert
to a format of that type.
TODO: There should be an abc base class for column format classes
"""
# Short circuit in case we're already a _BaseColumnFormat--there is at
# least one case in which this can happen
if isinstance(format, _BaseColumnFormat):
return format, format.recformat
if format in NUMPY2FITS:
with suppress(VerifyError):
# legit recarray format?
recformat = format
format = cls.from_recformat(format)
try:
# legit FITS format?
format = cls(format)
recformat = format.recformat
except VerifyError:
raise VerifyError('Illegal format `{}`.'.format(format))
return format, recformat
@classmethod
def _verify_keywords(cls, name=None, format=None, unit=None, null=None,
bscale=None, bzero=None, disp=None, start=None,
dim=None, ascii=None, coord_type=None, coord_unit=None,
coord_ref_point=None, coord_ref_value=None,
coord_inc=None, time_ref_pos=None):
"""
Given the keyword arguments used to initialize a Column, specifically
        those that are typically read from a FITS header (so excluding array),
verify that each keyword has a valid value.
Returns a 2-tuple of dicts. The first maps valid keywords to their
values. The second maps invalid keywords to a 2-tuple of their value,
and a message explaining why they were found invalid.
"""
valid = {}
invalid = {}
format, recformat = cls._determine_formats(format, start, dim, ascii)
valid.update(format=format, recformat=recformat)
# Currently we don't have any validation for name, unit, bscale, or
# bzero so include those by default
# TODO: Add validation for these keywords, obviously
for k, v in [('name', name), ('unit', unit), ('bscale', bscale),
('bzero', bzero)]:
if v is not None and v != '':
valid[k] = v
# Validate null option
# Note: Enough code exists that thinks empty strings are sensible
# inputs for these options that we need to treat '' as None
if null is not None and null != '':
msg = None
if isinstance(format, _AsciiColumnFormat):
null = str(null)
if len(null) > format.width:
msg = (
"ASCII table null option (TNULLn) is longer than "
"the column's character width and will be truncated "
"(got {!r}).".format(null))
else:
tnull_formats = ('B', 'I', 'J', 'K')
if not _is_int(null):
# Make this an exception instead of a warning, since any
# non-int value is meaningless
msg = (
'Column null option (TNULLn) must be an integer for '
'binary table columns (got {!r}). The invalid value '
'will be ignored for the purpose of formatting '
'the data in this column.'.format(null))
elif not (format.format in tnull_formats or
(format.format in ('P', 'Q') and
format.p_format in tnull_formats)):
# TODO: We should also check that TNULLn's integer value
# is in the range allowed by the column's format
msg = (
'Column null option (TNULLn) is invalid for binary '
'table columns of type {!r} (got {!r}). The invalid '
'value will be ignored for the purpose of formatting '
'the data in this column.'.format(format, null))
if msg is None:
valid['null'] = null
else:
invalid['null'] = (null, msg)
# Validate the disp option
# TODO: Add full parsing and validation of TDISPn keywords
if disp is not None and disp != '':
msg = None
if not isinstance(disp, str):
msg = (
'Column disp option (TDISPn) must be a string (got {!r}).'
'The invalid value will be ignored for the purpose of '
'formatting the data in this column.'.format(disp))
elif (isinstance(format, _AsciiColumnFormat) and
disp[0].upper() == 'L'):
# disp is at least one character long and has the 'L' format
# which is not recognized for ASCII tables
msg = (
"Column disp option (TDISPn) may not use the 'L' format "
"with ASCII table columns. The invalid value will be "
"ignored for the purpose of formatting the data in this "
"column.")
if msg is None:
valid['disp'] = disp
else:
invalid['disp'] = (disp, msg)
# Validate the start option
if start is not None and start != '':
msg = None
if not isinstance(format, _AsciiColumnFormat):
# The 'start' option only applies to ASCII columns
msg = (
'Column start option (TBCOLn) is not allowed for binary '
'table columns (got {!r}). The invalid keyword will be '
'ignored for the purpose of formatting the data in this '
'column.'.format(start))
else:
try:
start = int(start)
except (TypeError, ValueError):
pass
if not _is_int(start) or start < 1:
msg = (
'Column start option (TBCOLn) must be a positive integer '
'(got {!r}). The invalid value will be ignored for the '
'purpose of formatting the data in this column.'.format(start))
if msg is None:
valid['start'] = start
else:
invalid['start'] = (start, msg)
# Process TDIMn options
# ASCII table columns can't have a TDIMn keyword associated with it;
# for now we just issue a warning and ignore it.
# TODO: This should be checked by the FITS verification code
if dim is not None and dim != '':
msg = None
dims_tuple = tuple()
            # NOTE: If valid, the dim keyword's value in the valid dict is
# a tuple, not the original string; if invalid just the original
# string is returned
if isinstance(format, _AsciiColumnFormat):
msg = (
'Column dim option (TDIMn) is not allowed for ASCII table '
'columns (got {!r}). The invalid keyword will be ignored '
'for the purpose of formatting this column.'.format(dim))
elif isinstance(dim, str):
dims_tuple = _parse_tdim(dim)
elif isinstance(dim, tuple):
dims_tuple = dim
else:
msg = (
"`dim` argument must be a string containing a valid value "
"for the TDIMn header keyword associated with this column, "
"or a tuple containing the C-order dimensions for the "
"column. The invalid value will be ignored for the purpose "
"of formatting this column.")
if dims_tuple:
if reduce(operator.mul, dims_tuple) > format.repeat:
msg = (
"The repeat count of the column format {!r} for column {!r} "
"is fewer than the number of elements per the TDIM "
"argument {!r}. The invalid TDIMn value will be ignored "
"for the purpose of formatting this column.".format(
name, format, dim))
if msg is None:
valid['dim'] = dims_tuple
else:
invalid['dim'] = (dim, msg)
if coord_type is not None and coord_type != '':
msg = None
if not isinstance(coord_type, str):
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_type))
elif len(coord_type) > 8:
msg = (
"Coordinate/axis type option (TCTYPn) must be a string "
"of atmost 8 characters (got {!r}). The invalid keyword "
"will be ignored for the purpose of formatting this "
"column.".format(coord_type))
if msg is None:
valid['coord_type'] = coord_type
else:
invalid['coord_type'] = (coord_type, msg)
if coord_unit is not None and coord_unit != '':
msg = None
if not isinstance(coord_unit, str):
msg = (
"Coordinate/axis unit option (TCUNIn) must be a string "
"(got {!r}). The invalid keyword will be ignored for the "
"purpose of formatting this column.".format(coord_unit))
if msg is None:
valid['coord_unit'] = coord_unit
else:
invalid['coord_unit'] = (coord_unit, msg)
for k, v in [('coord_ref_point', coord_ref_point),
('coord_ref_value', coord_ref_value),
('coord_inc', coord_inc)]:
if v is not None and v != '':
msg = None
if not isinstance(v, numbers.Real):
msg = (
"Column {} option ({}n) must be a real floating type (got {!r}). "
"The invalid value will be ignored for the purpose of formatting "
"the data in this column.".format(k, ATTRIBUTE_TO_KEYWORD[k], v))
if msg is None:
valid[k] = v
else:
invalid[k] = (v, msg)
if time_ref_pos is not None and time_ref_pos != '':
            msg = None
if not isinstance(time_ref_pos, str):
msg = (
"Time coordinate reference position option (TRPOSn) must be "
"a string (got {!r}). The invalid keyword will be ignored for "
"the purpose of formatting this column.".format(time_ref_pos))
if msg is None:
valid['time_ref_pos'] = time_ref_pos
else:
invalid['time_ref_pos'] = (time_ref_pos, msg)
return valid, invalid
@classmethod
def _determine_formats(cls, format, start, dim, ascii):
"""
Given a format string and whether or not the Column is for an
ASCII table (ascii=None means unspecified, but lean toward binary table
where ambiguous) create an appropriate _BaseColumnFormat instance for
the column's format, and determine the appropriate recarray format.
The values of the start and dim keyword arguments are also useful, as
the former is only valid for ASCII tables and the latter only for
BINARY tables.
"""
# If the given format string is unambiguously a Numpy dtype or one of
# the Numpy record format type specifiers supported by Astropy then that
# should take priority--otherwise assume it is a FITS format
if isinstance(format, np.dtype):
format, _, _ = _dtype_to_recformat(format)
# check format
if ascii is None and not isinstance(format, _BaseColumnFormat):
            # We're just given a string which could be either a Numpy format
# code, or a format for a binary column array *or* a format for an
# ASCII column array--there may be many ambiguities here. Try our
# best to guess what the user intended.
format, recformat = cls._guess_format(format, start, dim)
elif not ascii and not isinstance(format, _BaseColumnFormat):
format, recformat = cls._convert_format(format, _ColumnFormat)
elif ascii and not isinstance(format, _AsciiColumnFormat):
format, recformat = cls._convert_format(format,
_AsciiColumnFormat)
else:
# The format is already acceptable and unambiguous
recformat = format.recformat
return format, recformat
@classmethod
def _guess_format(cls, format, start, dim):
if start and dim:
# This is impossible; this can't be a valid FITS column
raise ValueError(
                'Columns cannot have both a start (TBCOLn) and dim '
                '(TDIMn) option, since the former only applies to '
'ASCII tables, and the latter is only valid for binary '
'tables.')
elif start:
# Only ASCII table columns can have a 'start' option
guess_format = _AsciiColumnFormat
elif dim:
# Only binary tables can have a dim option
guess_format = _ColumnFormat
else:
# If the format is *technically* a valid binary column format
# (i.e. it has a valid format code followed by arbitrary
# "optional" codes), but it is also strictly a valid ASCII
# table format, then assume an ASCII table column was being
# requested (the more likely case, after all).
with suppress(VerifyError):
format = _AsciiColumnFormat(format, strict=True)
# A safe guess which reflects the existing behavior of previous
# Astropy versions
guess_format = _ColumnFormat
try:
format, recformat = cls._convert_format(format, guess_format)
except VerifyError:
# For whatever reason our guess was wrong (for example if we got
            # just 'F', which is not a valid binary format but is an ASCII
            # format code, albeit with the width/precision omitted)
guess_format = (_AsciiColumnFormat
if guess_format is _ColumnFormat
else _ColumnFormat)
# If this fails too we're out of options--it is truly an invalid
# format, or at least not supported
format, recformat = cls._convert_format(format, guess_format)
return format, recformat
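    # For example, 'F10.4' is strictly a valid ASCII table format, while 'D'
    # alone is only a valid binary table format, so the guesses differ
    # (illustrative):
    # >>> Column(name='a', format='F10.4').ascii
    # True
    # >>> Column(name='b', format='D').ascii
    # False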
def _convert_to_valid_data_type(self, array):
# Convert the format to a type we understand
if isinstance(array, Delayed):
return array
elif array is None:
return array
else:
format = self.format
dims = self._dims
if dims:
shape = dims[:-1] if 'A' in format else dims
shape = (len(array),) + shape
array = array.reshape(shape)
if 'P' in format or 'Q' in format:
return array
elif 'A' in format:
if array.dtype.char in 'SU':
if dims:
# The 'last' dimension (first in the order given
# in the TDIMn keyword itself) is the number of
# characters in each string
fsize = dims[-1]
else:
fsize = np.dtype(format.recformat).itemsize
return chararray.array(array, itemsize=fsize, copy=False)
else:
return _convert_array(array, np.dtype(format.recformat))
elif 'L' in format:
# boolean needs to be scaled back to storage values ('T', 'F')
if array.dtype == np.dtype('bool'):
return np.where(array == np.False_, ord('F'), ord('T'))
else:
return np.where(array == 0, ord('F'), ord('T'))
elif 'X' in format:
return _convert_array(array, np.dtype('uint8'))
else:
# Preserve byte order of the original array for now; see #77
numpy_format = array.dtype.byteorder + format.recformat
# Handle arrays passed in as unsigned ints as pseudo-unsigned
# int arrays; blatantly tacked in here for now--we need columns
                # to have explicit knowledge of whether they are treated as
# pseudo-unsigned
bzeros = {2: np.uint16(2**15), 4: np.uint32(2**31),
8: np.uint64(2**63)}
if (array.dtype.kind == 'u' and
array.dtype.itemsize in bzeros and
self.bscale in (1, None, '') and
self.bzero == bzeros[array.dtype.itemsize]):
                    # The array is uint, bscale is 1 (or unset), and bzero is
                    # the appropriate value for a pseudo-unsigned integer of
                    # the input dtype, so go ahead and treat the column as
                    # pseudo-unsigned
numpy_format = numpy_format.replace('i', 'u')
self._pseudo_unsigned_ints = True
# The .base here means we're dropping the shape information,
# which is only used to format recarray fields, and is not
# useful for converting input arrays to the correct data type
dtype = np.dtype(numpy_format).base
return _convert_array(array, dtype)
class ColDefs(NotifierMixin):
"""
Column definitions class.
It has attributes corresponding to the `Column` attributes
(e.g. `ColDefs` has the attribute ``names`` while `Column`
has ``name``). Each attribute in `ColDefs` is a list of
corresponding attribute values from all `Column` objects.
"""
_padding_byte = '\x00'
_col_format_cls = _ColumnFormat
def __new__(cls, input, ascii=False):
klass = cls
if (hasattr(input, '_columns_type') and
issubclass(input._columns_type, ColDefs)):
klass = input._columns_type
elif (hasattr(input, '_col_format_cls') and
issubclass(input._col_format_cls, _AsciiColumnFormat)):
klass = _AsciiColDefs
if ascii: # force ASCII if this has been explicitly requested
klass = _AsciiColDefs
return object.__new__(klass)
def __getnewargs__(self):
return (self._arrays,)
def __init__(self, input, ascii=False):
"""
Parameters
----------
input : sequence of `Column`, `ColDefs`, other
An existing table HDU, an existing `ColDefs`, or any multi-field
Numpy array or `numpy.recarray`.
ascii : bool
Use True to ensure that ASCII table columns are used.
"""
from .hdu.table import _TableBaseHDU
from .fitsrec import FITS_rec
if isinstance(input, ColDefs):
self._init_from_coldefs(input)
elif (isinstance(input, FITS_rec) and hasattr(input, '_coldefs') and
input._coldefs):
# If given a FITS_rec object we can directly copy its columns, but
# only if its columns have already been defined, otherwise this
# will loop back in on itself and blow up
self._init_from_coldefs(input._coldefs)
elif isinstance(input, np.ndarray) and input.dtype.fields is not None:
# Construct columns from the fields of a record array
self._init_from_array(input)
elif isiterable(input):
# if the input is a list of Columns
self._init_from_sequence(input)
elif isinstance(input, _TableBaseHDU):
# Construct columns from fields in an HDU header
self._init_from_table(input)
else:
raise TypeError('Input to ColDefs must be a table HDU, a list '
'of Columns, or a record/field array.')
# Listen for changes on all columns
for col in self.columns:
col._add_listener(self)
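    # A minimal construction sketch (not from the original source; the column
    # names are hypothetical):
    # >>> c1 = Column(name='target', format='20A')
    # >>> c2 = Column(name='flux', format='E')
    # >>> coldefs = ColDefs([c1, c2])
    # >>> coldefs.names
    # ['target', 'flux']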
def _init_from_coldefs(self, coldefs):
"""Initialize from an existing ColDefs object (just copy the
columns and convert their formats if necessary).
"""
self.columns = [self._copy_column(col) for col in coldefs]
def _init_from_sequence(self, columns):
for idx, col in enumerate(columns):
if not isinstance(col, Column):
raise TypeError('Element {} in the ColDefs input is not a '
'Column.'.format(idx))
self._init_from_coldefs(columns)
def _init_from_array(self, array):
self.columns = []
for idx in range(len(array.dtype)):
cname = array.dtype.names[idx]
ftype = array.dtype.fields[cname][0]
format = self._col_format_cls.from_recformat(ftype)
# Determine the appropriate dimensions for items in the column
# (typically just 1D)
dim = array.dtype[idx].shape[::-1]
if dim and (len(dim) > 1 or 'A' in format):
if 'A' in format:
# n x m string arrays must include the max string
# length in their dimensions (e.g. l x n x m)
dim = (array.dtype[idx].base.itemsize,) + dim
dim = repr(dim).replace(' ', '')
else:
dim = None
# Check for unsigned ints.
bzero = None
if 'I' in format and ftype == np.dtype('uint16'):
bzero = np.uint16(2**15)
elif 'J' in format and ftype == np.dtype('uint32'):
bzero = np.uint32(2**31)
elif 'K' in format and ftype == np.dtype('uint64'):
bzero = np.uint64(2**63)
c = Column(name=cname, format=format,
array=array.view(np.ndarray)[cname], bzero=bzero,
dim=dim)
self.columns.append(c)
def _init_from_table(self, table):
hdr = table._header
nfields = hdr['TFIELDS']
# go through header keywords to pick out column definition keywords
# definition dictionaries for each field
col_keywords = [{} for i in range(nfields)]
for keyword, value in hdr.items():
key = TDEF_RE.match(keyword)
try:
keyword = key.group('label')
except Exception:
continue # skip if there is no match
if keyword in KEYWORD_NAMES:
col = int(key.group('num'))
if 0 < col <= nfields:
attr = KEYWORD_TO_ATTRIBUTE[keyword]
if attr == 'format':
# Go ahead and convert the format value to the
# appropriate ColumnFormat container now
value = self._col_format_cls(value)
col_keywords[col - 1][attr] = value
# Verify the column keywords and display any warnings if necessary;
# we only want to pass on the valid keywords
for idx, kwargs in enumerate(col_keywords):
valid_kwargs, invalid_kwargs = Column._verify_keywords(**kwargs)
for val in invalid_kwargs.values():
warnings.warn(
'Invalid keyword for column {}: {}'.format(idx + 1, val[1]),
VerifyWarning)
# Special cases for recformat and dim
# TODO: Try to eliminate the need for these special cases
del valid_kwargs['recformat']
if 'dim' in valid_kwargs:
valid_kwargs['dim'] = kwargs['dim']
col_keywords[idx] = valid_kwargs
# data reading will be delayed
for col in range(nfields):
col_keywords[col]['array'] = Delayed(table, col)
# now build the columns
self.columns = [Column(**attrs) for attrs in col_keywords]
        # Add the table HDU as a listener for changes to the columns
# (either changes to individual columns, or changes to the set of
# columns (add/remove/etc.))
self._add_listener(table)
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memo):
return self.__class__([copy.deepcopy(c, memo) for c in self.columns])
def _copy_column(self, column):
"""Utility function used currently only by _init_from_coldefs
to help convert columns from binary format to ASCII format or vice
versa if necessary (otherwise performs a straight copy).
"""
if isinstance(column.format, self._col_format_cls):
# This column has a FITS format compatible with this column
# definitions class (that is ascii or binary)
return column.copy()
new_column = column.copy()
# Try to use the Numpy recformat as the equivalency between the
# two formats; if that conversion can't be made then these
# columns can't be transferred
# TODO: Catch exceptions here and raise an explicit error about
# column format conversion
new_column.format = self._col_format_cls.from_column_format(
column.format)
# Handle a few special cases of column format options that are not
        # compatible between ASCII and binary tables
# TODO: This is sort of hacked in right now; we really need
# separate classes for ASCII and Binary table Columns, and they
# should handle formatting issues like these
if not isinstance(new_column.format, _AsciiColumnFormat):
# the column is a binary table column...
new_column.start = None
if new_column.null is not None:
# We can't just "guess" a value to represent null
# values in the new column, so just disable this for
# now; users may modify it later
new_column.null = None
else:
# the column is an ASCII table column...
if new_column.null is not None:
new_column.null = DEFAULT_ASCII_TNULL
if (new_column.disp is not None and
new_column.disp.upper().startswith('L')):
# ASCII columns may not use the logical data display format;
# for now just drop the TDISPn option for this column as we
# don't have a systematic conversion of boolean data to ASCII
# tables yet
new_column.disp = None
return new_column
def __getattr__(self, name):
"""
Automatically returns the values for the given keyword attribute for
all `Column`s in this list.
Implements for example self.units, self.formats, etc.
"""
cname = name[:-1]
if cname in KEYWORD_ATTRIBUTES and name[-1] == 's':
attr = []
for col in self.columns:
val = getattr(col, cname)
attr.append(val if val is not None else '')
return attr
raise AttributeError(name)
@lazyproperty
def dtype(self):
# Note: This previously returned a dtype that just used the raw field
# widths based on the format's repeat count, and did not incorporate
# field *shapes* as provided by TDIMn keywords.
# Now this incorporates TDIMn from the start, which makes *this* method
# a little more complicated, but simplifies code elsewhere (for example
# fields will have the correct shapes even in the raw recarray).
formats = []
offsets = [0]
for format_, dim in zip(self.formats, self._dims):
dt = format_.dtype
if len(offsets) < len(self.formats):
# Note: the size of the *original* format_ may be greater than
# one would expect from the number of elements determined by
# dim. The FITS format allows this--the rest of the field is
# filled with undefined values.
offsets.append(offsets[-1] + dt.itemsize)
if dim:
if format_.format == 'A':
dt = np.dtype((dt.char + str(dim[-1]), dim[:-1]))
else:
dt = np.dtype((dt.base, dim))
formats.append(dt)
return np.dtype({'names': self.names,
'formats': formats,
'offsets': offsets})
@lazyproperty
def names(self):
return [col.name for col in self.columns]
@lazyproperty
def formats(self):
return [col.format for col in self.columns]
@lazyproperty
def _arrays(self):
return [col.array for col in self.columns]
@lazyproperty
def _recformats(self):
return [fmt.recformat for fmt in self.formats]
@lazyproperty
def _dims(self):
"""Returns the values of the TDIMn keywords parsed into tuples."""
return [col._dims for col in self.columns]
def __getitem__(self, key):
if isinstance(key, str):
key = _get_index(self.names, key)
x = self.columns[key]
if _is_int(key):
return x
else:
return ColDefs(x)
def __len__(self):
return len(self.columns)
def __repr__(self):
rep = 'ColDefs('
if hasattr(self, 'columns') and self.columns:
# The hasattr check is mostly just useful in debugging sessions
# where self.columns may not be defined yet
rep += '\n '
rep += '\n '.join([repr(c) for c in self.columns])
rep += '\n'
rep += ')'
return rep
def __add__(self, other, option='left'):
if isinstance(other, Column):
b = [other]
elif isinstance(other, ColDefs):
b = list(other.columns)
else:
raise TypeError('Wrong type of input.')
if option == 'left':
tmp = list(self.columns) + b
else:
tmp = b + list(self.columns)
return ColDefs(tmp)
def __radd__(self, other):
return self.__add__(other, 'right')
def __sub__(self, other):
if not isinstance(other, (list, tuple)):
other = [other]
_other = [_get_index(self.names, key) for key in other]
indx = list(range(len(self)))
for x in _other:
indx.remove(x)
tmp = [self[i] for i in indx]
return ColDefs(tmp)
def _update_column_attribute_changed(self, column, attr, old_value,
new_value):
"""
Handle column attribute changed notifications from columns that are
members of this `ColDefs`.
`ColDefs` itself does not currently do anything with this, and just
bubbles the notification up to any listening table HDUs that may need
to update their headers, etc. However, this also informs the table of
the numerical index of the column that changed.
"""
idx = 0
for idx, col in enumerate(self.columns):
if col is column:
break
if attr == 'name':
del self.names
elif attr == 'format':
del self.formats
self._notify('column_attribute_changed', column, idx, attr, old_value,
new_value)
def add_col(self, column):
"""
Append one `Column` to the column definition.
"""
if not isinstance(column, Column):
            raise AssertionError('input is not a Column instance')
self._arrays.append(column.array)
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
self.columns.append(column)
# Listen for changes on the new column
column._add_listener(self)
# If this ColDefs is being tracked by a Table, inform the
# table that its data is now invalid.
self._notify('column_added', self, column)
return self
def del_col(self, col_name):
"""
Delete (the definition of) one `Column`.
        Parameters
        ----------
        col_name : str or int
The column's name or index
"""
indx = _get_index(self.names, col_name)
col = self.columns[indx]
del self._arrays[indx]
# Obliterate caches of certain things
del self.dtype
del self._recformats
del self._dims
del self.names
del self.formats
del self.columns[indx]
col._remove_listener(self)
# If this ColDefs is being tracked by a table HDU, inform the HDU (or
# any other listeners) that the column has been removed
# Just send a reference to self, and the index of the column that was
# removed
self._notify('column_removed', self, indx)
return self
def change_attrib(self, col_name, attrib, new_value):
"""
Change an attribute (in the ``KEYWORD_ATTRIBUTES`` list) of a `Column`.
Parameters
----------
col_name : str or int
The column name or index to change
attrib : str
The attribute name
new_value : object
The new value for the attribute
"""
setattr(self[col_name], attrib, new_value)
def change_name(self, col_name, new_name):
"""
Change a `Column`'s name.
Parameters
----------
col_name : str
The current name of the column
new_name : str
The new name of the column
"""
if new_name != col_name and new_name in self.names:
raise ValueError('New name {} already exists.'.format(new_name))
else:
self.change_attrib(col_name, 'name', new_name)
def change_unit(self, col_name, new_unit):
"""
Change a `Column`'s unit.
Parameters
----------
col_name : str or int
The column name or index
new_unit : str
The new unit for the column
"""
self.change_attrib(col_name, 'unit', new_unit)
def info(self, attrib='all', output=None):
"""
Get attribute(s) information of the column definition.
Parameters
----------
attrib : str
Can be one or more of the attributes listed in
``astropy.io.fits.column.KEYWORD_ATTRIBUTES``. The default is
``"all"`` which will print out all attributes. It forgives plurals
and blanks. If there are two or more attribute names, they must be
separated by comma(s).
output : file, optional
File-like object to output to. Outputs to stdout by default.
If `False`, returns the attributes as a `dict` instead.
Notes
-----
This function doesn't return anything by default; it just prints to
stdout.
"""
if output is None:
output = sys.stdout
if attrib.strip().lower() in ['all', '']:
lst = KEYWORD_ATTRIBUTES
else:
lst = attrib.split(',')
for idx in range(len(lst)):
lst[idx] = lst[idx].strip().lower()
if lst[idx][-1] == 's':
                lst[idx] = lst[idx][:-1]
ret = {}
for attr in lst:
if output:
if attr not in KEYWORD_ATTRIBUTES:
output.write("'{}' is not an attribute of the column "
"definitions.\n".format(attr))
continue
output.write("{}:\n".format(attr))
output.write(' {}\n'.format(getattr(self, attr + 's')))
else:
ret[attr] = getattr(self, attr + 's')
if not output:
return ret
class _AsciiColDefs(ColDefs):
"""ColDefs implementation for ASCII tables."""
_padding_byte = ' '
_col_format_cls = _AsciiColumnFormat
def __init__(self, input, ascii=True):
super().__init__(input)
# if the format of an ASCII column has no width, add one
if not isinstance(input, _AsciiColDefs):
self._update_field_metrics()
else:
for idx, s in enumerate(input.starts):
self.columns[idx].start = s
self._spans = input.spans
self._width = input._width
@lazyproperty
def dtype(self):
dtype = {}
for j in range(len(self)):
data_type = 'S' + str(self.spans[j])
dtype[self.names[j]] = (data_type, self.starts[j] - 1)
return np.dtype(dtype)
@property
def spans(self):
"""A list of the widths of each field in the table."""
return self._spans
@lazyproperty
def _recformats(self):
if len(self) == 1:
widths = []
else:
widths = [y - x for x, y in pairwise(self.starts)]
# Widths is the width of each field *including* any space between
# fields; this is so that we can map the fields to string records in a
# Numpy recarray
widths.append(self._width - self.starts[-1] + 1)
return ['a' + str(w) for w in widths]
def add_col(self, column):
super().add_col(column)
self._update_field_metrics()
def del_col(self, col_name):
super().del_col(col_name)
self._update_field_metrics()
def _update_field_metrics(self):
"""
Updates the list of the start columns, the list of the widths of each
field, and the total width of each record in the table.
"""
spans = [0] * len(self.columns)
end_col = 0 # Refers to the ASCII text column, not the table col
for idx, col in enumerate(self.columns):
width = col.format.width
# Update the start columns and column span widths taking into
# account the case that the starting column of a field may not
# be the column immediately after the previous field
if not col.start:
col.start = end_col + 1
end_col = col.start + width - 1
spans[idx] = width
self._spans = spans
self._width = end_col
# Utilities
class _VLF(np.ndarray):
"""Variable length field object."""
def __new__(cls, input, dtype='a'):
"""
Parameters
----------
input
a sequence of variable-sized elements.
"""
if dtype == 'a':
try:
# this handles ['abc'] and [['a','b','c']]
# equally, beautiful!
input = [chararray.array(x, itemsize=1) for x in input]
except Exception:
raise ValueError(
'Inconsistent input data array: {0}'.format(input))
a = np.array(input, dtype=object)
self = np.ndarray.__new__(cls, shape=(len(input),), buffer=a,
dtype=object)
self.max = 0
self.element_dtype = dtype
return self
def __array_finalize__(self, obj):
if obj is None:
return
self.max = obj.max
self.element_dtype = obj.element_dtype
def __setitem__(self, key, value):
"""
        Ensure that the new item has a data type consistent with the rest of
        the array, to avoid misalignment.
"""
if isinstance(value, np.ndarray) and value.dtype == self.dtype:
pass
elif isinstance(value, chararray.chararray) and value.itemsize == 1:
pass
elif self.element_dtype == 'a':
value = chararray.array(value, itemsize=1)
else:
value = np.array(value, dtype=self.element_dtype)
np.ndarray.__setitem__(self, key, value)
self.max = max(self.max, len(value))
def _get_index(names, key):
"""
Get the index of the ``key`` in the ``names`` list.
The ``key`` can be an integer or string. If integer, it is the index
in the list. If string,
a. Field (column) names are case sensitive: you can have two
different columns called 'abc' and 'ABC' respectively.
b. When you *refer* to a field (presumably with the field
method), it will try to match the exact name first, so in
the example in (a), field('abc') will get the first field,
and field('ABC') will get the second field.
If there is no exact name matched, it will try to match the
name with case insensitivity. So, in the last example,
field('Abc') will cause an exception since there is no unique
mapping. If there is a field named "XYZ" and no other field
name is a case variant of "XYZ", then field('xyz'),
field('Xyz'), etc. will get this field.
"""
if _is_int(key):
indx = int(key)
elif isinstance(key, str):
# try to find exact match first
try:
indx = names.index(key.rstrip())
except ValueError:
            # try to match case-insensitively
_key = key.lower().rstrip()
names = [n.lower().rstrip() for n in names]
count = names.count(_key) # occurrence of _key in names
if count == 1:
indx = names.index(_key)
elif count == 0:
raise KeyError("Key '{}' does not exist.".format(key))
else: # multiple match
raise KeyError("Ambiguous key name '{}'.".format(key))
else:
raise KeyError("Illegal key '{!r}'.".format(key))
return indx
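# Illustrative sketch (not part of the original source) of the matching rules
# implemented by _get_index: exact name matches win, and case-insensitive
# matches are only used when they are unambiguous::
#
#     >>> names = ['ABC', 'abc', 'XYZ']
#     >>> _get_index(names, 'abc')    # exact match
#     1
#     >>> _get_index(names, 'xyz')    # unique case-insensitive match
#     2
#     >>> _get_index(names, 'Abc')    # ambiguous -> KeyError
#     Traceback (most recent call last):
#         ...
#     KeyError: "Ambiguous key name 'Abc'."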
def _unwrapx(input, output, repeat):
"""
Unwrap the X format column into a Boolean array.
Parameters
----------
input
        input ``uint8`` array of shape (`s`, `nbytes`)
output
output Boolean array of shape (`s`, `repeat`)
repeat
number of bits
"""
pow2 = np.array([128, 64, 32, 16, 8, 4, 2, 1], dtype='uint8')
nbytes = ((repeat - 1) // 8) + 1
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
output[..., j] = np.bitwise_and(input[..., i], pow2[j - i * 8])
def _wrapx(input, output, repeat):
"""
    Wrap the X format column Boolean array into a ``uint8`` array.
Parameters
----------
input
input Boolean array of shape (`s`, `repeat`)
output
        output ``uint8`` array of shape (`s`, `nbytes`)
repeat
number of bits
"""
output[...] = 0 # reset the output
nbytes = ((repeat - 1) // 8) + 1
unused = nbytes * 8 - repeat
for i in range(nbytes):
_min = i * 8
_max = min((i + 1) * 8, repeat)
for j in range(_min, _max):
if j != _min:
np.left_shift(output[..., i], 1, output[..., i])
np.add(output[..., i], input[..., j], output[..., i])
# shift the unused bits
np.left_shift(output[..., i], unused, output[..., i])
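# Illustrative round-trip sketch (not part of the original source) for the two
# X-format helpers above: _wrapx packs a boolean bit array MSB-first into
# bytes, and _unwrapx recovers the bits::
#
#     >>> bits = np.array([[True, False, True, True, False,
#     ...                   False, True, False, True, True]])
#     >>> packed = np.zeros((1, 2), dtype='uint8')
#     >>> _wrapx(bits, packed, 10)
#     >>> recovered = np.zeros((1, 10), dtype=bool)
#     >>> _unwrapx(packed, recovered, 10)
#     >>> bool((bits == recovered).all())
#     True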
def _makep(array, descr_output, format, nrows=None):
"""
Construct the P (or Q) format column array, both the data descriptors and
the data. It returns the output "data" array of data type `dtype`.
The descriptor location will have a zero offset for all columns
after this call. The final offset will be calculated when the file
is written.
Parameters
----------
array
input object array
descr_output
output "descriptor" array of data type int32 (for P format arrays) or
int64 (for Q format arrays)--must be nrows long in its first dimension
format
the _FormatP object representing the format of the variable array
nrows : int, optional
number of rows to create in the column; defaults to the number of rows
in the input array
"""
# TODO: A great deal of this is redundant with FITS_rec._convert_p; see if
# we can merge the two somehow.
_offset = 0
if not nrows:
nrows = len(array)
data_output = _VLF([None] * nrows, dtype=format.dtype)
if format.dtype == 'a':
_nbytes = 1
else:
_nbytes = np.array([], dtype=format.dtype).itemsize
for idx in range(nrows):
if idx < len(array):
rowval = array[idx]
else:
if format.dtype == 'a':
rowval = ' ' * data_output.max
else:
rowval = [0] * data_output.max
if format.dtype == 'a':
data_output[idx] = chararray.array(encode_ascii(rowval),
itemsize=1)
else:
data_output[idx] = np.array(rowval, dtype=format.dtype)
descr_output[idx, 0] = len(data_output[idx])
descr_output[idx, 1] = _offset
_offset += len(data_output[idx]) * _nbytes
return data_output
def _parse_tformat(tform):
"""Parse ``TFORMn`` keyword for a binary table into a
``(repeat, format, option)`` tuple.
"""
try:
(repeat, format, option) = TFORMAT_RE.match(tform.strip()).groups()
except Exception:
# TODO: Maybe catch this error use a default type (bytes, maybe?) for
# unrecognized column types. As long as we can determine the correct
# byte width somehow..
raise VerifyError('Format {!r} is not recognized.'.format(tform))
if repeat == '':
repeat = 1
else:
repeat = int(repeat)
return (repeat, format.upper(), option)
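# Illustrative sketch (not part of the original source): _parse_tformat splits
# a binary table TFORMn value into its repeat count, format code, and trailing
# option, with the repeat count defaulting to 1::
#
#     >>> _parse_tformat('100E')
#     (100, 'E', '')
#     >>> _parse_tformat('L')
#     (1, 'L', '')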
def _parse_ascii_tformat(tform, strict=False):
"""
Parse the ``TFORMn`` keywords for ASCII tables into a ``(format, width,
precision)`` tuple (the latter is always zero unless format is one of 'E',
'F', or 'D').
"""
match = TFORMAT_ASCII_RE.match(tform.strip())
if not match:
raise VerifyError('Format {!r} is not recognized.'.format(tform))
# Be flexible on case
format = match.group('format')
if format is None:
# Floating point format
format = match.group('formatf').upper()
width = match.group('widthf')
precision = match.group('precision')
if width is None or precision is None:
if strict:
                raise VerifyError('Format {!r} is not unambiguously an ASCII '
                                  'table format.'.format(tform))
else:
width = 0 if width is None else width
precision = 1 if precision is None else precision
else:
format = format.upper()
width = match.group('width')
if width is None:
if strict:
                raise VerifyError('Format {!r} is not unambiguously an ASCII '
                                  'table format.'.format(tform))
else:
# Just use a default width of 0 if unspecified
width = 0
precision = 0
def convert_int(val):
msg = ('Format {!r} is not valid--field width and decimal precision '
'must be integers.')
try:
val = int(val)
except (ValueError, TypeError):
raise VerifyError(msg.format(tform))
return val
if width and precision:
# This should only be the case for floating-point formats
width, precision = convert_int(width), convert_int(precision)
elif width:
# Just for integer/string formats; ignore precision
width = convert_int(width)
else:
# For any format, if width was unspecified use the set defaults
width, precision = ASCII_DEFAULT_WIDTHS[format]
if width <= 0:
raise VerifyError("Format {!r} not valid--field width must be a "
"positive integeter.".format(tform))
if precision >= width:
raise VerifyError("Format {!r} not valid--the number of decimal digits "
"must be less than the format's total "
"width {}.".format(tform, width))
return format, width, precision
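# Illustrative sketch (not part of the original source): _parse_ascii_tformat
# returns a (format, width, precision) tuple, where the precision is only
# non-zero for the floating-point format codes::
#
#     >>> _parse_ascii_tformat('F8.3')
#     ('F', 8, 3)
#     >>> _parse_ascii_tformat('I10')
#     ('I', 10, 0)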
def _parse_tdim(tdim):
"""Parse the ``TDIM`` value into a tuple (may return an empty tuple if
the value ``TDIM`` value is empty or invalid).
"""
m = tdim and TDIM_RE.match(tdim)
if m:
dims = m.group('dims')
return tuple(int(d.strip()) for d in dims.split(','))[::-1]
# Ignore any dim values that don't specify a multidimensional column
return tuple()
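# Illustrative sketch (not part of the original source): note that _parse_tdim
# reverses the FITS (Fortran, column-major) axis order into the C/Numpy
# (row-major) order::
#
#     >>> _parse_tdim('(100,2)')
#     (2, 100)
#     >>> _parse_tdim('not a tdim')
#     ()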
def _scalar_to_format(value):
"""
Given a scalar value or string, returns the minimum FITS column format
that can represent that value. 'minimum' is defined by the order given in
FORMATORDER.
"""
# First, if value is a string, try to convert to the appropriate scalar
# value
for type_ in (int, float, complex):
try:
value = type_(value)
break
except ValueError:
continue
numpy_dtype_str = np.min_scalar_type(value).str
numpy_dtype_str = numpy_dtype_str[1:] # Strip endianness
try:
fits_format = NUMPY2FITS[numpy_dtype_str]
return FITSUPCONVERTERS.get(fits_format, fits_format)
except KeyError:
return "A" + str(len(value))
def _cmp_recformats(f1, f2):
"""
Compares two numpy recformats using the ordering given by FORMATORDER.
"""
if f1[0] == 'a' and f2[0] == 'a':
return cmp(int(f1[1:]), int(f2[1:]))
else:
f1, f2 = NUMPY2FITS[f1], NUMPY2FITS[f2]
return cmp(FORMATORDER.index(f1), FORMATORDER.index(f2))
def _convert_fits2record(format):
"""
Convert FITS format spec to record format spec.
"""
repeat, dtype, option = _parse_tformat(format)
if dtype in FITS2NUMPY:
if dtype == 'A':
output_format = FITS2NUMPY[dtype] + str(repeat)
# to accommodate both the ASCII table and binary table column
# format spec, i.e. A7 in ASCII table is the same as 7A in
# binary table, so both will produce 'a7'.
# Technically the FITS standard does not allow this but it's a very
# common mistake
if format.lstrip()[0] == 'A' and option != '':
# make sure option is integer
output_format = FITS2NUMPY[dtype] + str(int(option))
else:
repeat_str = ''
if repeat != 1:
repeat_str = str(repeat)
output_format = repeat_str + FITS2NUMPY[dtype]
elif dtype == 'X':
output_format = _FormatX(repeat)
elif dtype == 'P':
output_format = _FormatP.from_tform(format)
elif dtype == 'Q':
output_format = _FormatQ.from_tform(format)
elif dtype == 'F':
output_format = 'f8'
else:
raise ValueError('Illegal format `{}`.'.format(format))
return output_format
def _convert_record2fits(format):
"""
Convert record format spec to FITS format spec.
"""
recformat, kind, dtype = _dtype_to_recformat(format)
shape = dtype.shape
itemsize = dtype.base.itemsize
if dtype.char == 'U':
# Unicode dtype--itemsize is 4 times actual ASCII character length,
        # which is what matters for FITS column formats
# Use dtype.base--dtype may be a multi-dimensional dtype
itemsize = itemsize // 4
option = str(itemsize)
ndims = len(shape)
repeat = 1
if ndims > 0:
nel = np.array(shape, dtype='i8').prod()
if nel > 1:
repeat = nel
if kind == 'a':
# This is a kludge that will place string arrays into a
# single field, so at least we won't lose data. Need to
# use a TDIM keyword to fix this, declaring as (slength,
# dim1, dim2, ...) as mwrfits does
ntot = int(repeat) * int(option)
output_format = str(ntot) + 'A'
elif recformat in NUMPY2FITS: # record format
if repeat != 1:
repeat = str(repeat)
else:
repeat = ''
output_format = repeat + NUMPY2FITS[recformat]
else:
raise ValueError('Illegal format `{}`.'.format(format))
return output_format
def _dtype_to_recformat(dtype):
"""
Utility function for converting a dtype object or string that instantiates
a dtype (e.g. 'float32') into one of the two character Numpy format codes
that have been traditionally used by Astropy.
In particular, use of 'a' to refer to character data is long since
deprecated in Numpy, but Astropy remains heavily invested in its use
(something to try to get away from sooner rather than later).
"""
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
kind = dtype.base.kind
if kind in ('U', 'S'):
recformat = kind = 'a'
else:
itemsize = dtype.base.itemsize
recformat = kind + str(itemsize)
return recformat, kind, dtype
def _convert_format(format, reverse=False):
"""
Convert FITS format spec to record format spec. Do the opposite if
reverse=True.
"""
if reverse:
return _convert_record2fits(format)
else:
return _convert_fits2record(format)
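# Illustrative sketch (not part of the original source) of the two conversion
# directions dispatched by _convert_format::
#
#     >>> _convert_format('10A')               # FITS -> record format
#     'a10'
#     >>> _convert_format('f8', reverse=True)  # record -> FITS format
#     'D'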
def _convert_ascii_format(format, reverse=False):
"""Convert ASCII table format spec to record format spec."""
if reverse:
recformat, kind, dtype = _dtype_to_recformat(format)
itemsize = dtype.itemsize
if kind == 'a':
return 'A' + str(itemsize)
elif NUMPY2FITS.get(recformat) == 'L':
# Special case for logical/boolean types--for ASCII tables we
# represent these as single character columns containing 'T' or 'F'
# (a la the storage format for Logical columns in binary tables)
return 'A1'
elif kind == 'i':
# Use for the width the maximum required to represent integers
# of that byte size plus 1 for signs, but use a minimum of the
# default width (to keep with existing behavior)
width = 1 + len(str(2 ** (itemsize * 8)))
width = max(width, ASCII_DEFAULT_WIDTHS['I'][0])
return 'I' + str(width)
elif kind == 'f':
# This is tricky, but go ahead and use D if float-64, and E
# if float-32 with their default widths
if itemsize >= 8:
format = 'D'
else:
format = 'E'
width = '.'.join(str(w) for w in ASCII_DEFAULT_WIDTHS[format])
return format + width
# TODO: There may be reasonable ways to represent other Numpy types so
# let's see what other possibilities there are besides just 'a', 'i',
# and 'f'. If it doesn't have a reasonable ASCII representation then
# raise an exception
else:
format, width, precision = _parse_ascii_tformat(format)
# This gives a sensible "default" dtype for a given ASCII
# format code
recformat = ASCII2NUMPY[format]
# The following logic is taken from CFITSIO:
# For integers, if the width <= 4 we can safely use 16-bit ints for all
# values [for the non-standard J format code just always force 64-bit]
if format == 'I' and width <= 4:
recformat = 'i2'
elif format == 'A':
recformat += str(width)
return recformat
def _parse_tdisp_format(tdisp):
"""
    Parse the ``TDISPn`` keywords for ASCII and binary tables into a
    ``(format, width, precision, exponential)`` tuple (the TDISP values
    for ASCII and binary are identical except for 'Lw', which is only
    present in BINTABLE extensions).
Parameters
----------
    tdisp : str
TDISPn FITS Header keyword. Used to specify display formatting.
Returns
-------
    formatc : str
        The format characters from TDISPn
    width : str
        The width int value from TDISPn
    precision : str
        The precision int value from TDISPn
    exponential : str
        The exponential int value from TDISPn
"""
# Use appropriate regex for format type
tdisp = tdisp.strip()
    fmt_key = tdisp[0] if tdisp[0] != 'E' or tdisp[1] not in 'NS' else tdisp[:2]
try:
tdisp_re = TDISP_RE_DICT[fmt_key]
except KeyError:
raise VerifyError('Format {} is not recognized.'.format(tdisp))
match = tdisp_re.match(tdisp.strip())
if not match or match.group('formatc') is None:
raise VerifyError('Format {} is not recognized.'.format(tdisp))
formatc = match.group('formatc')
width = match.group('width')
precision = None
exponential = None
# Some formats have precision and exponential
if tdisp[0] in ('I', 'B', 'O', 'Z', 'F', 'E', 'G', 'D'):
precision = match.group('precision')
if precision is None:
precision = 1
if tdisp[0] in ('E', 'D', 'G') and tdisp[1] not in ('N', 'S'):
exponential = match.group('exponential')
if exponential is None:
exponential = 1
# Once parsed, check format dict to do conversion to a formatting string
return formatc, width, precision, exponential
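# Illustrative sketch (not part of the original source): the parsed pieces are
# returned as strings (or None/defaults), e.g. for a fixed-point display
# format::
#
#     >>> _parse_tdisp_format('F6.2')
#     ('F', '6', '2', None)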
def _fortran_to_python_format(tdisp):
"""
    Turn the TDISPn fortran format pieces into a final Python format string.
    See the format_type definitions above the TDISP_FMT_DICT. If the codes
    are changed to take advantage of the exponential specification, it will
    need to be added as another input parameter.
    Parameters
    ----------
    tdisp : str
        TDISPn FITS Header keyword. Used to specify display formatting.
    Returns
    -------
    format_string : str
        The TDISPn keyword string translated into a Python format string.
"""
format_type, width, precision, exponential = _parse_tdisp_format(tdisp)
try:
fmt = TDISP_FMT_DICT[format_type]
return fmt.format(width=width, precision=precision)
except KeyError:
raise VerifyError('Format {} is not recognized.'.format(format_type))
def python_to_tdisp(format_string, logical_dtype=False):
"""
    Turn the Python format string to a TDISP FITS compliant format string. Not
    all formats convert; these will cause a warning and return `None`.
    Parameters
    ----------
    format_string : str
        A Python format string to be translated into a TDISPn keyword.
    logical_dtype : bool
        True if this format type should be a logical type, 'L'. Needs special
        handling.
    Returns
    -------
    tdisp_string : str
        The Python format string translated into a TDISPn keyword string.
"""
fmt_to_tdisp = {'a': 'A', 's': 'A', 'd': 'I', 'b': 'B', 'o': 'O', 'x': 'Z',
'X': 'Z', 'f': 'F', 'F': 'F', 'g': 'G', 'G': 'G', 'e': 'E',
'E': 'E'}
if format_string in [None, "", "{}"]:
return None
# Strip out extra format characters that aren't a type or a width/precision
if format_string[0] == '{' and format_string != "{}":
fmt_str = format_string.lstrip("{:").rstrip('}')
elif format_string[0] == '%':
fmt_str = format_string.lstrip("%")
else:
fmt_str = format_string
precision, sep = '', ''
# Character format, only translate right aligned, and don't take zero fills
if fmt_str[-1].isdigit() and fmt_str[0] == '>' and fmt_str[1] != '0':
ftype = fmt_to_tdisp['a']
width = fmt_str[1:]
elif fmt_str[-1] == 's' and fmt_str != 's':
ftype = fmt_to_tdisp['a']
width = fmt_str[:-1].lstrip('0')
# Number formats, don't take zero fills
elif fmt_str[-1].isalpha() and len(fmt_str) > 1 and fmt_str[0] != '0':
ftype = fmt_to_tdisp[fmt_str[-1]]
fmt_str = fmt_str[:-1]
# If format has a "." split out the width and precision
if '.' in fmt_str:
width, precision = fmt_str.split('.')
sep = '.'
if width == "":
ascii_key = ftype if ftype != 'G' else 'F'
width = str(int(precision) + (ASCII_DEFAULT_WIDTHS[ascii_key][0] -
ASCII_DEFAULT_WIDTHS[ascii_key][1]))
# Otherwise we just have a width
else:
width = fmt_str
else:
warnings.warn('Format {} cannot be mapped to the accepted '
'TDISPn keyword values. Format will not be '
'moved into TDISPn keyword.'.format(format_string),
AstropyUserWarning)
return None
# Catch logical data type, set the format type back to L in this case
if logical_dtype:
ftype = 'L'
return ftype + width + sep + precision
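# Illustrative sketch (not part of the original source): both new-style and
# printf-style Python format strings are handled by python_to_tdisp::
#
#     >>> python_to_tdisp('{:8.3f}')
#     'F8.3'
#     >>> python_to_tdisp('%12.5e')
#     'E12.5'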
|
65f5a5b4337e78884dcddef85a8c7b05dced2df6f526be7896f8394aa9d14a5b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import warnings
from collections import defaultdict, OrderedDict
import numpy as np
from . import Header, Card
from ... import units as u
from ...coordinates import EarthLocation
from ...table import Column
from ...time import Time, TimeDelta
from ...time.core import BARYCENTRIC_SCALES
from ...time.formats import FITS_DEPRECATED_SCALES
from ...utils.exceptions import AstropyUserWarning
# The following is based on the FITS WCS Paper IV, "Representations of time
# coordinates in FITS".
# http://adsabs.harvard.edu/abs/2015A%26A...574A..36R
# FITS WCS standard specified "4-3" form for non-linear coordinate types
TCTYP_RE_TYPE = re.compile(r'(?P<type>[A-Z]+)[-]+')
TCTYP_RE_ALGO = re.compile(r'(?P<algo>[A-Z]+)\s*')
# FITS Time standard specified time units
FITS_TIME_UNIT = ['s', 'd', 'a', 'cy', 'min', 'h', 'yr', 'ta', 'Ba']
# Global time reference coordinate keywords
TIME_KEYWORDS = ('TIMESYS', 'MJDREF', 'JDREF', 'DATEREF',
'TREFPOS', 'TREFDIR', 'TIMEUNIT', 'TIMEOFFS',
'OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z',
'OBSGEO-L', 'OBSGEO-B', 'OBSGEO-H', 'DATE',
'DATE-OBS', 'DATE-AVG', 'DATE-BEG', 'DATE-END',
'MJD-OBS', 'MJD-AVG', 'MJD-BEG', 'MJD-END')
# Column-specific time override keywords
COLUMN_TIME_KEYWORDS = ('TCTYP', 'TCUNI', 'TRPOS')
# Column-specific keywords regex
COLUMN_TIME_KEYWORD_REGEXP = '({0})[0-9]+'.format(
'|'.join(COLUMN_TIME_KEYWORDS))
def is_time_column_keyword(keyword):
"""
Check if the FITS header keyword is a time column-specific keyword.
Parameters
----------
keyword : str
FITS keyword.
"""
return re.match(COLUMN_TIME_KEYWORD_REGEXP, keyword) is not None
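# Illustrative sketch (not part of the original source): only the per-column
# TCTYPn/TCUNIn/TRPOSn keywords match; global time keywords do not::
#
#     >>> is_time_column_keyword('TCTYP2')
#     True
#     >>> is_time_column_keyword('TIMESYS')
#     False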
# Set astropy time global information
GLOBAL_TIME_INFO = {'TIMESYS': ('UTC', 'Default time scale'),
'JDREF': (0.0, 'Time columns are jd = jd1 + jd2'),
'TREFPOS': ('TOPOCENTER', 'Time reference position')}
def _verify_global_info(global_info):
"""
Given the global time reference frame information, verify that
each global time coordinate attribute will be given a valid value.
Parameters
----------
global_info : dict
Global time reference frame information.
"""
# Translate FITS deprecated scale into astropy scale, or else just convert
# to lower case for further checks.
global_info['scale'] = FITS_DEPRECATED_SCALES.get(global_info['TIMESYS'],
global_info['TIMESYS'].lower())
# Verify global time scale
if global_info['scale'] not in Time.SCALES:
# 'GPS' and 'LOCAL' are FITS recognized time scale values
# but are not supported by astropy.
if global_info['scale'] == 'gps':
warnings.warn(
'Global time scale (TIMESYS) has a FITS recognized time scale '
'value "GPS". In Astropy, "GPS" is a time from epoch format '
'which runs synchronously with TAI; GPS is approximately 19 s '
'ahead of TAI. Hence, this format will be used.', AstropyUserWarning)
# Assume that the values are in GPS format
global_info['scale'] = 'tai'
global_info['format'] = 'gps'
if global_info['scale'] == 'local':
warnings.warn(
'Global time scale (TIMESYS) has a FITS recognized time scale '
'value "LOCAL". However, the standard states that "LOCAL" should be '
'tied to one of the existing scales because it is intrinsically '
'unreliable and/or ill-defined. Astropy will thus use the default '
'global time scale "UTC" instead of "LOCAL".', AstropyUserWarning)
# Default scale 'UTC'
global_info['scale'] = 'utc'
global_info['format'] = None
else:
raise AssertionError(
'Global time scale (TIMESYS) should have a FITS recognized '
'time scale value (got {!r}). The FITS standard states that '
'the use of local time scales should be restricted to alternate '
'coordinates.'.format(global_info['TIMESYS']))
else:
# Scale is already set
global_info['format'] = None
# Check if geocentric global location is specified
obs_geo = [global_info[attr] for attr in ('OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z')
if attr in global_info]
# Location full specification is (X, Y, Z)
if len(obs_geo) == 3:
global_info['location'] = EarthLocation.from_geocentric(*obs_geo, unit=u.m)
else:
# Check if geodetic global location is specified (since geocentric failed)
# First warn the user if geocentric location is partially specified
if obs_geo:
warnings.warn(
'The geocentric observatory location {} is not completely '
'specified (X, Y, Z) and will be ignored.'.format(obs_geo),
AstropyUserWarning)
# Check geodetic location
obs_geo = [global_info[attr] for attr in ('OBSGEO-L', 'OBSGEO-B', 'OBSGEO-H')
if attr in global_info]
if len(obs_geo) == 3:
global_info['location'] = EarthLocation.from_geodetic(*obs_geo)
else:
# Since both geocentric and geodetic locations are not specified,
# location will be None.
# Warn the user if geodetic location is partially specified
if obs_geo:
warnings.warn(
'The geodetic observatory location {} is not completely '
'specified (lon, lat, alt) and will be ignored.'.format(obs_geo),
AstropyUserWarning)
global_info['location'] = None
# Get global time reference
# Keywords are listed in order of precedence, as stated by the standard
for key, format_ in (('MJDREF', 'mjd'), ('JDREF', 'jd'), ('DATEREF', 'fits')):
if key in global_info:
global_info['ref_time'] = {'val': global_info[key], 'format': format_}
break
else:
# If none of the three keywords is present, MJDREF = 0.0 must be assumed
global_info['ref_time'] = {'val': 0, 'format': 'mjd'}
def _verify_column_info(column_info, global_info):
"""
Given the column-specific time reference frame information, verify that
each column-specific time coordinate attribute has a valid value.
Return True if the coordinate column is time, or else return False.
Parameters
----------
global_info : dict
Global time reference frame information.
column_info : dict
Column-specific time reference frame override information.
"""
scale = column_info.get('TCTYP', None)
unit = column_info.get('TCUNI', None)
location = column_info.get('TRPOS', None)
if scale is not None:
# Non-linear coordinate types have "4-3" form and are not time coordinates
if TCTYP_RE_TYPE.match(scale[:5]) and TCTYP_RE_ALGO.match(scale[5:]):
return False
elif scale.lower() in Time.SCALES:
column_info['scale'] = scale.lower()
column_info['format'] = None
elif scale in FITS_DEPRECATED_SCALES.keys():
column_info['scale'] = FITS_DEPRECATED_SCALES[scale]
column_info['format'] = None
# TCTYPn (scale) = 'TIME' indicates that the column scale is
# controlled by the global scale.
elif scale == 'TIME':
column_info['scale'] = global_info['scale']
column_info['format'] = global_info['format']
elif scale == 'GPS':
warnings.warn(
'Table column "{}" has a FITS recognized time scale value "GPS". '
'In Astropy, "GPS" is a time from epoch format which runs '
'synchronously with TAI; GPS runs ahead of TAI approximately '
'by 19 s. Hence, this format will be used.'.format(column_info),
AstropyUserWarning)
column_info['scale'] = 'tai'
column_info['format'] = 'gps'
elif scale == 'LOCAL':
warnings.warn(
'Table column "{}" has a FITS recognized time scale value "LOCAL". '
'However, the standard states that "LOCAL" should be tied to one '
'of the existing scales because it is intrinsically unreliable '
'and/or ill-defined. Astropy will thus use the global time scale '
                '(TIMESYS) as the default.'.format(column_info),
AstropyUserWarning)
column_info['scale'] = global_info['scale']
column_info['format'] = global_info['format']
else:
# Coordinate type is either an unrecognized local time scale
# or a linear coordinate type
return False
# If TCUNIn is a time unit or TRPOSn is specified, the column is a time
# coordinate. This has to be tested since TCTYP (scale) is not specified.
elif (unit is not None and unit in FITS_TIME_UNIT) or location is not None:
column_info['scale'] = global_info['scale']
column_info['format'] = global_info['format']
# None of the conditions for time coordinate columns is satisfied
else:
return False
# Check if column-specific reference position TRPOSn is specified
if location is not None:
# Observatory position (location) needs to be specified only
# for 'TOPOCENTER'.
if location == 'TOPOCENTER':
column_info['location'] = global_info['location']
if column_info['location'] is None:
warnings.warn(
'Time column reference position "TRPOSn" value is "TOPOCENTER". '
'However, the observatory position is not properly specified. '
'The FITS standard does not support this and hence reference '
'position will be ignored.', AstropyUserWarning)
else:
column_info['location'] = None
# Since TRPOSn is not specified, global reference position is
# considered.
elif global_info['TREFPOS'] == 'TOPOCENTER':
column_info['location'] = global_info['location']
if column_info['location'] is None:
warnings.warn(
'Time column reference position "TRPOSn" is not specified. The '
'default value for it is "TOPOCENTER", but due to unspecified '
'observatory position, reference position will be ignored.',
AstropyUserWarning)
else:
column_info['location'] = None
# Get reference time
column_info['ref_time'] = global_info['ref_time']
return True
def _get_info_if_time_column(col, global_info):
"""
Check if a column without corresponding time column keywords in the
FITS header represents time or not. If yes, return the time column
information needed for its conversion to Time.
This is only applicable to the special-case where a column has the
name 'TIME' and a time unit.
"""
# Column with TTYPEn = 'TIME' and lacking any TC*n or time
# specific keywords will be controlled by the global keywords.
if col.info.name.upper() == 'TIME' and col.info.unit in FITS_TIME_UNIT:
column_info = {'scale': global_info['scale'],
'format': global_info['format'],
'ref_time': global_info['ref_time'],
'location': None}
if global_info['TREFPOS'] == 'TOPOCENTER':
column_info['location'] = global_info['location']
if column_info['location'] is None:
warnings.warn(
'Time column "{}" reference position will be ignored '
'due to unspecified observatory position.'.format(col.info.name),
AstropyUserWarning)
return column_info
return None
def _convert_global_time(table, global_info):
"""
Convert the table metadata for time informational keywords
to astropy Time.
Parameters
----------
table : `~astropy.table.Table`
The table whose time metadata is to be converted.
global_info : dict
Global time reference frame information.
"""
# Read in Global Informational keywords as Time
for key, value in global_info.items():
# FITS uses a subset of ISO-8601 for DATE-xxx
if key.startswith('DATE'):
if key not in table.meta:
scale = 'utc' if key == 'DATE' else global_info['scale']
try:
precision = len(value.split('.')[-1]) if '.' in value else 0
value = Time(value, format='fits', scale=scale,
precision=precision)
except ValueError:
pass
table.meta[key] = value
# MJD-xxx in MJD according to TIMESYS
elif key.startswith('MJD-'):
if key not in table.meta:
try:
value = Time(value, format='mjd',
scale=global_info['scale'])
except ValueError:
pass
table.meta[key] = value
def _convert_time_column(col, column_info):
"""
Convert time columns to astropy Time columns.
Parameters
----------
col : `~astropy.table.Column`
The time coordinate column to be converted to Time.
column_info : dict
Column-specific time reference frame override information.
"""
# The code might fail while attempting to read FITS files not written by astropy.
try:
# ISO-8601 is the only string representation of time in FITS
if col.info.dtype.kind in ['S', 'U']:
# [+/-C]CCYY-MM-DD[Thh:mm:ss[.s...]] where the number of characters
# from index 20 to the end of string represents the precision
precision = max(int(col.info.dtype.str[2:]) - 20, 0)
return Time(col, format='fits', scale=column_info['scale'],
precision=precision,
location=column_info['location'])
if column_info['format'] == 'gps':
return Time(col, format='gps', location=column_info['location'])
# If reference value is 0 for JD or MJD, the column values can be
# directly converted to Time, as they are absolute (relative
# to a globally accepted zero point).
if (column_info['ref_time']['val'] == 0 and
column_info['ref_time']['format'] in ['jd', 'mjd']):
# (jd1, jd2) where jd = jd1 + jd2
if col.shape[-1] == 2 and col.ndim > 1:
return Time(col[..., 0], col[..., 1], scale=column_info['scale'],
format=column_info['ref_time']['format'],
location=column_info['location'])
else:
return Time(col, scale=column_info['scale'],
format=column_info['ref_time']['format'],
location=column_info['location'])
# Reference time
ref_time = Time(column_info['ref_time']['val'], scale=column_info['scale'],
format=column_info['ref_time']['format'],
location=column_info['location'])
# Elapsed time since reference time
if col.shape[-1] == 2 and col.ndim > 1:
delta_time = TimeDelta(col[..., 0], col[..., 1])
else:
delta_time = TimeDelta(col)
return ref_time + delta_time
except Exception as err:
warnings.warn(
'The exception "{}" was encountered while trying to convert the time '
'column "{}" to Astropy Time.'.format(err, col.info.name),
AstropyUserWarning)
return col
def fits_to_time(hdr, table):
"""
Read FITS binary table time columns as `~astropy.time.Time`.
This method reads the metadata associated with time coordinates, as
stored in a FITS binary table header, converts time columns into
`~astropy.time.Time` columns and reads global reference times as
`~astropy.time.Time` instances.
Parameters
----------
hdr : `~astropy.io.fits.header.Header`
FITS Header
table : `~astropy.table.Table`
The table whose time columns are to be read as Time
Returns
-------
hdr : `~astropy.io.fits.header.Header`
Modified FITS Header (time metadata removed)
"""
# Set defaults for global time scale, reference, etc.
global_info = {'TIMESYS': 'UTC',
'TREFPOS': 'TOPOCENTER'}
# Set default dictionary for time columns
time_columns = defaultdict(OrderedDict)
# Make a "copy" (not just a view) of the input header, since it
    # may get modified. The data is still a "view" (for now)
hcopy = hdr.copy(strip=True)
# Scan the header for global and column-specific time keywords
for key, value, comment in hdr.cards:
if key in TIME_KEYWORDS:
global_info[key] = value
hcopy.remove(key)
elif is_time_column_keyword(key):
base, idx = re.match(r'([A-Z]+)([0-9]+)', key).groups()
time_columns[int(idx)][base] = value
hcopy.remove(key)
elif (value in ('OBSGEO-X', 'OBSGEO-Y', 'OBSGEO-Z') and
re.match('TTYPE[0-9]+', key)):
global_info[value] = table[value]
# Verify and get the global time reference frame information
_verify_global_info(global_info)
_convert_global_time(table, global_info)
# Columns with column-specific time (coordinate) keywords
if time_columns:
for idx, column_info in time_columns.items():
# Check if the column is time coordinate (not spatial)
if _verify_column_info(column_info, global_info):
colname = table.colnames[idx - 1]
# Convert to Time
table[colname] = _convert_time_column(table[colname],
column_info)
# Check for special-cases of time coordinate columns
for idx, colname in enumerate(table.colnames):
if (idx + 1) not in time_columns:
column_info = _get_info_if_time_column(table[colname], global_info)
if column_info:
table[colname] = _convert_time_column(table[colname], column_info)
return hcopy
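# Illustrative usage sketch (not part of the original source, and assuming an
# already-opened HDU list ``hdul`` whose extension 1 is a binary table that
# was converted to an astropy Table ``tab``)::
#
#     >>> hdr = fits_to_time(hdul[1].header, tab)   # doctest: +SKIP
#
# After the call, any time coordinate columns in ``tab`` are Time objects and
# ``hdr`` is the header stripped of the time metadata keywords.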
def time_to_fits(table):
"""
Replace Time columns in a Table with non-mixin columns containing
each element as a vector of two doubles (jd1, jd2) and return a FITS
header with appropriate time coordinate keywords.
jd = jd1 + jd2 represents time in the Julian Date format with
high-precision.
Parameters
----------
table : `~astropy.table.Table`
The table whose Time columns are to be replaced.
Returns
-------
table : `~astropy.table.Table`
The table with replaced Time columns
hdr : `~astropy.io.fits.header.Header`
Header containing global time reference frame FITS keywords
"""
# Shallow copy of the input table
newtable = table.copy(copy_data=False)
# Global time coordinate frame keywords
hdr = Header([Card(keyword=key, value=val[0], comment=val[1])
for key, val in GLOBAL_TIME_INFO.items()])
# Store coordinate column-specific metadata
newtable.meta['__coordinate_columns__'] = defaultdict(OrderedDict)
coord_meta = newtable.meta['__coordinate_columns__']
time_cols = table.columns.isinstance(Time)
# Geocentric location
location = None
for col in time_cols:
# By default, Time objects are written in full precision, i.e. we store both
# jd1 and jd2 (serialize_method['fits'] = 'jd1_jd2'). Formatted values for
# Time can be stored if the user explicitly chooses to do so.
if col.info.serialize_method['fits'] == 'formatted_value':
newtable.replace_column(col.info.name, Column(col.value))
continue
# The following is necessary to deal with multi-dimensional ``Time`` objects
# (i.e. where Time.shape is non-trivial).
jd12 = np.array([col.jd1, col.jd2])
# Roll the 0th (innermost) axis backwards, until it lies in the last position
# (jd12.ndim)
jd12 = np.rollaxis(jd12, 0, jd12.ndim)
newtable.replace_column(col.info.name, Column(jd12, unit='d'))
# Get column position(index)
n = table.colnames.index(col.info.name) + 1
# Time column-specific override keywords
coord_meta[col.info.name]['coord_type'] = col.scale.upper()
coord_meta[col.info.name]['coord_unit'] = 'd'
# Time column reference position
if getattr(col, 'location') is None:
if location is not None:
warnings.warn(
'Time Column "{}" has no specified location, but global Time '
'Position is present, which will be the default for this column '
'in FITS specification.'.format(col.info.name),
AstropyUserWarning)
else:
coord_meta[col.info.name]['time_ref_pos'] = 'TOPOCENTER'
# Compatibility of Time Scales and Reference Positions
if col.scale in BARYCENTRIC_SCALES:
warnings.warn(
'Earth Location "TOPOCENTER" for Time Column "{}" is incompatabile '
'with scale "{}".'.format(col.info.name, col.scale.upper()),
AstropyUserWarning)
if location is None:
# Set global geocentric location
location = col.location
if location.size > 1:
for dim in ('x', 'y', 'z'):
newtable.add_column(Column(getattr(location, dim).to_value(u.m)),
name='OBSGEO-{}'.format(dim.upper()))
else:
hdr.extend([Card(keyword='OBSGEO-{}'.format(dim.upper()),
value=getattr(location, dim).to_value(u.m))
for dim in ('x', 'y', 'z')])
elif location != col.location:
raise ValueError('Multiple Time Columns with different geocentric '
                             'observatory locations ({}, {}) encountered. '
'This is not supported by the FITS standard.'
.format(location, col.location))
return newtable, hdr
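# Illustrative usage sketch (not part of the original source): given a Table
# with a Time column, time_to_fits returns a plain-column copy plus the global
# time keywords to be merged into the output header::
#
#     >>> from astropy.table import Table
#     >>> from astropy.time import Time
#     >>> t = Table({'a': [1, 2]})
#     >>> t['obs'] = Time(['1999-01-01T00:00:00', '2010-01-01T00:00:00'])
#     >>> newtable, hdr = time_to_fits(t)   # doctest: +SKIP
#     >>> newtable['obs'].shape             # (jd1, jd2) pairs  # doctest: +SKIP
#     (2, 2)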
|
6527992eefeebef8303a0b11a83ee4678bf0b2ffc8b7fc58479026b3a831b393 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
"""
Convenience functions
=====================
The functions in this module provide shortcuts for some of the most basic
operations on FITS files, such as reading and updating the header. They are
included directly in the 'astropy.io.fits' namespace so that they can be used
like::
astropy.io.fits.getheader(...)
These functions are primarily for convenience when working with FITS files in
the command-line interpreter. If performing several operations on the same
file, such as in a script, it is better to *not* use these functions, as each
one must open and re-parse the file. In such cases it is better to use
:func:`astropy.io.fits.open` and work directly with the
:class:`astropy.io.fits.HDUList` object and underlying HDU objects.
Several of the convenience functions, such as `getheader` and `getdata` support
special arguments for selecting which extension HDU to use when working with a
multi-extension FITS file. There are a few supported argument formats for
selecting the extension. See the documentation for `getdata` for an
explanation of all the different formats.
.. warning::
All arguments to convenience functions other than the filename that are
*not* for selecting the extension HDU should be passed in as keyword
arguments. This is to avoid ambiguity and conflicts with the
extension arguments. For example, to set NAXIS=1 on the Primary HDU:
Wrong::
astropy.io.fits.setval('myimage.fits', 'NAXIS', 1)
The above example will try to set the NAXIS value on the first extension
HDU to blank. That is, the argument '1' is assumed to specify an extension
HDU.
Right::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1)
This will set the NAXIS keyword to 1 on the primary HDU (the default). To
specify the first extension HDU use::
astropy.io.fits.setval('myimage.fits', 'NAXIS', value=1, ext=1)
This complexity arises out of the attempt to simultaneously support
multiple argument formats that were used in past versions of PyFITS.
Unfortunately, it is not possible to support all formats without
introducing some ambiguity. A future Astropy release may standardize
around a single format and officially deprecate the other formats.
"""
import operator
import os
import warnings
import numpy as np
from .diff import FITSDiff, HDUDiff
from .file import FILE_MODES, _File
from .hdu.base import _BaseHDU, _ValidHDU
from .hdu.hdulist import fitsopen, HDUList
from .hdu.image import PrimaryHDU, ImageHDU
from .hdu.table import BinTableHDU
from .header import Header
from .util import fileobj_closed, fileobj_name, fileobj_mode, _is_int
from ...units import Unit
from ...units.format.fits import UnitScaleError
from ...units import Quantity
from ...utils.exceptions import AstropyUserWarning
from ...utils.decorators import deprecated_renamed_argument
__all__ = ['getheader', 'getdata', 'getval', 'setval', 'delval', 'writeto',
'append', 'update', 'info', 'tabledump', 'tableload',
'table_to_hdu', 'printdiff']
def getheader(filename, *args, **kwargs):
"""
Get the header from an extension of a FITS file.
Parameters
----------
filename : file path, file object, or file like object
        File to get header from. If an opened file object, its mode
        must be one of the following: ``rb``, ``rb+``, or ``ab+``.
ext, extname, extver
The rest of the arguments are for extension specification. See the
`getdata` documentation for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
header : `Header` object
"""
mode, closed = _get_file_mode(filename)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
header = hdu.header
finally:
hdulist.close(closed=closed)
return header
def getdata(filename, *args, header=None, lower=None, upper=None, view=None,
**kwargs):
"""
Get the data from an extension of a FITS file (and optionally the
header).
Parameters
----------
filename : file path, file object, or file like object
        File to get data from. If opened, mode must be one of the
        following: ``rb``, ``rb+``, or ``ab+``.
ext
The rest of the arguments are for extension specification.
They are flexible and are best illustrated by examples.
No extra arguments implies the primary header::
getdata('in.fits')
By extension number::
getdata('in.fits', 0) # the primary header
getdata('in.fits', 2) # the second extension
getdata('in.fits', ext=2) # the second extension
By name, i.e., ``EXTNAME`` value (if unique)::
getdata('in.fits', 'sci')
getdata('in.fits', extname='sci') # equivalent
        Note that ``EXTNAME`` values are not case sensitive.
        By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2
getdata('in.fits', extname='sci', extver=2) # equivalent
getdata('in.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
getdata('in.fits', ext=('sci',1), extname='err', extver=2)
header : bool, optional
If `True`, return the data and the header of the specified HDU as a
tuple.
lower, upper : bool, optional
If ``lower`` or ``upper`` are `True`, the field names in the
returned data object will be converted to lower or upper case,
respectively.
view : ndarray, optional
When given, the data will be returned wrapped in the given ndarray
subclass by calling::
data.view(view)
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
Returns
-------
array : array, record array or groups data object
Type depends on the type of the extension being referenced.
If the optional keyword ``header`` is set to `True`, this
function will return a (``data``, ``header``) tuple.
"""
mode, closed = _get_file_mode(filename)
hdulist, extidx = _getext(filename, mode, *args, **kwargs)
try:
hdu = hdulist[extidx]
data = hdu.data
if data is None and extidx == 0:
try:
hdu = hdulist[1]
data = hdu.data
except IndexError:
raise IndexError('No data in this HDU.')
if data is None:
raise IndexError('No data in this HDU.')
if header:
hdr = hdu.header
finally:
hdulist.close(closed=closed)
# Change case of names if requested
trans = None
if lower:
trans = operator.methodcaller('lower')
elif upper:
trans = operator.methodcaller('upper')
if trans:
if data.dtype.names is None:
# this data does not have fields
return
if data.dtype.descr[0][0] == '':
# this data does not have fields
return
data.dtype.names = [trans(n) for n in data.dtype.names]
# allow different views into the underlying ndarray. Keep the original
# view just in case there is a problem
if isinstance(view, type) and issubclass(view, np.ndarray):
data = data.view(view)
if header:
return data, hdr
else:
return data
def getval(filename, keyword, *args, **kwargs):
"""
Get a keyword's value from a header in a FITS file.
Parameters
----------
filename : file path, file object, or file like object
        Name of the FITS file, or file object (if opened, mode must be
        one of the following: ``rb``, ``rb+``, or ``ab+``).
keyword : str
Keyword name
ext, extname, extver
The rest of the arguments are for extension specification.
See `getdata` for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
Returns
-------
keyword value : str, int, or float
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
hdr = getheader(filename, *args, **kwargs)
return hdr[keyword]
def setval(filename, keyword, *args, value=None, comment=None, before=None,
after=None, savecomment=False, **kwargs):
"""
Set a keyword's value from a header in a FITS file.
    If the keyword already exists, its value/comment will be updated.
If it does not exist, a new card will be created and it will be
placed before or after the specified location. If no ``before`` or
``after`` is specified, it will be appended at the end.
When updating more than one keyword in a file, this convenience
function is a much less efficient approach compared with opening
the file for update, modifying the header, and closing the file.
Parameters
----------
filename : file path, file object, or file like object
        Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str
Keyword name
value : str, int, float, optional
Keyword value (default: `None`, meaning don't modify)
comment : str, optional
Keyword comment, (default: `None`, meaning don't modify)
before : str, int, optional
Name of the keyword, or index of the card before which the new card
will be placed. The argument ``before`` takes precedence over
``after`` if both are specified (default: `None`).
after : str, int, optional
Name of the keyword, or index of the card after which the new card will
be placed. (default: `None`).
savecomment : bool, optional
When `True`, preserve the current comment for an existing keyword. The
argument ``savecomment`` takes precedence over ``comment`` if both
specified. If ``comment`` is not specified then the current comment
will automatically be preserved (default: `False`).
ext, extname, extver
The rest of the arguments are for extension specification.
See `getdata` for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, 'update', *args, **kwargs)
try:
if keyword in hdulist[extidx].header and savecomment:
comment = None
hdulist[extidx].header.set(keyword, value, comment, before, after)
finally:
hdulist.close(closed=closed)
def delval(filename, keyword, *args, **kwargs):
"""
Delete all instances of keyword from a header in a FITS file.
Parameters
----------
filename : file path, file object, or file like object
        Name of the FITS file, or file object. If opened, mode must be update
(rb+). An opened file object or `~gzip.GzipFile` object will be closed
upon return.
keyword : str, int
Keyword name or index
ext, extname, extver
The rest of the arguments are for extension specification.
See `getdata` for explanations/examples.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function automatically specifies ``do_not_scale_image_data
= True`` when opening the file so that values can be retrieved from the
unmodified header.
"""
if 'do_not_scale_image_data' not in kwargs:
kwargs['do_not_scale_image_data'] = True
closed = fileobj_closed(filename)
hdulist, extidx = _getext(filename, 'update', *args, **kwargs)
try:
del hdulist[extidx].header[keyword]
finally:
hdulist.close(closed=closed)
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(filename, data, header=None, output_verify='exception',
overwrite=False, checksum=False):
"""
Create a new FITS file using the supplied data/header.
Parameters
----------
filename : file path, file object, or file like object
File to write to. If opened, must be opened in a writeable binary
mode such as 'wb' or 'ab+'.
data : array, record array, or groups data object
data to write to the new file
header : `Header` object, optional
the header associated with ``data``. If `None`, a header
of the appropriate type is created for the supplied data. This
argument is optional.
output_verify : str
Output verification option. Must be one of ``"fix"``, ``"silentfix"``,
``"ignore"``, ``"warn"``, or ``"exception"``. May also be any
combination of ``"fix"`` or ``"silentfix"`` with ``"+ignore"``,
        ``"+warn"``, or ``"+exception"`` (e.g. ``"fix+warn"``). See :ref:`verify`
for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
checksum : bool, optional
If `True`, adds both ``DATASUM`` and ``CHECKSUM`` cards to the
headers of all HDU's written to the file.
"""
hdu = _makehdu(data, header)
if hdu.is_image and not isinstance(hdu, PrimaryHDU):
hdu = PrimaryHDU(data, header=header)
hdu.writeto(filename, overwrite=overwrite, output_verify=output_verify,
checksum=checksum)
def table_to_hdu(table, character_as_bytes=False):
"""
Convert an `~astropy.table.Table` object to a FITS
`~astropy.io.fits.BinTableHDU`.
Parameters
----------
table : astropy.table.Table
The table to convert.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the HDU.
By default this is `False` and (unicode) strings are returned, but for
large tables this may use up a lot of memory.
Returns
-------
table_hdu : `~astropy.io.fits.BinTableHDU`
The FITS binary table HDU.
"""
# Avoid circular imports
from .connect import is_column_keyword, REMOVE_KEYWORDS
from .column import python_to_tdisp
# Header to store Time related metadata
hdr = None
# Not all tables with mixin columns are supported
if table.has_mixin_columns:
# Import is done here, in order to avoid it at build time as erfa is not
# yet available then.
from ...table.column import BaseColumn, Column
from ...time import Time
from .fitstime import time_to_fits
# Only those columns which are instances of BaseColumn, Quantity or Time can
# be written
unsupported_cols = table.columns.not_isinstance((BaseColumn, Quantity, Time))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError('cannot write table with mixin column(s) {0}'
.format(unsupported_names))
time_cols = table.columns.isinstance(Time)
if time_cols:
table, hdr = time_to_fits(table)
# Create a new HDU object
if table.masked:
        # float column's default mask value needs to be NaN
for column in table.columns.values():
fill_value = column.get_fill_value()
if column.dtype.kind == 'f' and np.allclose(fill_value, 1e20):
column.set_fill_value(np.nan)
# TODO: it might be better to construct the FITS table directly from
# the Table columns, rather than go via a structured array.
table_hdu = BinTableHDU.from_columns(np.array(table.filled()), header=hdr, character_as_bytes=True)
for col in table_hdu.columns:
# Binary FITS tables support TNULL *only* for integer data columns
# TODO: Determine a schema for handling non-integer masked columns
# in FITS (if at all possible)
int_formats = ('B', 'I', 'J', 'K')
if not (col.format in int_formats or
col.format.p_format in int_formats):
continue
# The astype is necessary because if the string column is less
# than one character, the fill value will be N/A by default which
# is too long, and so no values will get masked.
fill_value = table[col.name].get_fill_value()
col.null = fill_value.astype(table[col.name].dtype)
else:
table_hdu = BinTableHDU.from_columns(np.array(table.filled()), header=hdr, character_as_bytes=character_as_bytes)
# Set units and format display for output HDU
for col in table_hdu.columns:
if table[col.name].info.format is not None:
# check for boolean types, special format case
logical = table[col.name].info.dtype == bool
tdisp_format = python_to_tdisp(table[col.name].info.format,
logical_dtype=logical)
if tdisp_format is not None:
col.disp = tdisp_format
unit = table[col.name].unit
if unit is not None:
try:
col.unit = unit.to_string(format='fits')
except UnitScaleError:
scale = unit.scale
raise UnitScaleError(
"The column '{0}' could not be stored in FITS format "
"because it has a scale '({1})' that "
"is not recognized by the FITS standard. Either scale "
"the data or change the units.".format(col.name, str(scale)))
except ValueError:
warnings.warn(
"The unit '{0}' could not be saved to FITS format".format(
unit.to_string()), AstropyUserWarning)
# Try creating a Unit to issue a warning if the unit is not FITS compliant
Unit(col.unit, format='fits', parse_strict='warn')
# Column-specific override keywords for coordinate columns
coord_meta = table.meta.pop('__coordinate_columns__', {})
for col_name, col_info in coord_meta.items():
col = table_hdu.columns[col_name]
# Set the column coordinate attributes from data saved earlier.
# Note: have to set all three, even if we have no data.
for attr in 'coord_type', 'coord_unit', 'time_ref_pos':
setattr(col, attr, col_info.get(attr, None))
for key, value in table.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
warnings.warn(
"Meta-data keyword {0} will be ignored since it conflicts "
"with a FITS reserved keyword".format(key), AstropyUserWarning)
# Convert to FITS format
if key == 'comments':
key = 'comment'
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
warnings.warn(
"Attribute `{0}` of type {1} cannot be added to "
"FITS Header - skipping".format(key, type(value)),
AstropyUserWarning)
else:
try:
table_hdu.header[key] = value
except ValueError:
warnings.warn(
"Attribute `{0}` of type {1} cannot be added to FITS "
"Header - skipping".format(key, type(value)),
AstropyUserWarning)
return table_hdu
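# Hedged sketch of ``table_to_hdu``: a plain `~astropy.table.Table` becomes a
# BinTableHDU, and column units that FITS understands are carried over.
def _example_table_to_hdu():
    from astropy.table import Table
    t = Table({'id': [1, 2], 'flux': [1.2, 3.4]})
    t['flux'].unit = 'Jy'
    hdu = table_to_hdu(t)
    return hdu.columns['flux'].unit  # 'Jy'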
def append(filename, data, header=None, checksum=False, verify=True, **kwargs):
"""
Append the header/data to FITS file if filename exists, create if not.
If only ``data`` is supplied, a minimal header is created.
Parameters
----------
filename : file path, file object, or file like object
File to write to. If opened, must be opened for update (rb+) unless it
is a new file, then it must be opened for append (ab+). A file or
`~gzip.GzipFile` object opened for update will be closed after return.
data : array, table, or group data object
the new data used for appending
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
checksum : bool, optional
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards to the header
of the HDU when written to the file.
verify : bool, optional
When `True`, the existing FITS file will be read in to verify it for
correctness before appending. When `False`, content is simply appended
to the end of the file. Setting ``verify`` to `False` can be much
faster.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
"""
name, closed, noexist_or_empty = _stat_filename_or_fileobj(filename)
if noexist_or_empty:
#
        # The input file or file-like object either doesn't exist or is
# empty. Use the writeto convenience function to write the
# output to the empty object.
#
writeto(filename, data, header, checksum=checksum, **kwargs)
else:
hdu = _makehdu(data, header)
if isinstance(hdu, PrimaryHDU):
hdu = ImageHDU(data, header)
if verify or not closed:
f = fitsopen(filename, mode='append')
try:
f.append(hdu)
# Set a flag in the HDU so that only this HDU gets a checksum
# when writing the file.
hdu._output_checksum = checksum
finally:
f.close(closed=closed)
else:
f = _File(filename, mode='append')
try:
hdu._output_checksum = checksum
hdu._writeto(f)
finally:
f.close()
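# Hedged sketch for ``append`` (hypothetical file name): the first call
# creates the file, later calls add extensions to it.
def _example_append():
    import numpy as np
    data = np.zeros((5, 5))
    append('example_stack.fits', data)        # creates the file
    append('example_stack.fits', data + 1.0)  # appends an ImageHDU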
def update(filename, data, *args, **kwargs):
"""
Update the specified extension with the input data/header.
Parameters
----------
filename : file path, file object, or file like object
File to update. If opened, mode must be update (rb+). An opened file
object or `~gzip.GzipFile` object will be closed upon return.
data : array, table, or group data object
the new data used for updating
header : `Header` object, optional
The header associated with ``data``. If `None`, an appropriate header
will be created for the data object supplied.
ext, extname, extver
The rest of the arguments are flexible: the 3rd argument can be the
header associated with the data. If the 3rd argument is not a
`Header`, it (and other positional arguments) are assumed to be the
extension specification(s). Header and extension specs can also be
keyword arguments. For example::
update(file, dat, hdr, 'sci') # update the 'sci' extension
update(file, dat, 3) # update the 3rd extension
update(file, dat, hdr, 3) # update the 3rd extension
update(file, dat, 'sci', 2) # update the 2nd SCI extension
update(file, dat, 3, header=hdr) # update the 3rd extension
update(file, dat, header=hdr, ext=5) # update the 5th extension
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
"""
# The arguments to this function are a bit trickier to deal with than others
# in this module, since the documentation has promised that the header
# argument can be an optional positional argument.
if args and isinstance(args[0], Header):
header = args[0]
args = args[1:]
else:
header = None
# The header can also be a keyword argument--if both are provided the
# keyword takes precedence
header = kwargs.pop('header', header)
new_hdu = _makehdu(data, header)
closed = fileobj_closed(filename)
hdulist, _ext = _getext(filename, 'update', *args, **kwargs)
try:
hdulist[_ext] = new_hdu
finally:
hdulist.close(closed=closed)
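# Hedged sketch for ``update`` (hypothetical file name): replace the data of
# extension 1, giving the extension number positionally.
def _example_update():
    import numpy as np
    update('example_stack.fits', np.ones((5, 5)), 1)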
def info(filename, output=None, **kwargs):
"""
Print the summary information on a FITS file.
This includes the name, type, length of header, data shape and type
for each extension.
Parameters
----------
filename : file path, file object, or file like object
FITS file to obtain info from. If opened, mode must be one of
the following: rb, rb+, or ab+ (i.e. the file must be readable).
output : file, bool, optional
A file-like object to write the output to. If ``False``, does not
output to a file and instead returns a list of tuples representing the
HDU info. Writes to ``sys.stdout`` by default.
kwargs
Any additional keyword arguments to be passed to
`astropy.io.fits.open`.
*Note:* This function sets ``ignore_missing_end=True`` by default.
"""
mode, closed = _get_file_mode(filename, default='readonly')
# Set the default value for the ignore_missing_end parameter
if 'ignore_missing_end' not in kwargs:
kwargs['ignore_missing_end'] = True
f = fitsopen(filename, mode=mode, **kwargs)
try:
ret = f.info(output=output)
finally:
if closed:
f.close()
return ret
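# Hedged sketch for ``info``: print the HDU summary, or capture it as a list
# of tuples by passing ``output=False``.
def _example_info():
    return info('example_stack.fits', output=False)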
def printdiff(inputa, inputb, *args, **kwargs):
"""
Compare two parts of a FITS file, including entire FITS files,
FITS `HDUList` objects and FITS ``HDU`` objects.
Parameters
----------
inputa : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputb``.
inputb : str, `HDUList` object, or ``HDU`` object
The filename of a FITS file, `HDUList`, or ``HDU``
object to compare to ``inputa``.
ext, extname, extver
Additional positional arguments are for extension specification if your
inputs are string filenames (will not work if
``inputa`` and ``inputb`` are ``HDU`` objects or `HDUList` objects).
        They are flexible and are best illustrated by examples.  In addition
        to using these arguments positionally you can directly call the
        keyword parameters ``ext``, ``extname``, or ``extver``.
By extension number::
printdiff('inA.fits', 'inB.fits', 0) # the primary HDU
printdiff('inA.fits', 'inB.fits', 2) # the second extension
printdiff('inA.fits', 'inB.fits', ext=2) # the second extension
By name, i.e., ``EXTNAME`` value (if unique). ``EXTNAME`` values are
        not case sensitive::
printdiff('inA.fits', 'inB.fits', 'sci')
printdiff('inA.fits', 'inB.fits', extname='sci') # equivalent
By combination of ``EXTNAME`` and ``EXTVER`` as separate
arguments or as a tuple::
printdiff('inA.fits', 'inB.fits', 'sci', 2) # EXTNAME='SCI'
# & EXTVER=2
printdiff('inA.fits', 'inB.fits', extname='sci', extver=2)
# equivalent
printdiff('inA.fits', 'inB.fits', ('sci', 2)) # equivalent
Ambiguous or conflicting specifications will raise an exception::
printdiff('inA.fits', 'inB.fits',
ext=('sci', 1), extname='err', extver=2)
kwargs
Any additional keyword arguments to be passed to
`~astropy.io.fits.FITSDiff`.
Notes
-----
    The primary use for the `printdiff` function is to allow a quick printout
    of a FITS difference report; it writes to ``sys.stdout``.
To save the diff report to a file please use `~astropy.io.fits.FITSDiff`
directly.
"""
# Pop extension keywords
extension = {key: kwargs.pop(key) for key in ['ext', 'extname', 'extver']
if key in kwargs}
has_extensions = args or extension
if isinstance(inputa, str) and has_extensions:
        # Use the handy _getext to interpret any ext keywords, but
        # we will need to close hdulista if opening hdulistb fails
modea, closeda = _get_file_mode(inputa)
modeb, closedb = _get_file_mode(inputb)
hdulista, extidxa = _getext(inputa, modea, *args, **extension)
# Have to close a if b doesn't make it
try:
hdulistb, extidxb = _getext(inputb, modeb, *args, **extension)
except Exception:
hdulista.close(closed=closeda)
raise
try:
hdua = hdulista[extidxa]
hdub = hdulistb[extidxb]
# See below print for note
print(HDUDiff(hdua, hdub, **kwargs).report())
finally:
hdulista.close(closed=closeda)
hdulistb.close(closed=closedb)
# If input is not a string, can feed HDU objects or HDUList directly,
# but can't currently handle extensions
elif isinstance(inputa, _ValidHDU) and has_extensions:
raise ValueError("Cannot use extension keywords when providing an "
"HDU object.")
elif isinstance(inputa, _ValidHDU) and not has_extensions:
print(HDUDiff(inputa, inputb, **kwargs).report())
elif isinstance(inputa, HDUList) and has_extensions:
raise NotImplementedError("Extension specification with HDUList "
"objects not implemented.")
# This function is EXCLUSIVELY for printing the diff report to screen
# in a one-liner call, hence the use of print instead of logging
else:
print(FITSDiff(inputa, inputb, **kwargs).report())
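# Hedged sketch for ``printdiff`` with hypothetical file names: compare whole
# files, or narrow the comparison to a single extension.
def _example_printdiff():
    printdiff('inA.fits', 'inB.fits')         # full FITSDiff report
    printdiff('inA.fits', 'inB.fits', ext=0)  # primary HDUs only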
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def tabledump(filename, datafile=None, cdfile=None, hfile=None, ext=1,
overwrite=False):
"""
Dump a table HDU to a file in ASCII format. The table may be
dumped in three separate files, one containing column definitions,
one containing header parameters, and one for table data.
Parameters
----------
filename : file path, file object or file-like object
Input fits file.
datafile : file path, file object or file-like object, optional
Output data file. The default is the root name of the input
fits file appended with an underscore, followed by the
extension number (ext), followed by the extension ``.txt``.
cdfile : file path, file object or file-like object, optional
Output column definitions file. The default is `None`,
no column definitions output is produced.
hfile : file path, file object or file-like object, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
ext : int
The number of the extension containing the table HDU to be
dumped.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
Notes
-----
    The primary use for the `tabledump` function is to allow editing of the
    table data and parameters in a standard text editor.  The
    `tableload` function can be used to reassemble the table from the
    three ASCII files.
"""
# allow file object to already be opened in any of the valid modes
# and leave the file in the same state (opened or closed) as when
# the function was called
mode, closed = _get_file_mode(filename, default='readonly')
f = fitsopen(filename, mode=mode)
# Create the default data file name if one was not provided
try:
if not datafile:
root, tail = os.path.splitext(f._file.name)
datafile = root + '_' + repr(ext) + '.txt'
# Dump the data from the HDU to the files
f[ext].dump(datafile, cdfile, hfile, overwrite)
finally:
if closed:
f.close()
if isinstance(tabledump.__doc__, str):
tabledump.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n ')
def tableload(datafile, cdfile, hfile=None):
"""
Create a table from the input ASCII files. The input is from up
to three separate files, one containing column definitions, one
containing header parameters, and one containing column data. The
header parameters file is not required. When the header
parameters file is absent a minimal header is constructed.
Parameters
----------
datafile : file path, file object or file-like object
Input data file containing the table data in ASCII format.
cdfile : file path, file object or file-like object
Input column definition file containing the names, formats,
display formats, physical units, multidimensional array
dimensions, undefined values, scale factors, and offsets
associated with the columns in the table.
hfile : file path, file object or file-like object, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table.
If `None`, a minimal header is constructed.
Notes
-----
    The primary use for the `tableload` function is to allow input of table
    data and parameters that were edited as ASCII files in a standard text
    editor.  The `tabledump` function can be used to create the initial
    ASCII files.
"""
return BinTableHDU.load(datafile, cdfile, hfile, replace=True)
if isinstance(tableload.__doc__, str):
tableload.__doc__ += BinTableHDU._tdump_file_format.replace('\n', '\n ')
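# Hedged round-trip sketch (hypothetical file names): dump extension 1 of a
# table file to the three ASCII files, then rebuild a BinTableHDU from them.
def _example_tabledump_roundtrip():
    tabledump('example_table.fits', datafile='d.txt', cdfile='c.txt',
              hfile='h.txt', ext=1)
    return tableload('d.txt', 'c.txt', 'h.txt')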
def _getext(filename, mode, *args, ext=None, extname=None, extver=None,
**kwargs):
"""
Open the input file, return the `HDUList` and the extension.
This supports several different styles of extension selection. See the
:func:`getdata()` documentation for the different possibilities.
"""
    err_msg = ('Redundant/conflicting extension argument(s): {}'.format(
{'args': args, 'ext': ext, 'extname': extname,
'extver': extver}))
# This code would be much simpler if just one way of specifying an
# extension were picked. But now we need to support all possible ways for
# the time being.
if len(args) == 1:
# Must be either an extension number, an extension name, or an
# (extname, extver) tuple
if _is_int(args[0]) or (isinstance(ext, tuple) and len(ext) == 2):
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
ext = args[0]
elif isinstance(args[0], str):
# The first arg is an extension name; it could still be valid
# to provide an extver kwarg
if ext is not None or extname is not None:
raise TypeError(err_msg)
extname = args[0]
else:
# Take whatever we have as the ext argument; we'll validate it
# below
ext = args[0]
elif len(args) == 2:
# Must be an extname and extver
if ext is not None or extname is not None or extver is not None:
raise TypeError(err_msg)
extname = args[0]
extver = args[1]
elif len(args) > 2:
raise TypeError('Too many positional arguments.')
if (ext is not None and
not (_is_int(ext) or
(isinstance(ext, tuple) and len(ext) == 2 and
isinstance(ext[0], str) and _is_int(ext[1])))):
raise ValueError(
'The ext keyword must be either an extension number '
'(zero-indexed) or a (extname, extver) tuple.')
if extname is not None and not isinstance(extname, str):
raise ValueError('The extname argument must be a string.')
if extver is not None and not _is_int(extver):
raise ValueError('The extver argument must be an integer.')
if ext is None and extname is None and extver is None:
ext = 0
elif ext is not None and (extname is not None or extver is not None):
raise TypeError(err_msg)
elif extname:
if extver:
ext = (extname, extver)
else:
ext = (extname, 1)
elif extver and extname is None:
raise TypeError('extver alone cannot specify an extension.')
hdulist = fitsopen(filename, mode=mode, **kwargs)
return hdulist, ext
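# Hedged sketch of the equivalent extension specifications ``_getext``
# accepts; each form below resolves to the same ('sci', 2) extension of a
# hypothetical file.
def _example_ext_specs():
    specs = [(('sci', 2), {}),
             ((), {'ext': ('sci', 2)}),
             ((), {'extname': 'sci', 'extver': 2})]
    for args, kwargs in specs:
        hdulist, ext = _getext('example.fits', 'readonly', *args, **kwargs)
        hdulist.close()
        assert ext == ('sci', 2)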
def _makehdu(data, header):
if header is None:
header = Header()
hdu = _BaseHDU(data, header)
if hdu.__class__ in (_BaseHDU, _ValidHDU):
# The HDU type was unrecognized, possibly due to a
# nonexistent/incomplete header
if ((isinstance(data, np.ndarray) and data.dtype.fields is not None) or
isinstance(data, np.recarray)):
hdu = BinTableHDU(data, header=header)
elif isinstance(data, np.ndarray):
hdu = ImageHDU(data, header=header)
else:
raise KeyError('Data must be a numpy array.')
return hdu
def _stat_filename_or_fileobj(filename):
closed = fileobj_closed(filename)
name = fileobj_name(filename) or ''
try:
loc = filename.tell()
except AttributeError:
loc = 0
noexist_or_empty = ((name and
(not os.path.exists(name) or
(os.path.getsize(name) == 0)))
or (not name and loc == 0))
return name, closed, noexist_or_empty
def _get_file_mode(filename, default='readonly'):
"""
    Allow file object to already be opened in any of the valid modes and
    leave the file in the same state (opened or closed) as when the
    function was called.
"""
mode = default
closed = fileobj_closed(filename)
fmode = fileobj_mode(filename)
if fmode is not None:
mode = FILE_MODES.get(fmode)
if mode is None:
raise OSError(
"File mode of the input file object ({!r}) cannot be used to "
"read/write FITS files.".format(fmode))
return mode, closed
|
28052441aa130f41181955867b240fa09b45f862139bde4e08a3d3f7081da488 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import itertools
import io
import mmap
import operator
import os
import platform
import signal
import sys
import tempfile
import textwrap
import threading
import warnings
import weakref
from contextlib import contextmanager, suppress
from ...utils import data
from distutils.version import LooseVersion
import numpy as np
from ...utils import wraps
from ...utils.exceptions import AstropyUserWarning
cmp = lambda a, b: (a > b) - (a < b)
all_integer_types = (int, np.integer)
class NotifierMixin:
"""
Mixin class that provides services by which objects can register
listeners to changes on that object.
All methods provided by this class are underscored, since this is intended
for internal use to communicate between classes in a generic way, and is
not machinery that should be exposed to users of the classes involved.
Use the ``_add_listener`` method to register a listener on an instance of
the notifier. This registers the listener with a weak reference, so if
no other references to the listener exist it is automatically dropped from
the list and does not need to be manually removed.
Call the ``_notify`` method on the notifier to update all listeners
upon changes. ``_notify('change_type', *args, **kwargs)`` results
in calling ``listener._update_change_type(*args, **kwargs)`` on all
listeners subscribed to that notifier.
If a particular listener does not have the appropriate update method
it is ignored.
Examples
--------
>>> class Widget(NotifierMixin):
... state = 1
... def __init__(self, name):
... self.name = name
... def update_state(self):
... self.state += 1
... self._notify('widget_state_changed', self)
...
>>> class WidgetListener:
... def _update_widget_state_changed(self, widget):
... print('Widget {0} changed state to {1}'.format(
... widget.name, widget.state))
...
>>> widget = Widget('fred')
>>> listener = WidgetListener()
>>> widget._add_listener(listener)
>>> widget.update_state()
Widget fred changed state to 2
"""
_listeners = None
def _add_listener(self, listener):
"""
Add an object to the list of listeners to notify of changes to this
object. This adds a weakref to the list of listeners that is
removed from the listeners list when the listener has no other
references to it.
"""
if self._listeners is None:
self._listeners = weakref.WeakValueDictionary()
self._listeners[id(listener)] = listener
def _remove_listener(self, listener):
"""
Removes the specified listener from the listeners list. This relies
on object identity (i.e. the ``is`` operator).
"""
if self._listeners is None:
return
with suppress(KeyError):
del self._listeners[id(listener)]
def _notify(self, notification, *args, **kwargs):
"""
Notify all listeners of some particular state change by calling their
``_update_<notification>`` method with the given ``*args`` and
``**kwargs``.
The notification does not by default include the object that actually
changed (``self``), but it certainly may if required.
"""
if self._listeners is None:
return
method_name = '_update_{0}'.format(notification)
for listener in self._listeners.valuerefs():
# Use valuerefs instead of itervaluerefs; see
# https://github.com/astropy/astropy/issues/4015
listener = listener() # dereference weakref
if listener is None:
continue
if hasattr(listener, method_name):
method = getattr(listener, method_name)
if callable(method):
method(*args, **kwargs)
def __getstate__(self):
"""
Exclude listeners when saving the listener's state, since they may be
ephemeral.
"""
# TODO: This hasn't come up often, but if anyone needs to pickle HDU
# objects it will be necessary when HDU objects' states are restored to
# re-register themselves as listeners on their new column instances.
try:
state = super().__getstate__()
except AttributeError:
# Chances are the super object doesn't have a getstate
state = self.__dict__.copy()
state['_listeners'] = None
return state
def first(iterable):
"""
Returns the first item returned by iterating over an iterable object.
Example:
>>> a = [1, 2, 3]
>>> first(a)
1
"""
return next(iter(iterable))
def itersubclasses(cls, _seen=None):
"""
Generator over all subclasses of a given class, in depth first order.
>>> class A: pass
>>> class B(A): pass
>>> class C(A): pass
>>> class D(B,C): pass
>>> class E(D): pass
>>>
>>> for cls in itersubclasses(A):
... print(cls.__name__)
B
D
E
C
>>> # get ALL classes currently defined
>>> [cls.__name__ for cls in itersubclasses(object)]
[...'tuple', ...'type', ...]
From http://code.activestate.com/recipes/576949/
"""
if _seen is None:
_seen = set()
try:
subs = cls.__subclasses__()
except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls)
for sub in sorted(subs, key=operator.attrgetter('__name__')):
if sub not in _seen:
_seen.add(sub)
yield sub
for sub in itersubclasses(sub, _seen):
yield sub
def ignore_sigint(func):
"""
This decorator registers a custom SIGINT handler to catch and ignore SIGINT
until the wrapped function is completed.
"""
@wraps(func)
def wrapped(*args, **kwargs):
# Get the name of the current thread and determine if this is a single
# threaded application
curr_thread = threading.currentThread()
single_thread = (threading.activeCount() == 1 and
curr_thread.getName() == 'MainThread')
class SigintHandler:
def __init__(self):
self.sigint_received = False
def __call__(self, signum, frame):
warnings.warn('KeyboardInterrupt ignored until {} is '
'complete!'.format(func.__name__),
AstropyUserWarning)
self.sigint_received = True
sigint_handler = SigintHandler()
        # Define new signal interrupt handler
if single_thread:
# Install new handler
old_handler = signal.signal(signal.SIGINT, sigint_handler)
try:
func(*args, **kwargs)
finally:
if single_thread:
if old_handler is not None:
signal.signal(signal.SIGINT, old_handler)
else:
signal.signal(signal.SIGINT, signal.SIG_DFL)
if sigint_handler.sigint_received:
raise KeyboardInterrupt
return wrapped
def pairwise(iterable):
"""Return the items of an iterable paired with its next item.
Ex: s -> (s0,s1), (s1,s2), (s2,s3), ....
"""
a, b = itertools.tee(iterable)
for _ in b:
# Just a little trick to advance b without having to catch
        # StopIteration if b happens to be empty
break
return zip(a, b)
def encode_ascii(s):
if isinstance(s, str):
return s.encode('ascii')
elif (isinstance(s, np.ndarray) and
issubclass(s.dtype.type, np.str_)):
ns = np.char.encode(s, 'ascii').view(type(s))
if ns.dtype.itemsize != s.dtype.itemsize / 4:
            # The dtype width must be an integer number of characters
            ns = ns.astype((np.bytes_, s.dtype.itemsize // 4))
return ns
elif (isinstance(s, np.ndarray) and
not issubclass(s.dtype.type, np.bytes_)):
raise TypeError('string operation on non-string array')
return s
def decode_ascii(s):
if isinstance(s, bytes):
try:
return s.decode('ascii')
except UnicodeDecodeError:
warnings.warn('non-ASCII characters are present in the FITS '
'file header and have been replaced by "?" '
'characters', AstropyUserWarning)
s = s.decode('ascii', errors='replace')
return s.replace(u'\ufffd', '?')
elif (isinstance(s, np.ndarray) and
issubclass(s.dtype.type, np.bytes_)):
# np.char.encode/decode annoyingly don't preserve the type of the
# array, hence the view() call
# It also doesn't necessarily preserve widths of the strings,
# hence the astype()
if s.size == 0:
            # Numpy apparently also has a bug that if a string array is
            # empty, calling np.char.decode on it returns an empty float64
            # array, so construct the decoded array by hand instead
dt = s.dtype.str.replace('S', 'U')
ns = np.array([], dtype=dt).view(type(s))
else:
ns = np.char.decode(s, 'ascii').view(type(s))
if ns.dtype.itemsize / 4 != s.dtype.itemsize:
ns = ns.astype((np.str_, s.dtype.itemsize))
return ns
elif (isinstance(s, np.ndarray) and
not issubclass(s.dtype.type, np.str_)):
# Don't silently pass through on non-string arrays; we don't want
# to hide errors where things that are not stringy are attempting
# to be decoded
raise TypeError('string operation on non-string array')
return s
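# Hedged sketch: ``encode_ascii``/``decode_ascii`` round-trip plain strings
# and numpy string arrays, preserving the array type and item width.
def _example_ascii_roundtrip():
    assert decode_ascii(encode_ascii('SIMPLE')) == 'SIMPLE'
    arr = np.array(['a', 'bc'], dtype='U2')
    out = decode_ascii(encode_ascii(arr))
    assert out.dtype.kind == 'U' and list(out) == ['a', 'bc']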
def isreadable(f):
"""
Returns True if the file-like object can be read from. This is a common-
sense approximation of io.IOBase.readable.
"""
if hasattr(f, 'readable'):
return f.readable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.readable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'read'):
return False
if hasattr(f, 'mode') and not any(c in f.mode for c in 'r+'):
return False
# Not closed, has a 'read()' method, and either has no known mode or a
# readable mode--should be good enough to assume 'readable'
return True
def iswritable(f):
"""
Returns True if the file-like object can be written to. This is a common-
sense approximation of io.IOBase.writable.
"""
if hasattr(f, 'writable'):
return f.writable()
if hasattr(f, 'closed') and f.closed:
# This mimics the behavior of io.IOBase.writable
raise ValueError('I/O operation on closed file')
if not hasattr(f, 'write'):
return False
if hasattr(f, 'mode') and not any(c in f.mode for c in 'wa+'):
return False
    # Not closed, has a 'write()' method, and either has no known mode or a
# mode that supports writing--should be good enough to assume 'writable'
return True
def isfile(f):
"""
Returns True if the given object represents an OS-level file (that is,
``isinstance(f, file)``).
On Python 3 this also returns True if the given object is higher level
wrapper on top of a FileIO object, such as a TextIOWrapper.
"""
if isinstance(f, io.FileIO):
return True
elif hasattr(f, 'buffer'):
return isfile(f.buffer)
elif hasattr(f, 'raw'):
return isfile(f.raw)
return False
def fileobj_open(filename, mode):
"""
A wrapper around the `open()` builtin.
This exists because `open()` returns an `io.BufferedReader` by default.
This is bad, because `io.BufferedReader` doesn't support random access,
which we need in some cases. We must call open with buffering=0 to get
a raw random-access file reader.
"""
return open(filename, mode, buffering=0)
def fileobj_name(f):
"""
Returns the 'name' of file-like object f, if it has anything that could be
called its name. Otherwise f's class or type is returned. If f is a
string f itself is returned.
"""
if isinstance(f, str):
return f
elif isinstance(f, gzip.GzipFile):
# The .name attribute on GzipFiles does not always represent the name
# of the file being read/written--it can also represent the original
# name of the file being compressed
# See the documentation at
# https://docs.python.org/3/library/gzip.html#gzip.GzipFile
# As such, for gzip files only return the name of the underlying
# fileobj, if it exists
return fileobj_name(f.fileobj)
elif hasattr(f, 'name'):
return f.name
elif hasattr(f, 'filename'):
return f.filename
elif hasattr(f, '__class__'):
return str(f.__class__)
else:
return str(type(f))
def fileobj_closed(f):
"""
Returns True if the given file-like object is closed or if f is a string
(and assumed to be a pathname).
Returns False for all other types of objects, under the assumption that
they are file-like objects with no sense of a 'closed' state.
"""
if isinstance(f, str):
return True
if hasattr(f, 'closed'):
return f.closed
elif hasattr(f, 'fileobj') and hasattr(f.fileobj, 'closed'):
return f.fileobj.closed
elif hasattr(f, 'fp') and hasattr(f.fp, 'closed'):
return f.fp.closed
else:
return False
def fileobj_mode(f):
"""
Returns the 'mode' string of a file-like object if such a thing exists.
Otherwise returns None.
"""
# Go from most to least specific--for example gzip objects have a 'mode'
# attribute, but it's not analogous to the file.mode attribute
# gzip.GzipFile -like
if hasattr(f, 'fileobj') and hasattr(f.fileobj, 'mode'):
fileobj = f.fileobj
# astropy.io.fits._File -like, doesn't need additional checks because it's
# already validated
elif hasattr(f, 'fileobj_mode'):
return f.fileobj_mode
# PIL-Image -like investigate the fp (filebuffer)
elif hasattr(f, 'fp') and hasattr(f.fp, 'mode'):
fileobj = f.fp
# FILEIO -like (normal open(...)), keep as is.
elif hasattr(f, 'mode'):
fileobj = f
# Doesn't look like a file-like object, for example strings, urls or paths.
else:
return None
return _fileobj_normalize_mode(fileobj)
def _fileobj_normalize_mode(f):
"""Takes care of some corner cases in Python where the mode string
is either oddly formatted or does not truly represent the file mode.
"""
mode = f.mode
# Special case: Gzip modes:
if isinstance(f, gzip.GzipFile):
# GzipFiles can be either readonly or writeonly
if mode == gzip.READ:
return 'rb'
elif mode == gzip.WRITE:
return 'wb'
else:
return None # This shouldn't happen?
# Sometimes Python can produce modes like 'r+b' which will be normalized
# here to 'rb+'
if '+' in mode:
mode = mode.replace('+', '')
mode += '+'
return mode
def fileobj_is_binary(f):
"""
    Returns True if the given file or file-like object has a file open in binary
mode. When in doubt, returns True by default.
"""
# This is kind of a hack for this to work correctly with _File objects,
# which, for the time being, are *always* binary
if hasattr(f, 'binary'):
return f.binary
if isinstance(f, io.TextIOBase):
return False
mode = fileobj_mode(f)
if mode:
return 'b' in mode
else:
return True
def translate(s, table, deletechars):
if deletechars:
table = table.copy()
for c in deletechars:
table[ord(c)] = None
return s.translate(table)
def fill(text, width, **kwargs):
"""
Like :func:`textwrap.wrap` but preserves existing paragraphs which
:func:`textwrap.wrap` does not otherwise handle well. Also handles section
headers.
"""
paragraphs = text.split('\n\n')
def maybe_fill(t):
if all(len(l) < width for l in t.splitlines()):
return t
else:
return textwrap.fill(t, width, **kwargs)
return '\n\n'.join(maybe_fill(p) for p in paragraphs)
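# Hedged sketch of ``fill``: paragraphs separated by blank lines are wrapped
# independently, and paragraphs made of short lines are left untouched.
def _example_fill():
    text = 'short header\n\n' + 'word ' * 20
    wrapped = fill(text, width=40)
    assert wrapped.startswith('short header\n\n')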
# On MacOS X 10.8 and earlier, there is a bug that causes numpy.fromfile to
# fail when reading over 2Gb of data. If we detect these versions of MacOS X,
# we can instead read the data in chunks. To avoid performance penalties at
# import time, we defer the setting of this global variable until the first
# time it is needed.
CHUNKED_FROMFILE = None
def _array_from_file(infile, dtype, count):
"""Create a numpy array from a file or a file-like object."""
if isfile(infile):
global CHUNKED_FROMFILE
if CHUNKED_FROMFILE is None:
if (sys.platform == 'darwin' and
LooseVersion(platform.mac_ver()[0]) < LooseVersion('10.9')):
CHUNKED_FROMFILE = True
else:
CHUNKED_FROMFILE = False
if CHUNKED_FROMFILE:
chunk_size = int(1024 ** 3 / dtype.itemsize) # 1Gb to be safe
if count < chunk_size:
return np.fromfile(infile, dtype=dtype, count=count)
else:
array = np.empty(count, dtype=dtype)
for beg in range(0, count, chunk_size):
end = min(count, beg + chunk_size)
array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg)
return array
else:
return np.fromfile(infile, dtype=dtype, count=count)
else:
# treat as file-like object with "read" method; this includes gzip file
# objects, because numpy.fromfile just reads the compressed bytes from
# their underlying file object, instead of the decompressed bytes
read_size = np.dtype(dtype).itemsize * count
s = infile.read(read_size)
array = np.frombuffer(s, dtype=dtype, count=count)
# copy is needed because np.frombuffer returns a read-only view of the
# underlying buffer
array = array.copy()
return array
_OSX_WRITE_LIMIT = (2 ** 32) - 1
_WIN_WRITE_LIMIT = (2 ** 31) - 1
def _array_to_file(arr, outfile):
"""
Write a numpy array to a file or a file-like object.
Parameters
----------
arr : `~numpy.ndarray`
The Numpy array to write.
outfile : file-like
A file-like object such as a Python file object, an `io.BytesIO`, or
anything else with a ``write`` method. The file object must support
the buffer interface in its ``write``.
If writing directly to an on-disk file this delegates directly to
`ndarray.tofile`. Otherwise a slower Python implementation is used.
"""
if isfile(outfile):
write = lambda a, f: a.tofile(f)
else:
write = _array_to_file_like
# Implements a workaround for a bug deep in OSX's stdlib file writing
# functions; on 64-bit OSX it is not possible to correctly write a number
# of bytes greater than 2 ** 32 and divisible by 4096 (or possibly 8192--
# whatever the default blocksize for the filesystem is).
# This issue should have a workaround in Numpy too, but hasn't been
# implemented there yet: https://github.com/astropy/astropy/issues/839
#
# Apparently Windows has its own fwrite bug:
# https://github.com/numpy/numpy/issues/2256
if (sys.platform == 'darwin' and arr.nbytes >= _OSX_WRITE_LIMIT + 1 and
arr.nbytes % 4096 == 0):
# chunksize is a count of elements in the array, not bytes
chunksize = _OSX_WRITE_LIMIT // arr.itemsize
elif sys.platform.startswith('win'):
chunksize = _WIN_WRITE_LIMIT // arr.itemsize
else:
# Just pass the whole array to the write routine
return write(arr, outfile)
# Write one chunk at a time for systems whose fwrite chokes on large
# writes.
idx = 0
arr = arr.view(np.ndarray).flatten()
while idx < arr.nbytes:
write(arr[idx:idx + chunksize], outfile)
idx += chunksize
def _array_to_file_like(arr, fileobj):
"""
Write a `~numpy.ndarray` to a file-like object (which is not supported by
`numpy.ndarray.tofile`).
"""
# If the array is empty, we can simply take a shortcut and return since
# there is nothing to write.
if len(arr) == 0:
return
if arr.flags.contiguous:
# It suffices to just pass the underlying buffer directly to the
# fileobj's write (assuming it supports the buffer interface). If
        # it does not have the buffer interface, a TypeError should be raised,
        # in which case we can fall back to the other methods.
try:
fileobj.write(arr.data)
except TypeError:
pass
else:
return
if hasattr(np, 'nditer'):
# nditer version for non-contiguous arrays
for item in np.nditer(arr):
fileobj.write(item.tostring())
else:
# Slower version for Numpy versions without nditer;
# The problem with flatiter is it doesn't preserve the original
# byteorder
byteorder = arr.dtype.byteorder
if ((sys.byteorder == 'little' and byteorder == '>')
or (sys.byteorder == 'big' and byteorder == '<')):
for item in arr.flat:
fileobj.write(item.byteswap().tostring())
else:
for item in arr.flat:
fileobj.write(item.tostring())
def _write_string(f, s):
"""
Write a string to a file, encoding to ASCII if the file is open in binary
mode, or decoding if the file is open in text mode.
"""
# Assume if the file object doesn't have a specific mode, that the mode is
# binary
binmode = fileobj_is_binary(f)
if binmode and isinstance(s, str):
s = encode_ascii(s)
elif not binmode and not isinstance(f, str):
s = decode_ascii(s)
f.write(s)
def _convert_array(array, dtype):
"""
Converts an array to a new dtype--if the itemsize of the new dtype is
the same as the old dtype and both types are not numeric, a view is
returned. Otherwise a new array must be created.
"""
if array.dtype == dtype:
return array
elif (array.dtype.itemsize == dtype.itemsize and not
(np.issubdtype(array.dtype, np.number) and
np.issubdtype(dtype, np.number))):
# Includes a special case when both dtypes are at least numeric to
# account for ticket #218: https://aeon.stsci.edu/ssb/trac/pyfits/ticket/218
return array.view(dtype)
else:
return array.astype(dtype)
def _unsigned_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
assert dtype.kind == 'u'
return 1 << (dtype.itemsize * 8 - 1)
def _is_pseudo_unsigned(dtype):
return dtype.kind == 'u' and dtype.itemsize >= 2
def _is_int(val):
return isinstance(val, all_integer_types)
def _str_to_num(val):
"""Converts a given string to either an int or a float if necessary."""
try:
num = int(val)
except ValueError:
# If this fails then an exception should be raised anyways
num = float(val)
return num
def _words_group(input, strlen):
"""
Split a long string into parts where each part is no longer
than ``strlen`` and no word is cut into two pieces. But if
there is one single word which is longer than ``strlen``, then
it will be split in the middle of the word.
"""
words = []
nblanks = input.count(' ')
nmax = max(nblanks, len(input) // strlen + 1)
arr = np.frombuffer((input + ' ').encode('utf8'), dtype=(bytes, 1))
# locations of the blanks
blank_loc = np.nonzero(arr == b' ')[0]
offset = 0
xoffset = 0
for idx in range(nmax):
try:
loc = np.nonzero(blank_loc >= strlen + offset)[0][0]
offset = blank_loc[loc - 1] + 1
if loc == 0:
offset = -1
except Exception:
offset = len(input)
# check for one word longer than strlen, break in the middle
if offset <= xoffset:
offset = xoffset + strlen
# collect the pieces in a list
words.append(input[xoffset:offset])
if len(input) == offset:
break
xoffset = offset
return words
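# Hedged sketch of ``_words_group`` (used when splitting long card values):
# no word is broken unless it alone exceeds ``strlen``.
def _example_words_group():
    parts = _words_group('the quick brown fox jumps', 10)
    assert all(len(p) <= 10 for p in parts)
    assert ''.join(parts) == 'the quick brown fox jumps'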
def _tmp_name(input):
"""
Create a temporary file name which should not already exist. Use the
directory of the input file as the base name of the mkstemp() output.
"""
if input is not None:
input = os.path.dirname(input)
f, fn = tempfile.mkstemp(dir=input)
os.close(f)
return fn
def _get_array_mmap(array):
"""
If the array has an mmap.mmap at base of its base chain, return the mmap
object; otherwise return None.
"""
if isinstance(array, mmap.mmap):
return array
base = array
while hasattr(base, 'base') and base.base is not None:
if isinstance(base.base, mmap.mmap):
return base.base
base = base.base
@contextmanager
def _free_space_check(hdulist, dirname=None):
try:
yield
except OSError as exc:
error_message = ''
if not isinstance(hdulist, list):
hdulist = [hdulist, ]
        if dirname is None:
            # ``hdulist`` was normalized to a plain list above, so the
            # backing file must be looked up on its first element
            dirname = os.path.dirname(hdulist[0]._file.name)
if os.path.isdir(dirname):
free_space = data.get_free_space_in_dir(dirname)
hdulist_size = sum(hdu.size for hdu in hdulist)
if free_space < hdulist_size:
error_message = ("Not enough space on disk: requested {}, "
"available {}. ".format(hdulist_size, free_space))
for hdu in hdulist:
hdu._close()
raise OSError(error_message + str(exc))
def _extract_number(value, default):
"""
Attempts to extract an integer number from the given value. If the
extraction fails, the value of the 'default' argument is returned.
"""
try:
# The _str_to_num method converts the value to string/float
# so we need to perform one additional conversion to int on top
return int(_str_to_num(value))
except (TypeError, ValueError):
return default
def get_testdata_filepath(filename):
"""
Return a string representing the path to the file requested from the
io.fits test data set.
.. versionadded:: 2.0.3
Parameters
----------
filename : str
The filename of the test data file.
Returns
-------
filepath : str
The path to the requested file.
"""
return data.get_pkg_data_filename(
'io/fits/tests/data/{}'.format(filename), 'astropy')
def _rstrip_inplace(array):
"""
Performs an in-place rstrip operation on string arrays. This is necessary
since the built-in `np.char.rstrip` in Numpy does not perform an in-place
calculation.
"""
# The following implementation convert the string to unsigned integers of
# the right length. Trailing spaces (which are represented as 32) are then
# converted to null characters (represented as zeros). To avoid creating
# large temporary mask arrays, we loop over chunks (attempting to do that
# on a 1-D version of the array; large memory may still be needed in the
# unlikely case that a string array has small first dimension and cannot
# be represented as a contiguous 1-D array in memory).
dt = array.dtype
if dt.kind not in 'SU':
raise TypeError("This function can only be used on string arrays")
# View the array as appropriate integers. The last dimension will
# equal the number of characters in each string.
bpc = 1 if dt.kind == 'S' else 4
dt_int = "{0}{1}u{2}".format(dt.itemsize // bpc, dt.byteorder, bpc)
b = array.view(dt_int, np.ndarray)
# For optimal speed, work in chunks of the internal ufunc buffer size.
bufsize = np.getbufsize()
# Attempt to have the strings as a 1-D array to give the chunk known size.
# Note: the code will work if this fails; the chunks will just be larger.
if b.ndim > 2:
try:
b.shape = -1, b.shape[-1]
except AttributeError: # can occur for non-contiguous arrays
pass
for j in range(0, b.shape[0], bufsize):
c = b[j:j + bufsize]
# Mask which will tell whether we're in a sequence of trailing spaces.
mask = np.ones(c.shape[:-1], dtype=bool)
# Loop over the characters in the strings, in reverse order. We process
# the i-th character of all strings in the chunk at the same time. If
# the character is 32, this corresponds to a space, and we then change
# this to 0. We then construct a new mask to find rows where the
# i-th character is 0 (null) and the i-1-th is 32 (space) and repeat.
for i in range(-1, -c.shape[-1], -1):
mask &= c[..., i] == 32
c[..., i][mask] = 0
mask = c[..., i] == 0
return array
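# Hedged sketch of ``_rstrip_inplace``: trailing (but not leading or interior)
# spaces are removed without allocating a full-size temporary mask.
def _example_rstrip_inplace():
    arr = np.array(['ab  ', ' c d'], dtype='S4')
    _rstrip_inplace(arr)
    assert list(arr) == [b'ab', b' c d']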
|
3b474889e6c4aa31fb0f6d17fa8403416b198533e4f150851bb4ed68bb7f643d | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
basic.py:
Basic table read / write functionality for simple character
delimited files with various options for column header definition.
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
class BasicHeader(core.BaseHeader):
"""
Basic table Header Reader
Set a few defaults for common ascii table formats
(start at line 0, comments begin with ``#`` and possibly white space)
"""
start_line = 0
comment = r'\s*#'
write_comment = '# '
class BasicData(core.BaseData):
"""
Basic table Data Reader
Set a few defaults for common ascii table formats
(start at line 1, comments begin with ``#`` and possibly white space)
"""
start_line = 1
comment = r'\s*#'
write_comment = '# '
class Basic(core.BaseReader):
r"""
Read a character-delimited table with a single header line at the top
followed by data lines to the end of the table. Lines beginning with # as
the first non-whitespace character are comments. This reader is highly
configurable.
::
rdr = ascii.get_reader(Reader=ascii.Basic)
rdr.header.splitter.delimiter = ' '
rdr.data.splitter.delimiter = ' '
rdr.header.start_line = 0
rdr.data.start_line = 1
rdr.data.end_line = None
rdr.header.comment = r'\s*#'
rdr.data.comment = r'\s*#'
Example table::
# Column definition is the first uncommented line
# Default delimiter is the space character.
apples oranges pears
# Data starts after the header column definition, blank lines ignored
1 2 3
4 5 6
"""
_format_name = 'basic'
_description = 'Basic table with custom delimiters'
header_class = BasicHeader
data_class = BasicData
class NoHeaderHeader(BasicHeader):
"""
Reader for table header without a header
Set the start of header line number to `None`, which tells the basic
reader there is no header line.
"""
start_line = None
class NoHeaderData(BasicData):
"""
Reader for table data without a header
Data starts at first uncommented line since there is no header line.
"""
start_line = 0
class NoHeader(Basic):
"""
Read a table with no header line. Columns are autonamed using
    header.auto_format which defaults to "col%d". Otherwise this reader is
    the same as the :class:`Basic` class from which it is derived. Example::
# Table data
1 2 "hello there"
3 4 world
"""
_format_name = 'no_header'
_description = 'Basic table with no headers'
header_class = NoHeaderHeader
data_class = NoHeaderData
class CommentedHeaderHeader(BasicHeader):
"""
Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""
Return only lines that start with the comment regexp. For these
lines strip out the matching characters.
"""
re_comment = re.compile(self.comment)
for line in lines:
match = re_comment.match(line)
if match:
yield line[match.end():]
def write(self, lines):
lines.append(self.write_comment + self.splitter.join(self.colnames))
class CommentedHeader(Basic):
"""
Read a file where the column names are given in a line that begins with
the header comment character. ``header_start`` can be used to specify the
line index of column names, and it can be a negative index (for example -1
for the last commented line). The default delimiter is the <space>
    character::
# col1 col2 col3
# Comment line
1 2 3
4 5 6
"""
_format_name = 'commented_header'
_description = 'Column names in a commented line'
header_class = CommentedHeaderHeader
data_class = NoHeaderData
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
out = super().read(table)
# Strip off the comment line set as the header line for
# commented_header format (first by default).
if 'comments' in out.meta:
idx = self.header.start_line
if idx < 0:
idx = len(out.meta['comments']) + idx
out.meta['comments'] = out.meta['comments'][:idx] + out.meta['comments'][idx+1:]
if not out.meta['comments']:
del out.meta['comments']
return out
def write_header(self, lines, meta):
"""
Write comment lines after, rather than before, the header.
"""
self.header.write(lines)
self.header.write_comments(lines, meta)
class TabHeaderSplitter(core.DefaultSplitter):
"""Split lines on tab and do not remove whitespace"""
delimiter = '\t'
process_line = None
class TabDataSplitter(TabHeaderSplitter):
"""
Don't strip data value whitespace since that is significant in TSV tables
"""
process_val = None
skipinitialspace = False
class TabHeader(BasicHeader):
"""
Reader for header of tables with tab separated header
"""
splitter_class = TabHeaderSplitter
class TabData(BasicData):
"""
Reader for data of tables with tab separated data
"""
splitter_class = TabDataSplitter
class Tab(Basic):
"""
Read a tab-separated file. Unlike the :class:`Basic` reader, whitespace is
not stripped from the beginning and end of either lines or individual column
values.
Example::
col1 <tab> col2 <tab> col3
# Comment line
1 <tab> 2 <tab> 5
"""
_format_name = 'tab'
_description = 'Basic table with tab-separated values'
header_class = TabHeader
data_class = TabData
class CsvSplitter(core.DefaultSplitter):
"""
Split on comma for CSV (comma-separated-value) tables
"""
delimiter = ','
class CsvHeader(BasicHeader):
"""
Header that uses the :class:`astropy.io.ascii.basic.CsvSplitter`
"""
splitter_class = CsvSplitter
comment = None
write_comment = None
class CsvData(BasicData):
"""
Data that uses the :class:`astropy.io.ascii.basic.CsvSplitter`
"""
splitter_class = CsvSplitter
fill_values = [(core.masked, '')]
comment = None
write_comment = None
class Csv(Basic):
"""
Read a CSV (comma-separated-values) file.
Example::
num,ra,dec,radius,mag
1,32.23222,10.1211,0.8,18.1
2,38.12321,-88.1321,2.2,17.0
Plain csv (comma separated value) files typically contain as many entries
as there are columns on each line. In contrast, common spreadsheet editors
stop writing if all remaining cells on a line are empty, which can lead to
lines where the rightmost entries are missing. This Reader can deal with
such files.
Masked values (indicated by an empty '' field value when reading) are
written out in the same way with an empty ('') field. This is different
from the typical default for `astropy.io.ascii` in which missing values are
indicated by ``--``.
Example::
num,ra,dec,radius,mag
1,32.23222,10.1211
2,38.12321,-88.1321,2.2,17.0
"""
_format_name = 'csv'
_io_registry_can_write = True
_description = 'Comma-separated-values'
header_class = CsvHeader
data_class = CsvData
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust row if it is too short.
If a data row is shorter than the header, add empty values to make it the
right length.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table.
"""
if len(str_vals) < ncols:
str_vals.extend((ncols - len(str_vals)) * [''])
return str_vals
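# Hedged sketch: short rows are padded by ``inconsistent_handler`` above, so
# missing trailing fields come back masked.
def _example_read_csv():
    from astropy.io import ascii
    t = ascii.read(['a,b,c', '1,2,3', '4,5'], format='csv')
    assert bool(t['c'].mask[1])  # the padded field is masked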
class RdbHeader(TabHeader):
"""
Header for RDB tables
"""
col_type_map = {'n': core.NumType,
's': core.StrType}
def get_type_map_key(self, col):
return col.raw_type[-1]
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
This is a specialized get_cols for the RDB type:
Line 0: RDB col names
Line 1: RDB col definitions
Line 2+: RDB data rows
Parameters
----------
lines : list
List of table lines
Returns
-------
None
"""
header_lines = self.process_lines(lines) # this is a generator
header_vals_list = [hl for _, hl in zip(range(2), self.splitter(header_lines))]
if len(header_vals_list) != 2:
raise ValueError('RDB header requires 2 lines')
self.names, raw_types = header_vals_list
if len(self.names) != len(raw_types):
raise core.InconsistentTableError('RDB header mismatch between number of column names and column types.')
if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in raw_types):
raise core.InconsistentTableError('RDB types definitions do not all match [num](N|S): {}'.format(raw_types))
self._set_cols_from_names()
for col, raw_type in zip(self.cols, raw_types):
col.raw_type = raw_type
col.type = self.get_col_type(col)
def write(self, lines):
lines.append(self.splitter.join(self.colnames))
rdb_types = []
for col in self.cols:
# Check if dtype.kind is string or unicode. See help(np.core.numerictypes)
rdb_type = 'S' if col.info.dtype.kind in ('S', 'U') else 'N'
rdb_types.append(rdb_type)
lines.append(self.splitter.join(rdb_types))
class RdbData(TabData):
"""
Data reader for RDB data. Starts reading at line 2.
"""
start_line = 2
class Rdb(Tab):
"""
Read a tab-separated file with an extra line after the column definition
line. The RDB format meets this definition. Example::
col1 <tab> col2 <tab> col3
N <tab> S <tab> N
1 <tab> 2 <tab> 5
    In this reader the second line supplies the column type codes and is not
    read as data.
"""
_format_name = 'rdb'
_io_registry_format_aliases = ['rdb']
_io_registry_suffix = '.rdb'
_description = 'Tab-separated with a type definition header line'
header_class = RdbHeader
data_class = RdbData
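# Hedged sketch: reading the RDB format, whose second line carries the
# N (numeric) / S (string) column type codes.
def _example_read_rdb():
    from astropy.io import ascii
    t = ascii.read(['col1\tcol2', 'N\tS', '1\thello'], format='rdb')
    assert t['col1'][0] == 1 and t['col2'][0] == 'hello'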
|
311e97bd4dc5b39842228e7b5795f54d6de63401255f80816324967a89dc5b0f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
""" An extensible ASCII table reader and writer.
core.py:
Core base classes and functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import copy
import csv
import functools
import itertools
import operator
import os
import re
import warnings
from collections import OrderedDict
from contextlib import suppress
from io import StringIO
import numpy
from ...utils.exceptions import AstropyWarning
from ...table import Table
from ...utils.data import get_readable_fileobj
from . import connect
# Global dictionary mapping format arg to the corresponding Reader class
FORMAT_CLASSES = {}
# Similar dictionary for fast readers
FAST_CLASSES = {}
class CsvWriter:
"""
Internal class to replace the csv writer ``writerow`` and ``writerows``
functions so that in the case of ``delimiter=' '`` and
``quoting=csv.QUOTE_MINIMAL``, the output field value is quoted for empty
fields (when value == '').
This changes the API slightly in that the writerow() and writerows()
methods return the output written string instead of the length of
that string.
Examples
--------
>>> from astropy.io.ascii.core import CsvWriter
>>> writer = CsvWriter(delimiter=' ')
>>> print(writer.writerow(['hello', '', 'world']))
hello "" world
"""
# Random 16-character string that gets injected instead of any
# empty fields and is then replaced post-write with doubled-quotechar.
# Created with:
# ''.join(random.choice(string.printable[:90]) for _ in range(16))
replace_sentinel = '2b=48Av%0-V3p>bX'
def __init__(self, csvfile=None, **kwargs):
self.csvfile = csvfile
# Temporary StringIO for catching the real csv.writer() object output
self.temp_out = StringIO()
self.writer = csv.writer(self.temp_out, **kwargs)
dialect = self.writer.dialect
self.quotechar2 = dialect.quotechar * 2
self.quote_empty = (dialect.quoting == csv.QUOTE_MINIMAL) and (dialect.delimiter == ' ')
def writerow(self, values):
"""
Similar to csv.writer.writerow but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerow, values, has_empty)
def writerows(self, values_list):
"""
Similar to csv.writer.writerows but with the custom quoting behavior.
Returns the written string instead of the length of that string.
"""
has_empty = False
# If QUOTE_MINIMAL and space-delimited then replace empty fields with
# the sentinel value.
if self.quote_empty:
for values in values_list:
for i, value in enumerate(values):
if value == '':
has_empty = True
values[i] = self.replace_sentinel
return self._writerow(self.writer.writerows, values_list, has_empty)
def _writerow(self, writerow_func, values, has_empty):
"""
Call ``writerow_func`` (either writerow or writerows) with ``values``.
If it has empty fields that have been replaced then change those
sentinel strings back to quoted empty strings, e.g. ``""``.
"""
# Clear the temporary StringIO buffer that self.writer writes into and
# then call the real csv.writer().writerow or writerows with values.
self.temp_out.seek(0)
self.temp_out.truncate()
writerow_func(values)
row_string = self.temp_out.getvalue()
if self.quote_empty and has_empty:
row_string = re.sub(self.replace_sentinel, self.quotechar2, row_string)
        # If self.csvfile is defined then write the output.  In practice the
        # pure Python writer calls with csvfile=None, while the fast writer
        # calls with a file-like object.
if self.csvfile:
self.csvfile.write(row_string)
return row_string
class MaskedConstant(numpy.ma.core.MaskedConstant):
"""A trivial extension of numpy.ma.masked
We want to be able to put the generic term ``masked`` into a dictionary.
The constant ``numpy.ma.masked`` is not hashable (see
https://github.com/numpy/numpy/issues/4660), so we need to extend it
here with a hash value.
See https://github.com/numpy/numpy/issues/11021 for rationale for
__copy__ and __deepcopy__ methods.
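
    A minimal illustrative sketch (``masked`` is the module-level instance
    created just below this class; ``copy`` is the stdlib module)::

        >>> fill_map = {masked: ''}        # hashable, so usable as a dict key
        >>> copy.copy(masked) is masked    # copies preserve the singleton
        True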
"""
def __hash__(self):
'''All instances of this class shall have the same hash.'''
# Any large number will do.
return 1234567890
def __copy__(self):
"""This is a singleton so just return self."""
return self
def __deepcopy__(self, memo):
return self
masked = MaskedConstant()
class InconsistentTableError(ValueError):
"""
Indicates that an input table is inconsistent in some way.
The default behavior of ``BaseReader`` is to throw an instance of
this class if a data row doesn't match the header.
"""
class OptionalTableImportError(ImportError):
"""
Indicates that a dependency for table reading is not present.
An instance of this class is raised whenever an optional reader
with certain required dependencies cannot operate because of
an ImportError.
"""
class ParameterError(NotImplementedError):
"""
Indicates that a reader cannot handle a passed parameter.
The C-based fast readers in ``io.ascii`` raise an instance of
this error class upon encountering a parameter that the
C engine cannot handle.
"""
class FastOptionsError(NotImplementedError):
"""
Indicates that one of the specified options for fast
reading is invalid.
"""
class NoType:
"""
Superclass for ``StrType`` and ``NumType`` classes.
This class is the default type of ``Column`` and provides a base
class for other data types.
"""
class StrType(NoType):
"""
Indicates that a column consists of text data.
"""
class NumType(NoType):
"""
Indicates that a column consists of numerical data.
"""
class FloatType(NumType):
"""
Describes floating-point data.
"""
class BoolType(NoType):
"""
Describes boolean data.
"""
class IntType(NumType):
"""
Describes integer data.
"""
class AllType(StrType, FloatType, IntType):
"""
Subclass of all other data types.
This type is returned by ``convert_numpy`` if the given numpy
type does not match ``StrType``, ``FloatType``, or ``IntType``.
"""
class Column:
"""Table column.
The key attributes of a Column object are:
* **name** : column name
* **type** : column type (NoType, StrType, NumType, FloatType, IntType)
* **dtype** : numpy dtype (optional, overrides **type** if set)
* **str_vals** : list of column values as strings
* **data** : list of converted column values
"""
def __init__(self, name):
self.name = name
self.type = NoType # Generic type (Int, Float, Str etc)
self.dtype = None # Numpy dtype if available
self.str_vals = []
self.fill_values = {}
class BaseInputter:
"""
Get the lines from the table input and return a list of lines.
"""
encoding = None
"""Encoding used to read the file"""
def get_lines(self, table):
"""
Get the lines from the ``table`` input. The input table can be one of:
* File name
* String (newline separated) with all header and data lines (must have at least 2 lines)
* File-like object with read() method
* List of strings
Parameters
----------
table : str, file_like, list
Can be either a file name, string (newline separated) with all header and data
lines (must have at least 2 lines), a file-like object with a ``read()`` method,
or a list of strings.
Returns
-------
lines : list
List of lines
"""
try:
if (hasattr(table, 'read') or
('\n' not in table + '' and '\r' not in table + '')):
with get_readable_fileobj(table,
encoding=self.encoding) as fileobj:
table = fileobj.read()
lines = table.splitlines()
except TypeError:
try:
# See if table supports indexing, slicing, and iteration
table[0]
table[0:1]
iter(table)
lines = table
except TypeError:
raise TypeError(
'Input "table" must be a string (filename or data) or an iterable')
return self.process_lines(lines)
def process_lines(self, lines):
"""Process lines for subsequent use. In the default case do nothing.
This routine is not generally intended for removing comment lines or
stripping whitespace. These are done (if needed) in the header and
data line processing.
Override this method if something more has to be done to convert raw
input lines to the table rows. For example the
ContinuationLinesInputter derived class accounts for continuation
characters if a row is split into lines."""
return lines
class BaseSplitter:
"""
Base splitter that uses python's split method to do the work.
This does not handle quoted values. A key feature is the formulation of
__call__ as a generator that returns a list of the split line values at
each iteration.
There are two methods that are intended to be overridden, first
``process_line()`` to do pre-processing on each input line before splitting
and ``process_val()`` to do post-processing on each split string value. By
default these apply the string ``strip()`` function. These can be set to
another function via the instance attribute or be disabled entirely, for
example::
reader.header.splitter.process_val = lambda x: x.lstrip()
reader.data.splitter.process_val = None
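
    A minimal doctest-style sketch of the default behavior::

        >>> splitter = BaseSplitter()
        >>> list(splitter(['  a  b ', 'c d']))
        [['a', 'b'], ['c', 'd']]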
"""
delimiter = None
""" one-character string used to separate fields """
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end."""
return line.strip()
def process_val(self, val):
"""Remove whitespace at the beginning or end of value."""
return val.strip()
def __call__(self, lines):
if self.process_line:
lines = (self.process_line(x) for x in lines)
for line in lines:
vals = line.split(self.delimiter)
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
if self.delimiter is None:
delimiter = ' '
else:
delimiter = self.delimiter
return delimiter.join(str(x) for x in vals)
class DefaultSplitter(BaseSplitter):
"""Default class to split strings into columns using python csv. The class
attributes are taken from the csv Dialect class.
Typical usage::
# lines = ..
splitter = ascii.DefaultSplitter()
for col_vals in splitter(lines):
for col_val in col_vals:
...
"""
delimiter = ' '
""" one-character string used to separate fields. """
quotechar = '"'
""" control how instances of *quotechar* in a field are quoted """
doublequote = True
""" character to remove special meaning from following character """
escapechar = None
""" one-character stringto quote fields containing special characters """
quoting = csv.QUOTE_MINIMAL
""" control when quotes are recognised by the reader """
skipinitialspace = True
""" ignore whitespace immediately following the delimiter """
csv_writer = None
csv_writer_out = StringIO()
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. This is especially useful for
whitespace-delimited files to prevent spurious columns at the beginning or end.
If splitting on whitespace then replace unquoted tabs with space first"""
if self.delimiter == r'\s':
line = _replace_tab_with_space(line, self.escapechar, self.quotechar)
return line.strip()
def __call__(self, lines):
"""Return an iterator over the table ``lines``, where each iterator output
is a list of the split line values.
Parameters
----------
lines : list
List of table lines
Returns
-------
lines : iterator
"""
if self.process_line:
lines = [self.process_line(x) for x in lines]
delimiter = ' ' if self.delimiter == r'\s' else self.delimiter
csv_reader = csv.reader(lines,
delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
skipinitialspace=self.skipinitialspace
)
for vals in csv_reader:
if self.process_val:
yield [self.process_val(x) for x in vals]
else:
yield vals
def join(self, vals):
delimiter = ' ' if self.delimiter is None else str(self.delimiter)
if self.csv_writer is None:
self.csv_writer = CsvWriter(delimiter=delimiter,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar,
quoting=self.quoting,
lineterminator='')
if self.process_val:
vals = [self.process_val(x) for x in vals]
out = self.csv_writer.writerow(vals)
return out
def _replace_tab_with_space(line, escapechar, quotechar):
"""Replace tabs with spaces in given string, preserving quoted substrings
Parameters
----------
line : str
String containing tabs to be replaced with spaces.
escapechar : str
Character in ``line`` used to escape special characters.
quotechar : str
Character in ``line`` indicating the start/end of a substring.
Returns
-------
line : str
A copy of ``line`` with tabs replaced by spaces, preserving quoted substrings.
"""
newline = []
in_quote = False
lastchar = 'NONE'
for char in line:
if char == quotechar and lastchar != escapechar:
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
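# Example sketch: _replace_tab_with_space('a\tb "c\td"', None, '"') returns
# 'a b "c\td"' -- the unquoted tab becomes a space, the quoted one survives.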
def _get_line_index(line_or_func, lines):
"""Return the appropriate line index, depending on ``line_or_func`` which
can be either a function, a positive or negative int, or None.
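
    For example::

        >>> _get_line_index(1, ['a', 'b', 'c'])
        1
        >>> _get_line_index(-1, ['a', 'b', 'c'])
        2
        >>> _get_line_index(None, ['a', 'b', 'c']) is None
        True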
"""
if hasattr(line_or_func, '__call__'):
return line_or_func(lines)
elif line_or_func:
if line_or_func >= 0:
return line_or_func
else:
n_lines = sum(1 for line in lines)
return n_lines + line_or_func
else:
return line_or_func
class BaseHeader:
"""
Base table header reader
"""
auto_format = 'col{}'
""" format string for auto-generating column names """
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
names = None
""" list of names corresponding to each data column """
write_comment = False
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
def __init__(self):
self.splitter = self.splitter_class()
def _set_cols_from_names(self):
self.cols = [Column(name=x) for x in self.names]
def update_meta(self, lines, meta):
"""
Extract any table-level metadata, e.g. keywords, comments, column metadata, from
the table ``lines`` and update the OrderedDict ``meta`` in place. This base
method extracts comment lines and stores them in ``meta`` for output.
"""
if self.comment:
re_comment = re.compile(self.comment)
comment_lines = [x for x in lines if re_comment.match(x)]
else:
comment_lines = []
comment_lines = [re.sub('^' + self.comment, '', x).strip()
for x in comment_lines]
if comment_lines:
meta.setdefault('table', {})['comments'] = comment_lines
def get_cols(self, lines):
"""Initialize the header Column objects from the table ``lines``.
Based on the previously set Header attributes find or create the column names.
Sets ``self.cols`` with the list of Columns.
Parameters
----------
lines : list
List of table lines
"""
start_line = _get_line_index(self.start_line, self.process_lines(lines))
if start_line is None:
# No header line so auto-generate names from n_data_cols
# Get the data values from the first line of table data to determine n_data_cols
try:
first_data_vals = next(self.data.get_str_vals())
except StopIteration:
raise InconsistentTableError('No data lines found so cannot autogenerate '
'column names')
n_data_cols = len(first_data_vals)
self.names = [self.auto_format.format(i)
for i in range(1, n_data_cols + 1)]
else:
for i, line in enumerate(self.process_lines(lines)):
if i == start_line:
break
else: # No header line matching
raise ValueError('No header line found in table')
self.names = next(self.splitter([line]))
self._set_cols_from_names()
def process_lines(self, lines):
"""Generator to yield non-blank and non-comment lines"""
if self.comment:
re_comment = re.compile(self.comment)
# Yield non-comment lines
for line in lines:
if line.strip() and (not self.comment or not re_comment.match(line)):
yield line
def write_comments(self, lines, meta):
if self.write_comment is not False:
for comment in meta.get('comments', []):
lines.append(self.write_comment + comment)
def write(self, lines):
if self.start_line is not None:
for i, spacer_line in zip(range(self.start_line),
itertools.cycle(self.write_spacer_lines)):
lines.append(spacer_line)
lines.append(self.splitter.join([x.info.name for x in self.cols]))
@property
def colnames(self):
"""Return the column names of the table"""
return tuple(col.name if isinstance(col, Column) else col.info.name
for col in self.cols)
def get_type_map_key(self, col):
return col.raw_type
def get_col_type(self, col):
try:
type_map_key = self.get_type_map_key(col)
return self.col_type_map[type_map_key.lower()]
except KeyError:
            raise ValueError('Unknown data type "{}" for column "{}"'.format(
col.raw_type, col.name))
def check_column_names(self, names, strict_names, guessing):
"""
Check column names.
This must be done before applying the names transformation
so that guessing will fail appropriately if ``names`` is supplied.
For instance if the basic reader is given a table with no column header
row.
Parameters
----------
names : list
User-supplied list of column names
strict_names : bool
Whether to impose extra requirements on names
guessing : bool
True if this method is being called while guessing the table format
"""
if strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in self.colnames:
if (_is_number(name) or len(name) == 0
or name[0] in bads or name[-1] in bads):
raise InconsistentTableError('Column name {0!r} does not meet strict name requirements'
.format(name))
# When guessing require at least two columns
if guessing and len(self.colnames) <= 1:
raise ValueError('Table format guessing requires at least two columns, got {}'
.format(list(self.colnames)))
if names is not None and len(names) != len(self.colnames):
raise InconsistentTableError('Length of names argument ({0}) does not match number'
' of table columns ({1})'.format(len(names), len(self.colnames)))
class BaseData:
"""
Base table data reader.
"""
start_line = None
""" None, int, or a function of ``lines`` that returns None or int """
end_line = None
""" None, int, or a function of ``lines`` that returns None or int """
comment = None
""" Regular expression for comment lines """
splitter_class = DefaultSplitter
""" Splitter class for splitting data lines into columns """
write_spacer_lines = ['ASCII_TABLE_WRITE_SPACER_LINE']
fill_include_names = None
fill_exclude_names = None
fill_values = [(masked, '')]
formats = {}
def __init__(self):
# Need to make sure fill_values list is instance attribute, not class attribute.
# On read, this will be overwritten by the default in the ui.read (thus, in
# the current implementation there can be no different default for different
# Readers). On write, ui.py does not specify a default, so this line here matters.
self.fill_values = copy.copy(self.fill_values)
self.formats = copy.copy(self.formats)
self.splitter = self.splitter_class()
def process_lines(self, lines):
"""
Strip out comment lines and blank lines from list of ``lines``
Parameters
----------
lines : list
All lines in table
Returns
-------
lines : list
List of lines
"""
nonblank_lines = (x for x in lines if x.strip())
if self.comment:
re_comment = re.compile(self.comment)
return [x for x in nonblank_lines if not re_comment.match(x)]
else:
return [x for x in nonblank_lines]
def get_data_lines(self, lines):
"""Set the ``data_lines`` attribute to the lines slice comprising the
table data values."""
data_lines = self.process_lines(lines)
start_line = _get_line_index(self.start_line, data_lines)
end_line = _get_line_index(self.end_line, data_lines)
if start_line is not None or end_line is not None:
self.data_lines = data_lines[slice(start_line, end_line)]
else: # Don't copy entire data lines unless necessary
self.data_lines = data_lines
def get_str_vals(self):
"""Return a generator that returns a list of column values (as strings)
for each data line."""
return self.splitter(self.data_lines)
def masks(self, cols):
"""Set fill value for each column and then apply that fill value
        In the first step it is determined which value from ``fill_values`` applies
        to which column, using ``fill_include_names`` and ``fill_exclude_names``.
        In the second step all replacements are done for the appropriate columns.
"""
if self.fill_values:
self._set_fill_values(cols)
self._set_masks(cols)
def _set_fill_values(self, cols):
"""Set the fill values of the individual cols based on fill_values of BaseData
fill values has the following form:
<fill_spec> = (<bad_value>, <fill_value>, <optional col_name>...)
fill_values = <fill_spec> or list of <fill_spec>'s
"""
if self.fill_values:
# when we write tables the columns may be astropy.table.Columns
# which don't carry a fill_values by default
for col in cols:
if not hasattr(col, 'fill_values'):
col.fill_values = {}
# if input is only one <fill_spec>, then make it a list
with suppress(TypeError):
self.fill_values[0] + ''
self.fill_values = [self.fill_values]
# Step 1: Set the default list of columns which are affected by
# fill_values
colnames = set(self.header.colnames)
if self.fill_include_names is not None:
colnames.intersection_update(self.fill_include_names)
if self.fill_exclude_names is not None:
colnames.difference_update(self.fill_exclude_names)
# Step 2a: Find out which columns are affected by this tuple
# iterate over reversed order, so last condition is set first and
# overwritten by earlier conditions
for replacement in reversed(self.fill_values):
if len(replacement) < 2:
raise ValueError("Format of fill_values must be "
"(<bad>, <fill>, <optional col1>, ...)")
elif len(replacement) == 2:
affect_cols = colnames
else:
affect_cols = replacement[2:]
for i, key in ((i, x) for i, x in enumerate(self.header.colnames)
if x in affect_cols):
cols[i].fill_values[replacement[0]] = str(replacement[1])
def _set_masks(self, cols):
"""Replace string values in col.str_vals and set masks"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
col.mask = numpy.zeros(len(col.str_vals), dtype=numpy.bool)
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
col.mask[i] = True
def _replace_vals(self, cols):
"""Replace string values in col.str_vals"""
if self.fill_values:
for col in (col for col in cols if col.fill_values):
for i, str_val in ((i, x) for i, x in enumerate(col.str_vals)
if x in col.fill_values):
col.str_vals[i] = col.fill_values[str_val]
if masked in col.fill_values and hasattr(col, 'mask'):
mask_val = col.fill_values[masked]
for i in col.mask.nonzero()[0]:
col.str_vals[i] = mask_val
def str_vals(self):
'''convert all values in table to a list of lists of strings'''
self._set_fill_values(self.cols)
self._set_col_formats()
for col in self.cols:
col.str_vals = list(col.info.iter_str_vals())
self._replace_vals(self.cols)
return [col.str_vals for col in self.cols]
    def write(self, lines):
        if hasattr(self.start_line, '__call__'):
            raise TypeError('start_line attribute cannot be callable for write()')
        else:
            data_start_line = self.start_line or 0
        # Pad with spacer lines up to the data start line, cycling through the
        # configured spacer strings (rather than appending the iterator itself).
        spacer_lines = itertools.cycle(self.write_spacer_lines)
        while len(lines) < data_start_line:
            lines.append(next(spacer_lines))
col_str_iters = self.str_vals()
for vals in zip(*col_str_iters):
lines.append(self.splitter.join(vals))
    def _set_col_formats(self):
        """Set the column format attribute from the ``formats`` dict, keyed by
        column name."""
        for col in self.cols:
            if col.info.name in self.formats:
                col.info.format = self.formats[col.info.name]
def convert_numpy(numpy_type):
"""Return a tuple containing a function which converts a list into a numpy
array and the type produced by the converter function.
Parameters
----------
numpy_type : numpy data-type
The numpy type required of an array returned by ``converter``. Must be a
valid `numpy type <https://docs.scipy.org/doc/numpy/user/basics.types.html>`_,
e.g. numpy.int, numpy.uint, numpy.int8, numpy.int64, numpy.float,
numpy.float64, numpy.str.
Returns
-------
(converter, converter_type) : (function, generic data-type)
``converter`` is a function which accepts a list and converts it to a
numpy array of type ``numpy_type``.
``converter_type`` tracks the generic data type produced by the converter
function.
Raises
------
ValueError
Raised by ``converter`` if the list elements could not be converted to
the required type.
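
    Examples
    --------
    A minimal sketch::

        >>> converter, ctype = convert_numpy(numpy.float64)
        >>> ctype is FloatType
        True
        >>> converter(['1', '2.5']).dtype
        dtype('float64')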
"""
# Infer converter type from an instance of numpy_type.
type_name = numpy.array([], dtype=numpy_type).dtype.name
if 'int' in type_name:
converter_type = IntType
elif 'float' in type_name:
converter_type = FloatType
elif 'bool' in type_name:
converter_type = BoolType
elif 'str' in type_name:
converter_type = StrType
else:
converter_type = AllType
def bool_converter(vals):
"""
Convert values "False" and "True" to bools. Raise an exception
for any other string values.
"""
if len(vals) == 0:
return numpy.array([], dtype=bool)
# Try a smaller subset first for a long array
if len(vals) > 10000:
svals = numpy.asarray(vals[:1000])
if not numpy.all((svals == 'False') | (svals == 'True')):
raise ValueError('bool input strings must be only False or True')
vals = numpy.asarray(vals)
trues = vals == 'True'
falses = vals == 'False'
if not numpy.all(trues | falses):
raise ValueError('bool input strings must be only False or True')
return trues
def generic_converter(vals):
return numpy.array(vals, numpy_type)
converter = bool_converter if converter_type is BoolType else generic_converter
return converter, converter_type
class BaseOutputter:
"""Output table as a dict of column objects keyed on column name. The
table data are stored as plain python lists within the column objects.
"""
converters = {}
# Derived classes must define default_converters and __call__
@staticmethod
def _validate_and_copy(col, converters):
"""Validate the format for the type converters and then copy those
which are valid converters for this column (i.e. converter type is
a subclass of col.type)"""
converters_out = []
try:
for converter in converters:
converter_func, converter_type = converter
if not issubclass(converter_type, NoType):
raise ValueError()
if issubclass(converter_type, col.type):
converters_out.append((converter_func, converter_type))
except (ValueError, TypeError):
raise ValueError('Error: invalid format for converters, see '
'documentation\n{}'.format(converters))
return converters_out
def _convert_vals(self, cols):
for col in cols:
# If a specific dtype was specified for a column, then use that
# to set the defaults, otherwise use the generic defaults.
default_converters = ([convert_numpy(col.dtype)] if col.dtype
else self.default_converters)
# If the user supplied a specific convert then that takes precedence over defaults
converters = self.converters.get(col.name, default_converters)
col.converters = self._validate_and_copy(col, converters)
            # Catch the last error in order to provide additional information
            # in case all attempts at column conversion fail. The initial
            # value of last_err will apply if no converters are defined
            # and the first col.converters[0] access raises IndexError.
last_err = 'no converters defined'
while not hasattr(col, 'data'):
try:
converter_func, converter_type = col.converters[0]
if not issubclass(converter_type, col.type):
raise TypeError('converter type does not match column type')
col.data = converter_func(col.str_vals)
col.type = converter_type
except (TypeError, ValueError) as err:
col.converters.pop(0)
last_err = err
except OverflowError as err:
# Overflow during conversion (most likely an int that doesn't fit in native C long).
# Put string at the top of the converters list for the next while iteration.
warnings.warn("OverflowError converting to {0} for column {1}, using string instead."
.format(converter_type.__name__, col.name), AstropyWarning)
col.converters.insert(0, convert_numpy(numpy.str))
last_err = err
except IndexError:
raise ValueError('Column {} failed to convert: {}'.format(col.name, last_err))
class TableOutputter(BaseOutputter):
"""
Output the table as an astropy.table.Table object.
"""
default_converters = [convert_numpy(numpy.int),
convert_numpy(numpy.float),
convert_numpy(numpy.str)]
def __call__(self, cols, meta):
# Sets col.data to numpy array and col.type to io.ascii Type class (e.g.
# FloatType) for each col.
self._convert_vals(cols)
# If there are any values that were filled and tagged with a mask bit then this
# will be a masked table. Otherwise use a plain table.
masked = any(hasattr(col, 'mask') and numpy.any(col.mask) for col in cols)
out = Table([x.data for x in cols], names=[x.name for x in cols], masked=masked,
meta=meta['table'])
for col, out_col in zip(cols, out.columns.values()):
if masked and hasattr(col, 'mask'):
out_col.data.mask = col.mask
for attr in ('format', 'unit', 'description'):
if hasattr(col, attr):
setattr(out_col, attr, getattr(col, attr))
if hasattr(col, 'meta'):
out_col.meta.update(col.meta)
return out
class MetaBaseReader(type):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
format = dct.get('_format_name')
if format is None:
return
fast = dct.get('_fast')
if fast is not None:
FAST_CLASSES[format] = cls
FORMAT_CLASSES[format] = cls
io_formats = ['ascii.' + format] + dct.get('_io_registry_format_aliases', [])
if dct.get('_io_registry_suffix'):
func = functools.partial(connect.io_identify, dct['_io_registry_suffix'])
connect.io_registry.register_identifier(io_formats[0], Table, func)
for io_format in io_formats:
func = functools.partial(connect.io_read, io_format)
connect.io_registry.register_reader(io_format, Table, func)
if dct.get('_io_registry_can_write', True):
func = functools.partial(connect.io_write, io_format)
connect.io_registry.register_writer(io_format, Table, func)
def _is_number(x):
with suppress(ValueError):
x = float(x)
return True
return False
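# For example, _is_number('1e3') is True while _is_number('abc') is False.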
def _apply_include_exclude_names(table, names, include_names, exclude_names):
"""
Apply names, include_names and exclude_names to a table.
Parameters
----------
table : `~astropy.table.Table`
Input table
names : list
List of names to override those in table (set to None to use existing names)
include_names : list
List of names to include in output
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
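
    For example (a minimal sketch)::

        >>> from astropy.table import Table
        >>> t = Table([[1], [2], [3]], names=('a', 'b', 'c'))
        >>> _apply_include_exclude_names(t, None, ['a', 'b'], ['b'])
        >>> t.colnames
        ['a']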
"""
if names is not None:
# Rename table column names to those passed by user
# Temporarily rename with names that are not in `names` or `table.colnames`.
# This ensures that rename succeeds regardless of existing names.
xxxs = 'x' * max(len(name) for name in list(names) + list(table.colnames))
for ii, colname in enumerate(table.colnames):
table.rename_column(colname, xxxs + str(ii))
for ii, name in enumerate(names):
table.rename_column(xxxs + str(ii), name)
names = set(table.colnames)
if include_names is not None:
names.intersection_update(include_names)
if exclude_names is not None:
names.difference_update(exclude_names)
if names != set(table.colnames):
remove_names = set(table.colnames) - set(names)
table.remove_columns(remove_names)
class BaseReader(metaclass=MetaBaseReader):
"""Class providing methods to read and write an ASCII table using the specified
header, data, inputter, and outputter instances.
Typical usage is to instantiate a Reader() object and customize the
``header``, ``data``, ``inputter``, and ``outputter`` attributes. Each
of these is an object of the corresponding class.
There is one method ``inconsistent_handler`` that can be used to customize the
behavior of ``read()`` in the event that a data row doesn't match the header.
The default behavior is to raise an InconsistentTableError.
"""
names = None
include_names = None
exclude_names = None
strict_names = False
guessing = False
encoding = None
header_class = BaseHeader
data_class = BaseData
inputter_class = BaseInputter
outputter_class = TableOutputter
def __init__(self):
self.header = self.header_class()
self.data = self.data_class()
self.inputter = self.inputter_class()
self.outputter = self.outputter_class()
# Data and Header instances benefit from a little cross-coupling. Header may need to
# know about number of data columns for auto-column name generation and Data may
        # need to know about the header (e.g. for fixed-width tables where widths are spec'd in the header).
self.data.header = self.header
self.header.data = self.data
# Metadata, consisting of table-level meta and column-level meta. The latter
# could include information about column type, description, formatting, etc,
# depending on the table meta format.
self.meta = OrderedDict(table=OrderedDict(),
cols=OrderedDict())
def read(self, table):
"""Read the ``table`` and return the results in a format determined by
the ``outputter`` attribute.
The ``table`` parameter is any string or object that can be processed
by the instance ``inputter``. For the base Inputter class ``table`` can be
one of:
* File name
* File-like object
* String (newline separated) with all header and data lines (must have at least 2 lines)
* List of strings
Parameters
----------
table : str, file_like, list
Input table.
Returns
-------
table : `~astropy.table.Table`
Output table
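
        A minimal usage sketch, going through the public factory in
        ``astropy.io.ascii`` (the column names here are illustrative)::

            >>> from astropy.io import ascii
            >>> reader = ascii.get_reader()
            >>> t = reader.read(['col1 col2', '1 2'])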
"""
# If ``table`` is a file then store the name in the ``data``
# attribute. The ``table`` is a "file" if it is a string
# without the new line specific to the OS.
with suppress(TypeError):
# Strings only
if os.linesep not in table + '':
self.data.table_name = os.path.basename(table)
# Get a list of the lines (rows) in the table
self.lines = self.inputter.get_lines(table)
        # Set self.data.data_lines to the slice of lines containing the data rows
self.data.get_data_lines(self.lines)
# Extract table meta values (e.g. keywords, comments, etc). Updates self.meta.
self.header.update_meta(self.lines, self.meta)
# Get the table column definitions
self.header.get_cols(self.lines)
# Make sure columns are valid
self.header.check_column_names(self.names, self.strict_names, self.guessing)
self.cols = cols = self.header.cols
self.data.splitter.cols = cols
n_cols = len(cols)
for i, str_vals in enumerate(self.data.get_str_vals()):
if len(str_vals) != n_cols:
str_vals = self.inconsistent_handler(str_vals, n_cols)
# if str_vals is None, we skip this row
if str_vals is None:
continue
# otherwise, we raise an error only if it is still inconsistent
if len(str_vals) != n_cols:
errmsg = ('Number of header columns ({}) inconsistent with'
' data columns ({}) at data line {}\n'
'Header values: {}\n'
'Data values: {}'.format(
n_cols, len(str_vals), i,
[x.name for x in cols], str_vals))
raise InconsistentTableError(errmsg)
for j, col in enumerate(cols):
col.str_vals.append(str_vals[j])
self.data.masks(cols)
if hasattr(self.header, 'table_meta'):
self.meta['table'].update(self.header.table_meta)
table = self.outputter(cols, self.meta)
self.cols = self.header.cols
_apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
return table
def inconsistent_handler(self, str_vals, ncols):
"""
Adjust or skip data entries if a row is inconsistent with the header.
The default implementation does no adjustment, and hence will always trigger
an exception in read() any time the number of data entries does not match
the header.
Note that this will *not* be called if the row already matches the header.
Parameters
----------
str_vals : list
A list of value strings from the current row of the table.
ncols : int
The expected number of entries from the table header.
Returns
-------
str_vals : list
List of strings to be parsed into data entries in the output table. If
the length of this list does not match ``ncols``, an exception will be
raised in read(). Can also be None, in which case the row will be
skipped.
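
        A sketch of a subclass that pads short rows and skips long ones
        (illustrative only, not part of the shipped readers)::

            class PaddingReader(BaseReader):
                def inconsistent_handler(self, str_vals, ncols):
                    if len(str_vals) < ncols:
                        return str_vals + [''] * (ncols - len(str_vals))
                    return None  # too many entries: skip this row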
"""
# an empty list will always trigger an InconsistentTableError in read()
return str_vals
@property
def comment_lines(self):
"""Return lines in the table that match header.comment regexp"""
if not hasattr(self, 'lines'):
raise ValueError('Table must be read prior to accessing the header comment lines')
if self.header.comment:
re_comment = re.compile(self.header.comment)
comment_lines = [x for x in self.lines if re_comment.match(x)]
else:
comment_lines = []
return comment_lines
def update_table_data(self, table):
"""
Update table columns in place if needed.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
return table
def write_header(self, lines, meta):
self.header.write_comments(lines, meta)
self.header.write(lines)
def write(self, table):
"""
Write ``table`` as list of strings.
Parameters
----------
table : `~astropy.table.Table`
Input table data.
Returns
-------
lines : list
List of strings corresponding to ASCII table
"""
# Check column names before altering
self.header.cols = list(table.columns.values())
self.header.check_column_names(self.names, self.strict_names, False)
# In-place update of columns in input ``table`` to reflect column
# filtering. Note that ``table`` is guaranteed to be a copy of the
# original user-supplied table.
_apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)
# This is a hook to allow updating the table columns after name
# filtering but before setting up to write the data. This is currently
# only used by ECSV and is otherwise just a pass-through.
table = self.update_table_data(table)
# Now use altered columns
new_cols = list(table.columns.values())
# link information about the columns to the writer object (i.e. self)
self.header.cols = new_cols
self.data.cols = new_cols
self.header.table_meta = table.meta
# Write header and data to lines list
lines = []
self.write_header(lines, table.meta)
self.data.write(lines)
return lines
class ContinuationLinesInputter(BaseInputter):
"""Inputter where lines ending in ``continuation_char`` are joined
with the subsequent line. Example::
col1 col2 col3
1 \
2 3
4 5 \
6
"""
continuation_char = '\\'
replace_char = ' '
# If no_continue is not None then lines matching this regex are not subject
# to line continuation. The initial use case here is Daophot. In this
# case the continuation character is just replaced with replace_char.
no_continue = None
def process_lines(self, lines):
re_no_continue = re.compile(self.no_continue) if self.no_continue else None
parts = []
outlines = []
for line in lines:
if re_no_continue and re_no_continue.match(line):
line = line.replace(self.continuation_char, self.replace_char)
if line.endswith(self.continuation_char):
parts.append(line.replace(self.continuation_char, self.replace_char))
else:
parts.append(line)
outlines.append(''.join(parts))
parts = []
return outlines
class WhitespaceSplitter(DefaultSplitter):
def process_line(self, line):
"""Replace tab with space within ``line`` while respecting quoted substrings"""
newline = []
in_quote = False
lastchar = None
for char in line:
if char == self.quotechar and (self.escapechar is None or
lastchar != self.escapechar):
in_quote = not in_quote
if char == '\t' and not in_quote:
char = ' '
lastchar = char
newline.append(char)
return ''.join(newline)
extra_reader_pars = ('Reader', 'Inputter', 'Outputter',
'delimiter', 'comment', 'quotechar', 'header_start',
'data_start', 'data_end', 'converters', 'encoding',
'data_Splitter', 'header_Splitter',
'names', 'include_names', 'exclude_names', 'strict_names',
'fill_values', 'fill_include_names', 'fill_exclude_names')
def _get_reader(Reader, Inputter=None, Outputter=None, **kwargs):
"""Initialize a table reader allowing for common customizations. See ui.get_reader()
for param docs. This routine is for internal (package) use only and is useful
because it depends only on the "core" module.
"""
from .fastbasic import FastBasic
if issubclass(Reader, FastBasic): # Fast readers handle args separately
if Inputter is not None:
kwargs['Inputter'] = Inputter
return Reader(**kwargs)
# If user explicitly passed a fast reader with enable='force'
# (e.g. by passing non-default options), raise an error for slow readers
if 'fast_reader' in kwargs:
if kwargs['fast_reader']['enable'] == 'force':
raise ParameterError('fast_reader required with ' +
'{0}, but this is not a fast C reader: {1}'
.format(kwargs['fast_reader'], Reader))
else:
del kwargs['fast_reader'] # Otherwise ignore fast_reader parameter
reader_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_reader_pars)
reader = Reader(**reader_kwargs)
if Inputter is not None:
reader.inputter = Inputter()
if Outputter is not None:
reader.outputter = Outputter()
# Issue #855 suggested to set data_start to header_start + default_header_length
# Thus, we need to retrieve this from the class definition before resetting these numbers.
try:
default_header_length = reader.data.start_line - reader.header.start_line
except TypeError: # Start line could be None or an instancemethod
default_header_length = None
if 'delimiter' in kwargs:
reader.header.splitter.delimiter = kwargs['delimiter']
reader.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
reader.header.comment = kwargs['comment']
reader.data.comment = kwargs['comment']
if 'quotechar' in kwargs:
reader.header.splitter.quotechar = kwargs['quotechar']
reader.data.splitter.quotechar = kwargs['quotechar']
if 'data_start' in kwargs:
reader.data.start_line = kwargs['data_start']
if 'data_end' in kwargs:
reader.data.end_line = kwargs['data_end']
if 'header_start' in kwargs:
if (reader.header.start_line is not None):
reader.header.start_line = kwargs['header_start']
# For FixedWidthTwoLine the data_start is calculated relative to the position line.
# However, position_line is given as absolute number and not relative to header_start.
# So, ignore this Reader here.
if (('data_start' not in kwargs) and (default_header_length is not None)
and reader._format_name not in ['fixed_width_two_line', 'commented_header']):
reader.data.start_line = reader.header.start_line + default_header_length
elif kwargs['header_start'] is not None:
# User trying to set a None header start to some value other than None
raise ValueError('header_start cannot be modified for this Reader')
if 'converters' in kwargs:
reader.outputter.converters = kwargs['converters']
if 'data_Splitter' in kwargs:
reader.data.splitter = kwargs['data_Splitter']()
if 'header_Splitter' in kwargs:
reader.header.splitter = kwargs['header_Splitter']()
if 'names' in kwargs:
reader.names = kwargs['names']
if 'include_names' in kwargs:
reader.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
reader.exclude_names = kwargs['exclude_names']
# Strict names is normally set only within the guessing process to
# indicate that column names cannot be numeric or have certain
# characters at the beginning or end. It gets used in
# BaseHeader.check_column_names().
if 'strict_names' in kwargs:
reader.strict_names = kwargs['strict_names']
if 'fill_values' in kwargs:
reader.data.fill_values = kwargs['fill_values']
if 'fill_include_names' in kwargs:
reader.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
reader.data.fill_exclude_names = kwargs['fill_exclude_names']
if 'encoding' in kwargs:
reader.encoding = kwargs['encoding']
reader.inputter.encoding = kwargs['encoding']
return reader
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'strip_whitespace',
'names', 'include_names', 'exclude_names',
'fill_values', 'fill_include_names',
'fill_exclude_names')
def _get_writer(Writer, fast_writer, **kwargs):
"""Initialize a table writer allowing for common customizations. This
routine is for internal (package) use only and is useful because it depends
only on the "core" module. """
from .fastbasic import FastBasic
# A value of None for fill_values imply getting the default string
# representation of masked values (depending on the writer class), but the
# machinery expects a list. The easiest here is to just pop the value off,
# i.e. fill_values=None is the same as not providing it at all.
if 'fill_values' in kwargs and kwargs['fill_values'] is None:
del kwargs['fill_values']
if issubclass(Writer, FastBasic): # Fast writers handle args separately
return Writer(**kwargs)
elif fast_writer and 'fast_{0}'.format(Writer._format_name) in FAST_CLASSES:
# Switch to fast writer
kwargs['fast_writer'] = fast_writer
return FAST_CLASSES['fast_{0}'.format(Writer._format_name)](**kwargs)
writer_kwargs = dict([k, v] for k, v in kwargs.items() if k not in extra_writer_pars)
writer = Writer(**writer_kwargs)
if 'delimiter' in kwargs:
writer.header.splitter.delimiter = kwargs['delimiter']
writer.data.splitter.delimiter = kwargs['delimiter']
if 'comment' in kwargs:
writer.header.write_comment = kwargs['comment']
writer.data.write_comment = kwargs['comment']
if 'quotechar' in kwargs:
writer.header.splitter.quotechar = kwargs['quotechar']
writer.data.splitter.quotechar = kwargs['quotechar']
if 'formats' in kwargs:
writer.data.formats = kwargs['formats']
if 'strip_whitespace' in kwargs:
if kwargs['strip_whitespace']:
# Restore the default SplitterClass process_val method which strips
# whitespace. This may have been changed in the Writer
# initialization (e.g. Rdb and Tab)
writer.data.splitter.process_val = operator.methodcaller('strip')
else:
writer.data.splitter.process_val = None
if 'names' in kwargs:
writer.header.names = kwargs['names']
if 'include_names' in kwargs:
writer.include_names = kwargs['include_names']
if 'exclude_names' in kwargs:
writer.exclude_names = kwargs['exclude_names']
if 'fill_values' in kwargs:
# Prepend user-specified values to the class default.
with suppress(TypeError, IndexError):
# Test if it looks like (match, replace_string, optional_colname),
# in which case make it a list
kwargs['fill_values'][1] + ''
kwargs['fill_values'] = [kwargs['fill_values']]
writer.data.fill_values = kwargs['fill_values'] + writer.data.fill_values
if 'fill_include_names' in kwargs:
writer.data.fill_include_names = kwargs['fill_include_names']
if 'fill_exclude_names' in kwargs:
writer.data.fill_exclude_names = kwargs['fill_exclude_names']
return writer
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
latex.py:
Classes to read and write LaTeX tables
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
from . import core
latexdicts = {'AA': {'tabletype': 'table',
'header_start': r'\hline \hline', 'header_end': r'\hline',
'data_end': r'\hline'},
'doublelines': {'tabletype': 'table',
'header_start': r'\hline \hline', 'header_end': r'\hline\hline',
'data_end': r'\hline\hline'},
'template': {'tabletype': 'tabletype', 'caption': 'caption',
'tablealign': 'tablealign',
'col_align': 'col_align', 'preamble': 'preamble',
'header_start': 'header_start',
'header_end': 'header_end', 'data_start': 'data_start',
'data_end': 'data_end', 'tablefoot': 'tablefoot',
'units': {'col1': 'unit of col1', 'col2': 'unit of col2'}}
}
RE_COMMENT = re.compile(r'(?<!\\)%') # % character but not \%
def add_dictval_to_list(adict, key, alist):
'''
Add a value from a dictionary to a list
Parameters
----------
adict : dictionary
key : hashable
alist : list
List where value should be added
'''
if key in adict:
if isinstance(adict[key], str):
alist.append(adict[key])
else:
alist.extend(adict[key])
def find_latex_line(lines, latex):
'''
    Find the first line which matches a pattern
Parameters
----------
lines : list
List of strings
latex : str
Search pattern
Returns
-------
line_num : int, None
Line number. Returns None, if no match was found
'''
re_string = re.compile(latex.replace('\\', '\\\\'))
for i, line in enumerate(lines):
if re_string.match(line):
return i
else:
return None
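# Example sketch: find_latex_line([r'\hline', r'\begin{tabular}{cc}'],
# r'\begin{tabular}') returns 1, and None when no line matches the pattern.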
class LatexInputter(core.BaseInputter):
def process_lines(self, lines):
return [lin.strip() for lin in lines]
class LatexSplitter(core.BaseSplitter):
    '''Split LaTeX table data. Default delimiter is `&`.
'''
delimiter = '&'
def __call__(self, lines):
last_line = RE_COMMENT.split(lines[-1])[0].strip()
if not last_line.endswith(r'\\'):
lines[-1] = last_line + r'\\'
return super().__call__(lines)
def process_line(self, line):
"""Remove whitespace at the beginning or end of line. Also remove
\\ at end of line"""
line = RE_COMMENT.split(line)[0]
line = line.strip()
if line.endswith(r'\\'):
line = line.rstrip(r'\\')
else:
raise core.InconsistentTableError(r'Lines in LaTeX table have to end with \\')
return line
def process_val(self, val):
"""Remove whitespace and {} at the beginning or end of value."""
val = val.strip()
if val and (val[0] == '{') and (val[-1] == '}'):
val = val[1:-1]
return val
def join(self, vals):
'''Join values together and add a few extra spaces for readability'''
delimiter = ' ' + self.delimiter + ' '
return delimiter.join(x.strip() for x in vals) + r' \\'
class LatexHeader(core.BaseHeader):
'''Class to read the header of Latex Tables'''
header_start = r'\begin{tabular}'
splitter_class = LatexSplitter
def start_line(self, lines):
line = find_latex_line(lines, self.header_start)
if line is not None:
return line + 1
else:
return None
def _get_units(self):
units = {}
col_units = [col.info.unit for col in self.cols]
for name, unit in zip(self.colnames, col_units):
if unit:
try:
units[name] = unit.to_string(format='latex_inline')
except AttributeError:
units[name] = unit
return units
def write(self, lines):
if 'col_align' not in self.latex:
self.latex['col_align'] = len(self.cols) * 'c'
if 'tablealign' in self.latex:
align = '[' + self.latex['tablealign'] + ']'
else:
align = ''
if self.latex['tabletype'] is not None:
lines.append(r'\begin{' + self.latex['tabletype'] + r'}' + align)
add_dictval_to_list(self.latex, 'preamble', lines)
if 'caption' in self.latex:
lines.append(r'\caption{' + self.latex['caption'] + '}')
lines.append(self.header_start + r'{' + self.latex['col_align'] + r'}')
add_dictval_to_list(self.latex, 'header_start', lines)
lines.append(self.splitter.join(self.colnames))
units = self._get_units()
if 'units' in self.latex:
units.update(self.latex['units'])
if units:
lines.append(self.splitter.join([units.get(name, ' ') for name in self.colnames]))
add_dictval_to_list(self.latex, 'header_end', lines)
class LatexData(core.BaseData):
'''Class to read the data in LaTeX tables'''
data_start = None
data_end = r'\end{tabular}'
splitter_class = LatexSplitter
def start_line(self, lines):
if self.data_start:
return find_latex_line(lines, self.data_start)
else:
start = self.header.start_line(lines)
if start is None:
raise core.InconsistentTableError(r'Could not find table start')
return start + 1
def end_line(self, lines):
if self.data_end:
return find_latex_line(lines, self.data_end)
else:
return None
def write(self, lines):
add_dictval_to_list(self.latex, 'data_start', lines)
core.BaseData.write(self, lines)
add_dictval_to_list(self.latex, 'data_end', lines)
lines.append(self.data_end)
add_dictval_to_list(self.latex, 'tablefoot', lines)
if self.latex['tabletype'] is not None:
lines.append(r'\end{' + self.latex['tabletype'] + '}')
class Latex(core.BaseReader):
r'''Write and read LaTeX tables.
This class implements some LaTeX specific commands. Its main
purpose is to write out a table in a form that LaTeX can compile. It
is beyond the scope of this class to implement every possible LaTeX
command, instead the focus is to generate a syntactically valid
LaTeX tables.
This class can also read simple LaTeX tables (one line per table
row, no ``\multicolumn`` or similar constructs), specifically, it
can read the tables that it writes.
    When reading a LaTeX table, the following keywords are accepted:
**ignore_latex_commands** :
Lines starting with these LaTeX commands will be treated as comments (i.e. ignored).
    When writing a LaTeX table, some keywords can customize the
format. Care has to be taken here, because python interprets ``\\``
in a string as an escape character. In order to pass this to the
output either format your strings as raw strings with the ``r``
specifier or use a double ``\\\\``.
Examples::
caption = r'My table \label{mytable}'
caption = 'My table \\\\label{mytable}'
**latexdict** : Dictionary of extra parameters for the LaTeX output
* tabletype : used for first and last line of table.
The default is ``\\begin{table}``. The following would generate a table,
which spans the whole page in a two-column document::
ascii.write(data, sys.stdout, Writer = ascii.Latex,
latexdict = {'tabletype': 'table*'})
If ``None``, the table environment will be dropped, keeping only
the ``tabular`` environment.
* tablealign : positioning of table in text.
The default is not to specify a position preference in the text.
If, e.g. the alignment is ``ht``, then the LaTeX will be ``\\begin{table}[ht]``.
* col_align : Alignment of columns
If not present all columns will be centered.
* caption : Table caption (string or list of strings)
This will appear above the table as it is the standard in
many scientific publications. If you prefer a caption below
the table, just write the full LaTeX command as
``latexdict['tablefoot'] = r'\caption{My table}'``
* preamble, header_start, header_end, data_start, data_end, tablefoot: Pure LaTeX
Each one can be a string or a list of strings. These strings
will be inserted into the table without any further
processing. See the examples below.
* units : dictionary of strings
Keys in this dictionary should be names of columns. If
present, a line in the LaTeX table directly below the column
names is added, which contains the values of the
dictionary. Example::
from astropy.io import ascii
data = {'name': ['bike', 'car'], 'mass': [75,1200], 'speed': [10, 130]}
ascii.write(data, Writer=ascii.Latex,
latexdict = {'units': {'mass': 'kg', 'speed': 'km/h'}})
If the column has no entry in the ``units`` dictionary, it defaults
to the **unit** attribute of the column. If this attribute is not
specified (i.e. it is None), the unit will be written as ``' '``.
Run the following code to see where each element of the
dictionary is inserted in the LaTeX table::
from astropy.io import ascii
data = {'cola': [1,2], 'colb': [3,4]}
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['template'])
Some table styles are predefined in the dictionary
    ``ascii.latex.latexdicts``. The following generates a table in the
    style preferred by A&A and some other journals::
ascii.write(data, Writer=ascii.Latex, latexdict=ascii.latex.latexdicts['AA'])
As an example, this generates a table, which spans all columns
and is centered on the page::
ascii.write(data, Writer=ascii.Latex, col_align='|lr|',
latexdict={'preamble': r'\begin{center}',
'tablefoot': r'\end{center}',
'tabletype': 'table*'})
**caption** : Set table caption
Shorthand for::
latexdict['caption'] = caption
**col_align** : Set the column alignment.
If not present this will be auto-generated for centered
columns. Shorthand for::
latexdict['col_align'] = col_align
'''
_format_name = 'latex'
_io_registry_format_aliases = ['latex']
_io_registry_suffix = '.tex'
_description = 'LaTeX table'
header_class = LatexHeader
data_class = LatexData
inputter_class = LatexInputter
def __init__(self, ignore_latex_commands=['hline', 'vspace', 'tableline', 'toprule', 'midrule', 'bottomrule'],
latexdict={}, caption='', col_align=None):
super().__init__()
self.latex = {}
# The latex dict drives the format of the table and needs to be shared
# with data and header
self.header.latex = self.latex
self.data.latex = self.latex
self.latex['tabletype'] = 'table'
self.latex.update(latexdict)
if caption:
self.latex['caption'] = caption
if col_align:
self.latex['col_align'] = col_align
self.ignore_latex_commands = ignore_latex_commands
self.header.comment = '%|' + '|'.join(
[r'\\' + command for command in self.ignore_latex_commands])
self.data.comment = self.header.comment
def write(self, table=None):
self.header.start_line = None
self.data.start_line = None
return core.BaseReader.write(self, table=table)
class AASTexHeaderSplitter(LatexSplitter):
r'''Extract column names from a `deluxetable`_.
This splitter expects the following LaTeX code **in a single line**:
\tablehead{\colhead{col1} & ... & \colhead{coln}}
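
    A sketch of the inverse operation performed by ``join``::

        >>> AASTexHeaderSplitter().join(['col1', 'col2'])
        '\\colhead{col1} & \\colhead{col2}'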
'''
    def __call__(self, lines):
        # Bypass LatexSplitter.__call__, which would append a trailing \\
        # to the last line; a \tablehead line does not carry one.
        return super(LatexSplitter, self).__call__(lines)
def process_line(self, line):
"""extract column names from tablehead
"""
line = line.split('%')[0]
line = line.replace(r'\tablehead', '')
line = line.strip()
if (line[0] == '{') and (line[-1] == '}'):
line = line[1:-1]
else:
raise core.InconsistentTableError(r'\tablehead is missing {}')
return line.replace(r'\colhead', '')
def join(self, vals):
return ' & '.join([r'\colhead{' + str(x) + '}' for x in vals])
class AASTexHeader(LatexHeader):
r'''In a `deluxetable
<http://fits.gsfc.nasa.gov/standard30/deluxetable.sty>`_ some header
keywords differ from standard LaTeX.
This header is modified to take that into account.
'''
header_start = r'\tablehead'
splitter_class = AASTexHeaderSplitter
def start_line(self, lines):
return find_latex_line(lines, r'\tablehead')
def write(self, lines):
if 'col_align' not in self.latex:
self.latex['col_align'] = len(self.cols) * 'c'
if 'tablealign' in self.latex:
align = '[' + self.latex['tablealign'] + ']'
else:
align = ''
lines.append(r'\begin{' + self.latex['tabletype'] + r'}{' + self.latex['col_align'] + r'}'
+ align)
add_dictval_to_list(self.latex, 'preamble', lines)
if 'caption' in self.latex:
lines.append(r'\tablecaption{' + self.latex['caption'] + '}')
tablehead = ' & '.join([r'\colhead{' + name + '}' for name in self.colnames])
units = self._get_units()
if 'units' in self.latex:
units.update(self.latex['units'])
if units:
tablehead += r'\\ ' + self.splitter.join([units.get(name, ' ')
for name in self.colnames])
lines.append(r'\tablehead{' + tablehead + '}')
class AASTexData(LatexData):
r'''In a `deluxetable`_ the data is enclosed in `\startdata` and `\enddata`
'''
data_start = r'\startdata'
data_end = r'\enddata'
def start_line(self, lines):
return find_latex_line(lines, self.data_start) + 1
def write(self, lines):
lines.append(self.data_start)
lines_length_initial = len(lines)
core.BaseData.write(self, lines)
        # Strip the trailing \\ (and surrounding spaces) appended to the last
        # data line, which would otherwise create an extra blank line.
        if len(lines) > lines_length_initial:
            # Compile with re.VERBOSE so the pattern can be written with
            # spaces for readability; it matches a trailing \\ on the line.
            re_final_line = re.compile(r'\s* \\ \\ \s* $', flags=re.VERBOSE)
lines[-1] = re.sub(re_final_line, '', lines[-1])
lines.append(self.data_end)
add_dictval_to_list(self.latex, 'tablefoot', lines)
lines.append(r'\end{' + self.latex['tabletype'] + r'}')
class AASTex(Latex):
'''Write and read AASTeX tables.
This class implements some AASTeX specific commands.
AASTeX is used for the AAS (American Astronomical Society)
publications like ApJ, ApJL and AJ.
It derives from the ``Latex`` reader and accepts the same
keywords. However, the keywords ``header_start``, ``header_end``,
``data_start`` and ``data_end`` in ``latexdict`` have no effect.
'''
_format_name = 'aastex'
_io_registry_format_aliases = ['aastex']
_io_registry_suffix = '' # AASTex inherits from Latex, so override this class attr
_description = 'AASTeX deluxetable used for AAS journals'
header_class = AASTexHeader
data_class = AASTexData
def __init__(self, **kwargs):
        super().__init__(**kwargs)
# check if tabletype was explicitly set by the user
if not (('latexdict' in kwargs) and ('tabletype' in kwargs['latexdict'])):
self.latex['tabletype'] = 'deluxetable'
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible HTML table reader and writer.
html.py:
Classes to read and write HTML tables
`BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
must be installed to read HTML tables.
"""
import warnings
import numpy
from . import core
from ...table import Column
from ...utils.xml import writer
from copy import deepcopy
class SoupString(str):
"""
Allows for strings to hold BeautifulSoup data.
"""
def __new__(cls, *args, **kwargs):
return str.__new__(cls, *args, **kwargs)
def __init__(self, val):
self.soup = val
class ListWriter:
"""
Allows for XMLWriter to write to a list instead of a file.
"""
def __init__(self, out):
self.out = out
def write(self, data):
self.out.append(data)
def identify_table(soup, htmldict, numtable):
"""
Checks whether the given BeautifulSoup tag is the table
the user intends to process.
"""
if soup is None or soup.name != 'table':
return False # Tag is not a <table>
elif 'table_id' not in htmldict:
return numtable == 1
table_id = htmldict['table_id']
if isinstance(table_id, str):
return 'id' in soup.attrs and soup['id'] == table_id
elif isinstance(table_id, int):
return table_id == numtable
# Return False if an invalid parameter is given
return False
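# Example sketch: with htmldict={'table_id': 2}, identify_table(tag, htmldict, 2)
# is True when ``tag`` is the second <table> element encountered.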
class HTMLInputter(core.BaseInputter):
"""
Input lines of HTML in a valid form.
This requires `BeautifulSoup
<http://www.crummy.com/software/BeautifulSoup/>`_ to be installed.
"""
def process_lines(self, lines):
"""
Convert the given input into a list of SoupString rows
for further processing.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise core.OptionalTableImportError('BeautifulSoup must be '
'installed to read HTML tables')
if 'parser' not in self.html:
with warnings.catch_warnings():
# Ignore bs4 parser warning #4550.
warnings.filterwarnings('ignore', '.*no parser was explicitly specified.*')
soup = BeautifulSoup('\n'.join(lines))
else: # use a custom backend parser
soup = BeautifulSoup('\n'.join(lines), self.html['parser'])
tables = soup.find_all('table')
for i, possible_table in enumerate(tables):
if identify_table(possible_table, self.html, i + 1):
table = possible_table # Find the correct table
break
else:
if isinstance(self.html['table_id'], int):
err_descr = 'number {0}'.format(self.html['table_id'])
else:
err_descr = "id '{0}'".format(self.html['table_id'])
raise core.InconsistentTableError(
'ERROR: HTML table {0} not found'.format(err_descr))
# Get all table rows
soup_list = [SoupString(x) for x in table.find_all('tr')]
return soup_list
class HTMLSplitter(core.BaseSplitter):
"""
Split HTML table data.
"""
def __call__(self, lines):
"""
Return HTML data from lines as a generator.
"""
for line in lines:
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
header_elements = soup.find_all('th')
if header_elements:
# Return multicolumns as tuples for HTMLHeader handling
yield [(el.text.strip(), el['colspan']) if el.has_attr('colspan')
else el.text.strip() for el in header_elements]
data_elements = soup.find_all('td')
if data_elements:
yield [el.text.strip() for el in data_elements]
if len(lines) == 0:
raise core.InconsistentTableError('HTML tables must contain data '
'in a <table> tag')
class HTMLOutputter(core.TableOutputter):
"""
Output the HTML data as an ``astropy.table.Table`` object.
This subclass allows for the final table to contain
multidimensional columns (defined using the colspan attribute
of <th>).
"""
    # Use the plain builtin types here: the ``numpy.int``/``numpy.float``/
    # ``numpy.str``/``numpy.unicode`` aliases are deprecated and were later
    # removed from numpy.
    default_converters = [core.convert_numpy(int),
                          core.convert_numpy(float),
                          core.convert_numpy(str)]
def __call__(self, cols, meta):
"""
Process the data in multidimensional columns.
"""
new_cols = []
col_num = 0
while col_num < len(cols):
col = cols[col_num]
if hasattr(col, 'colspan'):
# Join elements of spanned columns together into list of tuples
span_cols = cols[col_num:col_num + col.colspan]
new_col = core.Column(col.name)
new_col.str_vals = list(zip(*[x.str_vals for x in span_cols]))
new_cols.append(new_col)
col_num += col.colspan
else:
new_cols.append(col)
col_num += 1
return super().__call__(new_cols, meta)
class HTMLHeader(core.BaseHeader):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which header data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.th is not None:
return i
return None
def _set_cols_from_names(self):
"""
Set columns from header names, handling multicolumns appropriately.
"""
self.cols = []
new_names = []
for name in self.names:
if isinstance(name, tuple):
col = core.Column(name=name[0])
col.colspan = int(name[1])
self.cols.append(col)
new_names.append(name[0])
for i in range(1, int(name[1])):
# Add dummy columns
self.cols.append(core.Column(''))
new_names.append('')
else:
self.cols.append(core.Column(name=name))
new_names.append(name)
self.names = new_names
class HTMLData(core.BaseData):
splitter_class = HTMLSplitter
def start_line(self, lines):
"""
Return the line number at which table data begins.
"""
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.td is not None:
if soup.th is not None:
raise core.InconsistentTableError('HTML tables cannot '
'have headings and data in the same row')
return i
raise core.InconsistentTableError('No start line found for HTML data')
def end_line(self, lines):
"""
Return the line number at which table data ends.
"""
last_index = -1
for i, line in enumerate(lines):
if not isinstance(line, SoupString):
raise TypeError('HTML lines should be of type SoupString')
soup = line.soup
if soup.td is not None:
last_index = i
if last_index == -1:
return None
return last_index + 1
class HTML(core.BaseReader):
"""Read and write HTML tables.
In order to customize input and output, a dict of parameters may
be passed to this class holding specific customizations.
**htmldict** : Dictionary of parameters for HTML input/output.
* css : Customized styling
If present, this parameter will be included in a <style>
tag and will define stylistic attributes of the output.
* table_id : ID for the input table
If a string, this defines the HTML id of the table to be processed.
If an integer, this specifies the index of the input table in the
available tables. Unless this parameter is given, the reader will
use the first table found in the input file.
* multicol : Use multi-dimensional columns for output
The writer will output tuples as elements of multi-dimensional
columns if this parameter is true, and if not then it will
use the syntax 1.36583e-13 .. 1.36583e-13 for output. If not
present, this parameter will be true by default.
* raw_html_cols : column name or list of names with raw HTML content
This allows one to include raw HTML content in the column output,
for instance to include link references in a table. This option
requires that the bleach package be installed. Only whitelisted
tags are allowed through for security reasons (see the
raw_html_clean_kwargs arg).
* raw_html_clean_kwargs : dict of keyword args controlling HTML cleaning
Raw HTML will be cleaned to prevent unsafe HTML from ending up in
the table output. This is done by calling ``bleach.clean(data,
**raw_html_clean_kwargs)``. For details on the available options
(e.g. tag whitelist) see:
http://bleach.readthedocs.io/en/latest/clean.html
* parser : Specific HTML parsing library to use
If specified, this specifies which HTML parsing library
BeautifulSoup should use as a backend. The options to choose
from are 'html.parser' (the standard library parser), 'lxml'
(the recommended parser), 'xml' (lxml's XML parser), and
'html5lib'. html5lib is a highly lenient parser and therefore
might work correctly for unusual input if a different parser
fails.
* jsfiles : list of js files to include when writing table.
* cssfiles : list of css files to include when writing table.
* js : js script to include in the body when writing table.
* table_class : css class for the table
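    Examples
    --------
    A minimal, illustrative sketch (small in-memory table; the ``htmldict``
    keys are the parameters documented above):
    >>> from astropy.table import Table
    >>> t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
    >>> html_lines = HTML({'table_id': 'demo', 'multicol': False}).write(t)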
"""
_format_name = 'html'
_io_registry_format_aliases = ['html']
_io_registry_suffix = '.html'
_description = 'HTML table'
header_class = HTMLHeader
data_class = HTMLData
inputter_class = HTMLInputter
def __init__(self, htmldict={}):
"""
Initialize classes for HTML reading and writing.
"""
super().__init__()
self.html = deepcopy(htmldict)
if 'multicol' not in htmldict:
self.html['multicol'] = True
if 'table_id' not in htmldict:
self.html['table_id'] = 1
self.inputter.html = self.html
def read(self, table):
"""
Read the ``table`` in HTML format and return a resulting ``Table``.
"""
self.outputter = HTMLOutputter()
return super().read(table)
def write(self, table):
"""
Return data in ``table`` converted to HTML as a list of strings.
"""
cols = list(table.columns.values())
self.data.header.cols = cols
if isinstance(self.data.fill_values, tuple):
self.data.fill_values = [self.data.fill_values]
self.data._set_fill_values(cols)
lines = []
# Set HTML escaping to False for any column in the raw_html_cols input
raw_html_cols = self.html.get('raw_html_cols', [])
if isinstance(raw_html_cols, str):
raw_html_cols = [raw_html_cols] # Allow for a single string as input
cols_escaped = [col.info.name not in raw_html_cols for col in cols]
# Kwargs that get passed on to bleach.clean() if that is available.
raw_html_clean_kwargs = self.html.get('raw_html_clean_kwargs', {})
# Use XMLWriter to output HTML to lines
w = writer.XMLWriter(ListWriter(lines))
with w.tag('html'):
with w.tag('head'):
# Declare encoding and set CSS style for table
with w.tag('meta', attrib={'charset': 'utf-8'}):
pass
with w.tag('meta', attrib={'http-equiv': 'Content-type',
'content': 'text/html;charset=UTF-8'}):
pass
if 'css' in self.html:
with w.tag('style'):
w.data(self.html['css'])
if 'cssfiles' in self.html:
for filename in self.html['cssfiles']:
with w.tag('link', rel="stylesheet", href=filename, type='text/css'):
pass
if 'jsfiles' in self.html:
for filename in self.html['jsfiles']:
with w.tag('script', src=filename):
w.data('') # need this instead of pass to get <script></script>
with w.tag('body'):
if 'js' in self.html:
with w.xml_cleaning_method('none'):
with w.tag('script'):
w.data(self.html['js'])
if isinstance(self.html['table_id'], str):
html_table_id = self.html['table_id']
else:
html_table_id = None
if 'table_class' in self.html:
html_table_class = self.html['table_class']
attrib = {"class": html_table_class}
else:
attrib = {}
with w.tag('table', id=html_table_id, attrib=attrib):
with w.tag('thead'):
with w.tag('tr'):
for col in cols:
if len(col.shape) > 1 and self.html['multicol']:
# Set colspan attribute for multicolumns
w.start('th', colspan=col.shape[1])
else:
w.start('th')
w.data(col.info.name.strip())
w.end(indent=False)
col_str_iters = []
new_cols_escaped = []
# Make a container to hold any new_col objects created
# below for multicolumn elements. This is purely to
# maintain a reference for these objects during
# subsequent iteration to format column values. This
# requires that the weakref info._parent be maintained.
new_cols = []
for col, col_escaped in zip(cols, cols_escaped):
if len(col.shape) > 1 and self.html['multicol']:
span = col.shape[1]
for i in range(span):
# Split up multicolumns into separate columns
new_col = Column([el[i] for el in col])
new_col_iter_str_vals = self.fill_values(col, new_col.info.iter_str_vals())
col_str_iters.append(new_col_iter_str_vals)
new_cols_escaped.append(col_escaped)
new_cols.append(new_col)
else:
col_iter_str_vals = self.fill_values(col, col.info.iter_str_vals())
col_str_iters.append(col_iter_str_vals)
new_cols_escaped.append(col_escaped)
for row in zip(*col_str_iters):
with w.tag('tr'):
for el, col_escaped in zip(row, new_cols_escaped):
# Potentially disable HTML escaping for column
method = ('escape_xml' if col_escaped else 'bleach_clean')
with w.xml_cleaning_method(method, **raw_html_clean_kwargs):
w.start('td')
w.data(el.strip())
w.end(indent=False)
# Fixes XMLWriter's insertion of unwanted line breaks
return [''.join(lines)]
def fill_values(self, col, col_str_iters):
"""
Return an iterator of the values with replacements based on fill_values
"""
# check if the col is a masked column and has fill values
is_masked_column = hasattr(col, 'mask')
has_fill_values = hasattr(col, 'fill_values')
for idx, col_str in enumerate(col_str_iters):
if is_masked_column and has_fill_values:
if col.mask[idx]:
yield col.fill_values[core.masked]
continue
if has_fill_values:
if col_str in col.fill_values:
yield col.fill_values[col_str]
continue
yield col_str
|
072cda05af02e6c9920f2a621e3ceaa109620e0ddd49a650a2c38b1b4fc4b992 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""An extensible ASCII table reader and writer.
ui.py:
Provides the main user functions for reading and writing tables.
:Copyright: Smithsonian Astrophysical Observatory (2010)
:Author: Tom Aldcroft ([email protected])
"""
import re
import os
import sys
import copy
import time
import warnings
import contextlib
from io import StringIO
import numpy as np
from . import core
from . import basic
from . import cds
from . import daophot
from . import ecsv
from . import sextractor
from . import ipac
from . import latex
from . import html
from . import rst
from . import fastbasic
from . import cparser
from . import fixedwidth
from ...table import Table, vstack, MaskedColumn
from ...utils.data import get_readable_fileobj
from ...utils.exceptions import AstropyWarning, AstropyDeprecationWarning
_read_trace = []
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
# Default setting for guess parameter in read()
_GUESS = True
def _probably_html(table, maxchars=100000):
"""
Determine if ``table`` probably contains HTML content. See PR #3693 and issue
#3691 for context.
"""
if not isinstance(table, str):
try:
# If table is an iterable (list of strings) then take the first
# maxchars of these. Make sure this is something with random
# access to exclude a file-like object
table[0]
table[:1]
size = 0
for i, line in enumerate(table):
size += len(line)
if size > maxchars:
break
table = os.linesep.join(table[:i+1])
except Exception:
pass
if isinstance(table, str):
# Look for signs of an HTML table in the first maxchars characters
table = table[:maxchars]
# URL ending in .htm or .html
if re.match(r'( http[s]? | ftp | file ) :// .+ \.htm[l]?$', table,
re.IGNORECASE | re.VERBOSE):
return True
# Filename ending in .htm or .html which exists
if re.search(r'\.htm[l]?$', table[-5:], re.IGNORECASE) and os.path.exists(table):
return True
# Table starts with HTML document type declaration
if re.match(r'\s* <! \s* DOCTYPE \s* HTML', table, re.IGNORECASE | re.VERBOSE):
return True
# Look for <TABLE .. >, <TR .. >, <TD .. > tag openers.
if all(re.search(r'< \s* {0} [^>]* >'.format(element), table, re.IGNORECASE | re.VERBOSE)
for element in ('table', 'tr', 'td')):
return True
return False
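# Example (illustrative): a DOCTYPE declaration or a full set of
# <table>/<tr>/<td> openers triggers the heuristic; plain CSV does not.
# >>> _probably_html('<!DOCTYPE HTML><table><tr><td>1</td></tr></table>')
# True
# >>> _probably_html('a,b\n1,2')
# False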
def set_guess(guess):
"""
Set the default value of the ``guess`` parameter for read()
Parameters
----------
guess : bool
New default ``guess`` value (e.g., True or False)
"""
global _GUESS
_GUESS = guess
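# Example: disable format guessing globally, so subsequent read() calls must
# be given an explicit ``format`` (or succeed with the default Basic reader).
# >>> set_guess(False)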
def get_reader(Reader=None, Inputter=None, Outputter=None, **kwargs):
"""
Initialize a table reader allowing for common customizations. Most of the
default behavior for various parameters is determined by the Reader class.
Parameters
----------
Reader : `~astropy.io.ascii.BaseReader`
Reader class (DEPRECATED). Default is :class:`Basic`.
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dictionary of converters.
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns.
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns.
names : list
List of names corresponding to each data column.
include_names : list, optional
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``).
fill_values : dict
Specification of fill values for bad or missing table values.
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``).
Returns
-------
reader : `~astropy.io.ascii.BaseReader` subclass
ASCII format reader instance
"""
# This function is a light wrapper around core._get_reader to provide a
# public interface with a default Reader.
if Reader is None:
# Default reader is Basic unless fast reader is forced
fast_reader = _get_fast_reader_dict(kwargs)
if fast_reader['enable'] == 'force':
Reader = fastbasic.FastBasic
else:
Reader = basic.Basic
reader = core._get_reader(Reader, Inputter=Inputter, Outputter=Outputter, **kwargs)
return reader
def _get_format_class(format, ReaderWriter, label):
if format is not None and ReaderWriter is not None:
raise ValueError('Cannot supply both format and {0} keywords'.format(label))
if format is not None:
if format in core.FORMAT_CLASSES:
ReaderWriter = core.FORMAT_CLASSES[format]
else:
raise ValueError('ASCII format {0!r} not in allowed list {1}'
.format(format, sorted(core.FORMAT_CLASSES)))
return ReaderWriter
def _get_fast_reader_dict(kwargs):
"""Convert 'fast_reader' key in kwargs into a dict if not already and make sure
'enable' key is available.
"""
fast_reader = copy.deepcopy(kwargs.get('fast_reader', True))
if isinstance(fast_reader, dict):
fast_reader.setdefault('enable', 'force')
else:
fast_reader = {'enable': fast_reader}
return fast_reader
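# Example (illustrative) of the normalization performed above:
# >>> _get_fast_reader_dict({'fast_reader': True})
# {'enable': True}
# >>> _get_fast_reader_dict({'fast_reader': {'use_fast_converter': True}})
# {'use_fast_converter': True, 'enable': 'force'}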
def read(table, guess=None, **kwargs):
"""
Read the input ``table`` and return the table. Most of
the default behavior for various parameters is determined by the Reader
class.
Parameters
----------
table : str, file-like, list, pathlib.Path object
Input table as a file name, file-like object, list of strings,
        single newline-separated string, or pathlib.Path object.
guess : bool
        Try to guess the table format. The default `None` uses the
        module-wide default set by `set_guess` (initially True).
format : str, `~astropy.io.ascii.BaseReader`
Input table format
Inputter : `~astropy.io.ascii.BaseInputter`
Inputter class
Outputter : `~astropy.io.ascii.BaseOutputter`
Outputter class
delimiter : str
Column delimiter string
comment : str
Regular expression defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
header_start : int
Line index for the header line not counting comment or blank lines.
A line with only whitespace is considered blank.
data_start : int
Line index for the start of data not counting comment or blank lines.
A line with only whitespace is considered blank.
data_end : int
Line index for the end of data not counting comment or blank lines.
This value can be negative to count from the end.
converters : dict
Dictionary of converters
data_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split data columns
header_Splitter : `~astropy.io.ascii.BaseSplitter`
Splitter class to split header columns
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fill_values : dict
specification of fill values for bad or missing table values
fill_include_names : list
List of names to include in fill_values.
fill_exclude_names : list
List of names to exclude from fill_values (applied after ``fill_include_names``)
fast_reader : bool or dict
        Whether to use the C engine. This can also be a dict of options, each
        of which defaults to `False`; parameters for the options dict:
use_fast_converter: bool
enable faster but slightly imprecise floating point conversion method
parallel: bool or int
multiprocessing conversion using ``cpu_count()`` or ``'number'`` processes
exponent_style: str
One-character string defining the exponent or ``'Fortran'`` to auto-detect
Fortran-style scientific notation like ``'3.14159D+00'`` (``'E'``, ``'D'``, ``'Q'``),
all case-insensitive; default ``'E'``, all other imply ``use_fast_converter``
chunk_size : int
If supplied with a value > 0 then read the table in chunks of
approximately ``chunk_size`` bytes. Default is reading table in one pass.
chunk_generator : bool
If True and ``chunk_size > 0`` then return an iterator that returns a
table for each chunk. The default is to return a single stacked table
for all the chunks.
Reader : `~astropy.io.ascii.BaseReader`
Reader class (DEPRECATED)
    encoding : str
        Encoding used to read the file (default ``None``).
Returns
-------
dat : `~astropy.table.Table` OR <generator>
Output table
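    Examples
    --------
    A minimal sketch; with ``guess`` enabled the format is inferred from the
    content:
    >>> from astropy.io import ascii
    >>> dat = ascii.read(['col1 col2', '1 2', '3 4'])
    >>> len(dat)
    2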
"""
del _read_trace[:]
# Downstream readers might munge kwargs
kwargs = copy.deepcopy(kwargs)
# Convert 'fast_reader' key in kwargs into a dict if not already and make sure
# 'enable' key is available.
fast_reader = _get_fast_reader_dict(kwargs)
kwargs['fast_reader'] = fast_reader
if fast_reader['enable'] and fast_reader.get('chunk_size'):
return _read_in_chunks(table, **kwargs)
if 'fill_values' not in kwargs:
kwargs['fill_values'] = [('', '0')]
# If an Outputter is supplied in kwargs that will take precedence.
if 'Outputter' in kwargs: # user specified Outputter, not supported for fast reading
fast_reader['enable'] = False
format = kwargs.get('format')
# Dictionary arguments are passed by reference per default and thus need
# special protection:
new_kwargs = copy.deepcopy(kwargs)
kwargs['fast_reader'] = copy.deepcopy(fast_reader)
# Get the Reader class based on possible format and Reader kwarg inputs.
Reader = _get_format_class(format, kwargs.get('Reader'), 'Reader')
if Reader is not None:
new_kwargs['Reader'] = Reader
format = Reader._format_name
# Remove format keyword if there, this is only allowed in read() not get_reader()
if 'format' in new_kwargs:
del new_kwargs['format']
if guess is None:
guess = _GUESS
if guess:
# If ``table`` is probably an HTML file then tell guess function to add
# the HTML reader at the top of the guess list. This is in response to
# issue #3691 (and others) where libxml can segfault on a long non-HTML
# file, thus prompting removal of the HTML reader from the default
# guess list.
new_kwargs['guess_html'] = _probably_html(table)
# If `table` is a filename or readable file object then read in the
# file now. This prevents problems in Python 3 with the file object
# getting closed or left at the file end. See #3132, #3013, #3109,
# #2001. If a `readme` arg was passed that implies CDS format, in
# which case the original `table` as the data filename must be left
# intact.
if 'readme' not in new_kwargs:
encoding = kwargs.get('encoding')
try:
with get_readable_fileobj(table, encoding=encoding) as fileobj:
table = fileobj.read()
except ValueError: # unreadable or invalid binary file
raise
except Exception:
pass
else:
# Ensure that `table` has at least one \r or \n in it
# so that the core.BaseInputter test of
# ('\n' not in table and '\r' not in table)
# will fail and so `table` cannot be interpreted there
# as a filename. See #4160.
if not re.search(r'[\r\n]', table):
table = table + os.linesep
# If the table got successfully read then look at the content
# to see if is probably HTML, but only if it wasn't already
# identified as HTML based on the filename.
if not new_kwargs['guess_html']:
new_kwargs['guess_html'] = _probably_html(table)
# Get the table from guess in ``dat``. If ``dat`` comes back as None
# then there was just one set of kwargs in the guess list so fall
# through below to the non-guess way so that any problems result in a
# more useful traceback.
dat = _guess(table, new_kwargs, format, fast_reader)
if dat is None:
guess = False
if not guess:
if format is None:
reader = get_reader(**new_kwargs)
format = reader._format_name
# Try the fast reader version of `format` first if applicable. Note that
# if user specified a fast format (e.g. format='fast_basic') this test
# will fail and the else-clause below will be used.
if fast_reader['enable'] and 'fast_{0}'.format(format) in core.FAST_CLASSES:
fast_kwargs = copy.deepcopy(new_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES['fast_{0}'.format(format)]
fast_reader_rdr = get_reader(**fast_kwargs)
try:
dat = fast_reader_rdr.read(table)
_read_trace.append({'kwargs': copy.deepcopy(fast_kwargs),
'Reader': fast_reader_rdr.__class__,
'status': 'Success with fast reader (no guessing)'})
except (core.ParameterError, cparser.CParserError, UnicodeEncodeError) as err:
# special testing value to avoid falling back on the slow reader
if fast_reader['enable'] == 'force':
raise core.InconsistentTableError(
'fast reader {} exception: {}'
.format(fast_reader_rdr.__class__, err))
# If the fast reader doesn't work, try the slow version
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with slow reader after failing'
' with fast (no guessing)'})
else:
reader = get_reader(**new_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(new_kwargs),
'Reader': reader.__class__,
'status': 'Success with specified Reader class '
'(no guessing)'})
return dat
def _guess(table, read_kwargs, format, fast_reader):
"""
Try to read the table using various sets of keyword args. Start with the
standard guess list and filter to make it unique and consistent with
user-supplied read keyword args. Finally, if none of those work then
try the original user-supplied keyword args.
Parameters
----------
table : str, file-like, list
Input table as a file name, file-like object, list of strings, or
single newline-separated string.
read_kwargs : dict
Keyword arguments from user to be supplied to reader
format : str
Table format
fast_reader : dict
Options for the C engine fast reader. See read() function for details.
Returns
-------
dat : `~astropy.table.Table` or None
Output table or None if only one guess format was available
"""
# Keep a trace of all failed guesses kwarg
failed_kwargs = []
# Get an ordered list of read() keyword arg dicts that will be cycled
# through in order to guess the format.
full_list_guess = _get_guess_kwargs_list(read_kwargs)
# If a fast version of the reader is available, try that before the slow version
if (fast_reader['enable'] and format is not None and 'fast_{0}'.format(format) in
core.FAST_CLASSES):
fast_kwargs = copy.deepcopy(read_kwargs)
fast_kwargs['Reader'] = core.FAST_CLASSES['fast_{0}'.format(format)]
full_list_guess = [fast_kwargs] + full_list_guess
else:
fast_kwargs = None
# Filter the full guess list so that each entry is consistent with user kwarg inputs.
# This also removes any duplicates from the list.
filtered_guess_kwargs = []
fast_reader = read_kwargs.get('fast_reader')
for guess_kwargs in full_list_guess:
# If user specified slow reader then skip all fast readers
if (fast_reader['enable'] is False and
guess_kwargs['Reader'] in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
                                'Reader': guess_kwargs['Reader'],
'status': 'Disabled: reader only available in fast version',
'dt': '{0:.3f} ms'.format(0.0)})
continue
# If user required a fast reader then skip all non-fast readers
if (fast_reader['enable'] == 'force' and
guess_kwargs['Reader'] not in core.FAST_CLASSES.values()):
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
                                'Reader': guess_kwargs['Reader'],
'status': 'Disabled: no fast version of reader available',
'dt': '{0:.3f} ms'.format(0.0)})
continue
guess_kwargs_ok = True # guess_kwargs are consistent with user_kwargs?
for key, val in read_kwargs.items():
# Do guess_kwargs.update(read_kwargs) except that if guess_args has
# a conflicting key/val pair then skip this guess entirely.
if key not in guess_kwargs:
guess_kwargs[key] = copy.deepcopy(val)
elif val != guess_kwargs[key] and guess_kwargs != fast_kwargs:
guess_kwargs_ok = False
break
if not guess_kwargs_ok:
# User-supplied kwarg is inconsistent with the guess-supplied kwarg, e.g.
# user supplies delimiter="|" but the guess wants to try delimiter=" ",
# so skip the guess entirely.
continue
# Add the guess_kwargs to filtered list only if it is not already there.
if guess_kwargs not in filtered_guess_kwargs:
filtered_guess_kwargs.append(guess_kwargs)
# If there are not at least two formats to guess then return no table
# (None) to indicate that guessing did not occur. In that case the
# non-guess read() will occur and any problems will result in a more useful
# traceback.
if len(filtered_guess_kwargs) <= 1:
return None
# Define whitelist of exceptions that are expected from readers when
# processing invalid inputs. Note that OSError must fall through here
# so one cannot simply catch any exception.
guess_exception_classes = (core.InconsistentTableError, ValueError, TypeError,
AttributeError, core.OptionalTableImportError,
core.ParameterError, cparser.CParserError)
# Now cycle through each possible reader and associated keyword arguments.
# Try to read the table using those args, and if an exception occurs then
# keep track of the failed guess and move on.
for guess_kwargs in filtered_guess_kwargs:
t0 = time.time()
try:
# If guessing will try all Readers then use strict req'ts on column names
if 'Reader' not in read_kwargs:
guess_kwargs['strict_names'] = True
reader = get_reader(**guess_kwargs)
reader.guessing = True
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'Reader': reader.__class__,
'status': 'Success (guessing)',
'dt': '{0:.3f} ms'.format((time.time() - t0) * 1000)})
return dat
except guess_exception_classes as err:
_read_trace.append({'kwargs': copy.deepcopy(guess_kwargs),
'status': '{0}: {1}'.format(err.__class__.__name__,
str(err)),
'dt': '{0:.3f} ms'.format((time.time() - t0) * 1000)})
failed_kwargs.append(guess_kwargs)
else:
# Failed all guesses, try the original read_kwargs without column requirements
try:
reader = get_reader(**read_kwargs)
dat = reader.read(table)
_read_trace.append({'kwargs': copy.deepcopy(read_kwargs),
'Reader': reader.__class__,
'status': 'Success with original kwargs without strict_names '
'(guessing)'})
return dat
except guess_exception_classes as err:
            _read_trace.append({'kwargs': copy.deepcopy(read_kwargs),
'status': '{0}: {1}'.format(err.__class__.__name__,
str(err))})
failed_kwargs.append(read_kwargs)
lines = ['\nERROR: Unable to guess table format with the guesses listed below:']
for kwargs in failed_kwargs:
        sorted_keys = sorted(x for x in kwargs
                             if x not in ('Reader', 'Outputter'))
reader_repr = repr(kwargs.get('Reader', basic.Basic))
keys_vals = ['Reader:' + re.search(r"\.(\w+)'>", reader_repr).group(1)]
kwargs_sorted = ((key, kwargs[key]) for key in sorted_keys)
keys_vals.extend(['{}: {!r}'.format(key, val) for key, val in kwargs_sorted])
lines.append(' '.join(keys_vals))
msg = ['',
'************************************************************************',
'** ERROR: Unable to guess table format with the guesses listed above. **',
'** **',
'** To figure out why the table did not read, use guess=False and **',
'** fast_reader=False, along with any appropriate arguments to read(). **',
'** In particular specify the format and any known attributes like the **',
'** delimiter. **',
'************************************************************************']
lines.extend(msg)
raise core.InconsistentTableError('\n'.join(lines))
def _get_guess_kwargs_list(read_kwargs):
"""
Get the full list of reader keyword argument dicts that are the basis
for the format guessing process. The returned full list will then be:
- Filtered to be consistent with user-supplied kwargs
- Cleaned to have only unique entries
- Used one by one to try reading the input table
Note that the order of the guess list has been tuned over years of usage.
Maintainers need to be very careful about any adjustments as the
reasoning may not be immediately evident in all cases.
This list can (and usually does) include duplicates. This is a result
of the order tuning, but these duplicates get removed later.
Parameters
----------
read_kwargs : dict
User-supplied read keyword args
Returns
-------
guess_kwargs_list : list
List of read format keyword arg dicts
"""
guess_kwargs_list = []
# If the table is probably HTML based on some heuristics then start with the
# HTML reader.
if read_kwargs.pop('guess_html', None):
guess_kwargs_list.append(dict(Reader=html.HTML))
# Start with ECSV because an ECSV file will be read by Basic. This format
# has very specific header requirements and fails out quickly.
guess_kwargs_list.append(dict(Reader=ecsv.Ecsv))
# Now try readers that accept the user-supplied keyword arguments
# (actually include all here - check for compatibility of arguments later).
# FixedWidthTwoLine would also be read by Basic, so it needs to come first;
# same for RST.
for reader in (fixedwidth.FixedWidthTwoLine, rst.RST,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastRdb, basic.Rdb,
fastbasic.FastTab, basic.Tab,
cds.Cds, daophot.Daophot, sextractor.SExtractor,
ipac.Ipac, latex.Latex, latex.AASTex):
guess_kwargs_list.append(dict(Reader=reader))
# Cycle through the basic-style readers using all combinations of delimiter
# and quotechar.
for Reader in (fastbasic.FastCommentedHeader, basic.CommentedHeader,
fastbasic.FastBasic, basic.Basic,
fastbasic.FastNoHeader, basic.NoHeader):
for delimiter in ("|", ",", " ", r"\s"):
for quotechar in ('"', "'"):
guess_kwargs_list.append(dict(
Reader=Reader, delimiter=delimiter, quotechar=quotechar))
return guess_kwargs_list
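# Example (illustrative): with no user kwargs the guess list starts with the
# ECSV reader, then the fixed-width and RST readers, before the basic-style
# readers with their delimiter/quotechar combinations.
# >>> [gk['Reader'].__name__ for gk in _get_guess_kwargs_list({})[:3]]
# ['Ecsv', 'FixedWidthTwoLine', 'RST']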
def _read_in_chunks(table, **kwargs):
"""
For fast_reader read the ``table`` in chunks and vstack to create
a single table, OR return a generator of chunk tables.
"""
fast_reader = kwargs['fast_reader']
chunk_size = fast_reader.pop('chunk_size')
chunk_generator = fast_reader.pop('chunk_generator', False)
fast_reader['parallel'] = False # No parallel with chunks
tbl_chunks = _read_in_chunks_generator(table, chunk_size, **kwargs)
if chunk_generator:
return tbl_chunks
tbl0 = next(tbl_chunks)
masked = tbl0.masked
# Numpy won't allow resizing the original so make a copy here.
out_cols = {col.name: col.data.copy() for col in tbl0.itercols()}
str_kinds = ('S', 'U')
for tbl in tbl_chunks:
masked |= tbl.masked
for name, col in tbl.columns.items():
# Concatenate current column data and new column data
# If one of the inputs is string-like and the other is not, then
# convert the non-string to a string. In a perfect world this would
# be handled by numpy, but as of numpy 1.13 this results in a string
# dtype that is too long (https://github.com/numpy/numpy/issues/10062).
col1, col2 = out_cols[name], col.data
if col1.dtype.kind in str_kinds and col2.dtype.kind not in str_kinds:
col2 = np.array(col2.tolist(), dtype=col1.dtype.kind)
elif col2.dtype.kind in str_kinds and col1.dtype.kind not in str_kinds:
col1 = np.array(col1.tolist(), dtype=col2.dtype.kind)
# Choose either masked or normal concatenation
concatenate = np.ma.concatenate if masked else np.concatenate
out_cols[name] = concatenate([col1, col2])
# Make final table from numpy arrays, converting dict to list
out_cols = [out_cols[name] for name in tbl0.colnames]
out = tbl0.__class__(out_cols, names=tbl0.colnames, meta=tbl0.meta,
copy=False)
return out
def _read_in_chunks_generator(table, chunk_size, **kwargs):
"""
For fast_reader read the ``table`` in chunks and return a generator
of tables for each chunk.
"""
@contextlib.contextmanager
def passthrough_fileobj(fileobj, encoding=None):
"""Stub for get_readable_fileobj, which does not seem to work in Py3
        with an input file-like object; see #6460"""
yield fileobj
# Set up to coerce `table` input into a readable file object by selecting
# an appropriate function.
# Convert table-as-string to a File object. Finding a newline implies
# that the string is not a filename.
if (isinstance(table, str) and ('\n' in table or '\r' in table)):
table = StringIO(table)
fileobj_context = passthrough_fileobj
elif hasattr(table, 'read') and hasattr(table, 'seek'):
fileobj_context = passthrough_fileobj
else:
# string filename or pathlib
fileobj_context = get_readable_fileobj
# Set up for iterating over chunks
kwargs['fast_reader']['return_header_chars'] = True
header = '' # Table header (up to start of data)
prev_chunk_chars = '' # Chars from previous chunk after last newline
first_chunk = True # True for the first chunk, False afterward
with fileobj_context(table, encoding=kwargs.get('encoding')) as fh:
while True:
chunk = fh.read(chunk_size)
# Got fewer chars than requested, must be end of file
final_chunk = len(chunk) < chunk_size
# If this is the last chunk and there is only whitespace then break
if final_chunk and not re.search(r'\S', chunk):
break
# Step backwards from last character in chunk and find first newline
for idx in range(len(chunk) - 1, -1, -1):
if final_chunk or chunk[idx] == '\n':
break
else:
raise ValueError('no newline found in chunk (chunk_size too small?)')
# Stick on the header to the chunk part up to (and including) the
# last newline. Make sure the small strings are concatenated first.
complete_chunk = (header + prev_chunk_chars) + chunk[:idx + 1]
prev_chunk_chars = chunk[idx + 1:]
# Now read the chunk as a complete table
tbl = read(complete_chunk, guess=False, **kwargs)
# For the first chunk pop the meta key which contains the header
# characters (everything up to the start of data) then fix kwargs
# so it doesn't return that in meta any more.
if first_chunk:
header = tbl.meta.pop('__ascii_fast_reader_header_chars__')
first_chunk = False
yield tbl
if final_chunk:
break
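# Example (sketch; 'big_table.csv' is a hypothetical file): read a large
# table in ~100 MB chunks through the public read() interface, either
# stacked into one Table or as a generator of per-chunk Tables.
# >>> dat = read('big_table.csv', format='csv',
# ...            fast_reader={'chunk_size': 100 * 1000000})
# >>> chunks = read('big_table.csv', format='csv',
# ...               fast_reader={'chunk_size': 100 * 1000000,
# ...                            'chunk_generator': True})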
extra_writer_pars = ('delimiter', 'comment', 'quotechar', 'formats',
'names', 'include_names', 'exclude_names', 'strip_whitespace')
def get_writer(Writer=None, fast_writer=True, **kwargs):
"""
Initialize a table writer allowing for common customizations. Most of the
default behavior for various parameters is determined by the Writer class.
Parameters
----------
Writer : ``Writer``
Writer class (DEPRECATED). Defaults to :class:`Basic`.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
Returns
-------
writer : `~astropy.io.ascii.BaseReader` subclass
ASCII format writer instance
"""
if Writer is None:
Writer = basic.Basic
if 'strip_whitespace' not in kwargs:
kwargs['strip_whitespace'] = True
writer = core._get_writer(Writer, fast_writer, **kwargs)
# Handle the corner case of wanting to disable writing table comments for the
# commented_header format. This format *requires* a string for `write_comment`
# because that is used for the header column row, so it is not possible to
# set the input `comment` to None. Without adding a new keyword or assuming
# a default comment character, there is no other option but to tell user to
# simply remove the meta['comments'].
if (isinstance(writer, (basic.CommentedHeader, fastbasic.FastCommentedHeader))
and not isinstance(kwargs.get('comment', ''), str)):
raise ValueError("for the commented_header writer you must supply a string\n"
"value for the `comment` keyword. In order to disable writing\n"
"table comments use `del t.meta['comments']` prior to writing.")
return writer
def write(table, output=None, format=None, Writer=None, fast_writer=True, *,
overwrite=None, **kwargs):
"""Write the input ``table`` to ``filename``. Most of the default behavior
for various parameters is determined by the Writer class.
Parameters
----------
table : `~astropy.io.ascii.BaseReader`, array_like, str, file_like, list
Input table as a Reader object, Numpy struct array, file name,
file-like object, list of strings, or single newline-separated string.
output : str, file_like
        Output (filename or file-like object). Defaults to ``sys.stdout``.
format : str
Output table format. Defaults to 'basic'.
delimiter : str
Column delimiter string
comment : str
String defining a comment line in table
quotechar : str
One-character string to quote fields containing special characters
formats : dict
Dictionary of format specifiers or formatting functions
strip_whitespace : bool
Strip surrounding whitespace from column values.
names : list
List of names corresponding to each data column
include_names : list
List of names to include in output.
exclude_names : list
List of names to exclude from output (applied after ``include_names``)
fast_writer : bool
Whether to use the fast Cython writer.
overwrite : bool
If ``overwrite=None`` (default) and the file exists, then a
warning will be issued. In a future release this will instead
generate an exception. If ``overwrite=False`` and the file
exists, then an exception is raised.
This parameter is ignored when the ``output`` arg is not a string
(e.g., a file object).
Writer : ``Writer``
Writer class (DEPRECATED).
"""
if isinstance(output, str):
if os.path.lexists(output):
if overwrite is None:
warnings.warn(
"{} already exists. "
"Automatically overwriting ASCII files is deprecated. "
"Use the argument 'overwrite=True' in the future.".format(
output), AstropyDeprecationWarning)
elif not overwrite:
raise OSError("{} already exists".format(output))
if output is None:
output = sys.stdout
# Ensure that `table` is a Table subclass.
names = kwargs.get('names')
if isinstance(table, Table):
# Note that making a copy of the table here is inefficient but
# without this copy a number of tests break (e.g. in test_fixedwidth).
# See #7605.
new_tbl = table.__class__(table, names=names)
# This makes a copy of the table columns. This is subject to a
# corner-case problem if writing a table with masked columns to ECSV
# where serialize_method is set to 'data_mask'. In this case that
# attribute gets dropped in the copy, so do the copy here. This
# should be removed when `info` really contains all the attributes
# (#6720).
for new_col, col in zip(new_tbl.itercols(), table.itercols()):
if isinstance(col, MaskedColumn):
new_col.info.serialize_method = col.info.serialize_method
table = new_tbl
else:
table = Table(table, names=names)
table0 = table[:0].copy()
core._apply_include_exclude_names(table0, kwargs.get('names'),
kwargs.get('include_names'), kwargs.get('exclude_names'))
diff_format_with_names = set(kwargs.get('formats', [])) - set(table0.colnames)
if diff_format_with_names:
warnings.warn(
            'The key(s) {} specified in the formats argument do not match any column name.'
.format(diff_format_with_names), AstropyWarning)
if table.has_mixin_columns:
fast_writer = False
Writer = _get_format_class(format, Writer, 'Writer')
writer = get_writer(Writer=Writer, fast_writer=fast_writer, **kwargs)
if writer._format_name in core.FAST_CLASSES:
writer.write(table, output)
return
lines = writer.write(table)
# Write the lines to output
outstr = os.linesep.join(lines)
    if not hasattr(output, 'write'):
        # ``output`` is a filename: open it, write, and close it here.
        with open(output, 'w') as output_file:
            output_file.write(outstr)
            output_file.write(os.linesep)
    else:
        output.write(outstr)
        output.write(os.linesep)
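# Example (sketch; 'values.csv' is a hypothetical output path):
# >>> from astropy.table import Table
# >>> t = Table({'x': [1, 2], 'y': [3, 4]})
# >>> write(t, 'values.csv', format='csv', overwrite=True)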
def get_read_trace():
"""
Return a traceback of the attempted read formats for the last call to
`~astropy.io.ascii.read` where guessing was enabled. This is primarily for
debugging.
The return value is a list of dicts, where each dict includes the keyword
args ``kwargs`` used in the read call and the returned ``status``.
Returns
-------
trace : list of dicts
Ordered list of format guesses and status
"""
return copy.deepcopy(_read_trace)
|
b7e35d105ca380f650ced8d304afb2661b2a3671b6701bc71a0f45a353dfff35 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import copy
from collections import OrderedDict
from . import core
from ...table import Table
from . import cparser
from ...utils import set_locale
class FastBasic(metaclass=core.MetaBaseReader):
"""
This class is intended to handle the same format addressed by the
ordinary :class:`Basic` writer, but it acts as a wrapper for underlying C
code and is therefore much faster. Unlike the other ASCII readers and
writers, this class is not very extensible and is restricted
by optimization requirements.
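    Examples
    --------
    A minimal, illustrative sketch (newline-separated string input):
    >>> rdr = FastBasic()
    >>> t = rdr.read('a b\\n1 2')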
"""
_format_name = 'fast_basic'
_description = 'Basic table with custom delimiter using the fast C engine'
_fast = True
fill_extra_cols = False
guessing = False
strict_names = False
def __init__(self, default_kwargs={}, **user_kwargs):
# Make sure user does not set header_start to None for a reader
# that expects a non-None value (i.e. a number >= 0). This mimics
# what happens in the Basic reader.
if (default_kwargs.get('header_start', 0) is not None and
user_kwargs.get('header_start', 0) is None):
raise ValueError('header_start cannot be set to None for this Reader')
# Set up kwargs and copy any user kwargs. Use deepcopy user kwargs
# since they may contain a dict item which would end up as a ref to the
# original and get munged later (e.g. in cparser.pyx validation of
# fast_reader dict).
kwargs = copy.deepcopy(default_kwargs)
kwargs.update(copy.deepcopy(user_kwargs))
delimiter = kwargs.pop('delimiter', ' ')
self.delimiter = str(delimiter) if delimiter is not None else None
self.write_comment = kwargs.get('comment', '# ')
self.comment = kwargs.pop('comment', '#')
if self.comment is not None:
self.comment = str(self.comment)
self.quotechar = str(kwargs.pop('quotechar', '"'))
self.header_start = kwargs.pop('header_start', 0)
# If data_start is not specified, start reading
# data right after the header line
        data_start_default = user_kwargs.get(
            'data_start',
            self.header_start + 1 if self.header_start is not None else 1)
self.data_start = kwargs.pop('data_start', data_start_default)
self.kwargs = kwargs
self.strip_whitespace_lines = True
self.strip_whitespace_fields = True
def _read_header(self):
# Use the tokenizer by default -- this method
# can be overridden for specialized headers
self.engine.read_header()
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
if self.comment is not None and len(self.comment) != 1:
raise core.ParameterError("The C reader does not support a comment regex")
elif self.data_start is None:
raise core.ParameterError("The C reader does not allow data_start to be None")
elif self.header_start is not None and self.header_start < 0 and \
not isinstance(self, FastCommentedHeader):
raise core.ParameterError("The C reader does not allow header_start to be "
"negative except for commented-header files")
elif self.data_start < 0:
raise core.ParameterError("The C reader does not allow data_start to be negative")
elif len(self.delimiter) != 1:
raise core.ParameterError("The C reader only supports 1-char delimiters")
elif len(self.quotechar) != 1:
raise core.ParameterError("The C reader only supports a length-1 quote character")
elif 'converters' in self.kwargs:
raise core.ParameterError("The C reader does not support passing "
"specialized converters")
elif 'encoding' in self.kwargs:
raise core.ParameterError("The C reader does not use the encoding parameter")
elif 'Outputter' in self.kwargs:
raise core.ParameterError("The C reader does not use the Outputter parameter")
elif 'Inputter' in self.kwargs:
raise core.ParameterError("The C reader does not use the Inputter parameter")
elif 'data_Splitter' in self.kwargs or 'header_Splitter' in self.kwargs:
raise core.ParameterError("The C reader does not use a Splitter class")
self.strict_names = self.kwargs.pop('strict_names', False)
# Process fast_reader kwarg, which may or may not exist (though ui.py will always
# pass this as a dict with at least 'enable' set).
fast_reader = self.kwargs.get('fast_reader', True)
if not isinstance(fast_reader, dict):
fast_reader = {}
fast_reader.pop('enable', None)
self.return_header_chars = fast_reader.pop('return_header_chars', False)
# Put fast_reader dict back into kwargs.
self.kwargs['fast_reader'] = fast_reader
self.engine = cparser.CParser(table, self.strip_whitespace_lines,
self.strip_whitespace_fields,
delimiter=self.delimiter,
header_start=self.header_start,
comment=self.comment,
quotechar=self.quotechar,
data_start=self.data_start,
fill_extra_cols=self.fill_extra_cols,
**self.kwargs)
conversion_info = self._read_header()
self.check_header()
if conversion_info is not None:
try_int, try_float, try_string = conversion_info
else:
try_int = {}
try_float = {}
try_string = {}
with set_locale('C'):
data, comments = self.engine.read(try_int, try_float, try_string)
out = self.make_table(data, comments)
if self.return_header_chars:
out.meta['__ascii_fast_reader_header_chars__'] = self.engine.header_chars
return out
def make_table(self, data, comments):
"""Actually make the output table give the data and comments."""
meta = OrderedDict()
if comments:
meta['comments'] = comments
return Table(data, names=list(self.engine.get_names()), meta=meta)
def check_header(self):
names = self.engine.get_header_names() or self.engine.get_names()
if self.strict_names:
# Impose strict requirements on column names (normally used in guessing)
bads = [" ", ",", "|", "\t", "'", '"']
for name in names:
if (core._is_number(name) or
len(name) == 0 or
name[0] in bads or
name[-1] in bads):
raise ValueError('Column name {0!r} does not meet strict name requirements'
.format(name))
# When guessing require at least two columns
if self.guessing and len(names) <= 1:
raise ValueError('Table format guessing requires at least two columns, got {}'
.format(names))
def write(self, table, output):
"""
Use a fast Cython method to write table data to output,
where output is a filename or file-like object.
"""
self._write(table, output, {})
def _write(self, table, output, default_kwargs,
header_output=True, output_types=False):
write_kwargs = {'delimiter': self.delimiter,
'quotechar': self.quotechar,
'strip_whitespace': self.strip_whitespace_fields,
'comment': self.write_comment
}
write_kwargs.update(default_kwargs)
# user kwargs take precedence over default kwargs
write_kwargs.update(self.kwargs)
writer = cparser.FastWriter(table, **write_kwargs)
writer.write(output, header_output, output_types)
class FastCsv(FastBasic):
"""
A faster version of the ordinary :class:`Csv` writer that uses the
optimized C parsing engine. Note that this reader will append empty
field values to the end of any row with not enough columns, while
:class:`FastBasic` simply raises an error.
"""
_format_name = 'fast_csv'
_description = 'Comma-separated values table using the fast C engine'
_fast = True
fill_extra_cols = True
def __init__(self, **kwargs):
super().__init__({'delimiter': ',', 'comment': None}, **kwargs)
def write(self, table, output):
"""
Override the default write method of `FastBasic` to
output masked values as empty fields.
"""
self._write(table, output, {'fill_values': [(core.masked, '')]})
class FastTab(FastBasic):
"""
A faster version of the ordinary :class:`Tab` reader that uses
the optimized C parsing engine.
"""
_format_name = 'fast_tab'
_description = 'Tab-separated values table using the fast C engine'
_fast = True
def __init__(self, **kwargs):
super().__init__({'delimiter': '\t'}, **kwargs)
self.strip_whitespace_lines = False
self.strip_whitespace_fields = False
class FastNoHeader(FastBasic):
"""
This class uses the fast C engine to read tables with no header line. If
the names parameter is unspecified, the columns will be autonamed with
"col{}".
"""
_format_name = 'fast_no_header'
_description = 'Basic table with no headers using the fast C engine'
_fast = True
def __init__(self, **kwargs):
super().__init__({'header_start': None, 'data_start': 0}, **kwargs)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` so
that columns names are not included in output.
"""
self._write(table, output, {}, header_output=None)
class FastCommentedHeader(FastBasic):
"""
A faster version of the :class:`CommentedHeader` reader, which looks for
column names in a commented line. ``header_start`` denotes the index of
the header line among all commented lines and is 0 by default.
"""
_format_name = 'fast_commented_header'
    _description = 'Column names in a commented line using the fast C engine'
_fast = True
def __init__(self, **kwargs):
super().__init__({}, **kwargs)
# Mimic CommentedHeader's behavior in which data_start
# is relative to header_start if unspecified; see #2692
if 'data_start' not in kwargs:
self.data_start = 0
def make_table(self, data, comments):
"""
        Actually make the output table given the data and comments. This is
slightly different from the base FastBasic method in the way comments
are handled.
"""
meta = OrderedDict()
if comments:
idx = self.header_start
if idx < 0:
idx = len(comments) + idx
meta['comments'] = comments[:idx] + comments[idx+1:]
if not meta['comments']:
del meta['comments']
return Table(data, names=list(self.engine.get_names()), meta=meta)
def _read_header(self):
tmp = self.engine.source
commented_lines = []
for line in tmp.splitlines():
line = line.lstrip()
if line and line[0] == self.comment: # line begins with a comment
commented_lines.append(line[1:])
if len(commented_lines) == self.header_start + 1:
break
if len(commented_lines) <= self.header_start:
raise cparser.CParserError('not enough commented lines')
self.engine.setup_tokenizer([commented_lines[self.header_start]])
self.engine.header_start = 0
self.engine.read_header()
self.engine.setup_tokenizer(tmp)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` so
that column names are commented.
"""
self._write(table, output, {}, header_output='comment')
class FastRdb(FastBasic):
"""
A faster version of the :class:`Rdb` reader. This format is similar to
tab-delimited, but it also contains a header line after the column
name line denoting the type of each column (N for numeric, S for string).
"""
_format_name = 'fast_rdb'
_description = 'Tab-separated with a type definition header line'
_fast = True
def __init__(self, **kwargs):
super().__init__({'delimiter': '\t', 'data_start': 2}, **kwargs)
self.strip_whitespace_lines = False
self.strip_whitespace_fields = False
def _read_header(self):
tmp = self.engine.source
line1 = ''
line2 = ''
for line in tmp.splitlines():
# valid non-comment line
if not line1 and line.strip() and line.lstrip()[0] != self.comment:
line1 = line
elif not line2 and line.strip() and line.lstrip()[0] != self.comment:
line2 = line
break
else: # less than 2 lines in table
raise ValueError('RDB header requires 2 lines')
# tokenize the two header lines separately
self.engine.setup_tokenizer([line2])
self.engine.header_start = 0
self.engine.read_header()
types = self.engine.get_names()
self.engine.setup_tokenizer([line1])
self.engine.set_names([])
self.engine.read_header()
if len(self.engine.get_names()) != len(types):
raise core.InconsistentTableError('RDB header mismatch between number of '
'column names and column types')
if any(not re.match(r'\d*(N|S)$', x, re.IGNORECASE) for x in types):
raise core.InconsistentTableError('RDB type definitions do not all match '
'[num](N|S): {0}'.format(types))
try_int = {}
try_float = {}
try_string = {}
for name, col_type in zip(self.engine.get_names(), types):
if col_type[-1].lower() == 's':
try_int[name] = 0
try_float[name] = 0
try_string[name] = 1
else:
try_int[name] = 1
try_float[name] = 1
try_string[name] = 0
self.engine.setup_tokenizer(tmp)
return (try_int, try_float, try_string)
def write(self, table, output):
"""
Override the default writing behavior in `FastBasic` to
output a line with column types after the column name line.
"""
self._write(table, output, {}, output_types=True)
|
75205fde145a17f26c22a643a8f1d73dce1680ad8807a2c5292e618f6429cad3 | """A Collection of useful miscellaneous functions.
misc.py:
Collection of useful miscellaneous functions.
:Author: Hannes Breytenbach ([email protected])
"""
import collections.abc
import itertools
import operator
def first_true_index(iterable, pred=None, default=None):
"""find the first index position for the which the callable pred returns True"""
if pred is None:
func = operator.itemgetter(1)
else:
func = lambda x: pred(x[1])
    ii = next(filter(func, enumerate(iterable)), None)  # first matching (index, item) pair
    return ii[0] if ii is not None else default
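# Example: index of the first even number (with no match, ``default`` is
# returned).
# >>> first_true_index([7, 3, 4, 9], pred=lambda n: n % 2 == 0)
# 2
# >>> first_true_index([7, 3], pred=lambda n: n % 2 == 0, default=-1)
# -1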
def first_false_index(iterable, pred=None, default=None):
"""find the first index position for the which the callable pred returns False"""
if pred is None:
func = operator.not_
else:
func = lambda x: not pred(x)
return first_true_index(iterable, func, default)
def sortmore(*args, **kw):
"""
Sorts any number of lists according to:
optionally given item sorting key function(s) and/or a global sorting key function.
Parameters
----------
One or more lists
Keywords
--------
    globalkey : None
        Revert to sorting by the key function(s) only.
    globalkey : callable
        Sort by the value this function evaluates for the items of all the
        lists. Its call signature needs to accept a tuple with one item from
        each list, e.g. ``globalkey = lambda *l: sum(l)`` will order all the
        lists by the sum of the items from each list.
    key : None
        Sorting is done by the values of the first input list (in this case
        the objects in the first iterable need the comparison methods
        ``__lt__`` etc.).
    key : callable
        Sorting is done by the value of ``key(item)`` for the items of the
        first iterable.
    key : tuple of callables
        Sorting is done by the value of
        ``(key[0](item_0), ..., key[n](item_n))`` for the items of the first
        n iterables (where n is the length of the key tuple), i.e. the first
        callable is the primary sorting criterion and the rest act as
        tie-breakers.
Returns
-------
Sorted lists
Examples
--------
    Capture the sorting indices:
l = list('CharacterS')
In [1]: sortmore( l, range(len(l)) )
Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'],
[0, 9, 2, 4, 5, 7, 1, 3, 8, 6])
In [2]: sortmore( l, range(len(l)), key=str.lower )
Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'],
[2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
"""
first = list(args[0])
if not len(first):
return args
globalkey = kw.get('globalkey')
key = kw.get('key')
if key is None:
if globalkey:
# if global sort function given and no local (secondary) key given, ==> no tiebreakers
key = lambda x: 0
else:
key = lambda x: x # if no global sort and no local sort keys given, sort by item values
if globalkey is None:
globalkey = lambda *x: 0
if not isinstance(globalkey, collections.abc.Callable):
raise ValueError('globalkey needs to be callable')
if isinstance(key, collections.abc.Callable):
k = lambda x: (globalkey(*x), key(x[0]))
elif isinstance(key, tuple):
key = (k if k else lambda x: 0 for k in key)
k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x))
else:
raise KeyError(
"kw arg 'key' should be None, callable, or a sequence of callables, not {}"
.format(type(key)))
res = sorted(list(zip(*args)), key=k)
if 'order' in kw:
if kw['order'].startswith(('descend', 'reverse')):
res = reversed(res)
return tuple(map(list, zip(*res)))
def groupmore(func=None, *its):
"""Extends the itertools.groupby functionality to arbitrary number of iterators."""
if not func:
func = lambda x: x
its = sortmore(*its, key=func)
nfunc = lambda x: func(x[0])
zipper = itertools.groupby(zip(*its), nfunc)
unzipper = ((key, zip(*groups)) for key, groups in zipper)
return unzipper
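# A minimal sketch (editor's addition) of the two helpers above; `_demo_` is
# illustrative, not part of the public API.
def _demo_sortmore_groupmore():
    # sortmore orders every list by the values of the first one
    assert sortmore(['b', 'a', 'c'], [1, 2, 3]) == (['a', 'b', 'c'], [2, 1, 3])
    # groupmore sorts by the key function, then groups the zipped items
    grouped = {k: list(g) for k, g in groupmore(lambda x: x % 2, [1, 2, 3, 4])}
    assert grouped == {0: [(2, 4)], 1: [(1, 3)]}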
|
a693b5081017f1c7d54e7aa896fa87fe29ea562e702f9f921b565a772799cba2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
An extensible ASCII table reader and writer.
Classes to read DAOphot table format
:Copyright: Smithsonian Astrophysical Observatory (2011)
:Author: Tom Aldcroft ([email protected])
"""
import re
import numpy as np
import itertools as itt
from collections import defaultdict, OrderedDict
from . import core
from . import fixedwidth
from .misc import first_true_index, first_false_index, groupmore
class DaophotHeader(core.BaseHeader):
"""
Read the header from a file produced by the IRAF DAOphot routine.
"""
comment = r'\s*#K'
# Regex for extracting the format strings
re_format = re.compile(r'%-?(\d+)\.?\d?[sdfg]')
re_header_keyword = re.compile(r'[#]K'
r'\s+ (?P<name> \w+)'
r'\s* = (?P<stuff> .+) $',
re.VERBOSE)
aperture_values = ()
def __init__(self):
core.BaseHeader.__init__(self)
def parse_col_defs(self, grouped_lines_dict):
"""
Parse a series of column definition lines like below. There may be several
such blocks in a single file (where continuation characters have already been
stripped).
#N ID XCENTER YCENTER MAG MERR MSKY NITER
#U ## pixels pixels magnitudes magnitudes counts ##
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
"""
line_ids = ('#N', '#U', '#F')
coldef_dict = defaultdict(list)
# Function to strip identifier lines
stripper = lambda s: s[2:].strip(' \\')
for defblock in zip(*map(grouped_lines_dict.get, line_ids)):
for key, line in zip(line_ids, map(stripper, defblock)):
coldef_dict[key].append(line.split())
# Save the original columns so we can use it later to reconstruct the
# original header for writing
if self.data.is_multiline:
# Database contains multi-aperture data.
# Autogen column names, units, formats from last row of column headers
last_names, last_units, last_formats = list(zip(*map(coldef_dict.get, line_ids)))[-1]
N_multiline = len(self.data.first_block)
for i in np.arange(1, N_multiline + 1).astype('U2'):
                # extra column names, e.g. RAPERT2, SUM2, etc.
extended_names = list(map(''.join, zip(last_names, itt.repeat(i))))
if i == '1': # Enumerate the names starting at 1
coldef_dict['#N'][-1] = extended_names
else:
coldef_dict['#N'].append(extended_names)
coldef_dict['#U'].append(last_units)
coldef_dict['#F'].append(last_formats)
# Get column widths from column format specifiers
get_col_width = lambda s: int(self.re_format.search(s).groups()[0])
col_widths = [[get_col_width(f) for f in formats]
for formats in coldef_dict['#F']]
# original data format might be shorter than 80 characters and filled with spaces
row_widths = np.fromiter(map(sum, col_widths), int)
row_short = Daophot.table_width - row_widths
# fix last column widths
for w, r in zip(col_widths, row_short):
w[-1] += r
self.col_widths = col_widths
# merge the multi-line header data into single line data
coldef_dict = dict((k, sum(v, [])) for (k, v) in coldef_dict.items())
return coldef_dict
def update_meta(self, lines, meta):
"""
Extract table-level keywords for DAOphot table. These are indicated by
a leading '#K ' prefix.
"""
table_meta = meta['table']
# self.lines = self.get_header_lines(lines)
Nlines = len(self.lines)
if Nlines > 0:
# Group the header lines according to their line identifiers (#K,
# #N, #U, #F or just # (spacer line)) function that grabs the line
# identifier
get_line_id = lambda s: s.split(None, 1)[0]
# Group lines by the line identifier ('#N', '#U', '#F', '#K') and
# capture line index
gid, groups = zip(*groupmore(get_line_id, self.lines, range(Nlines)))
# Groups of lines and their indices
grouped_lines, gix = zip(*groups)
# Dict of line groups keyed by line identifiers
grouped_lines_dict = dict(zip(gid, grouped_lines))
# Update the table_meta keywords if necessary
if '#K' in grouped_lines_dict:
keywords = OrderedDict(map(self.extract_keyword_line, grouped_lines_dict['#K']))
table_meta['keywords'] = keywords
coldef_dict = self.parse_col_defs(grouped_lines_dict)
line_ids = ('#N', '#U', '#F')
for name, unit, fmt in zip(*map(coldef_dict.get, line_ids)):
meta['cols'][name] = {'unit': unit,
'format': fmt}
self.meta = meta
self.names = coldef_dict['#N']
def extract_keyword_line(self, line):
"""
Extract info from a header keyword line (#K)
"""
m = self.re_header_keyword.match(line)
if m:
vals = m.group('stuff').strip().rsplit(None, 2)
keyword_dict = {'units': vals[-2],
'format': vals[-1],
'value': (vals[0] if len(vals) > 2 else "")}
return m.group('name'), keyword_dict
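    # Editor's note (illustration): a line such as
    #   "#K MERGERAD = INDEF scaleunit %-23.7g"
    # is returned as ('MERGERAD', {'units': 'scaleunit',
    # 'format': '%-23.7g', 'value': 'INDEF'}).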
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines`` for a DAOphot
header. The DAOphot header is specialized so that we just copy the entire BaseHeader
get_cols routine and modify as needed.
Parameters
----------
lines : list
List of table lines
Returns
        -------
col : list
List of table Columns
"""
if not self.names:
raise core.InconsistentTableError('No column names found in DAOphot header')
# Create the list of io.ascii column objects
self._set_cols_from_names()
# Set unit and format as needed.
coldefs = self.meta['cols']
for col in self.cols:
unit, fmt = map(coldefs[col.name].get, ('unit', 'format'))
if unit != '##':
col.unit = unit
if fmt != '##':
col.format = fmt
# Set column start and end positions.
col_width = sum(self.col_widths, [])
ends = np.cumsum(col_width)
starts = ends - col_width
for i, col in enumerate(self.cols):
col.start, col.end = starts[i], ends[i]
col.span = col.end - col.start
if hasattr(col, 'format'):
if any(x in col.format for x in 'fg'):
col.type = core.FloatType
elif 'd' in col.format:
col.type = core.IntType
elif 's' in col.format:
col.type = core.StrType
# INDEF is the missing value marker
self.data.fill_values.append(('INDEF', '0'))
class DaophotData(core.BaseData):
splitter_class = fixedwidth.FixedWidthSplitter
start_line = 0
comment = r'\s*#'
def __init__(self):
core.BaseData.__init__(self)
self.is_multiline = False
def get_data_lines(self, lines):
# Special case for multiline daophot databases. Extract the aperture
# values from the first multiline data block
if self.is_multiline:
# Grab the first column of the special block (aperture values) and
# recreate the aperture description string
aplist = next(zip(*map(str.split, self.first_block)))
self.header.aperture_values = tuple(map(float, aplist))
# Set self.data.data_lines to a slice of lines contain the data rows
core.BaseData.get_data_lines(self, lines)
class DaophotInputter(core.ContinuationLinesInputter):
continuation_char = '\\'
multiline_char = '*'
replace_char = ' '
re_multiline = re.compile(r'(#?)[^\\*#]*(\*?)(\\*) ?$')
def search_multiline(self, lines, depth=150):
"""
Search lines for special continuation character to determine number of
continued rows in a datablock. For efficiency, depth gives the upper
limit of lines to search.
"""
# The list of apertures given in the #K APERTURES keyword may not be
# complete!! This happens if the string description of the aperture
# list is longer than the field width of the #K APERTURES field. In
# this case we have to figure out how many apertures there are based on
# the file structure.
comment, special, cont = zip(*(self.re_multiline.search(l).groups()
for l in lines[:depth]))
# Find first non-comment line
data_start = first_false_index(comment)
# No data in lines[:depth]. This may be because there is no data in
# the file, or because the header is really huge. If the latter,
# increasing the search depth should help
if data_start is None:
return None, None, lines[:depth]
header_lines = lines[:data_start]
# Find first line ending on special row continuation character '*'
# indexed relative to data_start
first_special = first_true_index(special[data_start:depth])
if first_special is None: # no special lines
return None, None, header_lines
        # last line ending on special '*', but not on line continuation '\\'
last_special = first_false_index(special[data_start + first_special:depth])
# index relative to first_special
        # if last_special is None, no end of the special lines was found
        # within the search depth; the search could be retried with a larger
        # depth, e.g. return self.search_multiline(lines, depth=2*depth)
# indexing now relative to line[0]
markers = np.cumsum([data_start, first_special, last_special])
# multiline portion of first data block
multiline_block = lines[markers[1]:markers[-1]]
return markers, multiline_block, header_lines
def process_lines(self, lines):
markers, block, header = self.search_multiline(lines)
self.data.is_multiline = markers is not None
self.data.markers = markers
self.data.first_block = block
        # set the header lines returned by the search as an attribute of the header
self.data.header.lines = header
if markers is not None:
lines = lines[markers[0]:]
continuation_char = self.continuation_char
multiline_char = self.multiline_char
replace_char = self.replace_char
parts = []
outlines = []
for i, line in enumerate(lines):
mo = self.re_multiline.search(line)
if mo:
comment, special, cont = mo.groups()
if comment or cont:
line = line.replace(continuation_char, replace_char)
if special:
line = line.replace(multiline_char, replace_char)
if cont and not comment:
parts.append(line)
if not cont:
parts.append(line)
outlines.append(''.join(parts))
parts = []
else:
raise core.InconsistentTableError('multiline re could not match line '
'{}: {}'.format(i, line))
return outlines
class Daophot(core.BaseReader):
"""
Read a DAOphot file.
Example::
#K MERGERAD = INDEF scaleunit %-23.7g
#K IRAF = NOAO/IRAFV2.10EXPORT version %-23s
#K USER = davis name %-23s
#K HOST = tucana computer %-23s
#
#N ID XCENTER YCENTER MAG MERR MSKY NITER \\
#U ## pixels pixels magnitudes magnitudes counts ## \\
#F %-9d %-10.3f %-10.3f %-12.3f %-14.3f %-15.7g %-6d
#
#N SHARPNESS CHI PIER PERROR \\
#U ## ## ## perrors \\
#F %-23.3f %-12.3f %-6d %-13s
#
14 138.538 INDEF 15.461 0.003 34.85955 4 \\
-0.032 0.802 0 No_error
The keywords defined in the #K records are available via the output table
``meta`` attribute::
>>> import os
>>> from astropy.io import ascii
>>> filename = os.path.join(ascii.__path__[0], 'tests/t/daophot.dat')
>>> data = ascii.read(filename)
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword['value'], keyword['units'], keyword['format'])
...
MERGERAD INDEF scaleunit %-23.7g
IRAF NOAO/IRAFV2.10EXPORT version %-23s
USER name %-23s
...
The unit and formats are available in the output table columns::
>>> for colname in data.colnames:
... col = data[colname]
... print(colname, col.unit, col.format)
...
ID None %-9d
XCENTER pixels %-10.3f
YCENTER pixels %-10.3f
...
Any column values of INDEF are interpreted as a missing value and will be
masked out in the resultant table.
    In the case of multi-aperture DAOphot files containing repeated entries for the last
row of fields, extra unique column names will be created by suffixing
corresponding field names with numbers starting from 2 to N (where N is the
total number of apertures).
For example,
first aperture radius will be RAPERT and corresponding magnitude will be MAG,
second aperture radius will be RAPERT2 and corresponding magnitude will be MAG2,
third aperture radius will be RAPERT3 and corresponding magnitude will be MAG3,
and so on.
"""
_format_name = 'daophot'
_io_registry_format_aliases = ['daophot']
_io_registry_can_write = False
_description = 'IRAF DAOphot format table'
header_class = DaophotHeader
data_class = DaophotData
inputter_class = DaophotInputter
table_width = 80
def __init__(self):
core.BaseReader.__init__(self)
# The inputter needs to know about the data (see DaophotInputter.process_lines)
self.inputter.data = self.data
def write(self, table=None):
raise NotImplementedError
|
bc45fa9b025683a7ab9f746d522279abd4e3b69f00b73823901e790c92e4a5e1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Define the Enhanced Character-Separated-Values (ECSV) which allows for reading and
writing all the meta data associated with an astropy Table object.
"""
import re
from collections import OrderedDict
import contextlib
import warnings
from . import core, basic
from ...table import meta, serialize
from ...utils.data_info import serialize_context_as
from ...utils.exceptions import AstropyWarning
__doctest_requires__ = {'Ecsv': ['yaml']}
ECSV_VERSION = '0.9'
DELIMITERS = (' ', ',')
class EcsvHeader(basic.BasicHeader):
"""Header class for which the column definition line starts with the
comment character. See the :class:`CommentedHeader` class for an example.
"""
def process_lines(self, lines):
"""Return only non-blank lines that start with the comment regexp. For these
lines strip out the matching characters and leading/trailing whitespace."""
re_comment = re.compile(self.comment)
for line in lines:
line = line.strip()
if not line:
continue
match = re_comment.match(line)
if match:
out = line[match.end():]
if out:
yield out
else:
# Stop iterating on first failed match for a non-blank line
return
def write(self, lines):
"""
Write header information in the ECSV ASCII format. This format
starts with a delimiter separated list of the column names in order
to make this format readable by humans and simple csv-type readers.
It then encodes the full table meta and column attributes and meta
as YAML and pretty-prints this in the header. Finally the delimited
column names are repeated again, for humans and readers that look
for the *last* comment line as defining the column names.
"""
if self.splitter.delimiter not in DELIMITERS:
raise ValueError('only space and comma are allowed for delimiter in ECSV format')
for col in self.cols:
if len(getattr(col, 'shape', ())) > 1:
raise ValueError("ECSV format does not support multidimensional column '{0}'"
.format(col.info.name))
# Now assemble the header dict that will be serialized by the YAML dumper
header = {'cols': self.cols, 'schema': 'astropy-2.0'}
if self.table_meta:
header['meta'] = self.table_meta
# Set the delimiter only for the non-default option(s)
if self.splitter.delimiter != ' ':
header['delimiter'] = self.splitter.delimiter
header_yaml_lines = (['%ECSV {0}'.format(ECSV_VERSION),
'---']
+ meta.get_yaml_from_header(header))
lines.extend([self.write_comment + line for line in header_yaml_lines])
lines.append(self.splitter.join([x.info.name for x in self.cols]))
def write_comments(self, lines, meta):
"""
Override the default write_comments to do nothing since this is handled
in the custom write method.
"""
pass
def update_meta(self, lines, meta):
"""
Override the default update_meta to do nothing. This process is done
in get_cols() for this reader.
"""
pass
def get_cols(self, lines):
"""
Initialize the header Column objects from the table ``lines``.
Parameters
----------
lines : list
List of table lines
"""
# Cache a copy of the original input lines before processing below
raw_lines = lines
# Extract non-blank comment (header) lines with comment character stripped
lines = list(self.process_lines(lines))
# Validate that this is a ECSV file
ecsv_header_re = r"""%ECSV [ ]
(?P<major> \d+)
\. (?P<minor> \d+)
\.? (?P<bugfix> \d+)? $"""
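        # e.g. this matches '%ECSV 0.9' or '%ECSV 1.0.1' (major.minor with an
        # optional bugfix part)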
no_header_msg = ('ECSV header line like "# %ECSV <version>" not found as first line.'
                         ' This is required for an ECSV file.')
if not lines:
raise core.InconsistentTableError(no_header_msg)
match = re.match(ecsv_header_re, lines[0].strip(), re.VERBOSE)
if not match:
raise core.InconsistentTableError(no_header_msg)
# ecsv_version could be constructed here, but it is not currently used.
try:
header = meta.get_header_from_yaml(lines)
except ImportError as exc:
if 'PyYAML package is required' in str(exc):
warnings.warn("file looks like ECSV format but PyYAML is not installed "
"so it cannot be parsed as ECSV",
AstropyWarning)
raise core.InconsistentTableError('unable to parse yaml in meta header'
' (PyYAML package is required)')
except meta.YamlParseError:
raise core.InconsistentTableError('unable to parse yaml in meta header')
if 'meta' in header:
self.table_meta = header['meta']
if 'delimiter' in header:
delimiter = header['delimiter']
if delimiter not in DELIMITERS:
raise ValueError('only space and comma are allowed for delimiter in ECSV format')
self.splitter.delimiter = delimiter
self.data.splitter.delimiter = delimiter
# Create the list of io.ascii column objects from `header`
header_cols = OrderedDict((x['name'], x) for x in header['datatype'])
self.names = [x['name'] for x in header['datatype']]
# Read the first non-commented line of table and split to get the CSV
# header column names. This is essentially what the Basic reader does.
header_line = next(super().process_lines(raw_lines))
header_names = next(self.splitter([header_line]))
# Check for consistency of the ECSV vs. CSV header column names
if header_names != self.names:
raise core.InconsistentTableError('column names from ECSV header {} do not '
'match names from header line of CSV data {}'
.format(self.names, header_names))
# BaseHeader method to create self.cols, which is a list of
# io.ascii.core.Column objects (*not* Table Column objects).
self._set_cols_from_names()
# Transfer attributes from the column descriptor stored in the input
# header YAML metadata to the new columns to create this table.
for col in self.cols:
for attr in ('description', 'format', 'unit', 'meta'):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
col.dtype = header_cols[col.name]['datatype']
# ECSV "string" means numpy dtype.kind == 'U' AKA str in Python 3
if col.dtype == 'string':
col.dtype = 'str'
if col.dtype.startswith('complex'):
raise TypeError('ecsv reader does not support complex number types')
class EcsvOutputter(core.TableOutputter):
"""
After reading the input lines and processing, convert the Reader columns
and metadata to an astropy.table.Table object. This overrides the default
converters to be an empty list because there is no "guessing" of the
conversion function.
"""
default_converters = []
def __call__(self, cols, meta):
# Convert to a Table with all plain Column subclass columns
out = super().__call__(cols, meta)
# If mixin columns exist (based on the special '__mixin_columns__'
# key in the table ``meta``), then use that information to construct
# appropriate mixin columns and remove the original data columns.
# If no __mixin_columns__ exists then this function just passes back
# the input table.
out = serialize._construct_mixins_from_columns(out)
return out
class EcsvData(basic.BasicData):
def _set_fill_values(self, cols):
"""Set the fill values of the individual cols based on fill_values of BaseData
For ECSV handle the corner case of data that has been serialized using
the serialize_method='data_mask' option, which writes the full data and
mask directly, AND where that table includes a string column with zero-length
string entries ("") which are valid data.
        Normally the super() method will set col.fill_values=('', '0') to replace
blanks with a '0'. But for that corner case subset, instead do not do
any filling.
"""
super()._set_fill_values(cols)
# Get the serialized columns spec. It might not exist and there might
# not even be any table meta, so punt in those cases.
try:
scs = self.header.table_meta['__serialized_columns__']
except (AttributeError, KeyError):
return
# Got some serialized columns, so check for string type and serialized
# as a MaskedColumn. Without 'data_mask', MaskedColumn objects are
# stored to ECSV as normal columns.
for col in cols:
if (col.dtype == 'str' and col.name in scs and
scs[col.name]['__class__'] == 'astropy.table.column.MaskedColumn'):
col.fill_values = {} # No data value replacement
class Ecsv(basic.Basic):
"""
Read a file which conforms to the ECSV (Enhanced Character Separated
Values) format. This format allows for specification of key table
and column meta-data, in particular the data type and unit. For details
see: https://github.com/astropy/astropy-APEs/blob/master/APE6.rst.
Examples
--------
>>> from astropy.table import Table
>>> ecsv_content = '''# %ECSV 0.9
... # ---
... # datatype:
... # - {name: a, unit: m / s, datatype: int64, format: '%03d'}
... # - {name: b, unit: km, datatype: int64, description: This is column b}
... a b
... 001 2
... 004 3
... '''
>>> Table.read(ecsv_content, format='ascii.ecsv')
<Table length=2>
a b
m / s km
int64 int64
----- -----
001 2
004 3
"""
_format_name = 'ecsv'
_description = 'Enhanced CSV'
_io_registry_suffix = '.ecsv'
header_class = EcsvHeader
data_class = EcsvData
outputter_class = EcsvOutputter
def update_table_data(self, table):
"""
Update table columns in place if mixin columns are present.
This is a hook to allow updating the table columns after name
filtering but before setting up to write the data. This is currently
only used by ECSV and is otherwise just a pass-through.
Parameters
----------
table : `astropy.table.Table`
Input table for writing
Returns
-------
table : `astropy.table.Table`
Output table for writing
"""
with serialize_context_as('ecsv'):
out = serialize._represent_mixins_as_columns(table)
return out
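# A minimal round-trip sketch (editor's addition; assumes astropy plus the
# optional PyYAML dependency; `_demo_` is illustrative, not public API):
def _demo_ecsv_roundtrip():
    from io import StringIO
    from ...table import Table
    t = Table({'a': [1, 2]})
    buf = StringIO()
    t.write(buf, format='ascii.ecsv')
    t2 = Table.read(buf.getvalue(), format='ascii.ecsv')
    assert (t2['a'] == t['a']).all()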
|
c468f1a6595ed9fc126c8dda16c02dc64f55fc16f732e9726fed6618fb06aaa4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing HDF5 tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`table_io` for more details.
"""
import os
import warnings
import numpy as np
# NOTE: Do not import anything from astropy.table here.
# https://github.com/astropy/astropy/issues/6604
from ...utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
HDF5_SIGNATURE = b'\x89HDF\r\n\x1a\n'
META_KEY = '__table_column_meta__'
__all__ = ['read_table_hdf5', 'write_table_hdf5']
def meta_path(path):
return path + '.' + META_KEY
def _find_all_structured_arrays(handle):
"""
Find all structured arrays in an HDF5 file
"""
import h5py
structured_arrays = []
def append_structured_arrays(name, obj):
if isinstance(obj, h5py.Dataset) and obj.dtype.kind == 'V':
structured_arrays.append(name)
handle.visititems(append_structured_arrays)
return structured_arrays
def is_hdf5(origin, filepath, fileobj, *args, **kwargs):
if fileobj is not None:
loc = fileobj.tell()
try:
signature = fileobj.read(8)
finally:
fileobj.seek(loc)
return signature == HDF5_SIGNATURE
elif filepath is not None:
return filepath.endswith(('.hdf5', '.h5'))
try:
import h5py
except ImportError:
return False
else:
return isinstance(args[0], (h5py.File, h5py.Group, h5py.Dataset))
def read_table_hdf5(input, path=None):
"""
Read a Table object from an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed. If more than one
table is present in the HDF5 file or group, the first table is read in and
a warning is displayed.
Parameters
----------
    input : str or :class:`h5py:File` or :class:`h5py:Group` or :class:`h5py:Dataset`
        If a string, the filename to read the table from. If an h5py object,
        either the file or the group object to read the table from.
path : str
The path from which to read the table inside the HDF5 file.
This should be relative to the input file or group.
"""
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
    # This function is recursive, and only gets to actually reading the table
    # once the input has been narrowed down to an hdf5 Dataset. Moreover, the
    # input variable is changed in place.
# Here, we save its value to be used at the end when the conditions are
# right.
input_save = input
if isinstance(input, (h5py.File, h5py.Group)):
# If a path was specified, follow the path
if path is not None:
try:
input = input[path]
except (KeyError, ValueError):
raise OSError("Path {0} does not exist".format(path))
# `input` is now either a group or a dataset. If it is a group, we
# will search for all structured arrays inside the group, and if there
# is one we can proceed otherwise an error is raised. If it is a
# dataset, we just proceed with the reading.
if isinstance(input, h5py.Group):
# Find all structured arrays in group
arrays = _find_all_structured_arrays(input)
            if len(arrays) == 0:
                raise ValueError("no table found in HDF5 group {0}".
                                 format(path))
            else:
                path = arrays[0] if path is None else path + '/' + arrays[0]
                # only warn when there was a real ambiguity to resolve
                if len(arrays) > 1:
                    warnings.warn("path= was not specified but multiple tables"
                                  " are present, reading in first available"
                                  " table (path={0})".format(path),
                                  AstropyUserWarning)
                return read_table_hdf5(input, path=path)
elif not isinstance(input, h5py.Dataset):
# If a file object was passed, then we need to extract the filename
# because h5py cannot properly read in file objects.
if hasattr(input, 'read'):
try:
input = input.name
except AttributeError:
raise TypeError("h5py can only open regular files")
# Open the file for reading, and recursively call read_table_hdf5 with
# the file object and the path.
f = h5py.File(input, 'r')
try:
return read_table_hdf5(f, path=path)
finally:
f.close()
# If we are here, `input` should be a Dataset object, which we can now
# convert to a Table.
# Create a Table object
from ...table import Table, meta, serialize
table = Table(np.array(input))
# Read the meta-data from the file. For back-compatibility, we can read
# the old file format where the serialized metadata were saved in the
# attributes of the HDF5 dataset.
# In the new format, instead, metadata are stored in a new dataset in the
# same file. This is introduced in Astropy 3.0
old_version_meta = META_KEY in input.attrs
new_version_meta = path is not None and meta_path(path) in input_save
if old_version_meta or new_version_meta:
if new_version_meta:
header = meta.get_header_from_yaml(
h.decode('utf-8') for h in input_save[meta_path(path)])
elif old_version_meta:
header = meta.get_header_from_yaml(
h.decode('utf-8') for h in input.attrs[META_KEY])
if 'meta' in list(header.keys()):
table.meta = header['meta']
header_cols = dict((x['name'], x) for x in header['datatype'])
for col in table.columns.values():
for attr in ('description', 'format', 'unit', 'meta'):
if attr in header_cols[col.name]:
setattr(col, attr, header_cols[col.name][attr])
# Construct new table with mixins, using tbl.meta['__serialized_columns__']
# as guidance.
table = serialize._construct_mixins_from_columns(table)
else:
# Read the meta-data from the file
table.meta.update(input.attrs)
return table
def _encode_mixins(tbl):
"""Encode a Table ``tbl`` that may have mixin columns to a Table with only
astropy Columns + appropriate meta-data to allow subsequent decoding.
"""
from ...table import serialize
from ...table.table import has_info_class
from ... import units as u
from ...utils.data_info import MixinInfo, serialize_context_as
# If PyYAML is not available then check to see if there are any mixin cols
# that *require* YAML serialization. HDF5 already has support for
    # Quantity, so if those are the only mixins then proceed without doing the
    # YAML bit, for backward compatibility (i.e. not requiring YAML to write
    # Quantity).
try:
import yaml
except ImportError:
for col in tbl.itercols():
if (has_info_class(col, MixinInfo) and
col.__class__ is not u.Quantity):
raise TypeError("cannot write type {} column '{}' "
"to HDF5 without PyYAML installed."
.format(col.__class__.__name__, col.info.name))
# Convert the table to one with no mixins, only Column objects. This adds
# meta data which is extracted with meta.get_yaml_from_table.
with serialize_context_as('hdf5'):
encode_tbl = serialize._represent_mixins_as_columns(tbl)
return encode_tbl
def write_table_hdf5(table, output, path=None, compression=False,
append=False, overwrite=False, serialize_meta=False,
compatibility_mode=False):
"""
Write a Table object to an HDF5 file
This requires `h5py <http://www.h5py.org/>`_ to be installed.
Parameters
----------
table : `~astropy.table.Table`
Data table that is to be written to file.
output : str or :class:`h5py:File` or :class:`h5py:Group`
If a string, the filename to write the table to. If an h5py object,
either the file or the group object to write the table to.
path : str
The path to which to write the table inside the HDF5 file.
This should be relative to the input file or group.
compression : bool or str or int
Whether to compress the table inside the HDF5 file. If set to `True`,
``'gzip'`` compression is used. If a string is specified, it should be
one of ``'gzip'``, ``'szip'``, or ``'lzf'``. If an integer is
specified (in the range 0-9), ``'gzip'`` compression is used, and the
integer denotes the compression level.
append : bool
Whether to append the table to an existing HDF5 file.
    overwrite : bool
        Whether to overwrite any existing file without warning.
        If ``append=True`` and ``overwrite=True`` then only the dataset will be
        replaced; the file/group will not be overwritten.
    serialize_meta : bool
        Whether to serialize rich table meta-data when writing the table. When
        `True` the meta-data is encoded as YAML and stored in a separate
        dataset alongside the table.
    compatibility_mode : bool
        Deprecated. When `True`, store the serialized meta-data in the HDF5
        dataset attributes (the pre-Astropy 3.0 layout) rather than in a
        separate dataset.
"""
from ...table import meta
try:
import h5py
except ImportError:
raise Exception("h5py is required to read and write HDF5 files")
if path is None:
raise ValueError("table path should be set via the path= argument")
elif path.endswith('/'):
raise ValueError("table path should end with table name, not /")
if '/' in path:
group, name = path.rsplit('/', 1)
else:
group, name = None, path
if isinstance(output, (h5py.File, h5py.Group)):
if group:
try:
output_group = output[group]
except (KeyError, ValueError):
output_group = output.create_group(group)
else:
output_group = output
elif isinstance(output, str):
if os.path.exists(output) and not append:
if overwrite and not append:
os.remove(output)
else:
raise OSError("File exists: {0}".format(output))
# Open the file for appending or writing
f = h5py.File(output, 'a' if append else 'w')
# Recursively call the write function
try:
return write_table_hdf5(table, f, path=path,
compression=compression, append=append,
overwrite=overwrite,
serialize_meta=serialize_meta,
compatibility_mode=compatibility_mode)
finally:
f.close()
else:
raise TypeError('output should be a string or an h5py File or '
'Group object')
# Check whether table already exists
if name in output_group:
if append and overwrite:
# Delete only the dataset itself
del output_group[name]
else:
raise OSError("Table {0} already exists".format(path))
# Encode any mixin columns as plain columns + appropriate metadata
table = _encode_mixins(table)
# Warn if information will be lost when serialize_meta=False. This is
# hardcoded to the set difference between column info attributes and what
# HDF5 can store natively (name, dtype) with no meta.
if serialize_meta is False:
for col in table.itercols():
for attr in ('unit', 'format', 'description', 'meta'):
if getattr(col.info, attr, None) not in (None, {}):
warnings.warn("table contains column(s) with defined 'unit', 'format',"
" 'description', or 'meta' info attributes. These will"
" be dropped since serialize_meta=False.",
AstropyUserWarning)
# Write the table to the file
if compression:
if compression is True:
compression = 'gzip'
dset = output_group.create_dataset(name, data=table.as_array(),
compression=compression)
else:
dset = output_group.create_dataset(name, data=table.as_array())
if serialize_meta:
header_yaml = meta.get_yaml_from_table(table)
header_encoded = [h.encode('utf-8') for h in header_yaml]
if compatibility_mode:
warnings.warn("compatibility mode for writing is deprecated",
AstropyDeprecationWarning)
try:
dset.attrs[META_KEY] = header_encoded
except Exception as e:
warnings.warn(
"Attributes could not be written to the output HDF5 "
"file: {0}".format(e))
else:
output_group.create_dataset(meta_path(name),
data=header_encoded)
else:
# Write the Table meta dict key:value pairs to the file as HDF5
# attributes. This works only for a limited set of scalar data types
# like numbers, strings, etc., but not any complex types. This path
# also ignores column meta like unit or format.
for key in table.meta:
val = table.meta[key]
try:
dset.attrs[key] = val
except TypeError:
warnings.warn("Attribute `{0}` of type {1} cannot be written to "
"HDF5 files - skipping. (Consider specifying "
"serialize_meta=True to write all meta data)".format(key, type(val)),
AstropyUserWarning)
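# A minimal round-trip sketch (editor's addition; assumes h5py is installed;
# `_demo_` and the file name are illustrative; astropy.table is imported
# inside the function, per the module-level note above):
def _demo_hdf5_roundtrip():
    import tempfile
    from ...table import Table
    t = Table({'a': [1, 2, 3]})
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, 'demo.h5')
        write_table_hdf5(t, fname, path='data')
        t2 = read_table_hdf5(fname, path='data')
    assert (t2['a'] == t['a']).all()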
def register_hdf5():
"""
Register HDF5 with Unified I/O.
"""
from .. import registry as io_registry
from ...table import Table
io_registry.register_reader('hdf5', Table, read_table_hdf5)
io_registry.register_writer('hdf5', Table, write_table_hdf5)
io_registry.register_identifier('hdf5', Table, is_hdf5)
|
bbf2461a53f1d0d9c8b35cbc3cebc8fb7af7eb006c909a723933151483df5406 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from copy import copy
from io import StringIO
import pytest
import numpy as np
from ..registry import _readers, _writers, _identifiers
from .. import registry as io_registry
from ...table import Table
from ... import units as u
_READERS_ORIGINAL = copy(_readers)
_WRITERS_ORIGINAL = copy(_writers)
_IDENTIFIERS_ORIGINAL = copy(_identifiers)
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
class TestData:
read = classmethod(io_registry.read)
write = io_registry.write
def setup_function(function):
_readers.clear()
_writers.clear()
_identifiers.clear()
def empty_reader(*args, **kwargs):
return TestData()
def empty_writer(table, *args, **kwargs):
pass
def empty_identifier(*args, **kwargs):
return True
def test_get_reader_invalid():
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.get_reader('test', TestData)
assert str(exc.value).startswith(
"No reader defined for format 'test' and class 'TestData'")
def test_get_writer_invalid():
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.get_writer('test', TestData)
assert str(exc.value).startswith(
"No writer defined for format 'test' and class 'TestData'")
def test_register_reader():
io_registry.register_reader('test1', TestData, empty_reader)
io_registry.register_reader('test2', TestData, empty_reader)
assert io_registry.get_reader('test1', TestData) == empty_reader
assert io_registry.get_reader('test2', TestData) == empty_reader
io_registry.unregister_reader('test1', TestData)
with pytest.raises(io_registry.IORegistryError):
io_registry.get_reader('test1', TestData)
assert io_registry.get_reader('test2', TestData) == empty_reader
io_registry.unregister_reader('test2', TestData)
with pytest.raises(io_registry.IORegistryError):
io_registry.get_reader('test2', TestData)
def test_register_writer():
io_registry.register_writer('test1', TestData, empty_writer)
io_registry.register_writer('test2', TestData, empty_writer)
assert io_registry.get_writer('test1', TestData) == empty_writer
assert io_registry.get_writer('test2', TestData) == empty_writer
io_registry.unregister_writer('test1', TestData)
with pytest.raises(io_registry.IORegistryError):
io_registry.get_writer('test1', TestData)
assert io_registry.get_writer('test2', TestData) == empty_writer
io_registry.unregister_writer('test2', TestData)
with pytest.raises(io_registry.IORegistryError):
io_registry.get_writer('test2', TestData)
def test_register_identifier():
io_registry.register_identifier('test1', TestData, empty_identifier)
io_registry.register_identifier('test2', TestData, empty_identifier)
io_registry.unregister_identifier('test1', TestData)
io_registry.unregister_identifier('test2', TestData)
def test_register_reader_invalid():
io_registry.register_reader('test', TestData, empty_reader)
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.register_reader('test', TestData, empty_reader)
assert (str(exc.value) == "Reader for format 'test' and class 'TestData' "
"is already defined")
def test_register_writer_invalid():
io_registry.register_writer('test', TestData, empty_writer)
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.register_writer('test', TestData, empty_writer)
assert (str(exc.value) == "Writer for format 'test' and class 'TestData' "
"is already defined")
def test_register_identifier_invalid():
io_registry.register_identifier('test', TestData, empty_identifier)
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.register_identifier('test', TestData, empty_identifier)
assert (str(exc.value) == "Identifier for format 'test' and class "
"'TestData' is already defined")
def test_unregister_reader_invalid():
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.unregister_reader('test', TestData)
assert str(exc.value) == "No reader defined for format 'test' and class 'TestData'"
def test_unregister_writer_invalid():
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.unregister_writer('test', TestData)
assert str(exc.value) == "No writer defined for format 'test' and class 'TestData'"
def test_unregister_identifier_invalid():
with pytest.raises(io_registry.IORegistryError) as exc:
io_registry.unregister_identifier('test', TestData)
assert str(exc.value) == "No identifier defined for format 'test' and class 'TestData'"
def test_register_reader_force():
io_registry.register_reader('test', TestData, empty_reader)
io_registry.register_reader('test', TestData, empty_reader, force=True)
def test_register_writer_force():
io_registry.register_writer('test', TestData, empty_writer)
io_registry.register_writer('test', TestData, empty_writer, force=True)
def test_register_identifier_force():
io_registry.register_identifier('test', TestData, empty_identifier)
io_registry.register_identifier('test', TestData, empty_identifier, force=True)
def test_read_noformat():
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read()
assert str(exc.value).startswith("Format could not be identified.")
def test_write_noformat():
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write()
assert str(exc.value).startswith("Format could not be identified.")
def test_read_noformat_arbitrary():
"""Test that all identifier functions can accept arbitrary input"""
_identifiers.update(_IDENTIFIERS_ORIGINAL)
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read(object())
assert str(exc.value).startswith("Format could not be identified.")
def test_read_noformat_arbitrary_file(tmpdir):
"""Tests that all identifier functions can accept arbitrary files"""
_readers.update(_READERS_ORIGINAL)
testfile = str(tmpdir.join('foo.example'))
with open(testfile, 'w') as f:
f.write("Hello world")
with pytest.raises(io_registry.IORegistryError) as exc:
Table.read(testfile)
assert str(exc.value).startswith("Format could not be identified.")
def test_write_noformat_arbitrary():
"""Test that all identifier functions can accept arbitrary input"""
_identifiers.update(_IDENTIFIERS_ORIGINAL)
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write(object())
assert str(exc.value).startswith("Format could not be identified.")
def test_write_noformat_arbitrary_file(tmpdir):
"""Tests that all identifier functions can accept arbitrary files"""
_writers.update(_WRITERS_ORIGINAL)
testfile = str(tmpdir.join('foo.example'))
with pytest.raises(io_registry.IORegistryError) as exc:
Table().write(testfile)
assert str(exc.value).startswith("Format could not be identified.")
def test_read_toomanyformats():
io_registry.register_identifier('test1', TestData, lambda o, *x, **y: True)
io_registry.register_identifier('test2', TestData, lambda o, *x, **y: True)
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read()
assert str(exc.value) == "Format is ambiguous - options are: test1, test2"
def test_write_toomanyformats():
io_registry.register_identifier('test1', TestData, lambda o, *x, **y: True)
io_registry.register_identifier('test2', TestData, lambda o, *x, **y: True)
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write()
assert str(exc.value) == "Format is ambiguous - options are: test1, test2"
def test_read_format_noreader():
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read(format='test')
assert str(exc.value).startswith(
"No reader defined for format 'test' and class 'TestData'")
def test_write_format_nowriter():
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write(format='test')
assert str(exc.value).startswith(
"No writer defined for format 'test' and class 'TestData'")
def test_read_identifier(tmpdir):
io_registry.register_identifier(
'test1', TestData,
lambda o, path, fileobj, *x, **y: path.endswith('a'))
io_registry.register_identifier(
'test2', TestData,
lambda o, path, fileobj, *x, **y: path.endswith('b'))
# Now check that we got past the identifier and are trying to get
# the reader. The io_registry.get_reader will fail but the error message
# will tell us if the identifier worked.
filename = tmpdir.join("testfile.a").strpath
open(filename, 'w').close()
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read(filename)
assert str(exc.value).startswith(
"No reader defined for format 'test1' and class 'TestData'")
filename = tmpdir.join("testfile.b").strpath
open(filename, 'w').close()
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read(filename)
assert str(exc.value).startswith(
"No reader defined for format 'test2' and class 'TestData'")
def test_write_identifier():
io_registry.register_identifier('test1', TestData, lambda o, *x, **y: x[0].startswith('a'))
io_registry.register_identifier('test2', TestData, lambda o, *x, **y: x[0].startswith('b'))
# Now check that we got past the identifier and are trying to get
# the reader. The io_registry.get_writer will fail but the error message
# will tell us if the identifier worked.
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write('abc')
assert str(exc.value).startswith(
"No writer defined for format 'test1' and class 'TestData'")
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write('bac')
assert str(exc.value).startswith(
"No writer defined for format 'test2' and class 'TestData'")
def test_identifier_origin():
io_registry.register_identifier('test1', TestData, lambda o, *x, **y: o == 'read')
io_registry.register_identifier('test2', TestData, lambda o, *x, **y: o == 'write')
io_registry.register_reader('test1', TestData, empty_reader)
io_registry.register_writer('test2', TestData, empty_writer)
# There should not be too many formats defined
TestData.read()
TestData().write()
with pytest.raises(io_registry.IORegistryError) as exc:
TestData.read(format='test2')
assert str(exc.value).startswith(
"No reader defined for format 'test2' and class 'TestData'")
with pytest.raises(io_registry.IORegistryError) as exc:
TestData().write(format='test1')
assert str(exc.value).startswith(
"No writer defined for format 'test1' and class 'TestData'")
def test_read_valid_return():
io_registry.register_reader('test', TestData, lambda: TestData())
t = TestData.read(format='test')
assert isinstance(t, TestData)
def test_non_existing_unknown_ext():
"""Raise the correct error when attempting to read a non-existing
file with an unknown extension."""
with pytest.raises(OSError):
data = Table.read('non-existing-file-with-unknown.ext')
def test_read_basic_table():
data = np.array(list(zip([1, 2, 3], ['a', 'b', 'c'])),
dtype=[(str('A'), int), (str('B'), '|U1')])
io_registry.register_reader('test', Table, lambda x: Table(x))
t = Table.read(data, format='test')
assert t.keys() == ['A', 'B']
for i in range(3):
assert t['A'][i] == data['A'][i]
assert t['B'][i] == data['B'][i]
def test_register_readers_with_same_name_on_different_classes():
# No errors should be generated if the same name is registered for
# different objects...but this failed under python3
io_registry.register_reader('test', TestData, lambda: TestData())
io_registry.register_reader('test', Table, lambda: Table())
t = TestData.read(format='test')
assert isinstance(t, TestData)
tbl = Table.read(format='test')
assert isinstance(tbl, Table)
def test_inherited_registration():
# check that multi-generation inheritance works properly,
# meaning that a child inherits from parents before
# grandparents, see astropy/astropy#7156
class Child1(Table):
pass
class Child2(Child1):
pass
def _read():
return Table()
def _read1():
return Child1()
# check that reader gets inherited
io_registry.register_reader('test', Table, _read)
assert io_registry.get_reader('test', Child2) is _read
# check that nearest ancestor is identified
# (i.e. that the reader for Child2 is the registered method
# for Child1, and not Table)
io_registry.register_reader('test', Child1, _read1)
assert io_registry.get_reader('test', Child2) is _read1
def teardown_function(function):
_readers.update(_READERS_ORIGINAL)
_writers.update(_WRITERS_ORIGINAL)
_identifiers.update(_IDENTIFIERS_ORIGINAL)
class TestSubclass:
"""
Test using registry with a Table sub-class
"""
def test_read_table_subclass(self):
class MyTable(Table):
pass
data = ['a b', '1 2']
mt = MyTable.read(data, format='ascii')
t = Table.read(data, format='ascii')
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(mt) is MyTable
def test_write_table_subclass(self):
buffer = StringIO()
class MyTable(Table):
pass
mt = MyTable([[1], [2]], names=['a', 'b'])
mt.write(buffer, format='ascii')
assert buffer.getvalue() == os.linesep.join(['a b', '1 2', ''])
def test_read_table_subclass_with_columns_attributes(self, tmpdir):
"""Regression test for https://github.com/astropy/astropy/issues/7181
"""
class MTable(Table):
pass
mt = MTable([[1, 2.5]], names=['a'])
mt['a'].unit = u.m
mt['a'].format = '.4f'
mt['a'].description = 'hello'
testfile = str(tmpdir.join('junk.fits'))
mt.write(testfile, overwrite=True)
t = MTable.read(testfile)
assert np.all(mt == t)
assert mt.colnames == t.colnames
assert type(t) is MTable
assert t['a'].unit == u.m
assert t['a'].format == '{:13.4f}'
if HAS_YAML:
assert t['a'].description == 'hello'
else:
assert t['a'].description is None
|
eb07b1014a8e4620fbbd8894db2c0d7a83aeb2036f8c2278b3ee5ed286652a17 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import logging
import optparse
import os
import sys
import textwrap
import warnings
from ... import fits
from ..util import fill
from ....utils.exceptions import AstropyDeprecationWarning
log = logging.getLogger('fitsdiff')
USAGE = """
Compare two FITS image files and report the differences in header keywords and
data.
fitsdiff [options] filename1 filename2
where filename1 and filename2 are the two files to be compared. They may also
be wildcard patterns (in which case they must be enclosed in double or single
quotes) or directory names. If both are directory names, all files in each of
the directories will be included; if only one is a directory name, then the
directory name will be prefixed to the file name(s) specified by the other
argument. For example::
fitsdiff "*.fits" "/machine/data1"
will compare all FITS files in the current directory to the corresponding files
in the directory /machine/data1.
""".strip()
EPILOG = """
If the two files are identical within the specified conditions, it will report
"No difference is found." If the value(s) of -c and -k takes the form
'@filename', list is in the text file 'filename', and each line in that text
file contains one keyword.
Example
-------
fitsdiff -k filename,filtnam1 -n 5 -r 1.e-6 test1.fits test2
This command will compare files test1.fits and test2.fits, report maximum of 5
different pixels values per extension, only report data values larger than
1.e-6 relative to each other, and will neglect the different values of keywords
FILENAME and FILTNAM1 (or their very existence).
fitsdiff command-line arguments can also be set using the environment variable
FITSDIFF_SETTINGS. If the FITSDIFF_SETTINGS environment variable is present,
each argument it contains will override the corresponding argument on the
command-line unless the --exact option is specified. The FITSDIFF_SETTINGS
environment variable exists to make it easier to change the
behavior of fitsdiff on a global level, such as in a set of regression tests.
""".strip()
class HelpFormatter(optparse.TitledHelpFormatter):
def format_epilog(self, epilog):
return '\n{}\n'.format(fill(epilog, self.width))
def handle_options(argv=None):
# This is a callback--less trouble than actually adding a new action type
def store_list(option, opt, value, parser):
setattr(parser.values, option.dest, [])
# Accept either a comma-separated list or a filename (starting with @)
# containing a value on each line
if value and value[0] == '@':
value = value[1:]
if not os.path.exists(value):
log.warning('{} argument {} does not exist'.format(opt, value))
return
try:
values = [v.strip() for v in open(value, 'r').readlines()]
setattr(parser.values, option.dest, values)
except OSError as exc:
log.warning('reading {} for {} failed: {}; ignoring this '
'argument'.format(value, opt, exc))
del exc
else:
setattr(parser.values, option.dest,
[v.strip() for v in value.split(',')])
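    # For illustration: both "--ignore-keywords filename,filtnam1" and
    # "--ignore-keywords @keywords.txt" (one keyword per line) end up as a
    # list on parser.values; "keywords.txt" is a hypothetical file name.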
parser = optparse.OptionParser(usage=USAGE, epilog=EPILOG,
formatter=HelpFormatter())
parser.add_option(
'-q', '--quiet', action='store_true',
help='Produce no output and just return a status code.')
parser.add_option(
'-n', '--num-diffs', type='int', default=10, dest='numdiffs',
metavar='INTEGER',
help='Max number of data differences (image pixel or table element) '
'to report per extension (default %default).')
parser.add_option(
'-d', '--difference-tolerance', type='float', default=None,
dest='tolerance', metavar='NUMBER',
help='DEPRECATED. Alias for "--relative-tolerance". '
'Deprecated, provided for backward compatibility (default %default).')
parser.add_option(
'-r', '--rtol', '--relative-tolerance', type='float', default=None,
dest='rtol', metavar='NUMBER',
help='The relative tolerance for comparison of two numbers, '
'specifically two floating point numbers. This applies to data '
'in both images and tables, and to floating point keyword values '
'in headers (default %default).')
parser.add_option(
'-a', '--atol', '--absolute-tolerance', type='float', default=None,
dest='atol', metavar='NUMBER',
help='The absolute tolerance for comparison of two numbers, '
'specifically two floating point numbers. This applies to data '
'in both images and tables, and to floating point keyword values '
'in headers (default %default).')
parser.add_option(
'-b', '--no-ignore-blanks', action='store_false',
dest='ignore_blanks', default=True,
help="Don't ignore trailing blanks (whitespace) in string values. "
"Otherwise trailing blanks both in header keywords/values and in "
"table column values) are not treated as significant i.e., "
"without this option 'ABCDEF ' and 'ABCDEF' are considered "
"equivalent. ")
parser.add_option(
'--no-ignore-blank-cards', action='store_false',
dest='ignore_blank_cards', default=True,
help="Don't ignore entirely blank cards in headers. Normally fitsdiff "
"does not consider blank cards when comparing headers, but this "
"will ensure that even blank cards match up. ")
parser.add_option(
'--exact', action='store_true',
dest='exact_comparisons', default=False,
help="Report ALL differences, "
"overriding command-line options and FITSDIFF_SETTINGS. ")
parser.add_option(
'-o', '--output-file', metavar='FILE',
help='Output results to this file; otherwise results are printed to '
'stdout.')
parser.add_option(
'-u', '--ignore-hdus', action='callback', callback=store_list,
nargs=1, type='str', default=[], dest='ignore_hdus',
metavar='HDU_NAMES',
help='Comma-separated list of HDU names not to be compared. HDU '
'names may contain wildcard patterns.')
group = optparse.OptionGroup(parser, 'Header Comparison Options')
group.add_option(
'-k', '--ignore-keywords', action='callback', callback=store_list,
nargs=1, type='str', default=[], dest='ignore_keywords',
metavar='KEYWORDS',
help='Comma-separated list of keywords not to be compared. Keywords '
'may contain wildcard patterns. To exclude all keywords, use '
'"*"; make sure to have double or single quotes around the '
'asterisk on the command-line.')
group.add_option(
'-c', '--ignore-comments', action='callback', callback=store_list,
nargs=1, type='str', default=[], dest='ignore_comments',
metavar='KEYWORDS',
help='Comma-separated list of keywords whose comments will not be '
'compared. Wildcards may be used as with --ignore-keywords.')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Table Comparison Options')
group.add_option(
'-f', '--ignore-fields', action='callback', callback=store_list,
nargs=1, type='str', default=[], dest='ignore_fields',
metavar='COLUMNS',
help='Comma-separated list of fields (i.e. columns) not to be '
'compared. All columns may be excluded using "*" as with '
'--ignore-keywords.')
parser.add_option_group(group)
options, args = parser.parse_args(argv)
# Determine which filenames to compare
if len(args) != 2:
parser.error('\n' + textwrap.fill(
'fitsdiff requires two arguments; see `fitsdiff --help` for more '
'details.', parser.formatter.width))
return options, args
def setup_logging(outfile=None):
log.setLevel(logging.INFO)
error_handler = logging.StreamHandler(sys.stderr)
error_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
error_handler.setLevel(logging.WARNING)
log.addHandler(error_handler)
if outfile is not None:
output_handler = logging.FileHandler(outfile)
else:
output_handler = logging.StreamHandler()
class LevelFilter(logging.Filter):
"""Log only messages matching the specified level."""
def __init__(self, name='', level=logging.NOTSET):
logging.Filter.__init__(self, name)
self.level = level
def filter(self, rec):
return rec.levelno == self.level
# File output logs all messages, but stdout logs only INFO messages
# (since errors are already logged to stderr)
output_handler.addFilter(LevelFilter(level=logging.INFO))
output_handler.setFormatter(logging.Formatter('%(message)s'))
log.addHandler(output_handler)
def match_files(paths):
if os.path.isfile(paths[0]) and os.path.isfile(paths[1]):
# shortcut if both paths are files
return [paths]
dirnames = [None, None]
filelists = [None, None]
for i, path in enumerate(paths):
if glob.has_magic(path):
files = [os.path.split(f) for f in glob.glob(path)]
if not files:
log.error('Wildcard pattern %r did not match any files.', path)
sys.exit(2)
dirs, files = list(zip(*files))
if len(set(dirs)) > 1:
log.error('Wildcard pattern %r should match only one '
'directory.', path)
sys.exit(2)
dirnames[i] = set(dirs).pop()
filelists[i] = sorted(files)
elif os.path.isdir(path):
dirnames[i] = path
filelists[i] = sorted(os.listdir(path))
elif os.path.isfile(path):
dirnames[i] = os.path.dirname(path)
filelists[i] = [os.path.basename(path)]
else:
log.error(
'%r is not an existing file, directory, or wildcard '
'pattern; see `fitsdiff --help` for more usage help.', path)
sys.exit(2)
dirnames[i] = os.path.abspath(dirnames[i])
filematch = set(filelists[0]) & set(filelists[1])
for a, b in [(0, 1), (1, 0)]:
if len(filelists[a]) > len(filematch) and not os.path.isdir(paths[a]):
for extra in sorted(set(filelists[a]) - filematch):
log.warning('%r has no match in %r', extra, dirnames[b])
return [(os.path.join(dirnames[0], f),
os.path.join(dirnames[1], f)) for f in filematch]
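# Editor's note (illustration): given two directory arguments, match_files
# pairs files by shared basename, e.g. ['/data1', '/data2'] might yield
# [('/data1/a.fits', '/data2/a.fits'), ...]; unmatched files are skipped
# (and, for non-directory arguments, warned about).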
def main(args=None):
args = args or sys.argv[1:]
if 'FITSDIFF_SETTINGS' in os.environ:
args = os.environ['FITSDIFF_SETTINGS'].split() + args
opts, args = handle_options(args)
if opts.tolerance is not None:
warnings.warn(
'"-d" ("--difference-tolerance") was deprecated in version 2.0 '
'and will be removed in a future version. '
'Use "-r" ("--relative-tolerance") instead.',
AstropyDeprecationWarning)
opts.rtol = opts.tolerance
if opts.rtol is None:
opts.rtol = 0.0
if opts.atol is None:
opts.atol = 0.0
if opts.exact_comparisons:
# override the options so that each is the most restrictive
opts.ignore_keywords = []
opts.ignore_comments = []
opts.ignore_fields = []
opts.rtol = 0.0
opts.atol = 0.0
opts.ignore_blanks = False
opts.ignore_blank_cards = False
if not opts.quiet:
setup_logging(opts.output_file)
files = match_files(args)
close_file = False
if opts.quiet:
out_file = None
elif opts.output_file:
out_file = open(opts.output_file, 'w')
close_file = True
else:
out_file = sys.stdout
identical = []
try:
for a, b in files:
# TODO: pass in any additional arguments here too
diff = fits.diff.FITSDiff(
a, b,
ignore_hdus=opts.ignore_hdus,
ignore_keywords=opts.ignore_keywords,
ignore_comments=opts.ignore_comments,
ignore_fields=opts.ignore_fields,
numdiffs=opts.numdiffs,
rtol=opts.rtol,
atol=opts.atol,
ignore_blanks=opts.ignore_blanks,
ignore_blank_cards=opts.ignore_blank_cards)
diff.report(fileobj=out_file)
identical.append(diff.identical)
return int(not all(identical))
finally:
if close_file:
out_file.close()
# Close the file if used for the logging output, and remove handlers to
# avoid having them multiple times for unit tests.
    for handler in list(log.handlers):
if isinstance(handler, logging.FileHandler):
handler.close()
log.removeHandler(handler)
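# A sketch of the exit-code convention implied by main() above (the
# invocation below is hypothetical): 0 means every compared pair was
# identical, 1 means at least one difference was found, and 2 signals a
# usage/matching error (see the sys.exit(2) calls in match_files()).
#
#     import sys
#     sys.exit(main(['--ignore-keywords', 'DATE', 'a.fits', 'b.fits']))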
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import contextlib
import csv
import operator
import os
import re
import sys
import textwrap
import warnings
from contextlib import suppress
import numpy as np
from numpy import char as chararray
from .base import DELAYED, _ValidHDU, ExtensionHDU
# This module may have many dependencies on astropy.io.fits.column, but
# astropy.io.fits.column has fewer dependencies overall, so it's easier to
# keep table/column-related utilities in astropy.io.fits.column
from ..column import (FITS2NUMPY, KEYWORD_NAMES, KEYWORD_TO_ATTRIBUTE,
ATTRIBUTE_TO_KEYWORD, TDEF_RE, Column, ColDefs,
_AsciiColDefs, _FormatP, _FormatQ, _makep,
_parse_tformat, _scalar_to_format, _convert_format,
_cmp_recformats)
from ..fitsrec import FITS_rec, _get_recarray_field, _has_unicode_fields
from ..header import Header, _pad_length
from ..util import _is_int, _str_to_num
from ....utils import lazyproperty
from ....utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
from ....utils.decorators import deprecated_renamed_argument
class FITSTableDumpDialect(csv.excel):
"""
A CSV dialect for the Astropy format of ASCII dumps of FITS tables.
"""
delimiter = ' '
lineterminator = '\n'
quotechar = '"'
quoting = csv.QUOTE_ALL
skipinitialspace = True
class _TableLikeHDU(_ValidHDU):
"""
A class for HDUs that have table-like data. This is used for both
Binary/ASCII tables as well as Random Access Group HDUs (which are
otherwise too dissimilar for tables to use _TableBaseHDU directly).
"""
_data_type = FITS_rec
_columns_type = ColDefs
# TODO: Temporary flag representing whether uints are enabled; remove this
# after restructuring to support uints by default on a per-column basis
_uint = False
@classmethod
def match_header(cls, header):
"""
This is an abstract HDU type for HDUs that contain table-like data.
This is even more abstract than _TableBaseHDU which is specifically for
the standard ASCII and Binary Table types.
"""
raise NotImplementedError
@classmethod
def from_columns(cls, columns, header=None, nrows=0, fill=False,
character_as_bytes=False, **kwargs):
"""
Given either a `ColDefs` object, a sequence of `Column` objects,
or another table HDU or table data (a `FITS_rec` or multi-field
        `numpy.ndarray` or `numpy.recarray` object), return a new table HDU of
the class this method was called on using the column definition from
the input.
See also `FITS_rec.from_columns`.
Parameters
----------
columns : sequence of `Column`, `ColDefs`, or other
The columns from which to create the table data, or an object with
a column-like structure from which a `ColDefs` can be instantiated.
This includes an existing `BinTableHDU` or `TableHDU`, or a
`numpy.recarray` to give some examples.
If these columns have data arrays attached that data may be used in
initializing the new table. Otherwise the input columns will be
used as a template for a new table with the requested number of
rows.
header : `Header`
            An optional `Header` object with which to instantiate the new HDU. Header
keywords specifically related to defining the table structure (such
as the "TXXXn" keywords like TTYPEn) will be overridden by the
supplied column definitions, but all other informational and data
model-specific keywords are kept.
nrows : int
Number of rows in the new table. If the input columns have data
associated with them, the size of the largest input column is used.
Otherwise the default is 0.
fill : bool
            If `True`, will fill all cells with zeros or blanks. If `False`,
            copy the data from the input; undefined cells will still be filled
            with zeros/blanks.
character_as_bytes : bool
Whether to return bytes for string columns when accessed from the
HDU. By default this is `False` and (unicode) strings are returned,
but for large tables this may use up a lot of memory.
Notes
-----
Any additional keyword arguments accepted by the HDU class's
``__init__`` may also be passed in as keyword arguments.
"""
coldefs = cls._columns_type(columns)
data = FITS_rec.from_columns(coldefs, nrows=nrows, fill=fill,
character_as_bytes=character_as_bytes)
hdu = cls(data=data, header=header, character_as_bytes=character_as_bytes, **kwargs)
coldefs._add_listener(hdu)
return hdu
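    # An illustrative sketch (column names and data are hypothetical) of
    # building a binary table HDU via from_columns(); on BinTableHDU this
    # assembles a ColDefs, a FITS_rec, and the matching header keywords:
    #
    #     from astropy.io import fits
    #     import numpy as np
    #     c1 = fits.Column(name='target', format='10A',
    #                      array=np.array(['NGC1333', 'NGC7023']))
    #     c2 = fits.Column(name='flux', format='E',
    #                      array=np.array([1.2, 3.4], dtype=np.float32))
    #     hdu = fits.BinTableHDU.from_columns([c1, c2])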
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
# The base class doesn't make any assumptions about where the column
# definitions come from, so just return an empty ColDefs
return ColDefs([])
@property
def _nrows(self):
"""
Table-like HDUs must provide an attribute that specifies the number of
rows in the HDU's table.
For now this is an internal-only attribute.
"""
raise NotImplementedError
def _get_tbdata(self):
"""Get the table data from an input HDU object."""
columns = self.columns
# TODO: Details related to variable length arrays need to be dealt with
# specifically in the BinTableHDU class, since they're a detail
# specific to FITS binary tables
if (any(type(r) in (_FormatP, _FormatQ)
for r in columns._recformats) and
self._data_size is not None and
self._data_size > self._theap):
# We have a heap; include it in the raw_data
raw_data = self._get_raw_data(self._data_size, np.uint8,
self._data_offset)
data = raw_data[:self._theap].view(dtype=columns.dtype,
type=np.rec.recarray)
else:
raw_data = self._get_raw_data(self._nrows, columns.dtype,
self._data_offset)
if raw_data is None:
# This can happen when a brand new table HDU is being created
                # and no data has been assigned to the columns, in which case
                # just return an empty array
raw_data = np.array([], dtype=columns.dtype)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
data = data.view(self._data_type)
columns._add_listener(data)
return data
def _init_tbdata(self, data):
columns = self.columns
data.dtype = data.dtype.newbyteorder('>')
# hack to enable pseudo-uint support
data._uint = self._uint
        # pass the heap data location ("datLoc"), needed for P format columns
data._heapoffset = self._theap
data._heapsize = self._header['PCOUNT']
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
data._gap = self._theap - tbsize
# pass the attributes
for idx, col in enumerate(columns):
# get the data for each column object from the rec.recarray
col.array = data.field(idx)
# delete the _arrays attribute so that it is recreated to point to the
# new data placed in the column object above
del columns._arrays
def _update_column_added(self, columns, column):
"""
Update the data upon addition of a new column through the `ColDefs`
interface.
"""
# TODO: It's not clear that this actually works--it probably does not.
# This is what the code used to do before introduction of the
# notifier interface, but I don't believe it actually worked (there are
# several bug reports related to this...)
if self._data_loaded:
del self.data
def _update_column_removed(self, columns, col_idx):
"""
Update the data upon removal of a column through the `ColDefs`
interface.
"""
# For now this doesn't do anything fancy--it just deletes the data
# attribute so that it is forced to be recreated again. It doesn't
# change anything on the existing data recarray (this is also how this
# worked before introducing the notifier interface)
if self._data_loaded:
del self.data
class _TableBaseHDU(ExtensionHDU, _TableLikeHDU):
"""
FITS table extension base HDU class.
Parameters
----------
data : array
Data to be used.
header : `Header` instance
Header to be used. If the ``data`` is also specified, header keywords
specifically related to defining the table structure (such as the
"TXXXn" keywords like TTYPEn) will be overridden by the supplied column
definitions, but all other informational and data model-specific
keywords are kept.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
        The version of the HDU; it will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_manages_own_heap = False
"""
This flag implies that when writing VLA tables (P/Q format) the heap
pointers that go into P/Q table columns should not be reordered or
rearranged in any way by the default heap management code.
This is included primarily as an optimization for compressed image HDUs
which perform their own heap maintenance.
"""
def __init__(self, data=None, header=None, name=None, uint=False, ver=None,
character_as_bytes=False):
super().__init__(data=data, header=header, name=name, ver=ver)
if header is not None and not isinstance(header, Header):
raise ValueError('header must be a Header object.')
self._uint = uint
self._character_as_bytes = character_as_bytes
if data is DELAYED:
# this should never happen
if header is None:
raise ValueError('No header to setup HDU.')
# if the file is read the first time, no need to copy, and keep it
# unchanged
else:
self._header = header
else:
# construct a list of cards of minimal header
cards = [
('XTENSION', '', ''),
('BITPIX', 8, 'array data type'),
('NAXIS', 2, 'number of array dimensions'),
('NAXIS1', 0, 'length of dimension 1'),
('NAXIS2', 0, 'length of dimension 2'),
('PCOUNT', 0, 'number of group parameters'),
('GCOUNT', 1, 'number of groups'),
('TFIELDS', 0, 'number of table fields')]
if header is not None:
# Make a "copy" (not just a view) of the input header, since it
# may get modified. the data is still a "view" (for now)
hcopy = header.copy(strip=True)
cards.extend(hcopy.cards)
self._header = Header(cards)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# self._data_type is FITS_rec.
if isinstance(data, self._data_type):
self.data = data
else:
self.data = self._data_type.from_columns(data)
# TEMP: Special column keywords are normally overwritten by attributes
# from Column objects. In Astropy 3.0, several new keywords are now
# recognized as being special column keywords, but we don't
# automatically clear them yet, as we need to raise a deprecation
# warning for at least one major version.
if header is not None:
future_ignore = set()
for keyword in self._header.keys():
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group('label')
except Exception:
continue # skip if there is no match
if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:
future_ignore.add(base_keyword)
if future_ignore:
keys = ', '.join(x + 'n' for x in sorted(future_ignore))
warnings.warn("The following keywords are now recognized as special "
"column-related attributes and should be set via the "
"Column objects: {0}. In future, these values will be "
"dropped from manually specified headers automatically "
"and replaced with values generated based on the "
"Column objects.".format(keys), AstropyDeprecationWarning)
# TODO: Too much of the code in this class uses header keywords
# in making calculations related to the data size. This is
# unreliable, however, in cases when users mess with the header
# unintentionally--code that does this should be cleaned up.
self._header['NAXIS1'] = self.data._raw_itemsize
self._header['NAXIS2'] = self.data.shape[0]
self._header['TFIELDS'] = len(self.data._coldefs)
self.columns = self.data._coldefs
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError('Table data has incorrect type.')
if not (isinstance(self._header[0], str) and
self._header[0].rstrip() == self._extension):
self._header[0] = (self._extension, self._ext_comment)
# Ensure that the correct EXTNAME is set on the new header if one was
# created, or that it overrides the existing EXTNAME if different
if name:
self.name = name
if ver is not None:
self.ver = ver
@classmethod
def match_header(cls, header):
"""
This is an abstract type that implements the shared functionality of
the ASCII and Binary Table HDU types, which should be used instead of
this.
"""
raise NotImplementedError
@lazyproperty
def columns(self):
"""
The :class:`ColDefs` objects describing the columns in this table.
"""
if self._has_data and hasattr(self.data, '_coldefs'):
return self.data._coldefs
return self._columns_type(self)
@lazyproperty
def data(self):
data = self._get_tbdata()
data._coldefs = self.columns
data._character_as_bytes = self._character_as_bytes
# Columns should now just return a reference to the data._coldefs
del self.columns
return data
@data.setter
def data(self, data):
if 'data' in self.__dict__:
if self.__dict__['data'] is data:
return
else:
self._data_replaced = True
else:
self._data_replaced = True
self._modified = True
if data is None and self.columns:
# Create a new table with the same columns, but empty rows
formats = ','.join(self.columns._recformats)
data = np.rec.array(None, formats=formats,
names=self.columns.names,
shape=0)
if isinstance(data, np.ndarray) and data.dtype.fields is not None:
# Go ahead and always make a view, even if the data is already the
# correct class (self._data_type) so we can update things like the
# column defs, if necessary
data = data.view(self._data_type)
if not isinstance(data.columns, self._columns_type):
# This would be the place, if the input data was for an ASCII
# table and this is binary table, or vice versa, to convert the
# data to the appropriate format for the table type
new_columns = self._columns_type(data.columns)
data = FITS_rec.from_columns(new_columns)
self.__dict__['data'] = data
self.columns = self.data.columns
self.update()
with suppress(TypeError, AttributeError):
# Make the ndarrays in the Column objects of the ColDefs
# object of the HDU reference the same ndarray as the HDU's
# FITS_rec object.
for idx, col in enumerate(self.columns):
col.array = self.data.field(idx)
# Delete the _arrays attribute so that it is recreated to
# point to the new data placed in the column objects above
del self.columns._arrays
elif data is None:
pass
else:
raise TypeError('Table data has incorrect type.')
# returning the data signals to lazyproperty that we've already handled
# setting self.__dict__['data']
return data
@property
def _nrows(self):
if not self._data_loaded:
return self._header.get('NAXIS2', 0)
else:
return len(self.data)
@lazyproperty
def _theap(self):
size = self._header['NAXIS1'] * self._header['NAXIS2']
return self._header.get('THEAP', size)
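    # A layout sketch of the arithmetic used by _theap above and _prewriteto
    # below: the fixed-width table occupies NAXIS1 * NAXIS2 bytes, the
    # variable length array heap starts at THEAP (defaulting to the end of
    # the table), and PCOUNT covers the heap plus any gap before it:
    #
    #     gap    = THEAP - NAXIS1 * NAXIS2
    #     PCOUNT = heap size + gap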
# TODO: Need to either rename this to update_header, for symmetry with the
# Image HDUs, or just at some point deprecate it and remove it altogether,
# since header updates should occur automatically when necessary...
def update(self):
"""
Update header keywords to reflect recent changes of columns.
"""
self._header.set('NAXIS1', self.data._raw_itemsize, after='NAXIS')
self._header.set('NAXIS2', self.data.shape[0], after='NAXIS1')
self._header.set('TFIELDS', len(self.columns), after='GCOUNT')
self._clear_table_keywords()
self._populate_table_keywords()
def copy(self):
"""
Make a copy of the table HDU, both header and data are copied.
"""
# touch the data, so it's defined (in the case of reading from a
# FITS file)
return self.__class__(data=self.data.copy(),
header=self._header.copy())
def _prewriteto(self, checksum=False, inplace=False):
if self._has_data:
self.data._scale_back(
update_heap_pointers=not self._manages_own_heap)
# check TFIELDS and NAXIS2
self._header['TFIELDS'] = len(self.data._coldefs)
self._header['NAXIS2'] = self.data.shape[0]
# calculate PCOUNT, for variable length tables
tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
heapstart = self._header.get('THEAP', tbsize)
self.data._gap = heapstart - tbsize
pcount = self.data._heapsize + self.data._gap
if pcount > 0:
self._header['PCOUNT'] = pcount
# update the other T****n keywords
self._populate_table_keywords()
# update TFORM for variable length columns
for idx in range(self.data._nfields):
format = self.data._coldefs._recformats[idx]
if isinstance(format, _FormatP):
_max = self.data.field(idx).max
# May be either _FormatP or _FormatQ
format_cls = format.__class__
format = format_cls(format.dtype, repeat=format.repeat,
max=_max)
self._header['TFORM' + str(idx + 1)] = format.tform
return super()._prewriteto(checksum, inplace)
def _verify(self, option='warn'):
"""
_TableBaseHDU verify method.
"""
errs = super()._verify(option=option)
self.req_cards('NAXIS', None, lambda v: (v == 2), 2, option, errs)
self.req_cards('BITPIX', None, lambda v: (v == 8), 8, option, errs)
self.req_cards('TFIELDS', 7,
lambda v: (_is_int(v) and v >= 0 and v <= 999), 0,
option, errs)
tfields = self._header['TFIELDS']
for idx in range(tfields):
self.req_cards('TFORM' + str(idx + 1), None, None, None, option,
errs)
return errs
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
shape, format = (), ''
nrows = 0
else:
nrows = len(self.data)
ncols = len(self.columns)
format = self.columns.formats
# if data is not touched yet, use header info.
else:
shape = ()
nrows = self._header['NAXIS2']
ncols = self._header['TFIELDS']
format = ', '.join([self._header['TFORM' + str(j + 1)]
for j in range(ncols)])
format = '[{}]'.format(format)
dims = "{}R x {}C".format(nrows, ncols)
ncards = len(self._header)
return (self.name, self.ver, class_name, ncards, dims, format)
def _update_column_removed(self, columns, idx):
super()._update_column_removed(columns, idx)
# Fix the header to reflect the column removal
self._clear_table_keywords(index=idx)
def _update_column_attribute_changed(self, column, col_idx, attr,
old_value, new_value):
"""
Update the header when one of the column objects is updated.
"""
# base_keyword is the keyword without the index such as TDIM
# while keyword is like TDIM1
base_keyword = ATTRIBUTE_TO_KEYWORD[attr]
keyword = base_keyword + str(col_idx + 1)
if keyword in self._header:
if new_value is None:
# If the new value is None, i.e. None was assigned to the
# column attribute, then treat this as equivalent to deleting
# that attribute
del self._header[keyword]
else:
self._header[keyword] = new_value
else:
keyword_idx = KEYWORD_NAMES.index(base_keyword)
# Determine the appropriate keyword to insert this one before/after
# if it did not already exist in the header
for before_keyword in reversed(KEYWORD_NAMES[:keyword_idx]):
before_keyword += str(col_idx + 1)
if before_keyword in self._header:
self._header.insert(before_keyword, (keyword, new_value),
after=True)
break
else:
for after_keyword in KEYWORD_NAMES[keyword_idx + 1:]:
after_keyword += str(col_idx + 1)
if after_keyword in self._header:
self._header.insert(after_keyword,
(keyword, new_value))
break
else:
# Just append
self._header[keyword] = new_value
def _clear_table_keywords(self, index=None):
"""
Wipe out any existing table definition keywords from the header.
If specified, only clear keywords for the given table index (shifting
up keywords for any other columns). The index is zero-based.
        Otherwise, clear the keywords for all columns.
"""
# First collect all the table structure related keyword in the header
# into a single list so we can then sort them by index, which will be
# useful later for updating the header in a sensible order (since the
# header *might* not already be written in a reasonable order)
table_keywords = []
for idx, keyword in enumerate(self._header.keys()):
match = TDEF_RE.match(keyword)
try:
base_keyword = match.group('label')
except Exception:
continue # skip if there is no match
if base_keyword in KEYWORD_TO_ATTRIBUTE:
# TEMP: For Astropy 3.0 we don't clear away the following keywords
# as we are first raising a deprecation warning that these will be
# dropped automatically if they were specified in the header. We
# can remove this once we are happy to break backward-compatibility
if base_keyword in {'TCTYP', 'TCUNI', 'TCRPX', 'TCRVL', 'TCDLT', 'TRPOS'}:
continue
                num = int(match.group('num')) - 1  # convert to zero-based index
table_keywords.append((idx, match.group(0), base_keyword,
num))
# First delete
rev_sorted_idx_0 = sorted(table_keywords, key=operator.itemgetter(0),
reverse=True)
for idx, keyword, _, num in rev_sorted_idx_0:
if index is None or index == num:
del self._header[idx]
# Now shift up remaining column keywords if only one column was cleared
if index is not None:
sorted_idx_3 = sorted(table_keywords, key=operator.itemgetter(3))
for _, keyword, base_keyword, num in sorted_idx_3:
if num <= index:
continue
old_card = self._header.cards[keyword]
new_card = (base_keyword + str(num), old_card.value,
old_card.comment)
self._header.insert(keyword, new_card)
del self._header[keyword]
# Also decrement TFIELDS
if 'TFIELDS' in self._header:
self._header['TFIELDS'] -= 1
def _populate_table_keywords(self):
"""Populate the new table definition keywords from the header."""
for idx, column in enumerate(self.columns):
for keyword, attr in KEYWORD_TO_ATTRIBUTE.items():
val = getattr(column, attr)
if val is not None:
keyword = keyword + str(idx + 1)
self._header[keyword] = val
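    # For illustration (a hypothetical column): a Column(name='flux',
    # format='E', unit='Jy') in position 1 would be written out by
    # _populate_table_keywords as header cards along the lines of
    #
    #     TTYPE1  = 'flux'
    #     TFORM1  = 'E'
    #     TUNIT1  = 'Jy'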
class TableHDU(_TableBaseHDU):
"""
FITS ASCII table extension HDU class.
Parameters
----------
data : array or `FITS_rec`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
ver : int > 0 or None, optional
        The version of the HDU; it will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = 'TABLE'
_ext_comment = 'ASCII table extension'
_padding_byte = ' '
_columns_type = _AsciiColDefs
__format_RE = re.compile(
r'(?P<code>[ADEFIJ])(?P<width>\d+)(?:\.(?P<prec>\d+))?')
def __init__(self, data=None, header=None, name=None, ver=None, character_as_bytes=False):
super().__init__(data, header, name=name, ver=ver, character_as_bytes=character_as_bytes)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return card.keyword == 'XTENSION' and xtension == cls._extension
def _get_tbdata(self):
columns = self.columns
        names = list(columns.names)
# determine if there are duplicate field names and if there
# are throw an exception
dup = np.rec.find_duplicate(names)
if dup:
raise ValueError("Duplicate field names: {}".format(dup))
# TODO: Determine if this extra logic is necessary--I feel like the
# _AsciiColDefs class should be responsible for telling the table what
# its dtype should be...
itemsize = columns.spans[-1] + columns.starts[-1] - 1
dtype = {}
for idx in range(len(columns)):
data_type = 'S' + str(columns.spans[idx])
if idx == len(columns) - 1:
# The last column is padded out to the value of NAXIS1
if self._header['NAXIS1'] > itemsize:
data_type = 'S' + str(columns.spans[idx] +
self._header['NAXIS1'] - itemsize)
dtype[columns.names[idx]] = (data_type, columns.starts[idx] - 1)
raw_data = self._get_raw_data(self._nrows, dtype, self._data_offset)
data = raw_data.view(np.rec.recarray)
self._init_tbdata(data)
return data.view(self._data_type)
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# We have the data to be used.
# We need to pad the data to a block length before calculating
# the datasum.
bytes_array = self.data.view(type=np.ndarray, dtype=np.ubyte)
padding = np.frombuffer(_pad_length(self.size) * b' ',
dtype=np.ubyte)
d = np.append(bytes_array, padding)
cs = self._compute_checksum(d)
return cs
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _verify(self, option='warn'):
"""
`TableHDU` verify method.
"""
errs = super()._verify(option=option)
self.req_cards('PCOUNT', None, lambda v: (v == 0), 0, option, errs)
tfields = self._header['TFIELDS']
for idx in range(tfields):
self.req_cards('TBCOL' + str(idx + 1), None, _is_int, None, option,
errs)
return errs
class BinTableHDU(_TableBaseHDU):
"""
Binary table HDU class.
Parameters
----------
data : array, `FITS_rec`, or `~astropy.table.Table`
Data to be used.
header : `Header`
Header to be used.
name : str
Name to be populated in ``EXTNAME`` keyword.
uint : bool, optional
Set to `True` if the table contains unsigned integer columns.
ver : int > 0 or None, optional
        The version of the HDU; it will be the value of the keyword ``EXTVER``.
If not given or None, it defaults to the value of the ``EXTVER``
card of the ``header`` or 1.
(default: None)
character_as_bytes : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
"""
_extension = 'BINTABLE'
_ext_comment = 'binary table extension'
def __init__(self, data=None, header=None, name=None, uint=False, ver=None,
character_as_bytes=False):
from ....table import Table
if isinstance(data, Table):
from ..convenience import table_to_hdu
hdu = table_to_hdu(data)
if header is not None:
hdu.header.update(header)
data = hdu.data
header = hdu.header
super().__init__(data, header, name=name, uint=uint, ver=ver,
character_as_bytes=character_as_bytes)
@classmethod
def match_header(cls, header):
card = header.cards[0]
xtension = card.value
if isinstance(xtension, str):
xtension = xtension.rstrip()
return (card.keyword == 'XTENSION' and
xtension in (cls._extension, 'A3DTABLE'))
def _calculate_datasum_with_heap(self):
"""
Calculate the value for the ``DATASUM`` card given the input data
"""
with _binary_table_byte_swap(self.data) as data:
dout = data.view(type=np.ndarray, dtype=np.ubyte)
csum = self._compute_checksum(dout)
# Now add in the heap data to the checksum (we can skip any gap
# between the table and the heap since it's all zeros and doesn't
            # contribute to the checksum)
# TODO: The following code may no longer be necessary since it is
# now possible to get a pointer directly to the heap data as a
# whole. That said, it is possible for the heap section to contain
# data that is not actually pointed to by the table (i.e. garbage;
# this *shouldn't* happen but it is not disallowed either)--need to
# double check whether or not the checksum should include such
# garbage
for idx in range(data._nfields):
if isinstance(data.columns._recformats[idx], _FormatP):
for coldata in data.field(idx):
# coldata should already be byteswapped from the call
# to _binary_table_byte_swap
if not len(coldata):
continue
csum = self._compute_checksum(coldata, csum)
return csum
def _calculate_datasum(self):
"""
Calculate the value for the ``DATASUM`` card in the HDU.
"""
if self._has_data:
# This method calculates the datasum while incorporating any
# heap data, which is obviously not handled from the base
# _calculate_datasum
return self._calculate_datasum_with_heap()
else:
# This is the case where the data has not been read from the file
# yet. We can handle that in a generic manner so we do it in the
# base class. The other possibility is that there is no data at
# all. This can also be handled in a generic manner.
return super()._calculate_datasum()
def _writedata_internal(self, fileobj):
size = 0
if self.data is None:
return size
with _binary_table_byte_swap(self.data) as data:
if _has_unicode_fields(data):
# If the raw data was a user-supplied recarray, we can't write
# unicode columns directly to the file, so we have to switch
# to a slower row-by-row write
self._writedata_by_row(fileobj)
else:
fileobj.writearray(data)
            # Write out the heap of variable length array columns; this has
            # to be done after the "regular" data is written (above)
fileobj.write((data._gap * '\0').encode('ascii'))
nbytes = data._gap
if not self._manages_own_heap:
# Write the heap data one column at a time, in the order
                # that the data pointers appear in the column (regardless of
                # whether that data pointer has a different, previous heap
                # offset listed)
for idx in range(data._nfields):
if not isinstance(data.columns._recformats[idx],
_FormatP):
continue
field = self.data.field(idx)
for row in field:
if len(row) > 0:
nbytes += row.nbytes
if not fileobj.simulateonly:
fileobj.writearray(row)
else:
heap_data = data._get_heap_data()
if len(heap_data) > 0:
nbytes += len(heap_data)
if not fileobj.simulateonly:
fileobj.writearray(heap_data)
data._heapsize = nbytes - data._gap
size += nbytes
size += self.data.size * self.data._raw_itemsize
return size
def _writedata_by_row(self, fileobj):
fields = [self.data.field(idx)
for idx in range(len(self.data.columns))]
# Creating Record objects is expensive (as in
        # `for row in self.data:`), so instead we just iterate over the row
# indices and get one field at a time:
for idx in range(len(self.data)):
for field in fields:
item = field[idx]
field_width = None
if field.dtype.kind == 'U':
# Read the field *width* by reading past the field kind.
i = field.dtype.str.index(field.dtype.kind)
field_width = int(field.dtype.str[i+1:])
item = np.char.encode(item, 'ascii')
fileobj.writearray(item)
if field_width is not None:
j = item.dtype.str.index(item.dtype.kind)
item_length = int(item.dtype.str[j+1:])
# Fix padding problem (see #5296).
padding = '\x00'*(field_width - item_length)
fileobj.write(padding.encode('ascii'))
_tdump_file_format = textwrap.dedent("""
- **datafile:** Each line of the data file represents one row of table
data. The data is output one column at a time in column order. If
a column contains an array, each element of the column array in the
current row is output before moving on to the next column. Each row
ends with a new line.
Integer data is output right-justified in a 21-character field
      followed by a blank. Floating point data is output right-justified
using 'g' format in a 21-character field with 15 digits of
precision, followed by a blank. String data that does not contain
whitespace is output left-justified in a field whose width matches
the width specified in the ``TFORM`` header parameter for the
column, followed by a blank. When the string data contains
whitespace characters, the string is enclosed in quotation marks
(``""``). For the last data element in a row, the trailing blank in
the field is replaced by a new line character.
For column data containing variable length arrays ('P' format), the
array data is preceded by the string ``'VLA_Length= '`` and the
integer length of the array for that row, left-justified in a
21-character field, followed by a blank.
.. note::
This format does *not* support variable length arrays using the
            ('Q' format) due to difficult-to-overcome ambiguities. What this
means is that this file format cannot support VLA columns in
tables stored in files that are over 2 GB in size.
For column data representing a bit field ('X' format), each bit
value in the field is output right-justified in a 21-character field
as 1 (for true) or 0 (for false).
- **cdfile:** Each line of the column definitions file provides the
definitions for one column in the table. The line is broken up into
      eight sixteen-character fields. The first field provides the column
name (``TTYPEn``). The second field provides the column format
(``TFORMn``). The third field provides the display format
(``TDISPn``). The fourth field provides the physical units
(``TUNITn``). The fifth field provides the dimensions for a
multidimensional array (``TDIMn``). The sixth field provides the
value that signifies an undefined value (``TNULLn``). The seventh
field provides the scale factor (``TSCALn``). The eighth field
provides the offset value (``TZEROn``). A field value of ``""`` is
used to represent the case where no value is provided.
- **hfile:** Each line of the header parameters file provides the
definition of a single HDU header card as represented by the card
image.
""")
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def dump(self, datafile=None, cdfile=None, hfile=None, overwrite=False):
"""
Dump the table HDU to a file in ASCII format. The table may be dumped
in three separate files, one containing column definitions, one
containing header parameters, and one for table data.
Parameters
----------
datafile : file path, file object or file-like object, optional
Output data file. The default is the root name of the
fits file associated with this HDU appended with the
extension ``.txt``.
cdfile : file path, file object or file-like object, optional
Output column definitions file. The default is `None`, no
column definitions output is produced.
hfile : file path, file object or file-like object, optional
Output header parameters file. The default is `None`,
no header parameters output is produced.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
Notes
-----
The primary use for the `dump` method is to allow viewing and editing
the table data and parameters in a standard text editor.
The `load` method can be used to create a new table from the three
plain text (ASCII) files.
"""
# check if the output files already exist
exist = []
files = [datafile, cdfile, hfile]
for f in files:
if isinstance(f, str):
if os.path.exists(f) and os.path.getsize(f) != 0:
if overwrite:
warnings.warn(
"Overwriting existing file '{}'.".format(f),
AstropyUserWarning)
os.remove(f)
else:
exist.append(f)
if exist:
raise OSError(' '.join(["File '{}' already exists.".format(f)
for f in exist]))
# Process the data
self._dump_data(datafile)
# Process the column definitions
if cdfile:
self._dump_coldefs(cdfile)
# Process the header parameters
if hfile:
self._header.tofile(hfile, sep='\n', endcard=False, padding=False)
if isinstance(dump.__doc__, str):
dump.__doc__ += _tdump_file_format.replace('\n', '\n ')
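    # A usage sketch for dump() (the file names are hypothetical):
    #
    #     hdu.dump('table.txt', cdfile='table_cd.txt', hfile='table_hdr.txt',
    #              overwrite=True)
    #
    # writes the data, column definitions, and header parameters to the
    # three ASCII files described in _tdump_file_format above.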
def load(cls, datafile, cdfile=None, hfile=None, replace=False,
header=None):
"""
Create a table from the input ASCII files. The input is from up to
three separate files, one containing column definitions, one containing
header parameters, and one containing column data.
The column definition and header parameters files are not required.
When absent the column definitions and/or header parameters are taken
from the header object given in the header argument; otherwise sensible
defaults are inferred (though this mode is not recommended).
Parameters
----------
datafile : file path, file object or file-like object
Input data file containing the table data in ASCII format.
cdfile : file path, file object, file-like object, optional
Input column definition file containing the names,
formats, display formats, physical units, multidimensional
array dimensions, undefined values, scale factors, and
offsets associated with the columns in the table. If
`None`, the column definitions are taken from the current
values in this object.
hfile : file path, file object, file-like object, optional
Input parameter definition file containing the header
parameter definitions to be associated with the table. If
`None`, the header parameter definitions are taken from
the current values in this objects header.
replace : bool
When `True`, indicates that the entire header should be
replaced with the contents of the ASCII file instead of
just updating the current header.
header : Header object
When the cdfile and hfile are missing, use this Header object in
the creation of the new table and HDU. Otherwise this Header
            supersedes the keywords from hfile, which is only used to update
            values not present in this Header, unless ``replace=True``, in
            which case this Header's values are completely replaced with the
            values from hfile.
Notes
-----
The primary use for the `load` method is to allow the input of ASCII
data that was edited in a standard text editor of the table data and
parameters. The `dump` method can be used to create the initial ASCII
files.
"""
# Process the parameter file
if header is None:
header = Header()
if hfile:
if replace:
header = Header.fromtextfile(hfile)
else:
header.extend(Header.fromtextfile(hfile), update=True,
update_first=True)
coldefs = None
# Process the column definitions file
if cdfile:
coldefs = cls._load_coldefs(cdfile)
# Process the data file
data = cls._load_data(datafile, coldefs)
if coldefs is None:
coldefs = ColDefs(data)
# Create a new HDU using the supplied header and data
hdu = cls(data=data, header=header)
hdu.columns = coldefs
return hdu
if isinstance(load.__doc__, str):
load.__doc__ += _tdump_file_format.replace('\n', '\n ')
load = classmethod(load)
# Have to create a classmethod from this here instead of as a decorator;
# otherwise we can't update __doc__
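    # A matching usage sketch for load() (the file names are hypothetical),
    # recreating a table HDU from files previously written by dump():
    #
    #     hdu = BinTableHDU.load('table.txt', cdfile='table_cd.txt',
    #                            hfile='table_hdr.txt')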
def _dump_data(self, fileobj):
"""
Write the table data in the ASCII format read by BinTableHDU.load()
to fileobj.
"""
if not fileobj and self._file:
root = os.path.splitext(self._file.name)[0]
fileobj = root + '.txt'
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
close_file = True
linewriter = csv.writer(fileobj, dialect=FITSTableDumpDialect)
# Process each row of the table and output one row at a time
def format_value(val, format):
if format[0] == 'S':
itemsize = int(format[1:])
return '{:{size}}'.format(val, size=itemsize)
elif format in np.typecodes['AllInteger']:
# output integer
return '{:21d}'.format(val)
elif format in np.typecodes['Complex']:
return '{:21.15g}+{:.15g}j'.format(val.real, val.imag)
elif format in np.typecodes['Float']:
# output floating point
return '{:#21.15g}'.format(val)
for row in self.data:
line = [] # the line for this row of the table
# Process each column of the row.
for column in self.columns:
# format of data in a variable length array
# where None means it is not a VLA:
vla_format = None
format = _convert_format(column.format)
if isinstance(format, _FormatP):
# P format means this is a variable length array so output
# the length of the array for this row and set the format
# for the VLA data
line.append('VLA_Length=')
line.append('{:21d}'.format(len(row[column.name])))
_, dtype, option = _parse_tformat(column.format)
vla_format = FITS2NUMPY[option[0]][0]
if vla_format:
# Output the data for each element in the array
for val in row[column.name].flat:
line.append(format_value(val, vla_format))
else:
# The column data is a single element
dtype = self.data.dtype.fields[column.name][0]
array_format = dtype.char
if array_format == 'V':
array_format = dtype.base.char
if array_format == 'S':
array_format += str(dtype.itemsize)
if dtype.char == 'V':
for value in row[column.name].flat:
line.append(format_value(value, array_format))
else:
line.append(format_value(row[column.name],
array_format))
linewriter.writerow(line)
if close_file:
fileobj.close()
def _dump_coldefs(self, fileobj):
"""
Write the column definition parameters in the ASCII format read by
BinTableHDU.load() to fileobj.
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'w')
close_file = True
# Process each column of the table and output the result to the
# file one at a time
for column in self.columns:
line = [column.name, column.format]
attrs = ['disp', 'unit', 'dim', 'null', 'bscale', 'bzero']
line += ['{:16s}'.format(value if value else '""')
for value in (getattr(column, attr) for attr in attrs)]
fileobj.write(' '.join(line))
fileobj.write('\n')
if close_file:
fileobj.close()
@classmethod
def _load_data(cls, fileobj, coldefs=None):
"""
Read the table data from the ASCII file output by BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'r')
close_file = True
initialpos = fileobj.tell() # We'll be returning here later
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
# First we need to do some preprocessing on the file to find out how
# much memory we'll need to reserve for the table. This is necessary
# even if we already have the coldefs in order to determine how many
# rows to reserve memory for
vla_lengths = []
recformats = []
names = []
nrows = 0
if coldefs is not None:
recformats = coldefs._recformats
names = coldefs.names
def update_recformats(value, idx):
fitsformat = _scalar_to_format(value)
recformat = _convert_format(fitsformat)
if idx >= len(recformats):
recformats.append(recformat)
else:
if _cmp_recformats(recformats[idx], recformat) < 0:
recformats[idx] = recformat
# TODO: The handling of VLAs could probably be simplified a bit
for row in linereader:
nrows += 1
if coldefs is not None:
continue
col = 0
idx = 0
while idx < len(row):
if row[idx] == 'VLA_Length=':
if col < len(vla_lengths):
vla_length = vla_lengths[col]
else:
vla_length = int(row[idx + 1])
vla_lengths.append(vla_length)
idx += 2
while vla_length:
update_recformats(row[idx], col)
vla_length -= 1
idx += 1
col += 1
else:
if col >= len(vla_lengths):
vla_lengths.append(None)
update_recformats(row[idx], col)
col += 1
idx += 1
# Update the recformats for any VLAs
for idx, length in enumerate(vla_lengths):
if length is not None:
recformats[idx] = str(length) + recformats[idx]
dtype = np.rec.format_parser(recformats, names, None).dtype
# TODO: In the future maybe enable loading a bit at a time so that we
# can convert from this format to an actual FITS file on disk without
# needing enough physical memory to hold the entire thing at once
hdu = BinTableHDU.from_columns(np.recarray(shape=1, dtype=dtype),
nrows=nrows, fill=True)
# TODO: It seems to me a lot of this could/should be handled from
# within the FITS_rec class rather than here.
data = hdu.data
for idx, length in enumerate(vla_lengths):
if length is not None:
arr = data.columns._arrays[idx]
dt = recformats[idx][len(str(length)):]
# NOTE: FormatQ not supported here; it's hard to determine
# whether or not it will be necessary to use a wider descriptor
# type. The function documentation will have to serve as a
# warning that this is not supported.
recformats[idx] = _FormatP(dt, max=length)
data.columns._recformats[idx] = recformats[idx]
name = data.columns.names[idx]
data._cache_field(name, _makep(arr, arr, recformats[idx]))
def format_value(col, val):
# Special formatting for a couple particular data types
if recformats[col] == FITS2NUMPY['L']:
return bool(int(val))
elif recformats[col] == FITS2NUMPY['M']:
# For some reason, in arrays/fields where numpy expects a
# complex it's not happy to take a string representation
# (though it's happy to do that in other contexts), so we have
# to convert the string representation for it:
return complex(val)
else:
return val
# Jump back to the start of the data and create a new line reader
fileobj.seek(initialpos)
linereader = csv.reader(fileobj, dialect=FITSTableDumpDialect)
for row, line in enumerate(linereader):
col = 0
idx = 0
while idx < len(line):
if line[idx] == 'VLA_Length=':
vla_len = vla_lengths[col]
idx += 2
slice_ = slice(idx, idx + vla_len)
data[row][col][:] = line[idx:idx + vla_len]
idx += vla_len
elif dtype[col].shape:
# This is an array column
array_size = int(np.multiply.reduce(dtype[col].shape))
slice_ = slice(idx, idx + array_size)
idx += array_size
else:
slice_ = None
if slice_ is None:
# This is a scalar row element
data[row][col] = format_value(col, line[idx])
idx += 1
else:
data[row][col].flat[:] = [format_value(col, val)
for val in line[slice_]]
col += 1
if close_file:
fileobj.close()
return data
@classmethod
def _load_coldefs(cls, fileobj):
"""
Read the table column definitions from the ASCII file output by
BinTableHDU.dump().
"""
close_file = False
if isinstance(fileobj, str):
fileobj = open(fileobj, 'r')
close_file = True
columns = []
for line in fileobj:
words = line[:-1].split()
kwargs = {}
for key in ['name', 'format', 'disp', 'unit', 'dim']:
kwargs[key] = words.pop(0).replace('""', '')
for key in ['null', 'bscale', 'bzero']:
word = words.pop(0).replace('""', '')
if word:
word = _str_to_num(word)
kwargs[key] = word
columns.append(Column(**kwargs))
if close_file:
fileobj.close()
return ColDefs(columns)
@contextlib.contextmanager
def _binary_table_byte_swap(data):
"""
Ensures that all the data of a binary FITS table (represented as a FITS_rec
object) is in a big-endian byte order. Columns are swapped in-place one
at a time, and then returned to their previous byte order when this context
manager exits.
Because a new dtype is needed to represent the byte-swapped columns, the
new dtype is temporarily applied as well.
"""
orig_dtype = data.dtype
names = []
formats = []
offsets = []
to_swap = []
if sys.byteorder == 'little':
swap_types = ('<', '=')
else:
swap_types = ('<',)
for idx, name in enumerate(orig_dtype.names):
field = _get_recarray_field(data, idx)
field_dtype, field_offset = orig_dtype.fields[name]
names.append(name)
formats.append(field_dtype)
offsets.append(field_offset)
if isinstance(field, chararray.chararray):
continue
# only swap unswapped
# must use field_dtype.base here since for multi-element dtypes,
        # the .str will be '|V<N>' where <N> is the total bytes per element
if field.itemsize > 1 and field_dtype.base.str[0] in swap_types:
to_swap.append(field)
# Override the dtype for this field in the new record dtype with
# the byteswapped version
formats[-1] = field_dtype.newbyteorder()
# deal with var length table
recformat = data.columns._recformats[idx]
if isinstance(recformat, _FormatP):
coldata = data.field(idx)
for c in coldata:
if (not isinstance(c, chararray.chararray) and
c.itemsize > 1 and c.dtype.str[0] in swap_types):
to_swap.append(c)
for arr in reversed(to_swap):
arr.byteswap(True)
data.dtype = np.dtype({'names': names,
'formats': formats,
'offsets': offsets})
yield data
for arr in to_swap:
arr.byteswap(True)
data.dtype = orig_dtype
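# A usage sketch of the context manager above (`checksum_of` is a
# hypothetical stand-in for whatever consumes the swapped data):
#
#     with _binary_table_byte_swap(hdu.data) as data:
#         csum = checksum_of(data)  # numeric columns are big-endian here
#     # on exit the columns and dtype are swapped back to their original form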
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import bz2
import gzip
import itertools
import os
import shutil
import sys
import warnings
import numpy as np
from . import compressed
from .base import _BaseHDU, _ValidHDU, _NonstandardHDU, ExtensionHDU
from .groups import GroupsHDU
from .image import PrimaryHDU, ImageHDU
from ..file import _File
from ..header import _pad_length
from ..util import (_is_int, _tmp_name, fileobj_closed, ignore_sigint,
_get_array_mmap, _free_space_check)
from ..verify import _Verify, _ErrList, VerifyError, VerifyWarning
from ....utils import indent
from ....utils.exceptions import AstropyUserWarning
from ....utils.decorators import deprecated_renamed_argument
def fitsopen(name, mode='readonly', memmap=None, save_backup=False,
cache=True, lazy_load_hdus=None, **kwargs):
"""Factory function to open a FITS file and return an `HDUList` object.
Parameters
----------
name : file path, file object, file-like object or pathlib.Path object
File to be opened.
mode : str, optional
Open mode, 'readonly', 'update', 'append', 'denywrite', or
'ostream'. Default is 'readonly'.
If ``name`` is a file object that is already opened, ``mode`` must
match the mode the file was opened with, readonly (rb), update (rb+),
        append (ab+), ostream (w), or denywrite (rb).
memmap : bool, optional
Is memory mapping to be used? This value is obtained from the
configuration item ``astropy.io.fits.Conf.use_memmap``.
Default is `True`.
save_backup : bool, optional
If the file was opened in update or append mode, this ensures that
a backup of the original file is saved before any changes are flushed.
The backup has the same name as the original file with ".bak" appended.
If "file.bak" already exists then "file.bak.1" is used, and so on.
Default is `False`.
cache : bool, optional
If the file name is a URL, `~astropy.utils.data.download_file` is used
to open the file. This specifies whether or not to save the file
locally in Astropy's download cache. Default is `True`.
lazy_load_hdus : bool, optional
        When `True`, HDUs and their headers are only read from the file as
        they are accessed, rather than all at once upon opening. This is an
        optimization especially useful for large files, as FITS has no way of
        determining the number and offsets of all
the HDUs in a file without scanning through the file and reading all
the headers. Default is `True`.
To disable lazy loading and read all HDUs immediately (the old
behavior) use ``lazy_load_hdus=False``. This can lead to fewer
surprises--for example with lazy loading enabled, ``len(hdul)``
can be slow, as it means the entire FITS file needs to be read in
order to determine the number of HDUs. ``lazy_load_hdus=False``
ensures that all HDUs have already been loaded after the file has
been opened.
.. versionadded:: 1.3
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the central value and
``BSCALE == 1`` as unsigned integer data. For example, ``int16`` data
with ``BZERO = 32768`` and ``BSCALE = 1`` would be treated as
``uint16`` data. Default is `True` so that the pseudo-unsigned
integer convention is assumed.
ignore_missing_end : bool, optional
Do not issue an exception when opening a file that is missing an
``END`` card in the last header. Default is `False`.
checksum : bool, str, optional
If `True`, verifies that both ``DATASUM`` and ``CHECKSUM`` card values
(when present in the HDU header) match the header and data of all HDU's
in the file. Updates to a file that already has a checksum will
preserve and update the existing checksums unless this argument is
given a value of 'remove', in which case the CHECKSUM and DATASUM
values are not checked, and are removed when saving changes to the
file. Default is `False`.
disable_image_compression : bool, optional
If `True`, treats compressed image HDU's like normal binary table
HDU's. Default is `False`.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. Default is `False`.
character_as_bytes : bool, optional
Whether to return bytes for string columns, otherwise unicode strings
are returned, but this does not respect memory mapping and loads the
whole column in memory when accessed. Default is `False`.
ignore_blank : bool, optional
If `True`, the BLANK keyword is ignored if present.
Default is `False`.
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled image
data, restore the data to the original type and reapply the original
BSCALE/BZERO values. This could lead to loss of accuracy if scaling
back to integer values after performing floating point operations on
the data. Default is `False`.
Returns
-------
hdulist : an `HDUList` object
`HDUList` containing all of the header data units in the file.
"""
from .. import conf
if memmap is None:
# distinguish between True (kwarg explicitly set)
# and None (preference for memmap in config, might be ignored)
memmap = None if conf.use_memmap else False
else:
memmap = bool(memmap)
if lazy_load_hdus is None:
lazy_load_hdus = conf.lazy_load_hdus
else:
lazy_load_hdus = bool(lazy_load_hdus)
if 'uint' not in kwargs:
kwargs['uint'] = conf.enable_uint
if not name:
raise ValueError('Empty filename: {!r}'.format(name))
return HDUList.fromfile(name, mode, memmap, save_backup, cache,
lazy_load_hdus, **kwargs)
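# A minimal usage sketch (the file name is hypothetical); this factory is
# what astropy.io.fits exposes publicly as fits.open():
#
#     from astropy.io import fits
#     with fits.open('example.fits', memmap=True) as hdul:
#         hdul.info()
#         data = hdul[1].data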
class HDUList(list, _Verify):
"""
HDU list class. This is the top-level FITS object. When a FITS
file is opened, a `HDUList` object is returned.
"""
def __init__(self, hdus=[], file=None):
"""
Construct a `HDUList` object.
Parameters
----------
hdus : sequence of HDU objects or single HDU, optional
The HDU object(s) to comprise the `HDUList`. Should be
instances of HDU classes like `ImageHDU` or `BinTableHDU`.
file : file object, bytes, optional
The opened physical file associated with the `HDUList`
or a bytes object containing the contents of the FITS
file.
"""
if isinstance(file, bytes):
self._data = file
self._file = None
else:
self._file = file
self._data = None
self._save_backup = False
# For internal use only--the keyword args passed to fitsopen /
# HDUList.fromfile/string when opening the file
self._open_kwargs = {}
self._in_read_next_hdu = False
        # Whether or not we have read all the HDUs from the file
        # This assumes that all HDUs have been written when we first opened the
# file; we do not currently support loading additional HDUs from a file
# while it is being streamed to. In the future that might be supported
# but for now this is only used for the purpose of lazy-loading of
# existing HDUs.
if file is None:
self._read_all = True
elif self._file is not None:
# Should never attempt to read HDUs in ostream mode
self._read_all = self._file.mode == 'ostream'
else:
self._read_all = False
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDU's as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise TypeError("Invalid input for HDUList.")
for idx, hdu in enumerate(hdus):
if not isinstance(hdu, _BaseHDU):
raise TypeError("Element {} in the HDUList input is "
"not an HDU.".format(idx))
super().__init__(hdus)
if file is None:
# Only do this when initializing from an existing list of HDUs
            # When initializing from a file, this will be handled by the
# append method after the first HDU is read
self.update_extend()
def __len__(self):
if not self._in_read_next_hdu:
self.readall()
return super().__len__()
def __repr__(self):
# In order to correctly repr an HDUList we need to load all the
# HDUs as well
self.readall()
return super().__repr__()
def __iter__(self):
# While effectively this does the same as:
# for idx in range(len(self)):
# yield self[idx]
# the more complicated structure is here to prevent the use of len(),
# which would break the lazy loading
for idx in itertools.count():
try:
yield self[idx]
except IndexError:
break
def __getitem__(self, key):
"""
Get an HDU from the `HDUList`, indexed by number or name.
"""
# If the key is a slice we need to make sure the necessary HDUs
# have been loaded before passing the slice on to super.
if isinstance(key, slice):
max_idx = key.stop
# Check for and handle the case when no maximum was
# specified (e.g. [1:]).
if max_idx is None:
# We need all of the HDUs, so load them
# and reset the maximum to the actual length.
max_idx = len(self)
# Just in case the max_idx is negative...
max_idx = self._positive_index_of(max_idx)
number_loaded = super().__len__()
if max_idx >= number_loaded:
# We need more than we have, try loading up to and including
# max_idx. Note we do not try to be clever about skipping HDUs
# even though key.step might conceivably allow it.
for i in range(number_loaded, max_idx):
# Read until max_idx or to the end of the file, whichever
# comes first.
if not self._read_next_hdu():
break
try:
hdus = super().__getitem__(key)
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
else:
return HDUList(hdus)
        # Originally this used recursion, but hypothetically a file with
# a very large number of HDUs could blow the stack, so use a loop
# instead
try:
return self._try_while_unread_hdus(super().__getitem__,
self._positive_index_of(key))
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
def __contains__(self, item):
"""
Returns `True` if ``HDUList.index_of(item)`` succeeds.
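        For example (an illustrative sketch; assumes ``hdul`` contains an
        extension with ``EXTNAME = 'SCI'``)::
            >>> ('SCI', 1) in hdul  # doctest: +SKIP
            True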
"""
try:
self._try_while_unread_hdus(self.index_of, item)
except KeyError:
return False
return True
def __setitem__(self, key, hdu):
"""
Set an HDU to the `HDUList`, indexed by number or name.
"""
_key = self._positive_index_of(key)
if isinstance(hdu, (slice, list)):
if _is_int(_key):
raise ValueError('An element in the HDUList must be an HDU.')
for item in hdu:
if not isinstance(item, _BaseHDU):
raise ValueError('{} is not an HDU.'.format(item))
else:
if not isinstance(hdu, _BaseHDU):
raise ValueError('{} is not an HDU.'.format(hdu))
try:
self._try_while_unread_hdus(super().__setitem__, _key, hdu)
except IndexError:
            raise IndexError('Extension {} is out of bounds or not found.'
                             .format(key))
self._resize = True
self._truncate = False
def __delitem__(self, key):
"""
Delete an HDU from the `HDUList`, indexed by number or name.
"""
if isinstance(key, slice):
end_index = len(self)
else:
key = self._positive_index_of(key)
end_index = len(self) - 1
self._try_while_unread_hdus(super().__delitem__, key)
        if key == end_index or (key == -1 and not self._resize):
self._truncate = True
else:
self._truncate = False
self._resize = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
@classmethod
def fromfile(cls, fileobj, mode=None, memmap=None,
save_backup=False, cache=True, lazy_load_hdus=True,
**kwargs):
"""
Creates an `HDUList` instance from a file-like object.
        This is the actual implementation of ``fitsopen()`` and generally
        shouldn't be used directly. Use :func:`open` instead (and see its
documentation for details of the parameters accepted by this method).
"""
return cls._readfrom(fileobj=fileobj, mode=mode, memmap=memmap,
save_backup=save_backup, cache=cache,
lazy_load_hdus=lazy_load_hdus, **kwargs)
@classmethod
def fromstring(cls, data, **kwargs):
"""
Creates an `HDUList` instance from a string or other in-memory data
buffer containing an entire FITS file. Similar to
:meth:`HDUList.fromfile`, but does not accept the mode or memmap
arguments, as they are only relevant to reading from a file on disk.
This is useful for interfacing with other libraries such as CFITSIO,
and may also be useful for streaming applications.
Parameters
----------
data : str, buffer, memoryview, etc.
A string or other memory buffer containing an entire FITS file. It
should be noted that if that memory is read-only (such as a Python
string) the returned :class:`HDUList`'s data portions will also be
read-only.
kwargs : dict
Optional keyword arguments. See
:func:`astropy.io.fits.open` for details.
Returns
-------
hdul : HDUList
An :class:`HDUList` object representing the in-memory FITS file.
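        Examples
        --------
        A minimal sketch (``example.fits`` is a hypothetical file; the bytes
        of any complete FITS file would do)::
            >>> from astropy.io import fits  # doctest: +SKIP
            >>> with open('example.fits', 'rb') as f:  # doctest: +SKIP
            ...     hdul = fits.HDUList.fromstring(f.read())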
"""
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype='ubyte', buffer=data)
except TypeError:
raise TypeError(
'The provided object {} does not contain an underlying '
'memory buffer. fromstring() requires an object that '
'supports the buffer interface such as bytes, buffer, '
'memoryview, ndarray, etc. This restriction is to ensure '
'that efficient access to the array/table data is possible.'
''.format(data))
return cls._readfrom(data=data, **kwargs)
def fileinfo(self, index):
"""
Returns a dictionary detailing information about the locations
of the indexed HDU within any associated file. The values are
only valid after a read or write of the associated file with
no intervening changes to the `HDUList`.
Parameters
----------
index : int
Index of HDU for which info is to be returned.
Returns
-------
fileinfo : dict or None
The dictionary details information about the locations of
the indexed HDU within an associated file. Returns `None`
when the HDU is not associated with a file.
Dictionary contents:
========== ========================================================
Key Value
========== ========================================================
file File object associated with the HDU
filename Name of associated file object
filemode Mode in which the file was opened (readonly,
update, append, denywrite, ostream)
resized Flag that when `True` indicates that the data has been
resized since the last read/write so the returned values
may not be valid.
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ========================================================
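        An illustrative sketch (the byte offsets shown are hypothetical;
        assumes ``hdul`` was opened from a file)::
            >>> info = hdul.fileinfo(0)  # doctest: +SKIP
            >>> (info['hdrLoc'], info['datLoc'])  # doctest: +SKIP
            (0, 2880)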
"""
if self._file is not None:
output = self[index].fileinfo()
if not output:
# OK, the HDU associated with this index is not yet
# tied to the file associated with the HDUList. The only way
# to get the file object is to check each of the HDU's in the
# list until we find the one associated with the file.
f = None
for hdu in self:
info = hdu.fileinfo()
if info:
f = info['file']
fm = info['filemode']
break
output = {'file': f, 'filemode': fm, 'hdrLoc': None,
'datLoc': None, 'datSpan': None}
output['filename'] = self._file.name
output['resized'] = self._wasresized()
else:
output = None
return output
def __copy__(self):
"""
Return a shallow copy of an HDUList.
Returns
-------
copy : `HDUList`
A shallow copy of this `HDUList` object.
"""
return self[:]
# Syntactic sugar for `__copy__()` magic method
copy = __copy__
def __deepcopy__(self, memo=None):
return HDUList([hdu.copy() for hdu in self])
def pop(self, index=-1):
""" Remove an item from the list and return it.
Parameters
----------
index : int, str, tuple of (string, int), optional
An integer value of ``index`` indicates the position from which
``pop()`` removes and returns an HDU. A string value or a tuple
of ``(string, int)`` functions as a key for identifying the
HDU to be removed and returned. If ``key`` is a tuple, it is
of the form ``(key, ver)`` where ``ver`` is an ``EXTVER``
value that must match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous the numeric index
must be used to index the duplicate HDU.
Returns
-------
hdu : HDU object
The HDU object at position indicated by ``index`` or having name
and version specified by ``index``.
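        An illustrative sketch (assumes ``hdul`` contains an extension with
        ``EXTNAME = 'SCI'`` and ``EXTVER = 2``)::
            >>> hdu = hdul.pop(('SCI', 2))  # doctest: +SKIP
            >>> hdu = hdul.pop()            # remove the last HDU  # doctest: +SKIP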
"""
# Make sure that HDUs are loaded before attempting to pop
self.readall()
list_index = self.index_of(index)
return super(HDUList, self).pop(list_index)
def insert(self, index, hdu):
"""
Insert an HDU into the `HDUList` at the given ``index``.
Parameters
----------
index : int
Index before which to insert the new HDU.
hdu : HDU object
The HDU object to insert
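        An illustrative sketch (``hdu`` is any HDU instance; inserting at
        index 0 makes it the new primary HDU, converting types as needed)::
            >>> hdul.insert(0, hdu)  # doctest: +SKIP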
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError('{} is not an HDU.'.format(hdu))
num_hdus = len(self)
if index == 0 or num_hdus == 0:
if num_hdus != 0:
# We are inserting a new Primary HDU so we need to
# make the current Primary HDU into an extension HDU.
if isinstance(self[0], GroupsHDU):
raise ValueError(
"The current Primary HDU is a GroupsHDU. "
"It can't be made into an extension HDU, "
"so another HDU cannot be inserted before it.")
hdu1 = ImageHDU(self[0].data, self[0].header)
# Insert it into position 1, then delete HDU at position 0.
super().insert(1, hdu1)
super().__delitem__(0)
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().insert(0, phdu)
index = 1
else:
if isinstance(hdu, GroupsHDU):
raise ValueError('A GroupsHDU must be inserted as a '
'Primary HDU.')
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
hdu = ImageHDU(hdu.data, hdu.header)
super().insert(index, hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def append(self, hdu):
"""
Append a new HDU to the `HDUList`.
Parameters
----------
hdu : HDU object
HDU to add to the `HDUList`.
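        An illustrative sketch; note that an `ImageHDU` appended to an empty
        `HDUList` is converted into the primary HDU::
            >>> import numpy as np  # doctest: +SKIP
            >>> hdul = fits.HDUList()  # doctest: +SKIP
            >>> hdul.append(fits.ImageHDU(np.arange(10)))  # doctest: +SKIP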
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError('HDUList can only append an HDU.')
if len(self) > 0:
if isinstance(hdu, GroupsHDU):
raise ValueError(
"Can't append a GroupsHDU to a non-empty HDUList")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
# TODO: This isn't necessarily sufficient to copy the HDU;
# _header_offset and friends need to be copied too.
hdu = ImageHDU(hdu.data, hdu.header)
else:
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary
# HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().append(phdu)
super().append(hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def index_of(self, key):
"""
Get the index of an HDU from the `HDUList`.
Parameters
----------
key : int, str or tuple of (string, int)
The key identifying the HDU. If ``key`` is a tuple, it is of the
form ``(key, ver)`` where ``ver`` is an ``EXTVER`` value that must
match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous (it shouldn't be
but it's not impossible) the numeric index must be used to index
the duplicate HDU.
Returns
-------
index : int
The index of the HDU in the `HDUList`.
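        For example (an illustrative sketch; the returned indices depend on
        the file's layout)::
            >>> hdul.index_of('SCI')       # first HDU named 'SCI'  # doctest: +SKIP
            1
            >>> hdul.index_of(('SCI', 2))  # match both EXTNAME and EXTVER  # doctest: +SKIP
            2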
"""
if _is_int(key):
return key
elif isinstance(key, tuple):
_key, _ver = key
else:
_key = key
_ver = None
if not isinstance(_key, str):
raise KeyError(
'{} indices must be integers, extension names as strings, '
'or (extname, version) tuples; got {}'
''.format(self.__class__.__name__, _key))
_key = (_key.strip()).upper()
found = None
for idx, hdu in enumerate(self):
name = hdu.name
if isinstance(name, str):
name = name.strip().upper()
# 'PRIMARY' should always work as a reference to the first HDU
if ((name == _key or (_key == 'PRIMARY' and idx == 0)) and
(_ver is None or _ver == hdu.ver)):
found = idx
break
if (found is None):
raise KeyError('Extension {!r} not found.'.format(key))
else:
return found
def _positive_index_of(self, key):
"""
Same as index_of, but ensures always returning a positive index
or zero.
(Really this should be called non_negative_index_of but it felt
too long.)
This means that if the key is a negative integer, we have to
convert it to the corresponding positive index. This means
knowing the length of the HDUList, which in turn means loading
all HDUs. Therefore using negative indices on HDULists is inherently
inefficient.
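        For example, in a three-HDU list an index of ``-1`` resolves to
        ``2``::
            >>> hdul._positive_index_of(-1)  # doctest: +SKIP
            2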
"""
index = self.index_of(key)
if index >= 0:
return index
if abs(index) > len(self):
raise IndexError(
                'Extension {} is out of bounds or not found.'.format(index))
return len(self) + index
def readall(self):
"""
Read data of all HDUs into memory.
"""
while self._read_next_hdu():
pass
@ignore_sigint
def flush(self, output_verify='fix', verbose=False):
"""
Force a write of the `HDUList` back to the file (for append and
update modes only).
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
verbose : bool
When `True`, print verbose messages
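        A typical use, as an illustrative sketch (``example.fits`` is a
        hypothetical file)::
            >>> with fits.open('example.fits', mode='update') as hdul:  # doctest: +SKIP
            ...     hdul[0].header['OBSERVER'] = 'Edwin Hubble'
            ...     hdul.flush()  # write the change back to example.fits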
"""
if self._file.mode not in ('append', 'update', 'ostream'):
warnings.warn("Flush for '{}' mode is not supported."
.format(self._file.mode), AstropyUserWarning)
return
if self._save_backup and self._file.mode in ('append', 'update'):
filename = self._file.name
if os.path.exists(filename):
                # If the file doesn't actually exist anymore for some reason
# then there's no point in trying to make a backup
backup = filename + '.bak'
idx = 1
while os.path.exists(backup):
backup = filename + '.bak.' + str(idx)
idx += 1
warnings.warn('Saving a backup of {} to {}.'.format(
filename, backup), AstropyUserWarning)
try:
shutil.copy(filename, backup)
except OSError as exc:
                    raise OSError('Failed to save backup to destination {}: '
                                  '{}'.format(backup, exc))
self.verify(option=output_verify)
if self._file.mode in ('append', 'ostream'):
for hdu in self:
if verbose:
try:
extver = str(hdu._header['extver'])
except KeyError:
extver = ''
                # only append HDUs which are "new"
if hdu._new:
hdu._prewriteto(checksum=hdu._output_checksum)
with _free_space_check(self):
hdu._writeto(self._file)
if verbose:
print('append HDU', hdu.name, extver)
hdu._new = False
hdu._postwriteto()
elif self._file.mode == 'update':
self._flush_update()
def update_extend(self):
"""
        Make sure that if the primary header needs the ``EXTEND`` keyword,
        it is present and its value is correct.
"""
if not len(self):
return
if not isinstance(self[0], PrimaryHDU):
# A PrimaryHDU will be automatically inserted at some point, but it
# might not have been added yet
return
hdr = self[0].header
def get_first_ext():
try:
return self[1]
except IndexError:
return None
if 'EXTEND' in hdr:
if not hdr['EXTEND'] and get_first_ext() is not None:
hdr['EXTEND'] = True
elif get_first_ext() is not None:
if hdr['NAXIS'] == 0:
hdr.set('EXTEND', True, after='NAXIS')
else:
n = hdr['NAXIS']
hdr.set('EXTEND', True, after='NAXIS' + str(n))
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(self, fileobj, output_verify='exception', overwrite=False,
checksum=False):
"""
Write the `HDUList` to a new file.
Parameters
----------
fileobj : file path, file object or file-like object
File to write to. If a file object, must be opened in a
writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
            to the headers of all HDUs written to the file.
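        An illustrative sketch (``output.fits`` is a hypothetical path)::
            >>> hdul.writeto('output.fits', checksum=True, overwrite=True)  # doctest: +SKIP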
"""
if (len(self) == 0):
warnings.warn("There is nothing to write.", AstropyUserWarning)
return
self.verify(option=output_verify)
# make sure the EXTEND keyword is there if there is extension
self.update_extend()
# make note of whether the input file object is already open, in which
# case we should not close it after writing (that should be the job
# of the caller)
closed = isinstance(fileobj, str) or fileobj_closed(fileobj)
# writeto is only for writing a new file from scratch, so the most
# sensible mode to require is 'ostream'. This can accept an open
# file object that's open to write only, or in append/update modes
# but only if the file doesn't exist.
fileobj = _File(fileobj, mode='ostream', overwrite=overwrite)
hdulist = self.fromfile(fileobj)
try:
dirname = os.path.dirname(hdulist._file.name)
except AttributeError:
dirname = None
with _free_space_check(self, dirname=dirname):
for hdu in self:
hdu._prewriteto(checksum=checksum)
hdu._writeto(hdulist._file)
hdu._postwriteto()
hdulist.close(output_verify=output_verify, closed=closed)
def close(self, output_verify='exception', verbose=False, closed=True):
"""
Close the associated FITS file and memmap object, if any.
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
verbose : bool
When `True`, print out verbose messages.
closed : bool
When `True`, close the underlying file object.
"""
try:
if (self._file and self._file.mode in ('append', 'update')
and not self._file.closed):
self.flush(output_verify=output_verify, verbose=verbose)
finally:
if self._file and closed and hasattr(self._file, 'close'):
self._file.close()
# Give individual HDUs an opportunity to do on-close cleanup
for hdu in self:
hdu._close(closed=closed)
def info(self, output=None):
"""
Summarize the info of the HDUs in this `HDUList`.
        Note that by default this function prints its results to the console
        and does not return a value; pass ``output=False`` to get the summary
        back as a list of tuples instead.
Parameters
----------
output : file, bool, optional
A file-like object to write the output to. If `False`, does not
output to a file and instead returns a list of tuples representing
the HDU info. Writes to ``sys.stdout`` by default.
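        An illustrative sketch (the summary shown is hypothetical and
        abbreviated)::
            >>> hdul.info()  # doctest: +SKIP
            Filename: example.fits
            No.    Name      Ver    Type      Cards   Dimensions   Format
              0  PRIMARY       1 PrimaryHDU       5   (100,)   int32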
"""
if output is None:
output = sys.stdout
if self._file is None:
name = '(No file associated with this HDUList)'
else:
name = self._file.name
results = ['Filename: {}'.format(name),
'No. Name Ver Type Cards Dimensions Format']
format = '{:3d} {:10} {:3} {:11} {:5d} {} {} {}'
default = ('', '', '', 0, (), '', '')
for idx, hdu in enumerate(self):
summary = hdu._summary()
if len(summary) < len(default):
summary += default[len(summary):]
summary = (idx,) + summary
if output:
results.append(format.format(*summary))
else:
results.append(summary)
if output:
output.write('\n'.join(results))
output.write('\n')
output.flush()
else:
return results[2:]
def filename(self):
"""
Return the file name associated with the HDUList object if one exists.
Otherwise returns None.
Returns
-------
        filename : str or None
            The file name associated with the HDUList object if an
            association exists, otherwise `None`.
"""
if self._file is not None:
if hasattr(self._file, 'name'):
return self._file.name
return None
@classmethod
def _readfrom(cls, fileobj=None, data=None, mode=None,
memmap=None, save_backup=False, cache=True,
lazy_load_hdus=True, **kwargs):
"""
Provides the implementations from HDUList.fromfile and
HDUList.fromstring, both of which wrap this method, as their
implementations are largely the same.
"""
if fileobj is not None:
if not isinstance(fileobj, _File):
# instantiate a FITS file object (ffo)
fileobj = _File(fileobj, mode=mode, memmap=memmap, cache=cache)
# The Astropy mode is determined by the _File initializer if the
# supplied mode was None
mode = fileobj.mode
hdulist = cls(file=fileobj)
else:
if mode is None:
# The default mode
mode = 'readonly'
hdulist = cls(file=data)
# This method is currently only called from HDUList.fromstring and
# HDUList.fromfile. If fileobj is None then this must be the
# fromstring case; the data type of ``data`` will be checked in the
# _BaseHDU.fromstring call.
hdulist._save_backup = save_backup
hdulist._open_kwargs = kwargs
if fileobj is not None and fileobj.writeonly:
# Output stream--not interested in reading/parsing
# the HDUs--just writing to the output file
return hdulist
# Make sure at least the PRIMARY HDU can be read
read_one = hdulist._read_next_hdu()
# If we're trying to read only and no header units were found,
# raise an exception
if not read_one and mode in ('readonly', 'denywrite'):
# Close the file if necessary (issue #6168)
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError('Empty or corrupt FITS file')
if not lazy_load_hdus:
# Go ahead and load all HDUs
while hdulist._read_next_hdu():
pass
# initialize/reset attributes to be used in "update/append" mode
hdulist._resize = False
hdulist._truncate = False
return hdulist
def _try_while_unread_hdus(self, func, *args, **kwargs):
"""
Attempt an operation that accesses an HDU by index/name
that can fail if not all HDUs have been read yet. Keep
reading HDUs until the operation succeeds or there are no
more HDUs to read.
"""
while True:
try:
return func(*args, **kwargs)
except Exception:
if self._read_next_hdu():
continue
else:
raise
def _read_next_hdu(self):
"""
Lazily load a single HDU from the fileobj or data string the `HDUList`
was opened from, unless no further HDUs are found.
Returns True if a new HDU was loaded, or False otherwise.
"""
if self._read_all:
return False
saved_compression_enabled = compressed.COMPRESSION_ENABLED
fileobj, data, kwargs = self._file, self._data, self._open_kwargs
if fileobj is not None and fileobj.closed:
return False
try:
self._in_read_next_hdu = True
if ('disable_image_compression' in kwargs and
kwargs['disable_image_compression']):
compressed.COMPRESSION_ENABLED = False
            # try to read the next HDU
try:
if fileobj is not None:
try:
# Make sure we're back to the end of the last read
# HDU
if len(self) > 0:
last = self[len(self) - 1]
if last._data_offset is not None:
offset = last._data_offset + last._data_size
fileobj.seek(offset, os.SEEK_SET)
hdu = _BaseHDU.readfrom(fileobj, **kwargs)
except EOFError:
self._read_all = True
return False
except OSError:
# Close the file: see
# https://github.com/astropy/astropy/issues/6168
#
if self._file.close_on_error:
self._file.close()
if fileobj.writeonly:
self._read_all = True
return False
else:
raise
else:
if not data:
self._read_all = True
return False
hdu = _BaseHDU.fromstring(data, **kwargs)
self._data = data[hdu._data_offset + hdu._data_size:]
super().append(hdu)
if len(self) == 1:
# Check for an extension HDU and update the EXTEND
# keyword of the primary HDU accordingly
self.update_extend()
hdu._new = False
if 'checksum' in kwargs:
hdu._output_checksum = kwargs['checksum']
# check in the case there is extra space after the last HDU or
# corrupted HDU
except (VerifyError, ValueError) as exc:
warnings.warn(
'Error validating header for HDU #{} (note: Astropy '
'uses zero-based indexing).\n{}\n'
'There may be extra bytes after the last HDU or the '
'file is corrupted.'.format(
len(self), indent(str(exc))), VerifyWarning)
del exc
self._read_all = True
return False
finally:
compressed.COMPRESSION_ENABLED = saved_compression_enabled
self._in_read_next_hdu = False
return True
def _verify(self, option='warn'):
errs = _ErrList([], unit='HDU')
# the first (0th) element must be a primary HDU
if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)) and \
(not isinstance(self[0], _NonstandardHDU)):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = 'Fixed by inserting one as 0th HDU.'
def fix(self=self):
self.insert(0, PrimaryHDU())
err = self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix)
errs.append(err)
if len(self) > 1 and ('EXTEND' not in self[0].header or
self[0].header['EXTEND'] is not True):
err_text = ('Primary HDU does not contain an EXTEND keyword '
'equal to T even though there are extension HDUs.')
fix_text = 'Fixed by inserting or updating the EXTEND keyword.'
def fix(header=self[0].header):
naxis = header['NAXIS']
if naxis == 0:
after = 'NAXIS'
else:
after = 'NAXIS' + str(naxis)
header.set('EXTEND', value=True, after=after)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# each element calls their own verify
for idx, hdu in enumerate(self):
if idx > 0 and (not isinstance(hdu, ExtensionHDU)):
err_text = ("HDUList's element {} is not an "
"extension HDU.".format(str(idx)))
err = self.run_option(option, err_text=err_text, fixable=False)
errs.append(err)
else:
result = hdu._verify(option)
if result:
errs.append(result)
return errs
def _flush_update(self):
"""Implements flushing changes to a file in update mode."""
for hdu in self:
            # Need to call _prewriteto() for each HDU first to determine if
# resizing will be necessary
hdu._prewriteto(checksum=hdu._output_checksum, inplace=True)
try:
self._wasresized()
# if the HDUList is resized, need to write out the entire contents of
# the hdulist to the file.
if self._resize or self._file.compression:
self._flush_resize()
else:
# if not resized, update in place
for hdu in self:
hdu._writeto(self._file, inplace=True)
# reset the modification attributes after updating
for hdu in self:
hdu._header._modified = False
finally:
for hdu in self:
hdu._postwriteto()
def _flush_resize(self):
"""
Implements flushing changes in update mode when parts of one or more HDU
need to be resized.
"""
old_name = self._file.name
old_memmap = self._file.memmap
name = _tmp_name(old_name)
if not self._file.file_like:
old_mode = os.stat(old_name).st_mode
# The underlying file is an actual file object. The HDUList is
# resized, so we need to write it to a tmp file, delete the
# original file, and rename the tmp file to the original file.
if self._file.compression == 'gzip':
new_file = gzip.GzipFile(name, mode='ab+')
elif self._file.compression == 'bzip2':
new_file = bz2.BZ2File(name, mode='w')
else:
new_file = name
with self.fromfile(new_file, mode='append') as hdulist:
for hdu in self:
hdu._writeto(hdulist._file, inplace=True, copy=True)
if sys.platform.startswith('win'):
                    # Collect a list of open mmaps to the data; this will be
# used later. See below.
mmaps = [(idx, _get_array_mmap(hdu.data), hdu.data)
for idx, hdu in enumerate(self) if hdu._has_data]
hdulist._file.close()
self._file.close()
if sys.platform.startswith('win'):
# Close all open mmaps to the data. This is only necessary on
# Windows, which will not allow a file to be renamed or deleted
# until all handles to that file have been closed.
for idx, mmap, arr in mmaps:
if mmap is not None:
mmap.close()
os.remove(self._file.name)
# reopen the renamed new file with "update" mode
os.rename(name, old_name)
os.chmod(old_name, old_mode)
if isinstance(new_file, gzip.GzipFile):
old_file = gzip.GzipFile(old_name, mode='rb+')
else:
old_file = old_name
ffo = _File(old_file, mode='update', memmap=old_memmap)
self._file = ffo
for hdu in self:
# Need to update the _file attribute and close any open mmaps
# on each HDU
if hdu._has_data and _get_array_mmap(hdu.data) is not None:
del hdu.data
hdu._file = ffo
if sys.platform.startswith('win'):
# On Windows, all the original data mmaps were closed above.
# However, it's possible that the user still has references to
# the old data which would no longer work (possibly even cause
# a segfault if they try to access it). This replaces the
# buffers used by the original arrays with the buffers of mmap
# arrays created from the new file. This seems to work, but
# it's a flaming hack and carries no guarantees that it won't
# lead to odd behavior in practice. Better to just not keep
# references to data from files that had to be resized upon
# flushing (on Windows--again, this is no problem on Linux).
for idx, mmap, arr in mmaps:
if mmap is not None:
arr.data = self[idx].data.data
del mmaps # Just to be sure
else:
            # The underlying file is not a file object, it is a file-like
            # object. We can't write out to a new file; we must update the
            # file-like object in place. To do this, we write out to a
            # temporary file, then delete the contents of our file-like
            # object, then write the contents of the temporary file to the
            # now-empty file-like object.
self.writeto(name)
hdulist = self.fromfile(name)
ffo = self._file
ffo.truncate(0)
ffo.seek(0)
for hdu in hdulist:
hdu._writeto(ffo, inplace=True, copy=True)
# Close the temporary file and delete it.
hdulist.close()
os.remove(hdulist._file.name)
# reset the resize attributes after updating
self._resize = False
self._truncate = False
for hdu in self:
hdu._header._modified = False
hdu._new = False
hdu._file = ffo
def _wasresized(self, verbose=False):
"""
Determine if any changes to the HDUList will require a file resize
when flushing the file.
Side effect of setting the objects _resize attribute.
"""
if not self._resize:
            # determine if any of the HDUs have been resized
for hdu in self:
# Header:
nbytes = len(str(hdu._header))
if nbytes != (hdu._data_offset - hdu._header_offset):
self._resize = True
self._truncate = False
if verbose:
                        print('One or more headers have been resized.')
break
# Data:
if not hdu._has_data:
continue
nbytes = hdu.size
nbytes = nbytes + _pad_length(nbytes)
if nbytes != hdu._data_size:
self._resize = True
self._truncate = False
if verbose:
                        print('One or more data areas have been resized.')
break
if self._truncate:
try:
self._file.truncate(hdu._data_offset + hdu._data_size)
except OSError:
self._resize = True
self._truncate = False
return self._resize
|
462ab7c685e62a2f9602ef9381f2859f1bb5591b633b72104f09851a4ee0d30e | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import glob
import io
import os
import platform
import sys
import copy
import pytest
import numpy as np
from ..verify import VerifyError
from ....io import fits
from ....tests.helper import raises, catch_warnings, ignore_warnings
from ....utils.exceptions import AstropyUserWarning, AstropyDeprecationWarning
from . import FitsTestCase
class TestHDUListFunctions(FitsTestCase):
def test_update_name(self):
hdul = fits.open(self.data('o4sp040b0_raw.fits'))
hdul[4].name = 'Jim'
hdul[4].ver = 9
assert hdul[('JIM', 9)].header['extname'] == 'JIM'
def test_hdu_file_bytes(self):
hdul = fits.open(self.data('checksum.fits'))
res = hdul[0].filebytes()
assert res == 11520
res = hdul[1].filebytes()
assert res == 8640
def test_hdulist_file_info(self):
hdul = fits.open(self.data('checksum.fits'))
res = hdul.fileinfo(0)
def test_fileinfo(**kwargs):
assert res['datSpan'] == kwargs.get('datSpan', 2880)
assert res['resized'] == kwargs.get('resized', False)
assert res['filename'] == self.data('checksum.fits')
assert res['datLoc'] == kwargs.get('datLoc', 8640)
assert res['hdrLoc'] == kwargs.get('hdrLoc', 0)
assert res['filemode'] == 'readonly'
res = hdul.fileinfo(1)
test_fileinfo(datLoc=17280, hdrLoc=11520)
hdu = fits.ImageHDU(data=hdul[0].data)
hdul.insert(1, hdu)
res = hdul.fileinfo(0)
test_fileinfo(resized=True)
res = hdul.fileinfo(1)
test_fileinfo(datSpan=None, resized=True, datLoc=None, hdrLoc=None)
res = hdul.fileinfo(2)
test_fileinfo(resized=1, datLoc=17280, hdrLoc=11520)
def test_create_from_multiple_primary(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/145
Ensure that a validation error occurs when saving an HDUList containing
multiple PrimaryHDUs.
"""
hdul = fits.HDUList([fits.PrimaryHDU(), fits.PrimaryHDU()])
pytest.raises(VerifyError, hdul.writeto, self.temp('temp.fits'),
output_verify='exception')
def test_append_primary_to_empty_list(self):
# Tests appending a Simple PrimaryHDU to an empty HDUList.
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_empty_list(self):
"""Tests appending a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_table_extension_to_empty_list(self):
"""Tests appending a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
hdul1 = fits.open(self.data('tb.fits'))
hdul.append(hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_groupshdu_to_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_primary_to_non_empty_list(self):
"""Tests appending a Simple PrimaryHDU to a non-empty HDUList."""
hdul = fits.open(self.data('arange.fits'))
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
def test_append_extension_to_non_empty_list(self):
"""Tests appending a Simple ExtensionHDU to a non-empty HDUList."""
hdul = fits.open(self.data('tb.fits'))
hdul.append(hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-append.fits'))
assert fits.info(self.temp('test-append.fits'), output=False) == info
@raises(ValueError)
def test_append_groupshdu_to_non_empty_list(self):
"""Tests appending a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.append(hdu)
hdu = fits.GroupsHDU()
hdul.append(hdu)
def test_insert_primary_to_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_empty_list(self):
"""Tests inserting a Simple ImageHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_table_extension_to_empty_list(self):
"""Tests inserting a Simple Table ExtensionHDU to a empty HDUList."""
hdul = fits.HDUList()
hdul1 = fits.open(self.data('tb.fits'))
hdul.insert(0, hdul1[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_primary_to_non_empty_list(self):
"""Tests inserting a Simple PrimaryHDU to a non-empty HDUList."""
hdul = fits.open(self.data('arange.fits'))
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 7, (11, 10, 7), 'int32', ''),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_extension_to_non_empty_list(self):
"""Tests inserting a Simple ExtensionHDU to a non-empty HDUList."""
hdul = fits.open(self.data('tb.fits'))
hdul.insert(1, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 11, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_groupshdu_to_non_empty_list(self):
"""Tests inserting a Simple GroupsHDU to an empty HDUList."""
hdul = fits.HDUList()
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
hdu = fits.GroupsHDU()
with pytest.raises(ValueError):
hdul.insert(1, hdu)
info = [(0, 'PRIMARY', 1, 'GroupsHDU', 8, (), '',
'1 Groups 0 Parameters'),
(1, '', 1, 'ImageHDU', 6, (100,), 'int32', '')]
hdul.insert(0, hdu)
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
@raises(ValueError)
def test_insert_groupshdu_to_begin_of_hdulist_with_groupshdu(self):
"""
Tests inserting a Simple GroupsHDU to the beginning of an HDUList
        that already contains a GroupsHDU.
"""
hdul = fits.HDUList()
hdu = fits.GroupsHDU()
hdul.insert(0, hdu)
hdul.insert(0, hdu)
def test_insert_extension_to_primary_in_non_empty_list(self):
# Tests inserting a Simple ExtensionHDU to a non-empty HDUList.
hdul = fits.open(self.data('tb.fits'))
hdul.insert(0, hdul[1])
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 4, (), '', ''),
(1, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', ''),
(2, '', 1, 'ImageHDU', 12, (), '', ''),
(3, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_insert_image_extension_to_primary_in_non_empty_list(self):
"""
Tests inserting a Simple Image ExtensionHDU to a non-empty HDUList
as the primary HDU.
"""
hdul = fits.open(self.data('tb.fits'))
hdu = fits.ImageHDU(np.arange(100, dtype=np.int32))
hdul.insert(0, hdu)
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', ''),
(1, '', 1, 'ImageHDU', 12, (), '', ''),
(2, '', 1, 'BinTableHDU', 24, '2R x 4C', '[1J, 3A, 1E, 1L]', '')]
assert hdul.info(output=False) == info
hdul.writeto(self.temp('test-insert.fits'))
assert fits.info(self.temp('test-insert.fits'), output=False) == info
def test_filename(self):
"""Tests the HDUList filename method."""
hdul = fits.open(self.data('tb.fits'))
name = hdul.filename()
assert name == self.data('tb.fits')
def test_file_like(self):
"""
Tests the use of a file like object with no tell or seek methods
        in HDUList.writeto(), HDUList.flush() or astropy.io.fits.writeto()
"""
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
hdul = fits.HDUList()
hdul.append(hdu)
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul.writeto(tmpfile)
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_2(self):
hdu = fits.PrimaryHDU(np.arange(100, dtype=np.int32))
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
hdul = fits.open(tmpfile, mode='ostream')
hdul.append(hdu)
hdul.flush()
tmpfile.close()
hdul.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_file_like_3(self):
tmpfile = open(self.temp('tmpfile.fits'), 'wb')
fits.writeto(tmpfile, np.arange(100, dtype=np.int32))
tmpfile.close()
info = [(0, 'PRIMARY', 1, 'PrimaryHDU', 5, (100,), 'int32', '')]
assert fits.info(self.temp('tmpfile.fits'), output=False) == info
def test_shallow_copy(self):
"""
Tests that `HDUList.__copy__()` and `HDUList.copy()` return a
shallow copy (regression test for #7211).
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
for hdulcopy in (hdul.copy(), copy.copy(hdul)):
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
assert hdulcopy[0] is hdul[0]
assert hdulcopy[1] is hdul[1]
def test_deep_copy(self):
"""
Tests that `HDUList.__deepcopy__()` returns a deep copy.
"""
n = np.arange(10.0)
primary_hdu = fits.PrimaryHDU(n)
hdu = fits.ImageHDU(n)
hdul = fits.HDUList([primary_hdu, hdu])
hdulcopy = copy.deepcopy(hdul)
assert isinstance(hdulcopy, fits.HDUList)
assert hdulcopy is not hdul
for index in range(len(hdul)):
assert hdulcopy[index] is not hdul[index]
assert hdulcopy[index].header == hdul[index].header
np.testing.assert_array_equal(hdulcopy[index].data, hdul[index].data)
def test_new_hdu_extname(self):
"""
Tests that new extension HDUs that are added to an HDUList can be
properly indexed by their EXTNAME/EXTVER (regression test for
ticket:48).
"""
f = fits.open(self.data('test0.fits'))
hdul = fits.HDUList()
hdul.append(f[0].copy())
hdul.append(fits.ImageHDU(header=f[1].header))
assert hdul[1].header['EXTNAME'] == 'SCI'
assert hdul[1].header['EXTVER'] == 1
assert hdul.index_of(('SCI', 1)) == 1
def test_update_filelike(self):
"""Test opening a file-like object in update mode and resizing the
HDU.
"""
sf = io.BytesIO()
arr = np.zeros((100, 100))
hdu = fits.PrimaryHDU(data=arr)
hdu.writeto(sf)
sf.seek(0)
arr = np.zeros((200, 200))
hdul = fits.open(sf, mode='update')
hdul[0].data = arr
hdul.flush()
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_flush_readonly(self):
"""Test flushing changes to a file opened in a read only mode."""
oldmtime = os.stat(self.data('test0.fits')).st_mtime
hdul = fits.open(self.data('test0.fits'))
hdul[0].header['FOO'] = 'BAR'
with catch_warnings(AstropyUserWarning) as w:
hdul.flush()
assert len(w) == 1
assert 'mode is not supported' in str(w[0].message)
assert oldmtime == os.stat(self.data('test0.fits')).st_mtime
def test_fix_extend_keyword(self):
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU())
del hdul[0].header['EXTEND']
hdul.verify('silentfix')
assert 'EXTEND' in hdul[0].header
assert hdul[0].header['EXTEND'] is True
def test_fix_malformed_naxisj(self):
"""
Tests that malformed NAXISj values are fixed sensibly.
"""
hdu = fits.open(self.data('arange.fits'))
# Malform NAXISj header data
hdu[0].header['NAXIS1'] = 11.0
hdu[0].header['NAXIS2'] = '10.0'
hdu[0].header['NAXIS3'] = '7'
# Axes cache needs to be malformed as well
hdu[0]._axes = [11.0, '10.0', '7']
# Perform verification including the fix
hdu.verify('silentfix')
# Check that malformed data was converted
assert hdu[0].header['NAXIS1'] == 11
assert hdu[0].header['NAXIS2'] == 10
assert hdu[0].header['NAXIS3'] == 7
def test_fix_wellformed_naxisj(self):
"""
Tests that wellformed NAXISj values are not modified.
"""
hdu = fits.open(self.data('arange.fits'))
# Fake new NAXISj header data
hdu[0].header['NAXIS1'] = 768
hdu[0].header['NAXIS2'] = 64
hdu[0].header['NAXIS3'] = 8
# Axes cache needs to be faked as well
hdu[0]._axes = [768, 64, 8]
# Perform verification including the fix
hdu.verify('silentfix')
# Check that malformed data was converted
assert hdu[0].header['NAXIS1'] == 768
assert hdu[0].header['NAXIS2'] == 64
assert hdu[0].header['NAXIS3'] == 8
def test_new_hdulist_extend_keyword(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/114
Tests that adding a PrimaryHDU to a new HDUList object updates the
EXTEND keyword on that HDU.
"""
h0 = fits.Header()
hdu = fits.PrimaryHDU(header=h0)
sci = fits.ImageHDU(data=np.array(10))
image = fits.HDUList([hdu, sci])
image.writeto(self.temp('temp.fits'))
assert 'EXTEND' in hdu.header
assert hdu.header['EXTEND'] is True
def test_replace_memmaped_array(self):
# Copy the original before we modify it
hdul = fits.open(self.data('test0.fits'))
hdul.writeto(self.temp('temp.fits'))
hdul = fits.open(self.temp('temp.fits'), mode='update', memmap=True)
old_data = hdul[1].data.copy()
hdul[1].data = hdul[1].data + 1
hdul.close()
hdul = fits.open(self.temp('temp.fits'), memmap=True)
assert ((old_data + 1) == hdul[1].data).all()
def test_open_file_with_end_padding(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/106
Open files with end padding bytes.
"""
hdul = fits.open(self.data('test0.fits'),
do_not_scale_image_data=True)
info = hdul.info(output=False)
hdul.writeto(self.temp('temp.fits'))
with open(self.temp('temp.fits'), 'ab') as f:
f.seek(0, os.SEEK_END)
f.write(b'\0' * 2880)
with ignore_warnings():
assert info == fits.info(self.temp('temp.fits'), output=False,
do_not_scale_image_data=True)
def test_open_file_with_bad_header_padding(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/136
Open files with nulls for header block padding instead of spaces.
"""
a = np.arange(100).reshape(10, 10)
hdu = fits.PrimaryHDU(data=a)
hdu.writeto(self.temp('temp.fits'))
# Figure out where the header padding begins and fill it with nulls
end_card_pos = str(hdu.header).index('END' + ' ' * 77)
padding_start = end_card_pos + 80
padding_len = 2880 - padding_start
with open(self.temp('temp.fits'), 'r+b') as f:
f.seek(padding_start)
f.write('\0'.encode('ascii') * padding_len)
with catch_warnings(AstropyUserWarning) as w:
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == a).all()
assert ('contains null bytes instead of spaces' in
str(w[0].message))
assert len(w) == 1
assert len(hdul) == 1
assert str(hdul[0].header) == str(hdu.header)
def test_update_with_truncated_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/148
Test that saving an update where the header is shorter than the
original header doesn't leave a stump from the old header in the file.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(hdu.header) < 34:
hdu.header['TEST{}'.format(idx)] = idx
idx += 1
hdu.writeto(self.temp('temp.fits'), checksum=True)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
# Modify the header, forcing it to be rewritten
hdul[0].header['TEST1'] = 2
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data).all()
@pytest.mark.xfail(platform.system() == 'Windows',
reason='https://github.com/astropy/astropy/issues/5797')
def test_update_resized_header(self):
"""
Test saving updates to a file where the header is one block smaller
        than before, and in the case where the header is one block larger than
before.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
idx = 1
while len(str(hdu.header)) <= 2880:
hdu.header['TEST{}'.format(idx)] = idx
idx += 1
orig_header = hdu.header.copy()
hdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
while len(str(hdul[0].header)) > 2880:
del hdul[0].header[-1]
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header == orig_header[:-1]
assert (hdul[0].data == data).all()
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 101
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header['TEST{}'.format(idx)] = idx
idx += 1
# Touch something in the data too so that it has to be rewritten
hdul[0].data[0] = 27
with fits.open(self.temp('temp.fits')) as hdul:
assert hdul[0].header[:-37] == orig_header[:-1]
assert hdul[0].data[0] == 27
assert (hdul[0].data[1:] == data[1:]).all()
def test_update_resized_header2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/150
This is similar to test_update_resized_header, but specifically tests a
case of multiple consecutive flush() calls on the same HDUList object,
where each flush() requires a resize.
"""
data1 = np.arange(100)
data2 = np.arange(100) + 100
phdu = fits.PrimaryHDU(data=data1)
hdu = fits.ImageHDU(data=data2)
phdu.writeto(self.temp('temp.fits'))
with fits.open(self.temp('temp.fits'), mode='append') as hdul:
hdul.append(hdu)
with fits.open(self.temp('temp.fits'), mode='update') as hdul:
idx = 1
while len(str(hdul[0].header)) <= 2880 * 2:
hdul[0].header['TEST{}'.format(idx)] = idx
idx += 1
hdul.flush()
hdul.append(hdu)
with fits.open(self.temp('temp.fits')) as hdul:
assert (hdul[0].data == data1).all()
assert hdul[1].header == hdu.header
assert (hdul[1].data == data2).all()
assert (hdul[2].data == data2).all()
@ignore_warnings()
def test_hdul_fromstring(self):
"""
Test creating the HDUList structure in memory from a string containing
an entire FITS file. This is similar to test_hdu_fromstring but for an
entire multi-extension FITS file at once.
"""
# Tests HDUList.fromstring for all of Astropy's built in test files
def test_fromstring(filename):
with fits.open(filename) as hdul:
orig_info = hdul.info(output=False)
with open(filename, 'rb') as f:
dat = f.read()
hdul2 = fits.HDUList.fromstring(dat)
assert orig_info == hdul2.info(output=False)
for idx in range(len(hdul)):
assert hdul[idx].header == hdul2[idx].header
if hdul[idx].data is None or hdul2[idx].data is None:
assert hdul[idx].data == hdul2[idx].data
elif (hdul[idx].data.dtype.fields and
hdul2[idx].data.dtype.fields):
# Compare tables
for n in hdul[idx].data.names:
c1 = hdul[idx].data[n]
c2 = hdul2[idx].data[n]
assert (c1 == c2).all()
elif (any(dim == 0 for dim in hdul[idx].data.shape) or
any(dim == 0 for dim in hdul2[idx].data.shape)):
# For some reason some combinations of Python and Numpy
# on Windows result in MemoryErrors when trying to work
# on memmap arrays with more than one dimension but
# some dimensions of size zero, so include a special
# case for that
return hdul[idx].data.shape == hdul2[idx].data.shape
else:
np.testing.assert_array_equal(hdul[idx].data,
hdul2[idx].data)
for filename in glob.glob(os.path.join(self.data_dir, '*.fits')):
if sys.platform == 'win32' and filename == 'zerowidth.fits':
# Running this test on this file causes a crash in some
# versions of Numpy on Windows. See ticket:
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/174
continue
elif filename.endswith('variable_length_table.fits'):
# Comparing variable length arrays is non-trivial and thus
# skipped at this point.
# TODO: That's probably possible, so one could make it work.
continue
test_fromstring(filename)
# Test that creating an HDUList from something silly raises a TypeError
pytest.raises(TypeError, fits.HDUList.fromstring, ['a', 'b', 'c'])
def test_save_backup(self):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/121
Save backup of file before flushing changes.
"""
self.copy_file('scale.fits')
with ignore_warnings():
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# Make some changes to the original file to force its header
# and data to be rewritten
hdul[0].header['TEST'] = 'TEST'
hdul[0].data[0] = 0
assert os.path.exists(self.temp('scale.fits.bak'))
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul1:
with fits.open(self.temp('scale.fits.bak'),
do_not_scale_image_data=True) as hdul2:
assert hdul1[0].header == hdul2[0].header
assert (hdul1[0].data == hdul2[0].data).all()
with ignore_warnings():
with fits.open(self.temp('scale.fits'), mode='update',
save_backup=True) as hdul:
# One more time to see if multiple backups are made
hdul[0].header['TEST2'] = 'TEST'
hdul[0].data[0] = 1
assert os.path.exists(self.temp('scale.fits.bak'))
assert os.path.exists(self.temp('scale.fits.bak.1'))
def test_replace_mmap_data(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
hdu_a = fits.PrimaryHDU(data=arr_a)
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.PrimaryHDU(data=arr_b)
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
hdul_a = fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a)
hdul_b = fits.open(self.temp('test_b.fits'), memmap=mmap_b)
hdul_a[0].data = hdul_b[0].data
hdul_a.close()
hdul_b.close()
hdul_a = fits.open(self.temp('test_a.fits'))
assert np.all(hdul_a[0].data == arr_b)
with ignore_warnings():
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_replace_mmap_data_2(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/25
Replacing the mmap'd data of one file with mmap'd data from a
different file should work. Like test_replace_mmap_data but with
table data instead of image data.
"""
arr_a = np.arange(10)
arr_b = arr_a * 2
def test(mmap_a, mmap_b):
col_a = fits.Column(name='a', format='J', array=arr_a)
col_b = fits.Column(name='b', format='J', array=arr_b)
hdu_a = fits.BinTableHDU.from_columns([col_a])
hdu_a.writeto(self.temp('test_a.fits'), overwrite=True)
hdu_b = fits.BinTableHDU.from_columns([col_b])
hdu_b.writeto(self.temp('test_b.fits'), overwrite=True)
hdul_a = fits.open(self.temp('test_a.fits'), mode='update',
memmap=mmap_a)
hdul_b = fits.open(self.temp('test_b.fits'), memmap=mmap_b)
hdul_a[1].data = hdul_b[1].data
hdul_a.close()
hdul_b.close()
hdul_a = fits.open(self.temp('test_a.fits'))
assert 'b' in hdul_a[1].columns.names
assert 'a' not in hdul_a[1].columns.names
assert np.all(hdul_a[1].data['b'] == arr_b)
with ignore_warnings():
test(True, True)
# Repeat the same test but this time don't mmap A
test(False, True)
# Finally, without mmaping B
test(True, False)
def test_extname_in_hdulist(self):
"""
Tests to make sure that the 'in' operator works.
Regression test for https://github.com/astropy/astropy/issues/3060
"""
hdulist = fits.HDUList()
hdulist.append(fits.ImageHDU(name='a'))
assert 'a' in hdulist
assert 'A' in hdulist
assert ('a', 1) in hdulist
assert ('A', 1) in hdulist
assert 'b' not in hdulist
assert ('a', 2) not in hdulist
assert ('b', 1) not in hdulist
assert ('b', 2) not in hdulist
def test_overwrite_vs_clobber(self):
hdulist = fits.HDUList([fits.PrimaryHDU()])
hdulist.writeto(self.temp('test_overwrite.fits'))
hdulist.writeto(self.temp('test_overwrite.fits'), overwrite=True)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
hdulist.writeto(self.temp('test_overwrite.fits'), clobber=True)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"clobber" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "overwrite" instead.')
def test_invalid_hdu_key_in_contains(self):
"""
Make sure invalid keys in the 'in' operator return False.
Regression test for https://github.com/astropy/astropy/issues/5583
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU())
hdulist.append(fits.ImageHDU())
# A more or less random assortment of things which are not valid keys.
bad_keys = [None, 3.5, {}]
for key in bad_keys:
assert not (key in hdulist)
def test_iteration_of_lazy_loaded_hdulist(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5585
"""
hdulist = fits.HDUList(fits.PrimaryHDU())
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='SCI'))
hdulist.append(fits.ImageHDU(name='nada'))
hdulist.append(fits.ImageHDU(name='SCI'))
filename = self.temp('many_extension.fits')
hdulist.writeto(filename)
f = fits.open(filename)
# Check that all extensions are read if f is not sliced
all_exts = [ext for ext in f]
assert len(all_exts) == 5
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Try a simple slice with no conditional on the ext. This is essentially
# the reported failure.
all_exts_but_zero = [ext for ext in f[1:]]
assert len(all_exts_but_zero) == 4
# Reload the file to ensure we are still lazy loading
f.close()
f = fits.open(filename)
# Check whether behavior is proper if the upper end of the slice is not
# omitted.
read_exts = [ext for ext in f[1:4] if ext.header['EXTNAME'] == 'SCI']
assert len(read_exts) == 2
def test_proper_error_raised_on_non_fits_file_with_unicode(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5594
The failure shows up when (in python 3+) you try to open a file
with unicode content that is not actually a FITS file. See:
https://github.com/astropy/astropy/issues/5594#issuecomment-266583218
"""
import codecs
filename = self.temp('not-fits-with-unicode.fits')
        with codecs.open(filename, mode='w', encoding='utf-8') as f:
f.write(u'Ce\xe7i ne marche pas')
# This should raise an OSError because there is no end card.
with pytest.raises(OSError):
fits.open(filename)
def test_no_resource_warning_raised_on_non_fits_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6168
The ResourceWarning shows up when (in python 3+) you try to
open a non-FITS file when using a filename.
"""
        # To avoid creating the file multiple times, the checks are all
        # included in one test. See the discussion in the PR at
        # https://github.com/astropy/astropy/issues/6168
#
filename = self.temp('not-fits.fits')
with open(filename, mode='w') as f:
f.write('# header line\n')
f.write('0.1 0.2\n')
# Opening the file should raise an OSError however the file
# is opened (there are two distinct code paths, depending on
# whether ignore_missing_end is True or False).
#
# Explicit tests are added to make sure the file handle is not
# closed when passed in to fits.open. In this case the ResourceWarning
# was not raised, but a check is still included.
#
with catch_warnings(ResourceWarning) as ws:
# Make sure that files opened by the user are not closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=False)
assert not f.closed
with open(filename, mode='rb') as f:
with pytest.raises(OSError):
fits.open(f, ignore_missing_end=True)
assert not f.closed
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=False)
with pytest.raises(OSError):
fits.open(filename, ignore_missing_end=True)
assert len(ws) == 0
def test_pop_with_lazy_load(self):
filename = self.data('checksum.fits')
hdul = fits.open(filename)
# Try popping the hdulist before doing anything else. This makes sure
# that https://github.com/astropy/astropy/issues/7185 is fixed.
hdu = hdul.pop()
assert len(hdul) == 1
# Read the file again and try popping from the beginning
hdul2 = fits.open(filename)
hdu2 = hdul2.pop(0)
assert len(hdul2) == 1
# Just a sanity check
hdul3 = fits.open(filename)
assert len(hdul3) == 2
assert hdul3[0].header == hdu2.header
assert hdul3[1].header == hdu.header
def test_pop_extname(self):
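        # pop() accepts the same (EXTNAME, EXTVER) tuple and bare-EXTNAME
        # keys as indexing; a bare name pops the first matching extension.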
hdul = fits.open(self.data('o4sp040b0_raw.fits'))
assert len(hdul) == 7
hdu1 = hdul[1]
hdu4 = hdul[4]
hdu_popped = hdul.pop(('SCI', 2))
assert len(hdul) == 6
assert hdu_popped is hdu4
hdu_popped = hdul.pop('SCI')
assert len(hdul) == 5
assert hdu_popped is hdu1
|
b6b8e3800810002d9b9c7164bb7743b3dcec0ab49310b53752d6cf5cd6debc3b | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import os
import warnings
import pytest
import numpy as np
from ....io import fits
from ....table import Table
from .. import printdiff
from ....tests.helper import catch_warnings
from . import FitsTestCase
class TestConvenience(FitsTestCase):
def test_resource_warning(self):
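        """
        getdata() and getheader() open the file themselves, so they must
        also close it; otherwise a ResourceWarning would be emitted.
        """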
warnings.simplefilter('always', ResourceWarning)
with catch_warnings() as w:
data = fits.getdata(self.data('test0.fits'))
assert len(w) == 0
with catch_warnings() as w:
header = fits.getheader(self.data('test0.fits'))
assert len(w) == 0
def test_fileobj_not_closed(self):
"""
Tests that file-like objects are not closed after being passed
to convenience functions.
Regression test for https://github.com/astropy/astropy/issues/5063
"""
f = open(self.data('test0.fits'), 'rb')
data = fits.getdata(f)
assert not f.closed
f.seek(0)
header = fits.getheader(f)
assert not f.closed
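        # Clean up the handle ourselves; the convenience functions above must
        # not have done it for us.
        f.close()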
def test_table_to_hdu(self):
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table['a'].unit = 'm/s'
table['b'].unit = 'not-a-unit'
with catch_warnings() as w:
hdu = fits.table_to_hdu(table)
assert len(w) == 1
assert str(w[0].message).startswith("'not-a-unit' did not parse as"
" fits unit")
# Check that TUNITn cards appear in the correct order
# (https://github.com/astropy/astropy/pull/5720)
assert hdu.header.index('TUNIT1') < hdu.header.index('TTYPE2')
assert isinstance(hdu, fits.BinTableHDU)
filename = self.temp('test_table_to_hdu.fits')
hdu.writeto(filename, overwrite=True)
def test_table_to_hdu_convert_comment_convention(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
table = Table([[1, 2, 3], ['a', 'b', 'c'], [2.3, 4.5, 6.7]],
names=['a', 'b', 'c'], dtype=['i', 'U1', 'f'])
table.meta['comments'] = ['This', 'is', 'a', 'comment']
hdu = fits.table_to_hdu(table)
assert hdu.header.get('comment') == ['This', 'is', 'a', 'comment']
with pytest.raises(ValueError):
hdu.header.index('comments')
def test_table_writeto_header(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5988
"""
data = np.zeros((5, ), dtype=[('x', float), ('y', int)])
h_in = fits.Header()
h_in['ANSWER'] = (42.0, 'LTU&E')
filename = self.temp('tabhdr42.fits')
fits.writeto(filename, data=data, header=h_in, overwrite=True)
h_out = fits.getheader(filename, ext=1)
assert h_out['ANSWER'] == 42
def test_image_extension_update_header(self):
"""
        Test that _makehdu correctly includes the header, as used, for
        example, by the fits.update convenience function.
"""
filename = self.temp('twoextension.fits')
hdus = [fits.PrimaryHDU(np.zeros((10, 10))),
fits.ImageHDU(np.zeros((10, 10)))]
fits.HDUList(hdus).writeto(filename)
fits.update(filename,
np.zeros((10, 10)),
header=fits.Header([('WHAT', 100)]),
ext=1)
h_out = fits.getheader(filename, ext=1)
assert h_out['WHAT'] == 100
def test_printdiff(self):
"""
Test that FITSDiff can run the different inputs without crashing.
"""
# Testing different string input options
assert printdiff(self.data('arange.fits'),
self.data('blank.fits')) is None
assert printdiff(self.data('arange.fits'),
self.data('blank.fits'), ext=0) is None
assert printdiff(self.data('o4sp040b0_raw.fits'),
self.data('o4sp040b0_raw.fits'),
extname='sci') is None
        # This may seem weird, but see printdiff's implementation: we need to
        # test a nonexistent second file.
with pytest.raises(OSError):
printdiff('o4sp040b0_raw.fits', 'fakefile.fits', extname='sci')
# Test HDU object inputs
with fits.open(self.data('stddata.fits'), mode='readonly') as in1:
with fits.open(self.data('checksum.fits'), mode='readonly') as in2:
assert printdiff(in1[0], in2[0]) is None
with pytest.raises(ValueError):
printdiff(in1[0], in2[0], ext=0)
assert printdiff(in1, in2) is None
with pytest.raises(NotImplementedError):
printdiff(in1, in2, 0)
def test_tabledump(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6937
"""
# copy fits file to the temp directory
self.copy_file('tb.fits')
# test without datafile
fits.tabledump(self.temp('tb.fits'))
assert os.path.isfile(self.temp('tb_1.txt'))
# test with datafile
fits.tabledump(self.temp('tb.fits'), datafile=self.temp('test_tb.txt'))
assert os.path.isfile(self.temp('test_tb.txt'))
|
f6e7edf1b6e3551ddc993b4fad2ab32d6346df730813fe5ff176a97c28de688f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import signal
import gzip
import pytest
import numpy as np
from numpy.testing import assert_equal
try:
from PIL import Image
HAS_PIL = True
except ImportError:
HAS_PIL = False
from ....tests.helper import catch_warnings
from .. import util
from ..util import ignore_sigint, _rstrip_inplace
from . import FitsTestCase
class TestUtils(FitsTestCase):
@pytest.mark.skipif("sys.platform.startswith('win')")
def test_ignore_sigint(self):
@ignore_sigint
def test():
with catch_warnings(UserWarning) as w:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
# One more time, for good measure
os.kill(pid, signal.SIGINT)
assert len(w) == 2
assert (str(w[0].message) ==
'KeyboardInterrupt ignored until test is complete!')
pytest.raises(KeyboardInterrupt, test)
def test_realign_dtype(self):
"""
Tests a few corner-cases for numpy dtype creation.
These originally were the reason for having a realign_dtype hack.
"""
dt = np.dtype([('a', np.int32), ('b', np.int16)])
names = dt.names
formats = [dt.fields[name][0] for name in names]
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [0, 0]})
assert dt2.itemsize == 4
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [0, 1]})
assert dt2.itemsize == 4
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [1, 0]})
assert dt2.itemsize == 5
dt = np.dtype([('a', np.float64), ('b', np.int8), ('c', np.int8)])
names = dt.names
formats = [dt.fields[name][0] for name in names]
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [0, 0, 0]})
assert dt2.itemsize == 8
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [0, 0, 1]})
assert dt2.itemsize == 8
dt2 = np.dtype({'names': names, 'formats': formats,
'offsets': [0, 0, 27]})
assert dt2.itemsize == 28
class TestUtilMode(FitsTestCase):
"""
    The high-level tests are partially covered by
    test_core.TestConvenienceFunctions.test_fileobj_mode_guessing,
    but some low-level tests are added here as well.
"""
def test_mode_strings(self):
        # A plain string means the file has not been opened yet, so the
        # function should return None.
assert util.fileobj_mode('tmp1.fits') is None
@pytest.mark.skipif("not HAS_PIL")
def test_mode_pil_image(self):
img = np.random.randint(0, 255, (5, 5, 3)).astype(np.uint8)
result = Image.fromarray(img)
result.save(self.temp('test_simple.jpg'))
        # PIL doesn't support append mode, so it will always use binary read.
with Image.open(self.temp('test_simple.jpg')) as fileobj:
assert util.fileobj_mode(fileobj) == 'rb'
def test_mode_gzip(self):
        # Open a gzip file in every possible way (gzip is binary or "touch"
        # only) and check that the mode is correctly identified.
# The lists consist of tuples: filenumber, given mode, identified mode
# The filenumber must be given because read expects the file to exist
# and x expects it to NOT exist.
num_mode_resmode = [(0, 'a', 'ab'), (0, 'ab', 'ab'),
(0, 'w', 'wb'), (0, 'wb', 'wb'),
(1, 'x', 'xb'),
(1, 'r', 'rb'), (1, 'rb', 'rb')]
for num, mode, res in num_mode_resmode:
filename = self.temp('test{0}.gz'.format(num))
with gzip.GzipFile(filename, mode) as fileobj:
assert util.fileobj_mode(fileobj) == res
def test_mode_normal_buffering(self):
# Use the python IO with buffering parameter. Binary mode only:
# see "test_mode_gzip" for explanation of tuple meanings.
num_mode_resmode = [(0, 'ab', 'ab'),
(0, 'wb', 'wb'),
(1, 'xb', 'xb'),
(1, 'rb', 'rb')]
for num, mode, res in num_mode_resmode:
filename = self.temp('test1{0}.dat'.format(num))
with open(filename, mode, buffering=0) as fileobj:
assert util.fileobj_mode(fileobj) == res
def test_mode_normal_no_buffering(self):
# Python IO without buffering
# see "test_mode_gzip" for explanation of tuple meanings.
num_mode_resmode = [(0, 'a', 'a'), (0, 'ab', 'ab'),
(0, 'w', 'w'), (0, 'wb', 'wb'),
(1, 'x', 'x'),
(1, 'r', 'r'), (1, 'rb', 'rb')]
for num, mode, res in num_mode_resmode:
filename = self.temp('test2{0}.dat'.format(num))
with open(filename, mode) as fileobj:
assert util.fileobj_mode(fileobj) == res
def test_mode_normalization(self):
        # Use the normal python IO in append mode with all possible
        # permutations of the "mode" letters.
        # Each tuple gives a file name suffix, the given mode and the
        # function's return value. The filenumber is only for consistency
        # with the other test functions; append can deal with both existing
        # and non-existing files.
for num, mode, res in [(0, 'a', 'a'),
(0, 'a+', 'a+'),
(0, 'ab', 'ab'),
(0, 'a+b', 'ab+'),
(0, 'ab+', 'ab+')]:
filename = self.temp('test3{0}.dat'.format(num))
with open(filename, mode) as fileobj:
assert util.fileobj_mode(fileobj) == res
def test_rstrip_inplace():
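    # _rstrip_inplace strips trailing blanks from a string array in place
    # (no copy), so it has to handle bytes and unicode dtypes as well as
    # multi-dimensional and non-contiguous views.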
# Incorrect type
s = np.array([1, 2, 3])
with pytest.raises(TypeError) as exc:
_rstrip_inplace(s)
assert exc.value.args[0] == 'This function can only be used on string arrays'
# Bytes array
s = np.array(['a ', ' b', ' c c '], dtype='S6')
_rstrip_inplace(s)
assert_equal(s, np.array(['a', ' b', ' c c'], dtype='S6'))
# Unicode array
s = np.array(['a ', ' b', ' c c '], dtype='U6')
_rstrip_inplace(s)
assert_equal(s, np.array(['a', ' b', ' c c'], dtype='U6'))
# 2-dimensional array
s = np.array([['a ', ' b'], [' c c ', ' a ']], dtype='S6')
_rstrip_inplace(s)
assert_equal(s, np.array([['a', ' b'], [' c c', ' a']], dtype='S6'))
# 3-dimensional array
s = np.repeat(' a a ', 24).reshape((2, 3, 4))
_rstrip_inplace(s)
assert_equal(s, ' a a')
# 3-dimensional non-contiguous array
s = np.repeat(' a a ', 1000).reshape((10, 10, 10))[:2, :3, :4]
_rstrip_inplace(s)
assert_equal(s, ' a a')
|
01f5dc11f351f5386e5d54e34d158106e50f5b5826e78b1e60ef99f56a3049a7 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from ..column import Column
from ..diff import (FITSDiff, HeaderDiff, ImageDataDiff, TableDataDiff,
HDUDiff)
from ..hdu import HDUList, PrimaryHDU, ImageHDU
from ..hdu.table import BinTableHDU
from ..header import Header
from ....tests.helper import catch_warnings
from ....utils.exceptions import AstropyDeprecationWarning
from ....io import fits
from . import FitsTestCase
class TestDiff(FitsTestCase):
def test_identical_headers(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
assert HeaderDiff(ha, hb).identical
assert HeaderDiff(ha.tostring(), hb.tostring()).identical
with pytest.raises(TypeError):
HeaderDiff(1, 2)
def test_slightly_different_headers(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
assert not HeaderDiff(ha, hb).identical
def test_common_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
hb['D'] = (5, 'Comment')
assert HeaderDiff(ha, hb).common_keywords == ['A', 'B', 'C']
def test_different_keyword_count(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
del hb['B']
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_count == (3, 2)
# But make sure the common keywords are at least correct
assert diff.common_keywords == ['A', 'C']
def test_different_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
hb['D'] = (5, 'Comment')
ha['E'] = (6, 'Comment')
ha['F'] = (7, 'Comment')
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keywords == (['E', 'F'], ['D'])
def test_different_keyword_values(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 4)]}
def test_different_keyword_comments(self):
ha = Header([('A', 1), ('B', 2), ('C', 3, 'comment 1')])
hb = ha.copy()
hb.comments['C'] = 'comment 2'
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert (diff.diff_keyword_comments ==
{'C': [('comment 1', 'comment 2')]})
def test_different_keyword_values_with_duplicate(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
ha.append(('C', 4))
hb.append(('C', 5))
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [None, (4, 5)]}
def test_asymmetric_duplicate_keywords(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
ha.append(('A', 2, 'comment 1'))
ha.append(('A', 3, 'comment 2'))
hb.append(('B', 4, 'comment 3'))
hb.append(('C', 5, 'comment 4'))
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert diff.diff_keyword_values == {}
assert (diff.diff_duplicate_keywords ==
{'A': (3, 1), 'B': (1, 2), 'C': (1, 2)})
report = diff.report()
assert ("Inconsistent duplicates of keyword 'A' :\n"
" Occurs 3 time(s) in a, 1 times in (b)") in report
def test_floating_point_rtol(self):
ha = Header([('A', 1), ('B', 2.00001), ('C', 3.000001)])
hb = ha.copy()
hb['B'] = 2.00002
hb['C'] = 3.000002
diff = HeaderDiff(ha, hb)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(2.00001, 2.00002)], 'C': [(3.000001, 3.000002)]})
diff = HeaderDiff(ha, hb, rtol=1e-6)
assert not diff.identical
assert diff.diff_keyword_values == {'B': [(2.00001, 2.00002)]}
diff = HeaderDiff(ha, hb, rtol=1e-5)
assert diff.identical
def test_floating_point_atol(self):
ha = Header([('A', 1), ('B', 1.0), ('C', 0.0)])
hb = ha.copy()
hb['B'] = 1.00001
hb['C'] = 0.000001
diff = HeaderDiff(ha, hb, rtol=1e-6)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)], 'C': [(0.0, 0.000001)]})
diff = HeaderDiff(ha, hb, rtol=1e-5)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'C': [(0.0, 0.000001)]})
diff = HeaderDiff(ha, hb, atol=1e-6)
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)]})
diff = HeaderDiff(ha, hb, atol=1e-5) # strict inequality
assert not diff.identical
assert (diff.diff_keyword_values ==
{'B': [(1.0, 1.00001)]})
diff = HeaderDiff(ha, hb, rtol=1e-5, atol=1e-5)
assert diff.identical
diff = HeaderDiff(ha, hb, atol=1.1e-5)
assert diff.identical
diff = HeaderDiff(ha, hb, rtol=1e-6, atol=1e-6)
assert not diff.identical
def test_deprecation_tolerance(self):
"""Verify uses of tolerance and rtol.
This test should be removed in the next astropy version."""
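        # "tolerance" is the deprecated alias for "rtol"; passing it must
        # warn and otherwise behave exactly like rtol.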
ha = Header([('B', 1.0), ('C', 0.1)])
hb = ha.copy()
hb['B'] = 1.00001
hb['C'] = 0.100001
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
diff = HeaderDiff(ha, hb, tolerance=1e-6)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"tolerance" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "rtol" instead.')
assert (diff.diff_keyword_values == {'C': [(0.1, 0.100001)],
'B': [(1.0, 1.00001)]})
assert not diff.identical
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
# `rtol` is always ignored when `tolerance` is provided
diff = HeaderDiff(ha, hb, rtol=1e-6, tolerance=1e-5)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"tolerance" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "rtol" instead.')
assert diff.identical
def test_ignore_blanks(self):
with fits.conf.set_temp('strip_header_whitespace', False):
ha = Header([('A', 1), ('B', 2), ('C', 'A ')])
hb = ha.copy()
hb['C'] = 'A'
assert ha['C'] != hb['C']
diff = HeaderDiff(ha, hb)
# Trailing blanks are ignored by default
assert diff.identical
assert diff.diff_keyword_values == {}
# Don't ignore blanks
diff = HeaderDiff(ha, hb, ignore_blanks=False)
assert not diff.identical
assert diff.diff_keyword_values == {'C': [('A ', 'A')]}
def test_ignore_blank_cards(self):
"""Test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/152
Ignore blank cards.
"""
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = Header([('A', 1), ('', ''), ('B', 2), ('', ''), ('C', 3)])
hc = ha.copy()
hc.append()
hc.append()
        # We now have a header with interleaved blanks, and a header with end
        # blanks, both of which should compare as identical when blank cards
        # are ignored
assert HeaderDiff(ha, hb).identical
assert HeaderDiff(ha, hc).identical
assert HeaderDiff(hb, hc).identical
assert not HeaderDiff(ha, hb, ignore_blank_cards=False).identical
assert not HeaderDiff(ha, hc, ignore_blank_cards=False).identical
# Both hb and hc have the same number of blank cards; since order is
# currently ignored, these should still be identical even if blank
# cards are not ignored
assert HeaderDiff(hb, hc, ignore_blank_cards=False).identical
hc.append()
# But now there are different numbers of blanks, so they should not be
# ignored:
assert not HeaderDiff(hb, hc, ignore_blank_cards=False).identical
def test_ignore_hdus(self):
a = np.arange(100).reshape(10, 10)
b = a.copy()
ha = Header([('A', 1), ('B', 2), ('C', 3)])
xa = np.array([(1.0, 1), (3.0, 4)], dtype=[('x', float), ('y', int)])
xb = np.array([(1.0, 2), (3.0, 5)], dtype=[('x', float), ('y', int)])
phdu = PrimaryHDU(header=ha)
ihdua = ImageHDU(data=a, name='SCI')
ihdub = ImageHDU(data=b, name='SCI')
bhdu1 = BinTableHDU(data=xa, name='ASDF')
bhdu2 = BinTableHDU(data=xb, name='ASDF')
hdula = HDUList([phdu, ihdua, bhdu1])
hdulb = HDUList([phdu, ihdub, bhdu2])
# ASDF extension should be different
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdus[0][0] == 2
# ASDF extension should be ignored
diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASDF'])
assert diff.identical, diff.report()
diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASD*'])
assert diff.identical, diff.report()
# SCI extension should be different
hdulb['SCI'].data += 1
diff = FITSDiff(hdula, hdulb, ignore_hdus=['ASDF'])
assert not diff.identical
# SCI and ASDF extensions should be ignored
diff = FITSDiff(hdula, hdulb, ignore_hdus=['SCI', 'ASDF'])
assert diff.identical, diff.report()
# All EXTVER of SCI should be ignored
ihduc = ImageHDU(data=a, name='SCI', ver=2)
hdulb.append(ihduc)
diff = FITSDiff(hdula, hdulb, ignore_hdus=['SCI', 'ASDF'])
assert not any(diff.diff_hdus), diff.report()
assert any(diff.diff_hdu_count), diff.report()
def test_ignore_keyword_values(self):
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['B'] = 4
hb['C'] = 5
diff = HeaderDiff(ha, hb, ignore_keywords=['*'])
assert diff.identical
diff = HeaderDiff(ha, hb, ignore_keywords=['B'])
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 5)]}
report = diff.report()
assert 'Keyword B has different values' not in report
assert 'Keyword C has different values' in report
# Test case-insensitivity
diff = HeaderDiff(ha, hb, ignore_keywords=['b'])
assert not diff.identical
assert diff.diff_keyword_values == {'C': [(3, 5)]}
def test_ignore_keyword_comments(self):
ha = Header([('A', 1, 'A'), ('B', 2, 'B'), ('C', 3, 'C')])
hb = ha.copy()
hb.comments['B'] = 'D'
hb.comments['C'] = 'E'
diff = HeaderDiff(ha, hb, ignore_comments=['*'])
assert diff.identical
diff = HeaderDiff(ha, hb, ignore_comments=['B'])
assert not diff.identical
assert diff.diff_keyword_comments == {'C': [('C', 'E')]}
report = diff.report()
assert 'Keyword B has different comments' not in report
assert 'Keyword C has different comments' in report
# Test case-insensitivity
diff = HeaderDiff(ha, hb, ignore_comments=['b'])
assert not diff.identical
assert diff.diff_keyword_comments == {'C': [('C', 'E')]}
def test_trivial_identical_images(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100).reshape(10, 10)
diff = ImageDataDiff(ia, ib)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_relative_tolerance(self):
ia = np.ones((10, 10)) - 0.00001
ib = np.ones((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-4)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_absolute_tolerance(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-4)
assert not diff.identical
assert diff.diff_total == 100
diff = ImageDataDiff(ia, ib, atol=1.0e-4)
assert diff.identical
assert diff.diff_total == 0
def test_identical_within_rtol_and_atol(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-5)
assert diff.identical
assert diff.diff_total == 0
def test_not_identical_within_rtol_and_atol(self):
ia = np.zeros((10, 10)) - 0.00001
ib = np.zeros((10, 10)) - 0.00002
diff = ImageDataDiff(ia, ib, rtol=1.0e-5, atol=1.0e-6)
assert not diff.identical
assert diff.diff_total == 100
def test_identical_comp_image_hdus(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/189
        For this test we mostly just care that comparing two compressed images
does not crash, and returns the correct results. Two compressed images
will be considered identical if the decompressed data is the same.
Obviously we test whether or not the same compression was used by
looking for (or ignoring) header differences.
"""
data = np.arange(100.0).reshape(10, 10)
hdu = fits.CompImageHDU(data=data)
hdu.writeto(self.temp('test.fits'))
hdula = fits.open(self.temp('test.fits'))
hdulb = fits.open(self.temp('test.fits'))
diff = FITSDiff(hdula, hdulb)
assert diff.identical
def test_different_dimensions(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100) - 1
        # Although ib could be reshaped into the same dimensions, for now the
        # data is not compared anyway
diff = ImageDataDiff(ia, ib)
assert not diff.identical
assert diff.diff_dimensions == ((10, 10), (100,))
assert diff.diff_total == 0
report = diff.report()
assert 'Data dimensions differ' in report
assert 'a: 10 x 10' in report
assert 'b: 100' in report
        assert 'No further data comparison performed.' in report
def test_different_pixels(self):
ia = np.arange(100).reshape(10, 10)
ib = np.arange(100).reshape(10, 10)
ib[0, 0] = 10
ib[5, 5] = 20
diff = ImageDataDiff(ia, ib)
assert not diff.identical
assert diff.diff_dimensions == ()
assert diff.diff_total == 2
assert diff.diff_ratio == 0.02
assert diff.diff_pixels == [((0, 0), (0, 10)), ((5, 5), (55, 20))]
def test_identical_tables(self):
c1 = Column('A', format='L', array=[True, False])
c2 = Column('B', format='X', array=[[0], [1]])
c3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
c4 = Column('D', format='J', bscale=2.0, array=[0, 1])
c5 = Column('E', format='A3', array=['abc', 'def'])
c6 = Column('F', format='E', unit='m', array=[0.0, 1.0])
c7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0])
c8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j])
c9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j])
c10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]])
columns = [c1, c2, c3, c4, c5, c6, c7, c8, c9, c10]
ta = BinTableHDU.from_columns(columns)
tb = BinTableHDU.from_columns([c.copy() for c in columns])
diff = TableDataDiff(ta.data, tb.data)
assert diff.identical
assert len(diff.common_columns) == 10
assert diff.common_column_names == set('abcdefghij')
assert diff.diff_ratio == 0
assert diff.diff_total == 0
def test_diff_empty_tables(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/178
Ensure that diffing tables containing empty data doesn't crash.
"""
c1 = Column('D', format='J')
c2 = Column('E', format='J')
thdu = BinTableHDU.from_columns([c1, c2], nrows=0)
hdula = fits.HDUList([thdu])
hdulb = fits.HDUList([thdu])
diff = FITSDiff(hdula, hdulb)
assert diff.identical
def test_ignore_table_fields(self):
c1 = Column('A', format='L', array=[True, False])
c2 = Column('B', format='X', array=[[0], [1]])
c3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
c4 = Column('B', format='X', array=[[1], [0]])
c5 = Column('C', format='4I', dim='(2, 2)',
array=[[1, 2, 3, 4], [5, 6, 7, 8]])
ta = BinTableHDU.from_columns([c1, c2, c3])
tb = BinTableHDU.from_columns([c1, c4, c5])
diff = TableDataDiff(ta.data, tb.data, ignore_fields=['B', 'C'])
assert diff.identical
# The only common column should be c1
assert len(diff.common_columns) == 1
assert diff.common_column_names == {'a'}
assert diff.diff_ratio == 0
assert diff.diff_total == 0
def test_different_table_field_names(self):
ca = Column('A', format='L', array=[True, False])
cb = Column('B', format='L', array=[True, False])
cc = Column('C', format='L', array=[True, False])
ta = BinTableHDU.from_columns([ca, cb])
tb = BinTableHDU.from_columns([ca, cc])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert len(diff.common_columns) == 1
assert diff.common_column_names == {'a'}
assert diff.diff_column_names == (['B'], ['C'])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
report = diff.report()
assert 'Extra column B of format L in a' in report
assert 'Extra column C of format L in b' in report
def test_different_table_field_counts(self):
"""
Test tables with some common columns, but different number of columns
overall.
"""
ca = Column('A', format='L', array=[True, False])
cb = Column('B', format='L', array=[True, False])
cc = Column('C', format='L', array=[True, False])
ta = BinTableHDU.from_columns([cb])
tb = BinTableHDU.from_columns([ca, cb, cc])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert diff.diff_column_count == (1, 3)
assert len(diff.common_columns) == 1
assert diff.common_column_names == {'b'}
assert diff.diff_column_names == ([], ['A', 'C'])
assert diff.diff_ratio == 0
assert diff.diff_total == 0
report = diff.report()
assert ' Tables have different number of columns:' in report
assert ' a: 1\n b: 3' in report
def test_different_table_rows(self):
"""
Test tables that are otherwise identical but one has more rows than the
other.
"""
ca1 = Column('A', format='L', array=[True, False])
cb1 = Column('B', format='L', array=[True, False])
ca2 = Column('A', format='L', array=[True, False, True])
cb2 = Column('B', format='L', array=[True, False, True])
ta = BinTableHDU.from_columns([ca1, cb1])
tb = BinTableHDU.from_columns([ca2, cb2])
diff = TableDataDiff(ta.data, tb.data)
assert not diff.identical
assert diff.diff_column_count == ()
assert len(diff.common_columns) == 2
assert diff.diff_rows == (2, 3)
assert diff.diff_values == []
report = diff.report()
assert 'Table rows differ' in report
assert 'a: 2' in report
assert 'b: 3' in report
        assert 'No further data comparison performed.' in report
def test_different_table_data(self):
"""
Test diffing table data on columns of several different data formats
and dimensions.
"""
ca1 = Column('A', format='L', array=[True, False])
ca2 = Column('B', format='X', array=[[0], [1]])
ca3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [4, 5, 6, 7]])
ca4 = Column('D', format='J', bscale=2.0, array=[0.0, 2.0])
ca5 = Column('E', format='A3', array=['abc', 'def'])
ca6 = Column('F', format='E', unit='m', array=[0.0, 1.0])
ca7 = Column('G', format='D', bzero=-0.1, array=[0.0, 1.0])
ca8 = Column('H', format='C', array=[0.0+1.0j, 2.0+3.0j])
ca9 = Column('I', format='M', array=[4.0+5.0j, 6.0+7.0j])
ca10 = Column('J', format='PI(2)', array=[[0, 1], [2, 3]])
cb1 = Column('A', format='L', array=[False, False])
cb2 = Column('B', format='X', array=[[0], [0]])
cb3 = Column('C', format='4I', dim='(2, 2)',
array=[[0, 1, 2, 3], [5, 6, 7, 8]])
cb4 = Column('D', format='J', bscale=2.0, array=[2.0, 2.0])
cb5 = Column('E', format='A3', array=['abc', 'ghi'])
cb6 = Column('F', format='E', unit='m', array=[1.0, 2.0])
cb7 = Column('G', format='D', bzero=-0.1, array=[2.0, 3.0])
cb8 = Column('H', format='C', array=[1.0+1.0j, 2.0+3.0j])
cb9 = Column('I', format='M', array=[5.0+5.0j, 6.0+7.0j])
cb10 = Column('J', format='PI(2)', array=[[1, 2], [3, 4]])
ta = BinTableHDU.from_columns([ca1, ca2, ca3, ca4, ca5, ca6, ca7,
ca8, ca9, ca10])
tb = BinTableHDU.from_columns([cb1, cb2, cb3, cb4, cb5, cb6, cb7,
cb8, cb9, cb10])
diff = TableDataDiff(ta.data, tb.data, numdiffs=20)
assert not diff.identical
# The column definitions are the same, but not the column values
assert diff.diff_columns == ()
assert diff.diff_values[0] == (('A', 0), (True, False))
assert diff.diff_values[1] == (('B', 1), ([1], [0]))
assert diff.diff_values[2][0] == ('C', 1)
assert (diff.diff_values[2][1][0] == [[4, 5], [6, 7]]).all()
assert (diff.diff_values[2][1][1] == [[5, 6], [7, 8]]).all()
assert diff.diff_values[3] == (('D', 0), (0, 2.0))
assert diff.diff_values[4] == (('E', 1), ('def', 'ghi'))
assert diff.diff_values[5] == (('F', 0), (0.0, 1.0))
assert diff.diff_values[6] == (('F', 1), (1.0, 2.0))
assert diff.diff_values[7] == (('G', 0), (0.0, 2.0))
assert diff.diff_values[8] == (('G', 1), (1.0, 3.0))
assert diff.diff_values[9] == (('H', 0), (0.0+1.0j, 1.0+1.0j))
assert diff.diff_values[10] == (('I', 0), (4.0+5.0j, 5.0+5.0j))
assert diff.diff_values[11][0] == ('J', 0)
assert (diff.diff_values[11][1][0] == [0, 1]).all()
assert (diff.diff_values[11][1][1] == [1, 2]).all()
assert diff.diff_values[12][0] == ('J', 1)
assert (diff.diff_values[12][1][0] == [2, 3]).all()
assert (diff.diff_values[12][1][1] == [3, 4]).all()
assert diff.diff_total == 13
assert diff.diff_ratio == 0.65
report = diff.report()
assert ('Column A data differs in row 0:\n'
' a> True\n'
' b> False') in report
assert ('...and at 1 more indices.\n'
' Column D data differs in row 0:') in report
assert ('13 different table data element(s) found (65.00% different)'
in report)
assert report.count('more indices') == 1
def test_identical_files_basic(self):
"""Test identicality of two simple, extensionless files."""
a = np.arange(100).reshape(10, 10)
hdu = PrimaryHDU(data=a)
hdu.writeto(self.temp('testa.fits'))
hdu.writeto(self.temp('testb.fits'))
diff = FITSDiff(self.temp('testa.fits'), self.temp('testb.fits'))
assert diff.identical
report = diff.report()
# Primary HDUs should contain no differences
assert 'Primary HDU' not in report
assert 'Extension HDU' not in report
assert 'No differences found.' in report
a = np.arange(10)
ehdu = ImageHDU(data=a)
diff = HDUDiff(ehdu, ehdu)
assert diff.identical
report = diff.report()
assert 'No differences found.' in report
def test_partially_identical_files1(self):
"""
Test files that have some identical HDUs but a different extension
count.
"""
a = np.arange(100).reshape(10, 10)
phdu = PrimaryHDU(data=a)
ehdu = ImageHDU(data=a)
hdula = HDUList([phdu, ehdu])
hdulb = HDUList([phdu, ehdu, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdu_count == (2, 3)
# diff_hdus should be empty, since the third extension in hdulb
# has nothing to compare against
assert diff.diff_hdus == []
report = diff.report()
assert 'Files contain different numbers of HDUs' in report
assert 'a: 2\n b: 3' in report
assert 'No differences found between common HDUs' in report
def test_partially_identical_files2(self):
"""
Test files that have some identical HDUs but one different HDU.
"""
a = np.arange(100).reshape(10, 10)
phdu = PrimaryHDU(data=a)
ehdu = ImageHDU(data=a)
ehdu2 = ImageHDU(data=(a + 1))
hdula = HDUList([phdu, ehdu, ehdu])
hdulb = HDUList([phdu, ehdu2, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdu_count == ()
assert len(diff.diff_hdus) == 1
assert diff.diff_hdus[0][0] == 1
hdudiff = diff.diff_hdus[0][1]
assert not hdudiff.identical
assert hdudiff.diff_extnames == ()
assert hdudiff.diff_extvers == ()
assert hdudiff.diff_extension_types == ()
assert hdudiff.diff_headers.identical
assert hdudiff.diff_data is not None
datadiff = hdudiff.diff_data
assert isinstance(datadiff, ImageDataDiff)
assert not datadiff.identical
assert datadiff.diff_dimensions == ()
assert (datadiff.diff_pixels ==
[((0, y), (y, y + 1)) for y in range(10)])
assert datadiff.diff_ratio == 1.0
assert datadiff.diff_total == 100
report = diff.report()
# Primary HDU and 2nd extension HDU should have no differences
assert 'Primary HDU' not in report
assert 'Extension HDU 2' not in report
assert 'Extension HDU 1' in report
assert 'Headers contain differences' not in report
assert 'Data contains differences' in report
for y in range(10):
assert 'Data differs at [{}, 1]'.format(y + 1) in report
assert '100 different pixels found (100.00% different).' in report
def test_partially_identical_files3(self):
"""
Test files that have some identical HDUs but a different extension
name.
"""
phdu = PrimaryHDU()
ehdu = ImageHDU(name='FOO')
hdula = HDUList([phdu, ehdu])
ehdu = BinTableHDU(name='BAR')
ehdu.header['EXTVER'] = 2
ehdu.header['EXTLEVEL'] = 3
hdulb = HDUList([phdu, ehdu])
diff = FITSDiff(hdula, hdulb)
assert not diff.identical
assert diff.diff_hdus[0][0] == 1
hdu_diff = diff.diff_hdus[0][1]
assert hdu_diff.diff_extension_types == ('IMAGE', 'BINTABLE')
assert hdu_diff.diff_extnames == ('FOO', 'BAR')
assert hdu_diff.diff_extvers == (1, 2)
assert hdu_diff.diff_extlevels == (1, 3)
report = diff.report()
assert 'Extension types differ' in report
assert 'a: IMAGE\n b: BINTABLE' in report
assert 'Extension names differ' in report
assert 'a: FOO\n b: BAR' in report
assert 'Extension versions differ' in report
assert 'a: 1\n b: 2' in report
assert 'Extension levels differ' in report
        assert 'a: 1\n b: 3' in report
def test_diff_nans(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/204
"""
# First test some arrays that should be equivalent....
arr = np.empty((10, 10), dtype=np.float64)
arr[:5] = 1.0
arr[5:] = np.nan
arr2 = arr.copy()
table = np.rec.array([(1.0, 2.0), (3.0, np.nan), (np.nan, np.nan)],
names=['cola', 'colb']).view(fits.FITS_rec)
table2 = table.copy()
assert ImageDataDiff(arr, arr2).identical
assert TableDataDiff(table, table2).identical
# Now let's introduce some differences, where there are nans and where
# there are not nans
arr2[0][0] = 2.0
arr2[5][0] = 2.0
table2[0][0] = 2.0
table2[1][1] = 2.0
diff = ImageDataDiff(arr, arr2)
assert not diff.identical
assert diff.diff_pixels[0] == ((0, 0), (1.0, 2.0))
assert diff.diff_pixels[1][0] == (5, 0)
assert np.isnan(diff.diff_pixels[1][1][0])
assert diff.diff_pixels[1][1][1] == 2.0
diff = TableDataDiff(table, table2)
assert not diff.identical
assert diff.diff_values[0] == (('cola', 0), (1.0, 2.0))
assert diff.diff_values[1][0] == ('colb', 1)
assert np.isnan(diff.diff_values[1][1][0])
assert diff.diff_values[1][1][1] == 2.0
def test_file_output_from_path_string(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
assert open(outpath).read() == report_as_string
def test_file_output_overwrite_safety(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
with pytest.raises(OSError):
diffobj.report(fileobj=outpath)
def test_file_output_overwrite_success(self):
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
report_as_string = diffobj.report()
diffobj.report(fileobj=outpath, overwrite=True)
assert open(outpath).read() == report_as_string, (
"overwritten output file is not identical to report string")
def test_file_output_overwrite_vs_clobber(self):
"""Verify uses of clobber and overwrite."""
outpath = self.temp('diff_output.txt')
ha = Header([('A', 1), ('B', 2), ('C', 3)])
hb = ha.copy()
hb['C'] = 4
diffobj = HeaderDiff(ha, hb)
diffobj.report(fileobj=outpath)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
diffobj.report(fileobj=outpath, clobber=True)
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) == '"clobber" was '
'deprecated in version 2.0 and will be removed in a '
'future version. Use argument "overwrite" instead.')
|
16fcdd66b35bb7aa4d3e879d85cd4f67f4a21ad5e2719fff9ddbc87dd290bd5b | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
import os
from . import FitsTestCase
from ..convenience import writeto
from ..hdu import PrimaryHDU, hdulist
from .. import Header, ImageHDU, HDUList
from ..scripts import fitsdiff
from ....tests.helper import catch_warnings
from ....utils.exceptions import AstropyDeprecationWarning
from ....version import version
class TestFITSDiff_script(FitsTestCase):
def test_noargs(self):
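        # argparse exits with status 2 on usage errors such as missing
        # arguments.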
with pytest.raises(SystemExit) as e:
fitsdiff.main()
assert e.value.code == 2
def test_oneargargs(self):
with pytest.raises(SystemExit) as e:
fitsdiff.main(["file1"])
assert e.value.code == 2
def test_nodiff(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 0
def test_onediff(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 12
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 1
def test_manydiff(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a + 1
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
out, err = capsys.readouterr()
assert numdiff == 1
assert out.splitlines()[-4:] == [
' a> 9',
' b> 10',
' ...',
' 100 different pixels found (100.00% different).']
numdiff = fitsdiff.main(['-n', '1', tmp_a, tmp_b])
out, err = capsys.readouterr()
assert numdiff == 1
assert out.splitlines()[-4:] == [
' a> 0',
' b> 1',
' ...',
' 100 different pixels found (100.00% different).']
def test_outputfile(self):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 12
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(['-o', self.temp('diff.txt'), tmp_a, tmp_b])
assert numdiff == 1
with open(self.temp('diff.txt')) as f:
out = f.read()
assert out.splitlines()[-4:] == [
' Data differs at [1, 2]:',
' a> 10',
' b> 12',
' 1 different pixels found (1.00% different).']
def test_atol(self):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-a", "1", tmp_a, tmp_b])
assert numdiff == 0
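        # With --exact, the tolerance options are ignored, so the same
        # comparison now reports the differing pixel.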
numdiff = fitsdiff.main(["--exact", "-a", "1", tmp_a, tmp_b])
assert numdiff == 1
def test_rtol(self):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-r", "1e-1", tmp_a, tmp_b])
assert numdiff == 0
def test_rtol_diff(self, capsys):
a = np.arange(100, dtype=float).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
b[1, 0] = 11
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-r", "1e-2", tmp_a, tmp_b])
assert numdiff == 1
out, err = capsys.readouterr()
assert out == """
fitsdiff: {}
a: {}
b: {}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.01, Absolute tolerance: 0.0
Primary HDU:\n\n Data contains differences:
Data differs at [1, 2]:
a> 10.0
? ^
b> 11.0
? ^
1 different pixels found (1.00% different).\n""".format(version, tmp_a, tmp_b)
assert err == ""
def test_fitsdiff_script_both_d_and_r(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
with catch_warnings(AstropyDeprecationWarning) as warning_lines:
fitsdiff.main(["-r", "1e-4", "-d", "1e-2", tmp_a, tmp_b])
            # "-r" (rtol) is always ignored when the deprecated "-d" is given
assert warning_lines[0].category == AstropyDeprecationWarning
assert (str(warning_lines[0].message) ==
'"-d" ("--difference-tolerance") was deprecated in version 2.0 '
'and will be removed in a future version. '
'Use "-r" ("--relative-tolerance") instead.')
out, err = capsys.readouterr()
assert out == """
fitsdiff: {}
a: {}
b: {}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.01, Absolute tolerance: 0.0
No differences found.\n""".format(version, tmp_a, tmp_b)
def test_wildcard(self):
tmp1 = self.temp("tmp_file1")
with pytest.raises(SystemExit) as e:
fitsdiff.main([tmp1+"*", "ACME"])
assert e.value.code == 2
def test_not_quiet(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 0
out, err = capsys.readouterr()
assert out == """
fitsdiff: {}
a: {}
b: {}
Maximum number of different data values to be reported: 10
Relative tolerance: 0.0, Absolute tolerance: 0.0
No differences found.\n""".format(version, tmp_a, tmp_b)
assert err == ""
def test_quiet(self, capsys):
a = np.arange(100).reshape(10, 10)
hdu_a = PrimaryHDU(data=a)
b = a.copy()
hdu_b = PrimaryHDU(data=b)
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdu_a.writeto(tmp_a)
hdu_b.writeto(tmp_b)
numdiff = fitsdiff.main(["-q", tmp_a, tmp_b])
assert numdiff == 0
out, err = capsys.readouterr()
assert out == ""
assert err == ""
def test_path(self, capsys):
os.mkdir(self.temp('sub/'))
tmp_b = self.temp('sub/ascii.fits')
tmp_g = self.temp('sub/group.fits')
tmp_h = self.data('group.fits')
with hdulist.fitsopen(tmp_h) as hdu_b:
hdu_b.writeto(tmp_g)
writeto(tmp_b, np.arange(100).reshape(10, 10))
# one modified file and a directory
assert fitsdiff.main(["-q", self.data_dir, tmp_b]) == 1
assert fitsdiff.main(["-q", tmp_b, self.data_dir]) == 1
# two directories
tmp_d = self.temp('sub/')
assert fitsdiff.main(["-q", self.data_dir, tmp_d]) == 1
assert fitsdiff.main(["-q", tmp_d, self.data_dir]) == 1
assert fitsdiff.main(["-q", self.data_dir, self.data_dir]) == 0
# no match
tmp_c = self.data('arange.fits')
fitsdiff.main([tmp_c, tmp_d])
out, err = capsys.readouterr()
assert "'arange.fits' has no match in" in err
# globbing
assert fitsdiff.main(["-q", self.data_dir+'/*.fits', self.data_dir]) == 0
assert fitsdiff.main(["-q", self.data_dir+'/g*.fits', tmp_d]) == 0
# one file and a directory
tmp_f = self.data('tb.fits')
assert fitsdiff.main(["-q", tmp_f, self.data_dir]) == 0
assert fitsdiff.main(["-q", self.data_dir, tmp_f]) == 0
def test_ignore_hdus(self):
a = np.arange(100).reshape(10, 10)
b = a.copy() + 1
ha = Header([('A', 1), ('B', 2), ('C', 3)])
phdu_a = PrimaryHDU(header=ha)
phdu_b = PrimaryHDU(header=ha)
ihdu_a = ImageHDU(data=a, name='SCI')
ihdu_b = ImageHDU(data=b, name='SCI')
hdulist_a = HDUList([phdu_a, ihdu_a])
hdulist_b = HDUList([phdu_b, ihdu_b])
tmp_a = self.temp('testa.fits')
tmp_b = self.temp('testb.fits')
hdulist_a.writeto(tmp_a)
hdulist_b.writeto(tmp_b)
numdiff = fitsdiff.main([tmp_a, tmp_b])
assert numdiff == 1
numdiff = fitsdiff.main([tmp_a, tmp_b, "-u", "SCI"])
assert numdiff == 0
|
68ecbbdf965acf35aa8202128c4ba5ac91e6e65090f5df08433607b07c0a9e89 | import os
import gc
import pathlib
import warnings
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ..column import _parse_tdisp_format, _fortran_to_python_format, \
python_to_tdisp
from .. import HDUList, PrimaryHDU, BinTableHDU
from ... import fits
from .... import units as u
from ....table import Table, QTable, NdarrayMixin, Column
from ....table.table_helpers import simple_table
from ....tests.helper import catch_warnings
from ....units.format.fits import UnitScaleError
from ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation
from ....time import Time, TimeDelta
from ....units import allclose as quantity_allclose
from ....units.quantity import QuantityInfo
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
DATA = os.path.join(os.path.dirname(__file__), 'data')
def equal_data(a, b):
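    """Compare two structured arrays field by field."""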
for name in a.dtype.names:
if not np.all(a[name] == b[name]):
return False
return True
class TestSingleTable:
def setup_class(self):
self.data = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[(str('a'), int), (str('b'), str('U1')), (str('c'), float)])
def test_simple(self, tmpdir):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_pathlib(self, tmpdir):
filename = pathlib.Path(str(tmpdir.join('test_simple.fit')))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
def test_simple_meta(self, tmpdir):
filename = str(tmpdir.join('test_simple.fits'))
t1 = Table(self.data)
t1.meta['A'] = 1
t1.meta['B'] = 2.3
t1.meta['C'] = 'spam'
t1.meta['comments'] = ['this', 'is', 'a', 'long', 'comment']
t1.meta['HISTORY'] = ['first', 'second', 'third']
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert equal_data(t1, t2)
for key in t1.meta:
            if isinstance(t1.meta[key], list):
for i in range(len(t1.meta[key])):
assert t1.meta[key][i] == t2.meta[key][i]
else:
assert t1.meta[key] == t2.meta[key]
def test_simple_meta_conflicting(self, tmpdir):
filename = str(tmpdir.join('test_simple.fits'))
t1 = Table(self.data)
t1.meta['ttype1'] = 'spam'
with catch_warnings() as l:
t1.write(filename, overwrite=True)
assert len(l) == 1
assert str(l[0].message).startswith(
'Meta-data keyword ttype1 will be ignored since it conflicts with a FITS reserved keyword')
def test_simple_noextension(self, tmpdir):
"""
Test that file type is recognized without extension
"""
filename = str(tmpdir.join('test_simple'))
t1 = Table(self.data)
t1.write(filename, overwrite=True, format='fits')
t2 = Table.read(filename)
assert equal_data(t1, t2)
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_with_units(self, table_type, tmpdir):
filename = str(tmpdir.join('test_with_units.fits'))
t1 = table_type(self.data)
t1['a'].unit = u.m
t1['c'].unit = u.km / u.s
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2['a'].unit == u.m
assert t2['c'].unit == u.km / u.s
@pytest.mark.parametrize('table_type', (Table, QTable))
def test_with_format(self, table_type, tmpdir):
filename = str(tmpdir.join('test_with_format.fits'))
t1 = table_type(self.data)
t1['a'].format = '{:5d}'
t1['b'].format = '{:>20}'
t1['c'].format = '{:6.2f}'
t1.write(filename, overwrite=True)
t2 = table_type.read(filename)
assert equal_data(t1, t2)
assert t2['a'].format == '{:5d}'
assert t2['b'].format == '{:>20}'
assert t2['c'].format == '{:6.2f}'
def test_masked(self, tmpdir):
filename = str(tmpdir.join('test_masked.fits'))
t1 = Table(self.data, masked=True)
t1.mask['a'] = [1, 0, 1, 0]
t1.mask['b'] = [1, 0, 0, 1]
t1.mask['c'] = [0, 1, 1, 0]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2.masked
assert equal_data(t1, t2)
assert np.all(t1['a'].mask == t2['a'].mask)
# Disabled for now, as there is no obvious way to handle masking of
# non-integer columns in FITS
# TODO: Re-enable these tests if some workaround for this can be found
# assert np.all(t1['b'].mask == t2['b'].mask)
# assert np.all(t1['c'].mask == t2['c'].mask)
def test_masked_nan(self, tmpdir):
filename = str(tmpdir.join('test_masked_nan.fits'))
data = np.array(list(zip([5.2, 8.4, 3.9, 6.3],
[2.3, 4.5, 6.7, 8.9])),
dtype=[(str('a'), np.float64), (str('b'), np.float32)])
t1 = Table(data, masked=True)
t1.mask['a'] = [1, 0, 1, 0]
t1.mask['b'] = [1, 0, 0, 1]
t1.write(filename, overwrite=True)
t2 = Table.read(filename)
np.testing.assert_array_almost_equal(t2['a'], [np.nan, 8.4, np.nan, 6.3])
np.testing.assert_array_almost_equal(t2['b'], [np.nan, 4.5, 6.7, np.nan])
        # assert t2.masked
        # t2.masked is False currently: the only way to determine whether a
        # table is masked while reading is to check whether col.null is
        # present, and col.null is not initialized for float columns
def test_read_from_fileobj(self, tmpdir):
filename = str(tmpdir.join('test_read_from_fileobj.fits'))
hdu = BinTableHDU(self.data)
hdu.writeto(filename, overwrite=True)
with open(filename, 'rb') as f:
t = Table.read(f)
assert equal_data(t, self.data)
def test_read_with_nonstandard_units(self):
hdu = BinTableHDU(self.data)
hdu.columns[0].unit = 'RADIANS'
hdu.columns[1].unit = 'spam'
hdu.columns[2].unit = 'millieggs'
t = Table.read(hdu)
assert equal_data(t, self.data)
def test_memmap(self, tmpdir):
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, memmap=False)
t3 = Table.read(filename, memmap=True)
assert equal_data(t2, t3)
# To avoid issues with --open-files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
@pytest.mark.parametrize('memmap', (False, True))
def test_character_as_bytes(self, tmpdir, memmap):
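        # character_as_bytes=True keeps string columns as raw FITS bytes
        # ('S'), while False decodes them to unicode ('U'); the values must
        # compare equal either way.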
filename = str(tmpdir.join('test_simple.fts'))
t1 = Table(self.data)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, character_as_bytes=False, memmap=memmap)
t3 = Table.read(filename, character_as_bytes=True, memmap=memmap)
assert t2['b'].dtype.kind == 'U'
assert t3['b'].dtype.kind == 'S'
assert equal_data(t2, t3)
# To avoid issues with --open-files, we need to remove references to
# data that uses memory mapping and force the garbage collection
del t1, t2, t3
gc.collect()
class TestMultipleHDU:
def setup_class(self):
self.data1 = np.array(list(zip([1, 2, 3, 4],
['a', 'b', 'c', 'd'],
[2.3, 4.5, 6.7, 8.9])),
dtype=[(str('a'), int), (str('b'), str('U1')), (str('c'), float)])
self.data2 = np.array(list(zip([1.4, 2.3, 3.2, 4.7],
[2.3, 4.5, 6.7, 8.9])),
dtype=[(str('p'), float), (str('q'), float)])
hdu1 = PrimaryHDU()
hdu2 = BinTableHDU(self.data1, name='first')
hdu3 = BinTableHDU(self.data2, name='second')
self.hdus = HDUList([hdu1, hdu2, hdu3])
def teardown_class(self):
del self.hdus
def setup_method(self, method):
warnings.filterwarnings('always')
def test_read(self, tmpdir):
filename = str(tmpdir.join('test_read.fits'))
self.hdus.writeto(filename)
with catch_warnings() as l:
t = Table.read(filename)
assert len(l) == 1
assert str(l[0].message).startswith(
'hdu= was not specified but multiple tables are present, reading in first available table (hdu=1)')
assert equal_data(t, self.data1)
def test_read_with_hdu_0(self, tmpdir):
filename = str(tmpdir.join('test_read_with_hdu_0.fits'))
self.hdus.writeto(filename)
with pytest.raises(ValueError) as exc:
Table.read(filename, hdu=0)
assert exc.value.args[0] == 'No table found in hdu=0'
@pytest.mark.parametrize('hdu', [1, 'first'])
def test_read_with_hdu_1(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_1.fits'))
self.hdus.writeto(filename)
with catch_warnings() as l:
t = Table.read(filename, hdu=hdu)
assert len(l) == 0
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [2, 'second'])
def test_read_with_hdu_2(self, tmpdir, hdu):
filename = str(tmpdir.join('test_read_with_hdu_2.fits'))
self.hdus.writeto(filename)
with catch_warnings() as l:
t = Table.read(filename, hdu=hdu)
assert len(l) == 0
assert equal_data(t, self.data2)
def test_read_from_hdulist(self):
with catch_warnings() as l:
t = Table.read(self.hdus)
assert len(l) == 1
assert str(l[0].message).startswith(
'hdu= was not specified but multiple tables are present, reading in first available table (hdu=1)')
assert equal_data(t, self.data1)
def test_read_from_hdulist_with_hdu_0(self, tmpdir):
with pytest.raises(ValueError) as exc:
Table.read(self.hdus, hdu=0)
assert exc.value.args[0] == 'No table found in hdu=0'
@pytest.mark.parametrize('hdu', [1, 'first'])
def test_read_from_hdulist_with_hdu_1(self, tmpdir, hdu):
with catch_warnings() as l:
t = Table.read(self.hdus, hdu=hdu)
assert len(l) == 0
assert equal_data(t, self.data1)
@pytest.mark.parametrize('hdu', [2, 'second'])
def test_read_from_hdulist_with_hdu_2(self, tmpdir, hdu):
with catch_warnings() as l:
t = Table.read(self.hdus, hdu=hdu)
assert len(l) == 0
assert equal_data(t, self.data2)
def test_read_from_single_hdu(self):
with catch_warnings() as l:
t = Table.read(self.hdus[1])
assert len(l) == 0
assert equal_data(t, self.data1)
def test_masking_regression_1795():
"""
Regression test for #1795 - this bug originally caused columns where TNULL
was not defined to have their first element masked.
"""
t = Table.read(os.path.join(DATA, 'tb.fits'))
assert np.all(t['c1'].mask == np.array([False, False]))
assert np.all(t['c2'].mask == np.array([False, False]))
assert np.all(t['c3'].mask == np.array([False, False]))
assert np.all(t['c4'].mask == np.array([False, False]))
assert np.all(t['c1'].data == np.array([1, 2]))
assert np.all(t['c2'].data == np.array([b'abc', b'xy ']))
assert_allclose(t['c3'].data, np.array([3.70000007153, 6.6999997139]))
assert np.all(t['c4'].data == np.array([False, True]))
def test_scale_error():
a = [1, 4, 5]
b = [2.0, 5.0, 8.2]
c = ['x', 'y', 'z']
t = Table([a, b, c], names=('a', 'b', 'c'), meta={'name': 'first table'})
t['a'].unit = '1.2'
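    # A purely numeric "unit" is a scale factor, which FITS units cannot
    # represent, hence the UnitScaleError below.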
with pytest.raises(UnitScaleError) as exc:
t.write('t.fits', format='fits', overwrite=True)
assert exc.value.args[0] == "The column 'a' could not be stored in FITS format because it has a scale '(1.2)' that is not recognized by the FITS standard. Either scale the data or change the units."
@pytest.mark.parametrize('tdisp_str, format_return',
[('EN10.5', ('EN', '10', '5', None)),
('F6.2', ('F', '6', '2', None)),
('B5.10', ('B', '5', '10', None)),
('E10.5E3', ('E', '10', '5', '3')),
('A21', ('A', '21', None, None))])
def test_parse_tdisp_format(tdisp_str, format_return):
assert _parse_tdisp_format(tdisp_str) == format_return
@pytest.mark.parametrize('tdisp_str, format_str_return',
[('G15.4E2', '{:15.4g}'),
('Z5.10', '{:5x}'),
('I6.5', '{:6d}'),
('L8', '{:>8}'),
('E20.7', '{:20.7e}')])
def test_fortran_to_python_format(tdisp_str, format_str_return):
assert _fortran_to_python_format(tdisp_str) == format_str_return
@pytest.mark.parametrize('fmt_str, tdisp_str',
[('{:3d}', 'I3'),
('3d', 'I3'),
('7.3f', 'F7.3'),
('{:>4}', 'A4'),
('{:7.4f}', 'F7.4'),
('%5.3g', 'G5.3'),
('%10s', 'A10'),
('%.4f', 'F13.4')])
def test_python_to_tdisp(fmt_str, tdisp_str):
assert python_to_tdisp(fmt_str) == tdisp_str
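# Aside (an illustrative sketch, not astropy's implementation): the
# conversions exercised above are near-inverses built on a letter-for-letter
# mapping -- 'd' <-> 'I', 'f' <-> 'F', 'e' <-> 'E', 'g' <-> 'G',
# 's'/'>' <-> 'A', 'x' <-> 'Z' -- with width-less float specs such as '%.4f'
# given a default width (13 in the cases above). A toy forward mapping, with
# names of our own invention:
_EXAMPLE_PY_TO_TDISP_LETTERS = {'d': 'I', 'f': 'F', 'e': 'E', 'g': 'G',
                                's': 'A', 'x': 'Z'}


def _example_py_to_tdisp(width, precision, kind):
    # ``kind`` is the trailing Python format letter, e.g. 'f' in '{:7.4f}'.
    letter = _EXAMPLE_PY_TO_TDISP_LETTERS[kind]
    suffix = '' if precision is None else '.{}'.format(precision)
    return '{}{}{}'.format(letter, width, suffix)


assert _example_py_to_tdisp(7, 4, 'f') == 'F7.4'
assert _example_py_to_tdisp(10, None, 's') == 'A10'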
def test_logical_python_to_tdisp():
assert python_to_tdisp('{:>7}', logical_dtype=True) == 'L7'
def test_bool_column(tmpdir):
"""
Regression test for https://github.com/astropy/astropy/issues/1953
Ensures that Table columns of bools are properly written to a FITS table.
"""
arr = np.ones(5, dtype=bool)
    arr[::2] = False
t = Table([arr])
t.write(str(tmpdir.join('test.fits')), overwrite=True)
with fits.open(str(tmpdir.join('test.fits'))) as hdul:
assert hdul[1].data['col0'].dtype == np.dtype('bool')
assert np.all(hdul[1].data['col0'] == arr)
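# Aside (illustrative): FITS represents booleans as a logical column
# (TFORM 'L', stored as the bytes 'T'/'F'), which is why the exact
# round-trip back to a numpy bool dtype asserted above is possible.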
def test_unicode_column(tmpdir):
"""
Test that a column of unicode strings is still written as one
byte-per-character in the FITS table (so long as the column can be ASCII
encoded).
Regression test for one of the issues fixed in
https://github.com/astropy/astropy/pull/4228
"""
t = Table([np.array([u'a', u'b', u'cd'])])
t.write(str(tmpdir.join('test.fits')), overwrite=True)
with fits.open(str(tmpdir.join('test.fits'))) as hdul:
assert np.all(hdul[1].data['col0'] == ['a', 'b', 'cd'])
assert hdul[1].header['TFORM1'] == '2A'
t2 = Table([np.array([u'\N{SNOWMAN}'])])
with pytest.raises(UnicodeEncodeError):
t2.write(str(tmpdir.join('test.fits')), overwrite=True)
def test_unit_warnings_read_write(tmpdir):
filename = str(tmpdir.join('test_unit.fits'))
t1 = Table([[1, 2], [3, 4]], names=['a', 'b'])
t1['a'].unit = 'm/s'
t1['b'].unit = 'not-a-unit'
with catch_warnings() as l:
t1.write(filename, overwrite=True)
assert len(l) == 1
assert str(l[0].message).startswith("'not-a-unit' did not parse as fits unit")
with catch_warnings() as l:
Table.read(filename, hdu=1)
assert len(l) == 0
def test_convert_comment_convention(tmpdir):
"""
Regression test for https://github.com/astropy/astropy/issues/6079
"""
filename = os.path.join(DATA, 'stddata.fits')
t = Table.read(filename)
assert t.meta['comments'] == [
'',
' *** End of mandatory fields ***',
'',
'',
' *** Column names ***',
'',
'',
' *** Column formats ***',
''
]
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
        # Mixin info.meta can be None instead of an empty OrderedDict();
        # #6720 would fix this.
if attr == 'info.meta':
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
assert np.all(a1 == a2)
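# Example usage of the helper above (illustrative only): comparing two
# Quantity-like columns attribute by attribute, including the dotted
# ``info.*`` names that are appended automatically:
#
#     assert_objects_equal([1, 2] * u.m, [1, 2] * u.m, ['value', 'unit'])
#
# Dotted attrs such as 'frame.name' are resolved one step at a time,
# falling back to item access (``a1[subattr]``) for dict-like steps.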
# Testing FITS table read/write with mixins. This is mostly
# copied from ECSV mixin testing.
el = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km)
el2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4',
obstime='J1990.5')
scc = sc.copy()
scc.representation = 'cartesian'
tm = Time([2450814.5, 2450815.5], format='jd', scale='tai', location=el)
mixin_cols = {
'tm': tm,
'dt': TimeDelta([1, 2] * u.day),
'sc': sc,
'scc': scc,
'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
obstime=['J1990.5', 'J1991.5']),
'q': [1, 2] * u.m,
'lat': Latitude([1, 2] * u.deg),
'lon': Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg),
'ang': Angle([1, 2] * u.deg),
'el2': el2,
}
time_attrs = ['value', 'shape', 'format', 'scale', 'location']
compare_attrs = {
'c1': ['data'],
'c2': ['data'],
'tm': time_attrs,
'dt': ['shape', 'value', 'format', 'scale'],
'sc': ['ra', 'dec', 'representation', 'frame.name'],
'scc': ['x', 'y', 'z', 'representation', 'frame.name'],
'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'],
'q': ['value', 'unit'],
'lon': ['value', 'unit', 'wrap_angle'],
'lat': ['value', 'unit'],
'ang': ['value', 'unit'],
'el2': ['x', 'y', 'z', 'ellipsoid'],
'nd': ['x', 'y', 'z'],
}
@pytest.mark.skipif('not HAS_YAML')
def test_fits_mixins_qtable_to_table(tmpdir):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = str(tmpdir.join('test_simple.fits'))
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format='fits')
t2 = Table.read(filename, format='fits', astropy_native=True)
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.skipif('not HAS_YAML')
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_fits_mixins_as_one(table_cls, tmpdir):
"""Test write/read all cols at once and validate intermediate column names"""
filename = str(tmpdir.join('test_simple.fits'))
names = sorted(mixin_cols)
serialized_names = ['ang',
'dt.jd1', 'dt.jd2',
'el2.x', 'el2.y', 'el2.z',
'lat',
'lon',
'q',
'sc.ra', 'sc.dec',
'scc.x', 'scc.y', 'scc.z',
'scd.ra', 'scd.dec', 'scd.distance',
'scd.obstime.jd1', 'scd.obstime.jd2',
'tm', # serialize_method is formatted_value
]
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="fits")
t2 = table_cls.read(filename, format='fits', astropy_native=True)
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['HISTORY'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
# Read directly via fits and confirm column names
hdus = fits.open(filename)
assert hdus[1].columns.names == serialized_names
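# Aside (illustrative): ``serialized_names`` above shows the flattening
# convention -- a mixin column is stored as one FITS column per simple
# attribute, named '<col>.<attr>' (e.g. 'sc.ra', 'sc.dec'), while the
# mapping back to mixin classes travels in the YAML-serialized metadata.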
@pytest.mark.skipif('not HAS_YAML')
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_fits_mixins_per_column(table_cls, name_col, tmpdir):
"""Test write/read one col at a time and do detailed validation"""
filename = str(tmpdir.join('test_simple.fits'))
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'my \n\n\n description'
t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
if isinstance(t[name], NdarrayMixin):
pytest.xfail('NdarrayMixin not supported')
t.write(filename, format="fits")
t2 = table_cls.read(filename, format='fits', astropy_native=True)
assert t.colnames == t2.colnames
for colname in t.colnames:
assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.skipif('HAS_YAML')
def test_warn_for_dropped_info_attributes(tmpdir):
filename = str(tmpdir.join('test.fits'))
t = Table([[1, 2]])
t['col0'].info.description = 'hello'
with catch_warnings() as warns:
t.write(filename, overwrite=True)
assert len(warns) == 1
assert str(warns[0].message).startswith(
"table contains column(s) with defined 'format'")
@pytest.mark.skipif('HAS_YAML')
def test_error_for_mixins_but_no_yaml(tmpdir):
filename = str(tmpdir.join('test.fits'))
t = Table([mixin_cols['sc']])
with pytest.raises(TypeError) as err:
t.write(filename)
assert "cannot write type SkyCoord column 'col0' to FITS without PyYAML" in str(err)
@pytest.mark.skipif('not HAS_YAML')
def test_info_attributes_with_no_mixins(tmpdir):
"""Even if there are no mixin columns, if there is metadata that would be lost it still
gets serialized
"""
filename = str(tmpdir.join('test.fits'))
t = Table([[1.0, 2.0]])
t['col0'].description = 'hello' * 40
t['col0'].format = '{:8.4f}'
t['col0'].meta['a'] = {'b': 'c'}
t.write(filename, overwrite=True)
t2 = Table.read(filename)
assert t2['col0'].description == 'hello' * 40
assert t2['col0'].format == '{:8.4f}'
assert t2['col0'].meta['a'] == {'b': 'c'}
@pytest.mark.skipif('not HAS_YAML')
@pytest.mark.parametrize('method', ['set_cols', 'names', 'class'])
def test_round_trip_masked_table_serialize_mask(tmpdir, method):
"""
Same as previous test but set the serialize_method to 'data_mask' so mask is
written out and the behavior is all correct.
"""
filename = str(tmpdir.join('test.fits'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
if method == 'set_cols':
for col in t.itercols():
col.info.serialize_method['fits'] = 'data_mask'
t.write(filename)
elif method == 'names':
t.write(filename, serialize_method={'a': 'data_mask', 'b': 'data_mask',
'c': 'data_mask'})
elif method == 'class':
t.write(filename, serialize_method='data_mask')
t2 = Table.read(filename)
assert t2.masked is True
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
|
256ce6d504fe72357aaebc2928b0d9c7ba07ce65647ba8a27dba000f1ced3327 | # Licensed under a 3-clause BSD style license - see PYFITS.rst
import gzip
import bz2
import io
import mmap
import os
import pathlib
import warnings
import zipfile
import pytest
import numpy as np
from . import FitsTestCase
from ..convenience import _getext
from ..diff import FITSDiff
from ..file import _File, GZIP_MAGIC
from ....io import fits
from ....tests.helper import raises, catch_warnings, ignore_warnings
from ....utils.data import conf, get_pkg_data_filename
from ....utils import data
class TestCore(FitsTestCase):
def test_with_statement(self):
with fits.open(self.data('ascii.fits')) as f:
pass
@raises(OSError)
def test_missing_file(self):
fits.open(self.temp('does-not-exist.fits'))
def test_filename_is_bytes_object(self):
with pytest.raises(TypeError):
fits.open(self.data('ascii.fits').encode())
def test_naxisj_check(self):
hdulist = fits.open(self.data('o4sp040b0_raw.fits'))
hdulist[1].header['NAXIS3'] = 500
assert 'NAXIS3' in hdulist[1].header
hdulist.verify('silentfix')
assert 'NAXIS3' not in hdulist[1].header
def test_byteswap(self):
p = fits.PrimaryHDU()
l = fits.HDUList()
n = np.zeros(3, dtype='i2')
n[0] = 1
n[1] = 60000
n[2] = 2
c = fits.Column(name='foo', format='i2', bscale=1, bzero=32768,
array=n)
t = fits.BinTableHDU.from_columns([c])
l.append(p)
l.append(t)
l.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as p:
assert p[1].data[1]['foo'] == 60000.0
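    # Aside (illustrative): FITS has no unsigned 16-bit integer type, so such
    # data is stored as int16 with BZERO = 32768 and read back as
    # physical = raw * BSCALE + BZERO. For the 60000 above, the value on
    # disk is 60000 - 32768 = 27232, and 27232 + 32768 == 60000 round-trips.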
def test_fits_file_path_object(self):
"""
Testing when fits file is passed as pathlib.Path object #4412.
"""
fpath = pathlib.Path(get_pkg_data_filename('data/tdim.fits'))
hdulist = fits.open(fpath)
assert hdulist[0].filebytes() == 2880
assert hdulist[1].filebytes() == 5760
hdulist2 = fits.open(self.data('tdim.fits'))
assert FITSDiff(hdulist2, hdulist).identical is True
def test_add_del_columns(self):
p = fits.ColDefs([])
p.add_col(fits.Column(name='FOO', format='3J'))
p.add_col(fits.Column(name='BAR', format='1I'))
assert p.names == ['FOO', 'BAR']
p.del_col('FOO')
assert p.names == ['BAR']
def test_add_del_columns2(self):
hdulist = fits.open(self.data('tb.fits'))
table = hdulist[1]
assert table.data.dtype.names == ('c1', 'c2', 'c3', 'c4')
assert table.columns.names == ['c1', 'c2', 'c3', 'c4']
table.columns.del_col(str('c1'))
assert table.data.dtype.names == ('c2', 'c3', 'c4')
assert table.columns.names == ['c2', 'c3', 'c4']
table.columns.del_col(str('c3'))
assert table.data.dtype.names == ('c2', 'c4')
assert table.columns.names == ['c2', 'c4']
table.columns.add_col(fits.Column(str('foo'), str('3J')))
assert table.data.dtype.names == ('c2', 'c4', 'foo')
assert table.columns.names == ['c2', 'c4', 'foo']
hdulist.writeto(self.temp('test.fits'), overwrite=True)
with ignore_warnings():
# TODO: The warning raised by this test is actually indication of a
# bug and should *not* be ignored. But as it is a known issue we
# hide it for now. See
# https://github.com/spacetelescope/PyFITS/issues/44
with fits.open(self.temp('test.fits')) as hdulist:
table = hdulist[1]
assert table.data.dtype.names == ('c2', 'c4', 'foo')
assert table.columns.names == ['c2', 'c4', 'foo']
def test_update_header_card(self):
"""A very basic test for the Header.update method--I'd like to add a
few more cases to this at some point.
"""
header = fits.Header()
comment = 'number of bits per data pixel'
header['BITPIX'] = (16, comment)
assert 'BITPIX' in header
assert header['BITPIX'] == 16
assert header.comments['BITPIX'] == comment
header.update(BITPIX=32)
assert header['BITPIX'] == 32
assert header.comments['BITPIX'] == ''
def test_set_card_value(self):
"""Similar to test_update_header_card(), but tests the the
`header['FOO'] = 'bar'` method of updating card values.
"""
header = fits.Header()
comment = 'number of bits per data pixel'
card = fits.Card.fromstring('BITPIX = 32 / {}'.format(comment))
header.append(card)
header['BITPIX'] = 32
assert 'BITPIX' in header
assert header['BITPIX'] == 32
assert header.cards[0].keyword == 'BITPIX'
assert header.cards[0].value == 32
assert header.cards[0].comment == comment
def test_uint(self):
hdulist_f = fits.open(self.data('o4sp040b0_raw.fits'), uint=False)
hdulist_i = fits.open(self.data('o4sp040b0_raw.fits'), uint=True)
assert hdulist_f[1].data.dtype == np.float32
assert hdulist_i[1].data.dtype == np.uint16
assert np.all(hdulist_f[1].data == hdulist_i[1].data)
def test_fix_missing_card_append(self):
hdu = fits.ImageHDU()
errs = hdu.req_cards('TESTKW', None, None, 'foo', 'silentfix', [])
assert len(errs) == 1
assert 'TESTKW' in hdu.header
assert hdu.header['TESTKW'] == 'foo'
assert hdu.header.cards[-1].keyword == 'TESTKW'
def test_fix_invalid_keyword_value(self):
hdu = fits.ImageHDU()
hdu.header['TESTKW'] = 'foo'
errs = hdu.req_cards('TESTKW', None,
lambda v: v == 'foo', 'foo', 'ignore', [])
assert len(errs) == 0
# Now try a test that will fail, and ensure that an error will be
# raised in 'exception' mode
errs = hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar',
'exception', [])
assert len(errs) == 1
assert errs[0][1] == "'TESTKW' card has invalid value 'foo'."
# See if fixing will work
hdu.req_cards('TESTKW', None, lambda v: v == 'bar', 'bar', 'silentfix',
[])
assert hdu.header['TESTKW'] == 'bar'
@raises(fits.VerifyError)
def test_unfixable_missing_card(self):
class TestHDU(fits.hdu.base.NonstandardExtHDU):
def _verify(self, option='warn'):
errs = super()._verify(option)
hdu.req_cards('TESTKW', None, None, None, 'fix', errs)
return errs
@classmethod
def match_header(cls, header):
# Since creating this HDU class adds it to the registry we
# don't want the file reader to possibly think any actual
# HDU from a file should be handled by this class
return False
hdu = TestHDU(header=fits.Header())
hdu.verify('fix')
@raises(fits.VerifyError)
def test_exception_on_verification_error(self):
hdu = fits.ImageHDU()
del hdu.header['XTENSION']
hdu.verify('exception')
def test_ignore_verification_error(self):
hdu = fits.ImageHDU()
# The default here would be to issue a warning; ensure that no warnings
# or exceptions are raised
with catch_warnings():
warnings.simplefilter('error')
del hdu.header['NAXIS']
try:
hdu.verify('ignore')
except Exception as exc:
self.fail('An exception occurred when the verification error '
'should have been ignored: {}'.format(exc))
# Make sure the error wasn't fixed either, silently or otherwise
assert 'NAXIS' not in hdu.header
@raises(ValueError)
def test_unrecognized_verify_option(self):
hdu = fits.ImageHDU()
hdu.verify('foobarbaz')
def test_errlist_basic(self):
# Just some tests to make sure that _ErrList is setup correctly.
# No arguments
error_list = fits.verify._ErrList()
assert error_list == []
        # Some contents - this does not exercise any real verification, it
        # just makes sure the contents are kept.
error_list = fits.verify._ErrList([1, 2, 3])
assert error_list == [1, 2, 3]
def test_combined_verify_options(self):
"""
Test verify options like fix+ignore.
"""
def make_invalid_hdu():
hdu = fits.ImageHDU()
# Add one keyword to the header that contains a fixable defect, and one
# with an unfixable defect
c1 = fits.Card.fromstring("test = ' test'")
c2 = fits.Card.fromstring("P.I. = ' Hubble'")
hdu.header.append(c1)
hdu.header.append(c2)
return hdu
# silentfix+ignore should be completely silent
hdu = make_invalid_hdu()
with catch_warnings():
warnings.simplefilter('error')
try:
hdu.verify('silentfix+ignore')
except Exception as exc:
self.fail('An exception occurred when the verification error '
'should have been ignored: {}'.format(exc))
# silentfix+warn should be quiet about the fixed HDU and only warn
# about the unfixable one
hdu = make_invalid_hdu()
with catch_warnings() as w:
hdu.verify('silentfix+warn')
assert len(w) == 4
assert 'Illegal keyword name' in str(w[2].message)
# silentfix+exception should only mention the unfixable error in the
# exception
hdu = make_invalid_hdu()
try:
hdu.verify('silentfix+exception')
except fits.VerifyError as exc:
assert 'Illegal keyword name' in str(exc)
assert 'not upper case' not in str(exc)
else:
self.fail('An exception should have been raised.')
# fix+ignore is not too useful, but it should warn about the fixed
# problems while saying nothing about the unfixable problems
hdu = make_invalid_hdu()
with catch_warnings() as w:
hdu.verify('fix+ignore')
assert len(w) == 4
assert 'not upper case' in str(w[2].message)
# fix+warn
hdu = make_invalid_hdu()
with catch_warnings() as w:
hdu.verify('fix+warn')
assert len(w) == 6
assert 'not upper case' in str(w[2].message)
assert 'Illegal keyword name' in str(w[4].message)
# fix+exception
hdu = make_invalid_hdu()
try:
hdu.verify('fix+exception')
except fits.VerifyError as exc:
assert 'Illegal keyword name' in str(exc)
assert 'not upper case' in str(exc)
else:
self.fail('An exception should have been raised.')
def test_getext(self):
"""
Test the various different ways of specifying an extension header in
the convenience functions.
"""
hl, ext = _getext(self.data('test0.fits'), 'readonly', 1)
assert ext == 1
pytest.raises(ValueError, _getext, self.data('test0.fits'), 'readonly',
1, 2)
pytest.raises(ValueError, _getext, self.data('test0.fits'), 'readonly',
(1, 2))
pytest.raises(ValueError, _getext, self.data('test0.fits'), 'readonly',
'sci', 'sci')
pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
1, 2, 3)
hl, ext = _getext(self.data('test0.fits'), 'readonly', ext=1)
assert ext == 1
hl, ext = _getext(self.data('test0.fits'), 'readonly', ext=('sci', 2))
assert ext == ('sci', 2)
pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
1, ext=('sci', 2), extver=3)
pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
ext=('sci', 2), extver=3)
hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci')
assert ext == ('sci', 1)
hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci', 1)
assert ext == ('sci', 1)
hl, ext = _getext(self.data('test0.fits'), 'readonly', ('sci', 1))
assert ext == ('sci', 1)
hl, ext = _getext(self.data('test0.fits'), 'readonly', 'sci',
extver=1, do_not_scale_image_data=True)
assert ext == ('sci', 1)
pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
'sci', ext=1)
pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
'sci', 1, extver=2)
hl, ext = _getext(self.data('test0.fits'), 'readonly', extname='sci')
assert ext == ('sci', 1)
hl, ext = _getext(self.data('test0.fits'), 'readonly', extname='sci',
extver=1)
assert ext == ('sci', 1)
pytest.raises(TypeError, _getext, self.data('test0.fits'), 'readonly',
extver=1)
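    # Aside (illustrative summary of the cases above): an extension can be
    # given positionally as an index (1), a name ('sci'), or a (name, ver)
    # tuple, or via the ``ext``/``extname``/``extver`` keywords -- but
    # specifying the same information both positionally and by keyword
    # raises TypeError, and ambiguous combinations raise ValueError.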
def test_extension_name_case_sensitive(self):
"""
Tests that setting fits.conf.extension_name_case_sensitive at
runtime works.
"""
hdu = fits.ImageHDU()
hdu.name = 'sCi'
assert hdu.name == 'SCI'
assert hdu.header['EXTNAME'] == 'SCI'
with fits.conf.set_temp('extension_name_case_sensitive', True):
hdu = fits.ImageHDU()
hdu.name = 'sCi'
assert hdu.name == 'sCi'
assert hdu.header['EXTNAME'] == 'sCi'
hdu.name = 'sCi'
assert hdu.name == 'SCI'
assert hdu.header['EXTNAME'] == 'SCI'
def test_hdu_fromstring(self):
"""
Tests creating a fully-formed HDU object from a string containing the
bytes of the HDU.
"""
dat = open(self.data('test0.fits'), 'rb').read()
offset = 0
with fits.open(self.data('test0.fits')) as hdul:
hdulen = hdul[0]._data_offset + hdul[0]._data_size
hdu = fits.PrimaryHDU.fromstring(dat[:hdulen])
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header == hdu.header
assert hdu.data is None
hdu.header['TEST'] = 'TEST'
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdu, fits.PrimaryHDU)
assert hdul[0].header[:-1] == hdu.header[:-1]
assert hdul[0].header['TEST'] == 'TEST'
assert hdu.data is None
        with fits.open(self.data('test0.fits')) as hdul:
for ext_hdu in hdul[1:]:
offset += hdulen
hdulen = len(str(ext_hdu.header)) + ext_hdu._data_size
hdu = fits.ImageHDU.fromstring(dat[offset:offset + hdulen])
assert isinstance(hdu, fits.ImageHDU)
assert ext_hdu.header == hdu.header
assert (ext_hdu.data == hdu.data).all()
def test_nonstandard_hdu(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/157
Tests that "Nonstandard" HDUs with SIMPLE = F are read and written
without prepending a superfluous and unwanted standard primary HDU.
"""
data = np.arange(100, dtype=np.uint8)
hdu = fits.PrimaryHDU(data=data)
hdu.header['SIMPLE'] = False
hdu.writeto(self.temp('test.fits'))
info = [(0, '', 1, 'NonstandardHDU', 5, (), '', '')]
with fits.open(self.temp('test.fits')) as hdul:
assert hdul.info(output=False) == info
# NonstandardHDUs just treat the data as an unspecified array of
# bytes. The first 100 bytes should match the original data we
# passed in...the rest should be zeros padding out the rest of the
# FITS block
assert (hdul[0].data[:100] == data).all()
assert (hdul[0].data[100:] == 0).all()
def test_extname(self):
"""Test getting/setting the EXTNAME of an HDU."""
h1 = fits.PrimaryHDU()
assert h1.name == 'PRIMARY'
# Normally a PRIMARY HDU should not have an EXTNAME, though it should
# have a default .name attribute
assert 'EXTNAME' not in h1.header
# The current version of the FITS standard does allow PRIMARY HDUs to
# have an EXTNAME, however.
h1.name = 'NOTREAL'
assert h1.name == 'NOTREAL'
assert h1.header.get('EXTNAME') == 'NOTREAL'
# Updating the EXTNAME in the header should update the .name
h1.header['EXTNAME'] = 'TOOREAL'
assert h1.name == 'TOOREAL'
# If we delete an EXTNAME keyword from a PRIMARY HDU it should go back
# to the default
del h1.header['EXTNAME']
assert h1.name == 'PRIMARY'
# For extension HDUs the situation is a bit simpler:
h2 = fits.ImageHDU()
assert h2.name == ''
assert 'EXTNAME' not in h2.header
h2.name = 'HELLO'
assert h2.name == 'HELLO'
assert h2.header.get('EXTNAME') == 'HELLO'
h2.header['EXTNAME'] = 'GOODBYE'
assert h2.name == 'GOODBYE'
def test_extver_extlevel(self):
"""Test getting/setting the EXTVER and EXTLEVEL of and HDU."""
# EXTVER and EXTNAME work exactly the same; their semantics are, for
# now, to be inferred by the user. Although they should never be less
# than 1, the standard does not explicitly forbid any value so long as
# it's an integer
h1 = fits.PrimaryHDU()
assert h1.ver == 1
assert h1.level == 1
assert 'EXTVER' not in h1.header
assert 'EXTLEVEL' not in h1.header
h1.ver = 2
assert h1.header.get('EXTVER') == 2
h1.header['EXTVER'] = 3
assert h1.ver == 3
del h1.header['EXTVER']
        assert h1.ver == 1
h1.level = 2
assert h1.header.get('EXTLEVEL') == 2
h1.header['EXTLEVEL'] = 3
assert h1.level == 3
del h1.header['EXTLEVEL']
assert h1.level == 1
pytest.raises(TypeError, setattr, h1, 'ver', 'FOO')
pytest.raises(TypeError, setattr, h1, 'level', 'BAR')
def test_consecutive_writeto(self):
"""
Regression test for an issue where calling writeto twice on the same
HDUList could write a corrupted file.
https://github.com/spacetelescope/PyFITS/issues/40 is actually a
        particular instance of this problem, though it isn't unique to sys.stdout.
"""
with fits.open(self.data('test0.fits')) as hdul1:
# Add a bunch of header keywords so that the data will be forced to
# new offsets within the file:
for idx in range(40):
hdul1[1].header['TEST{}'.format(idx)] = 'test'
hdul1.writeto(self.temp('test1.fits'))
hdul1.writeto(self.temp('test2.fits'))
# Open a second handle to the original file and compare it to hdul1
# (We only compare part of the one header that was modified)
# Compare also with the second writeto output
with fits.open(self.data('test0.fits')) as hdul2:
with fits.open(self.temp('test2.fits')) as hdul3:
for hdul in (hdul1, hdul3):
for idx, hdus in enumerate(zip(hdul2, hdul)):
hdu2, hdu = hdus
if idx != 1:
assert hdu.header == hdu2.header
else:
assert (hdu2.header ==
hdu.header[:len(hdu2.header)])
assert np.all(hdu.data == hdu2.data)
class TestConvenienceFunctions(FitsTestCase):
def test_writeto(self):
"""
Simple test for writing a trivial header and some data to a file
with the `writeto()` convenience function.
"""
data = np.zeros((100, 100))
header = fits.Header()
fits.writeto(self.temp('array.fits'), data, header=header,
overwrite=True)
hdul = fits.open(self.temp('array.fits'))
assert len(hdul) == 1
assert (data == hdul[0].data).all()
def test_writeto_2(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/107
Test of `writeto()` with a trivial header containing a single keyword.
"""
data = np.zeros((100, 100))
header = fits.Header()
header.set('CRPIX1', 1.)
fits.writeto(self.temp('array.fits'), data, header=header,
overwrite=True, output_verify='silentfix')
hdul = fits.open(self.temp('array.fits'))
assert len(hdul) == 1
assert (data == hdul[0].data).all()
assert 'CRPIX1' in hdul[0].header
assert hdul[0].header['CRPIX1'] == 1.0
class TestFileFunctions(FitsTestCase):
"""
Tests various basic I/O operations, specifically in the
astropy.io.fits.file._File class.
"""
def test_open_nonexistent(self):
"""Test that trying to open a non-existent file results in an
OSError (and not some other arbitrary exception).
"""
try:
fits.open(self.temp('foobar.fits'))
except OSError as e:
assert 'No such file or directory' in str(e)
# But opening in ostream or append mode should be okay, since they
# allow writing new files
for mode in ('ostream', 'append'):
with fits.open(self.temp('foobar.fits'), mode=mode) as h:
pass
assert os.path.exists(self.temp('foobar.fits'))
os.remove(self.temp('foobar.fits'))
def test_open_file_handle(self):
# Make sure we can open a FITS file from an open file handle
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle) as fitsfile:
pass
with open(self.temp('temp.fits'), 'wb') as handle:
with fits.open(handle, mode='ostream') as fitsfile:
pass
# Opening without explicitly specifying binary mode should fail
with pytest.raises(ValueError):
with open(self.data('test0.fits')) as handle:
with fits.open(handle) as fitsfile:
pass
# All of these read modes should fail
for mode in ['r', 'rt']:
with pytest.raises(ValueError):
with open(self.data('test0.fits'), mode=mode) as handle:
with fits.open(handle) as fitsfile:
pass
# These update or write modes should fail as well
for mode in ['w', 'wt', 'w+', 'wt+', 'r+', 'rt+',
'a', 'at', 'a+', 'at+']:
with pytest.raises(ValueError):
with open(self.temp('temp.fits'), mode=mode) as handle:
with fits.open(handle) as fitsfile:
pass
def test_fits_file_handle_mode_combo(self):
# This should work fine since no mode is given
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle) as fitsfile:
pass
# This should work fine since the modes are compatible
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle, mode='readonly') as fitsfile:
pass
# This should not work since the modes conflict
with pytest.raises(ValueError):
with open(self.data('test0.fits'), 'rb') as handle:
with fits.open(handle, mode='ostream') as fitsfile:
pass
def test_open_from_url(self):
import urllib.request
file_url = "file:///" + self.data('test0.fits')
with urllib.request.urlopen(file_url) as urlobj:
with fits.open(urlobj) as fits_handle:
pass
# It will not be possible to write to a file that is from a URL object
for mode in ('ostream', 'append', 'update'):
with pytest.raises(ValueError):
with urllib.request.urlopen(file_url) as urlobj:
with fits.open(urlobj, mode=mode) as fits_handle:
pass
@pytest.mark.remote_data(source='astropy')
def test_open_from_remote_url(self):
import urllib.request
for dataurl in (conf.dataurl, conf.dataurl_mirror):
remote_url = '{}/{}'.format(dataurl, 'allsky/allsky_rosat.fits')
try:
with urllib.request.urlopen(remote_url) as urlobj:
with fits.open(urlobj) as fits_handle:
assert len(fits_handle) == 1
for mode in ('ostream', 'append', 'update'):
with pytest.raises(ValueError):
with urllib.request.urlopen(remote_url) as urlobj:
with fits.open(urlobj, mode=mode) as fits_handle:
assert len(fits_handle) == 1
except (urllib.error.HTTPError, urllib.error.URLError):
continue
else:
break
else:
raise Exception("Could not download file")
def test_open_gzipped(self):
gzip_file = self._make_gzip_file()
with ignore_warnings():
with fits.open(gzip_file) as fits_handle:
assert fits_handle._file.compression == 'gzip'
assert len(fits_handle) == 5
with fits.open(gzip.GzipFile(gzip_file)) as fits_handle:
assert fits_handle._file.compression == 'gzip'
assert len(fits_handle) == 5
def test_open_gzipped_from_handle(self):
with open(self._make_gzip_file(), 'rb') as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == 'gzip'
def test_detect_gzipped(self):
"""Test detection of a gzip file when the extension is not .gz."""
with ignore_warnings():
with fits.open(self._make_gzip_file('test0.fz')) as fits_handle:
assert fits_handle._file.compression == 'gzip'
assert len(fits_handle) == 5
def test_writeto_append_mode_gzip(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/33
Check that a new GzipFile opened in append mode can be used to write
out a new FITS file.
"""
# Note: when opening a GzipFile the 'b+' is superfluous, but this was
# still how the original test case looked
# Note: with statement not supported on GzipFile in older Python
# versions
fileobj = gzip.GzipFile(self.temp('test.fits.gz'), 'ab+')
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp('test.fits.gz')) as hdul:
assert hdul[0].header == h.header
def test_fits_update_mode_gzip(self):
"""Test updating a GZipped FITS file"""
with fits.open(self._make_gzip_file('update.gz'), mode='update') as fits_handle:
hdu = fits.ImageHDU(data=[x for x in range(100)])
fits_handle.append(hdu)
with fits.open(self.temp('update.gz')) as new_handle:
assert len(new_handle) == 6
assert (new_handle[-1].data == [x for x in range(100)]).all()
def test_fits_append_mode_gzip(self):
"""Make sure that attempting to open an existing GZipped FITS file in
'append' mode raises an error"""
with pytest.raises(OSError):
with fits.open(self._make_gzip_file('append.gz'), mode='append') as fits_handle:
pass
def test_open_bzipped(self):
bzip_file = self._make_bzip2_file()
with ignore_warnings():
with fits.open(bzip_file) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
with fits.open(bz2.BZ2File(bzip_file)) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
def test_open_bzipped_from_handle(self):
with open(self._make_bzip2_file(), 'rb') as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
def test_detect_bzipped(self):
"""Test detection of a bzip2 file when the extension is not .bz2."""
with ignore_warnings():
with fits.open(self._make_bzip2_file('test0.xx')) as fits_handle:
assert fits_handle._file.compression == 'bzip2'
assert len(fits_handle) == 5
def test_writeto_bzip2_fileobj(self):
"""Test writing to a bz2.BZ2File file like object"""
fileobj = bz2.BZ2File(self.temp('test.fits.bz2'), 'w')
h = fits.PrimaryHDU()
try:
h.writeto(fileobj)
finally:
fileobj.close()
with fits.open(self.temp('test.fits.bz2')) as hdul:
assert hdul[0].header == h.header
def test_writeto_bzip2_filename(self):
"""Test writing to a bzip2 file by name"""
filename = self.temp('testname.fits.bz2')
h = fits.PrimaryHDU()
h.writeto(filename)
with fits.open(self.temp('testname.fits.bz2')) as hdul:
assert hdul[0].header == h.header
def test_open_zipped(self):
zip_file = self._make_zip_file()
with ignore_warnings():
with fits.open(zip_file) as fits_handle:
assert fits_handle._file.compression == 'zip'
assert len(fits_handle) == 5
with fits.open(zipfile.ZipFile(zip_file)) as fits_handle:
assert fits_handle._file.compression == 'zip'
assert len(fits_handle) == 5
def test_open_zipped_from_handle(self):
with open(self._make_zip_file(), 'rb') as handle:
with fits.open(handle) as fits_handle:
assert fits_handle._file.compression == 'zip'
assert len(fits_handle) == 5
def test_detect_zipped(self):
"""Test detection of a zip file when the extension is not .zip."""
zf = self._make_zip_file(filename='test0.fz')
with ignore_warnings():
assert len(fits.open(zf)) == 5
def test_open_zipped_writeable(self):
"""Opening zipped files in a writeable mode should fail."""
zf = self._make_zip_file()
pytest.raises(OSError, fits.open, zf, 'update')
pytest.raises(OSError, fits.open, zf, 'append')
zf = zipfile.ZipFile(zf, 'a')
pytest.raises(OSError, fits.open, zf, 'update')
pytest.raises(OSError, fits.open, zf, 'append')
def test_read_open_astropy_gzip_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2774
This tests reading from a ``GzipFile`` object from Astropy's
compatibility copy of the ``gzip`` module.
"""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
@raises(OSError)
def test_open_multiple_member_zipfile(self):
"""
Opening zip files containing more than one member files should fail
as there's no obvious way to specify which file is the FITS file to
read.
"""
zfile = zipfile.ZipFile(self.temp('test0.zip'), 'w')
zfile.write(self.data('test0.fits'))
zfile.writestr('foo', 'bar')
zfile.close()
fits.open(zfile.filename)
def test_read_open_file(self):
"""Read from an existing file object."""
with open(self.data('test0.fits'), 'rb') as f:
assert len(fits.open(f)) == 5
def test_read_closed_file(self):
"""Read from an existing file object that's been closed."""
f = open(self.data('test0.fits'), 'rb')
f.close()
assert len(fits.open(f)) == 5
def test_read_open_gzip_file(self):
"""Read from an open gzip file object."""
gf = gzip.GzipFile(self._make_gzip_file())
try:
assert len(fits.open(gf)) == 5
finally:
gf.close()
def test_open_gzip_file_for_writing(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/195."""
gf = self._make_gzip_file()
with fits.open(gf, mode='update') as h:
h[0].header['EXPFLAG'] = 'ABNORMAL'
h[1].data[0, 0] = 1
with fits.open(gf) as h:
            # Just to make sure the update worked; if updates work,
            # normal writes should work too...
assert h[0].header['EXPFLAG'] == 'ABNORMAL'
assert h[1].data[0, 0] == 1
def test_write_read_gzip_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2794
Ensure files written through gzip are readable.
"""
data = np.arange(100)
hdu = fits.PrimaryHDU(data=data)
hdu.writeto(self.temp('test.fits.gz'))
with open(self.temp('test.fits.gz'), 'rb') as f:
assert f.read(3) == GZIP_MAGIC
with fits.open(self.temp('test.fits.gz')) as hdul:
assert np.all(hdul[0].data == data)
def test_read_file_like_object(self):
"""Test reading a FITS file from a file-like object."""
filelike = io.BytesIO()
with open(self.data('test0.fits'), 'rb') as f:
filelike.write(f.read())
filelike.seek(0)
with ignore_warnings():
assert len(fits.open(filelike)) == 5
def test_updated_file_permissions(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/79
Tests that when a FITS file is modified in update mode, the file
permissions are preserved.
"""
filename = self.temp('test.fits')
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul.writeto(filename)
old_mode = os.stat(filename).st_mode
hdul = fits.open(filename, mode='update')
hdul.insert(1, fits.ImageHDU())
hdul.flush()
hdul.close()
assert old_mode == os.stat(filename).st_mode
def test_fileobj_mode_guessing(self):
"""Tests whether a file opened without a specified io.fits mode
('readonly', etc.) is opened in a mode appropriate for the given file
object.
"""
self.copy_file('test0.fits')
# Opening in text mode should outright fail
for mode in ('r', 'w', 'a'):
with open(self.temp('test0.fits'), mode) as f:
pytest.raises(ValueError, fits.HDUList.fromfile, f)
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'rb') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'readonly'
for mode in ('wb', 'ab'):
with open(self.temp('test0.fits'), mode) as f:
with fits.HDUList.fromfile(f) as h:
# Basically opening empty files for output streaming
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'wb+') as f:
with fits.HDUList.fromfile(f) as h:
# wb+ still causes an existing file to be overwritten so there
# are no HDUs
assert len(h) == 0
# Need to re-copy the file since opening it in 'w' mode blew it away
self.copy_file('test0.fits')
with open(self.temp('test0.fits'), 'rb+') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'update'
with open(self.temp('test0.fits'), 'ab+') as f:
with fits.HDUList.fromfile(f) as h:
assert h.fileinfo(0)['filemode'] == 'append'
def test_mmap_unwriteable(self):
"""Regression test for https://github.com/astropy/astropy/issues/968
Temporarily patches mmap.mmap to exhibit platform-specific bad
behavior.
"""
class MockMmap(mmap.mmap):
def flush(self):
raise OSError('flush is broken on this platform')
old_mmap = mmap.mmap
mmap.mmap = MockMmap
# Force the mmap test to be rerun
_File.__dict__['_mmap_available']._cache.clear()
try:
self.copy_file('test0.fits')
with catch_warnings() as w:
with fits.open(self.temp('test0.fits'), mode='update',
memmap=True) as h:
h[1].data[0, 0] = 999
assert len(w) == 1
assert 'mmap.flush is unavailable' in str(w[0].message)
# Double check that writing without mmap still worked
with fits.open(self.temp('test0.fits')) as h:
assert h[1].data[0, 0] == 999
finally:
mmap.mmap = old_mmap
_File.__dict__['_mmap_available']._cache.clear()
def test_mmap_closing(self):
"""
Tests that the mmap reference is closed/removed when there aren't any
HDU data references left.
"""
if not _File._mmap_available:
pytest.xfail('not expected to work on platforms without mmap '
'support')
with fits.open(self.data('test0.fits'), memmap=True) as hdul:
assert hdul._file._mmap is None
hdul[1].data
assert hdul._file._mmap is not None
del hdul[1].data
# Should be no more references to data in the file so close the
# mmap
assert hdul._file._mmap is None
hdul[1].data
hdul[2].data
del hdul[1].data
            # hdul[2].data is still referenced, so keep the mmap open
assert hdul._file._mmap is not None
del hdul[2].data
assert hdul._file._mmap is None
assert hdul._file._mmap is None
with fits.open(self.data('test0.fits'), memmap=True) as hdul:
hdul[1].data
# When the only reference to the data is on the hdu object, and the
# hdulist it belongs to has been closed, the mmap should be closed as
# well
assert hdul._file._mmap is None
with fits.open(self.data('test0.fits'), memmap=True) as hdul:
data = hdul[1].data
# also make a copy
data_copy = data.copy()
# The HDUList is closed; in fact, get rid of it completely
del hdul
# The data array should still work though...
assert np.all(data == data_copy)
def test_uncloseable_file(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2356
Demonstrates that FITS files can still be read from "file-like" objects
that don't have an obvious "open" or "closed" state.
"""
class MyFileLike:
def __init__(self, foobar):
self._foobar = foobar
def read(self, n):
return self._foobar.read(n)
def seek(self, offset, whence=os.SEEK_SET):
self._foobar.seek(offset, whence)
def tell(self):
return self._foobar.tell()
with open(self.data('test0.fits'), 'rb') as f:
fileobj = MyFileLike(f)
with fits.open(fileobj) as hdul1:
with fits.open(self.data('test0.fits')) as hdul2:
assert hdul1.info(output=False) == hdul2.info(output=False)
for hdu1, hdu2 in zip(hdul1, hdul2):
assert hdu1.header == hdu2.header
if hdu1.data is not None and hdu2.data is not None:
assert np.all(hdu1.data == hdu2.data)
def test_write_bytesio_discontiguous(self):
"""
Regression test related to
https://github.com/astropy/astropy/issues/2794#issuecomment-55441539
Demonstrates that writing an HDU containing a discontiguous Numpy array
should work properly.
"""
data = np.arange(100)[::3]
hdu = fits.PrimaryHDU(data=data)
fileobj = io.BytesIO()
hdu.writeto(fileobj)
fileobj.seek(0)
with fits.open(fileobj) as h:
assert np.all(h[0].data == data)
def test_write_bytesio(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2463
        Test against `io.BytesIO`; `io.StringIO` is not supported.
"""
self._test_write_string_bytes_io(io.BytesIO())
@pytest.mark.skipif(str('sys.platform.startswith("win32")'))
def test_filename_with_colon(self):
"""
Test reading and writing a file with a colon in the filename.
Regression test for https://github.com/astropy/astropy/issues/3122
"""
# Skip on Windows since colons in filenames makes NTFS sad.
filename = 'APEXHET.2014-04-01T15:18:01.000.fits'
hdu = fits.PrimaryHDU(data=np.arange(10))
hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert np.all(hdul[0].data == hdu.data)
def test_writeto_full_disk(self, monkeypatch):
"""
Test that it gives a readable error when trying to write an hdulist
to a full disk.
"""
def _writeto(self, array):
raise OSError("Fake error raised when writing file.")
def get_free_space_in_dir(path):
return 0
with pytest.raises(OSError) as exc:
monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writeto", _writeto)
monkeypatch.setattr(data, "get_free_space_in_dir", get_free_space_in_dir)
n = np.arange(0, 1000, dtype='int64')
hdu = fits.PrimaryHDU(n)
hdulist = fits.HDUList(hdu)
filename = self.temp('test.fits')
with open(filename, mode='wb') as fileobj:
hdulist.writeto(fileobj)
assert ("Not enough space on disk: requested 8000, available 0. "
"Fake error raised when writing file.") == exc.value.args[0]
def test_flush_full_disk(self, monkeypatch):
"""
Test that it gives a readable error when trying to update an hdulist
to a full disk.
"""
filename = self.temp('test.fits')
hdul = [fits.PrimaryHDU(), fits.ImageHDU()]
hdul = fits.HDUList(hdul)
hdul[0].data = np.arange(0, 1000, dtype='int64')
hdul.writeto(filename)
def _writedata(self, fileobj):
raise OSError("Fake error raised when writing file.")
def get_free_space_in_dir(path):
return 0
monkeypatch.setattr(fits.hdu.base._BaseHDU, "_writedata", _writedata)
monkeypatch.setattr(data, "get_free_space_in_dir",
get_free_space_in_dir)
with pytest.raises(OSError) as exc:
with fits.open(filename, mode='update') as hdul:
hdul[0].data = np.arange(0, 1000, dtype='int64')
hdul.insert(1, fits.ImageHDU())
hdul.flush()
assert ("Not enough space on disk: requested 8000, available 0. "
"Fake error raised when writing file.") == exc.value.args[0]
def _test_write_string_bytes_io(self, fileobj):
"""
Implemented for both test_write_stringio and test_write_bytesio.
"""
with fits.open(self.data('test0.fits')) as hdul:
hdul.writeto(fileobj)
hdul2 = fits.HDUList.fromstring(fileobj.getvalue())
assert FITSDiff(hdul, hdul2).identical
def _make_gzip_file(self, filename='test0.fits.gz'):
gzfile = self.temp(filename)
with open(self.data('test0.fits'), 'rb') as f:
gz = gzip.open(gzfile, 'wb')
gz.write(f.read())
gz.close()
return gzfile
def _make_zip_file(self, mode='copyonwrite', filename='test0.fits.zip'):
zfile = zipfile.ZipFile(self.temp(filename), 'w')
zfile.write(self.data('test0.fits'))
zfile.close()
return zfile.filename
def _make_bzip2_file(self, filename='test0.fits.bz2'):
bzfile = self.temp(filename)
with open(self.data('test0.fits'), 'rb') as f:
bz = bz2.BZ2File(bzfile, 'w')
bz.write(f.read())
bz.close()
return bzfile
class TestStreamingFunctions(FitsTestCase):
"""Test functionality of the StreamingHDU class."""
def test_streaming_hdu(self):
shdu = self._make_streaming_hdu(self.temp('new.fits'))
assert isinstance(shdu.size, int)
assert shdu.size == 100
@raises(ValueError)
def test_streaming_hdu_file_wrong_mode(self):
"""
Test that streaming an HDU to a file opened in the wrong mode fails as
expected.
"""
with open(self.temp('new.fits'), 'wb') as f:
header = fits.Header()
fits.StreamingHDU(f, header)
def test_streaming_hdu_write_file(self):
"""Test streaming an HDU to an open file object."""
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
hdul = fits.open(self.temp('new.fits'))
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_write_file_like(self):
"""Test streaming an HDU to an open file-like object."""
arr = np.zeros((5, 5), dtype=np.int32)
# The file-like object underlying a StreamingHDU must be in binary mode
sf = io.BytesIO()
shdu = self._make_streaming_hdu(sf)
shdu.write(arr)
assert shdu.writecomplete
assert shdu.size == 100
sf.seek(0)
hdul = fits.open(sf)
assert len(hdul) == 1
assert (hdul[0].data == arr).all()
def test_streaming_hdu_append_extension(self):
arr = np.zeros((5, 5), dtype=np.int32)
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
# Doing this again should update the file with an extension
with open(self.temp('new.fits'), 'ab+') as f:
shdu = self._make_streaming_hdu(f)
shdu.write(arr)
def test_fix_invalid_extname(self, capsys):
phdu = fits.PrimaryHDU()
ihdu = fits.ImageHDU()
ihdu.header['EXTNAME'] = 12345678
hdul = fits.HDUList([phdu, ihdu])
pytest.raises(fits.VerifyError, hdul.writeto, self.temp('temp.fits'),
output_verify='exception')
hdul.writeto(self.temp('temp.fits'), output_verify='fix')
with fits.open(self.temp('temp.fits')):
assert hdul[1].name == '12345678'
assert hdul[1].header['EXTNAME'] == '12345678'
def _make_streaming_hdu(self, fileobj):
hd = fits.Header()
hd['SIMPLE'] = (True, 'conforms to FITS standard')
hd['BITPIX'] = (32, 'array data type')
hd['NAXIS'] = (2, 'number of array dimensions')
hd['NAXIS1'] = 5
hd['NAXIS2'] = 5
hd['EXTEND'] = True
return fits.StreamingHDU(fileobj, hd)
def test_blank_ignore(self):
with fits.open(self.data('blank.fits'), ignore_blank=True) as f:
assert f[0].data.flat[0] == 2
def test_error_if_memmap_impossible(self):
pth = self.data('blank.fits')
with pytest.raises(ValueError):
fits.open(pth, memmap=True)[0].data
# However, it should not fail if do_not_scale_image_data was used:
# See https://github.com/astropy/astropy/issues/3766
hdul = fits.open(pth, memmap=True, do_not_scale_image_data=True)
hdul[0].data # Just make sure it doesn't crash
|
332fccf230a031c9b57f544f2c9a19dae0fbcd5d6808df09784c3b5deaac8da6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
from . import FitsTestCase
from ..fitstime import GLOBAL_TIME_INFO, time_to_fits, is_time_column_keyword
from ....coordinates import EarthLocation
from ....io import fits
from ....table import Table, QTable
from ....time import Time, TimeDelta
from ....time.core import BARYCENTRIC_SCALES
from ....time.formats import FITS_DEPRECATED_SCALES
from ....tests.helper import catch_warnings
class TestFitsTime(FitsTestCase):
def setup_class(self):
self.time = np.array(['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00'])
self.time_3d = np.array([[[1, 2], [1, 3], [3, 4]]])
def test_is_time_column_keyword(self):
# Time column keyword without column number
assert is_time_column_keyword('TRPOS') is False
# Global time column keyword
assert is_time_column_keyword('TIMESYS') is False
# Valid time column keyword
assert is_time_column_keyword('TRPOS12') is True
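    # Aside (illustrative): per-column time keywords are the base keyword
    # followed by a 1-based column number, e.g. TRPOS12 refers to column 12;
    # a bare base keyword (TRPOS) or a global keyword (TIMESYS) is not a
    # time *column* keyword, as asserted above.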
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_to_fits_loc(self, table_types):
"""
Test all the unusual conditions for locations of ``Time``
columns in a ``Table``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc')
t['b'] = Time(self.time, format='isot', scale='tt')
# Check that vectorized location is stored using Green Bank convention
t['a'].location = EarthLocation([1., 2.], [2., 3.], [3., 4.],
unit='Mm')
table, hdr = time_to_fits(t)
assert (table['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')).all()
assert (table['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')).all()
assert (table['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')).all()
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert (tm['a'].location == t['a'].location).all()
assert tm['b'].location == t['b'].location
# Check that multiple Time columns with different locations raise an exception
t['a'].location = EarthLocation(1, 2, 3)
t['b'].location = EarthLocation(2, 3, 4)
with pytest.raises(ValueError) as err:
table, hdr = time_to_fits(t)
assert 'Multiple Time Columns with different geocentric' in str(err.value)
# Check that Time column with no location specified will assume global location
t['b'].location = None
with catch_warnings() as w:
table, hdr = time_to_fits(t)
assert len(w) == 1
assert str(w[0].message).startswith('Time Column "b" has no specified '
'location, but global Time Position '
'is present')
# Check that multiple Time columns with same location can be written
t['b'].location = EarthLocation(1, 2, 3)
with catch_warnings() as w:
table, hdr = time_to_fits(t)
assert len(w) == 0
# Check compatibility of Time Scales and Reference Positions
for scale in BARYCENTRIC_SCALES:
t.replace_column('a', getattr(t['a'], scale))
with catch_warnings() as w:
table, hdr = time_to_fits(t)
assert len(w) == 1
assert str(w[0].message).startswith('Earth Location "TOPOCENTER" '
'for Time Column')
# Check that multidimensional vectorized location (ndim=3) is stored
# using Green Bank convention.
t = table_types()
location = EarthLocation([[[1., 2.], [1., 3.], [3., 4.]]],
[[[1., 2.], [1., 3.], [3., 4.]]],
[[[1., 2.], [1., 3.], [3., 4.]]], unit='Mm')
t['a'] = Time(self.time_3d, format='jd', location=location)
table, hdr = time_to_fits(t)
assert (table['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')).all()
assert (table['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')).all()
assert (table['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')).all()
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert (tm['a'].location == t['a'].location).all()
# Check that singular location with ndim>1 can be written
t['a'] = Time(self.time, location=EarthLocation([[[1.]]], [[[2.]]],
[[[3.]]], unit='Mm'))
table, hdr = time_to_fits(t)
assert hdr['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')
assert hdr['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')
assert hdr['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
assert tm['a'].location == t['a'].location
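    # Aside (illustrative): the "Green Bank convention" used above stores a
    # scalar observatory location in the OBSGEO-X/Y/Z *header* keywords,
    # while a vectorized (per-row) location becomes OBSGEO-X/Y/Z *columns*
    # of the table itself, one Cartesian ITRS triple per row.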
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_to_fits_header(self, table_types):
"""
Test the header and metadata returned by ``time_to_fits``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc',
location=EarthLocation(-2446354,
4237210, 4077985, unit='m'))
        t['b'] = Time([1, 2], format='cxcsec', scale='tt')
        ideal_col_hdr = {'OBSGEO-X': t['a'].location.x.value,
                         'OBSGEO-Y': t['a'].location.y.value,
                         'OBSGEO-Z': t['a'].location.z.value}
table, hdr = time_to_fits(t)
# Check the global time keywords in hdr
for key, value in GLOBAL_TIME_INFO.items():
assert hdr[key] == value[0]
assert hdr.comments[key] == value[1]
hdr.remove(key)
for key, value in ideal_col_hdr.items():
assert hdr[key] == value
hdr.remove(key)
# Check the column-specific time metadata
coord_info = table.meta['__coordinate_columns__']
for colname in coord_info:
assert coord_info[colname]['coord_type'] == t[colname].scale.upper()
assert coord_info[colname]['coord_unit'] == 'd'
assert coord_info['a']['time_ref_pos'] == 'TOPOCENTER'
assert len(hdr) == 0
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_fits_to_time_meta(self, table_types):
"""
Test that the relevant global time metadata is read into
``Table.meta`` as ``Time``.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc')
t.meta['DATE'] = '1999-01-01T00:00:00'
t.meta['MJD-OBS'] = 56670
# Test for default write behaviour (full precision) and read it
# back using native astropy objects; thus, ensure its round-trip
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Test DATE
assert isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'].value == t.meta['DATE'] + '(UTC)'
assert tm.meta['DATE'].format == 'fits'
# Default time scale according to the FITS standard is UTC
assert tm.meta['DATE'].scale == 'utc'
# Test MJD-xxx
assert isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']
assert tm.meta['MJD-OBS'].format == 'mjd'
assert tm.meta['MJD-OBS'].scale == 'utc'
# Explicitly specified Time Scale
t.meta['TIMESYS'] = 'ET'
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Test DATE
assert isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'].value == t.meta['DATE'] + '(UTC)'
assert tm.meta['DATE'].scale == 'utc'
# Test MJD-xxx
assert isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']
assert tm.meta['MJD-OBS'].scale == FITS_DEPRECATED_SCALES[t.meta['TIMESYS']]
# Test for conversion of time data to its value, as defined by its format
t['a'].info.serialize_method['fits'] = 'formatted_value'
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits')
# Test DATE
assert not isinstance(tm.meta['DATE'], Time)
assert tm.meta['DATE'] == t.meta['DATE']
# Test MJD-xxx
assert not isinstance(tm.meta['MJD-OBS'], Time)
assert tm.meta['MJD-OBS'] == t.meta['MJD-OBS']
assert (tm['a'] == t['a'].value).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_time_loc_unit(self, table_types):
"""
Test that ``location`` specified by using any valid unit
(length/angle) in ``Time`` columns gets stored in FITS
as ITRS Cartesian coordinates (X, Y, Z), each in m.
Test that it round-trips through FITS.
"""
t = table_types()
t['a'] = Time(self.time, format='isot', scale='utc',
                      location=EarthLocation(1, 2, 3, unit='km'))
table, hdr = time_to_fits(t)
# Check the header
assert hdr['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')
assert hdr['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')
assert hdr['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')
t.write(self.temp('time.fits'), format='fits', overwrite=True)
tm = table_types.read(self.temp('time.fits'), format='fits',
astropy_native=True)
# Check the round-trip of location
assert (tm['a'].location == t['a'].location).all()
assert tm['a'].location.x.value == t['a'].location.x.to_value(unit='m')
assert tm['a'].location.y.value == t['a'].location.y.to_value(unit='m')
assert tm['a'].location.z.value == t['a'].location.z.to_value(unit='m')
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits(self, table_types):
"""
Test that FITS table with time columns (standard compliant)
can be read by io.fits as a table with Time columns.
This tests the following:
1. The special-case where a column has the name 'TIME' and a
time unit
2. Time from Epoch (Reference time) is appropriately converted.
3. Coordinate columns (corresponding to coordinate keywords in the header)
           other than time (that is, spatial coordinates) are not
           mistaken for time columns.
"""
filename = self.data('chandra_time.fits')
tm = table_types.read(filename, astropy_native=True)
# Test case 1
assert isinstance(tm['time'], Time)
assert tm['time'].scale == 'tt'
assert tm['time'].format == 'mjd'
non_native = table_types.read(filename)
# Test case 2
ref_time = Time(non_native.meta['MJDREF'], format='mjd',
scale=non_native.meta['TIMESYS'].lower())
delta_time = TimeDelta(non_native['time'])
assert (ref_time + delta_time == tm['time']).all()
# Test case 3
for colname in ['chipx', 'chipy', 'detx', 'dety', 'x', 'y']:
assert not isinstance(tm[colname], Time)
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_datetime(self, table_types):
"""
Test that ISO-8601 Datetime String Columns are read correctly.
"""
# Datetime column
c = fits.Column(name='datetime', format='A29', coord_type='TCG',
time_ref_pos='GEOCENTER', array=self.time)
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].scale == 'tcg'
assert tm['datetime'].format == 'fits'
assert (tm['datetime'] == self.time).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_location(self, table_types):
"""
Test that geocentric/geodetic observatory position is read
properly, as and when it is specified.
"""
# Datetime column
c = fits.Column(name='datetime', format='A29', coord_type='TT',
time_ref_pos='TOPOCENTER', array=self.time)
# Observatory position in ITRS Cartesian coordinates (geocentric)
cards = [('OBSGEO-X', -2446354), ('OBSGEO-Y', 4237210),
('OBSGEO-Z', 4077985)]
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].location.x.value == -2446354
assert tm['datetime'].location.y.value == 4237210
assert tm['datetime'].location.z.value == 4077985
# Observatory position in geodetic coordinates
cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)]
# Explicitly create a FITS Binary Table
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['datetime'], Time)
assert tm['datetime'].location.lon.value == 0
assert tm['datetime'].location.lat.value == 0
assert np.isclose(tm['datetime'].location.height.value, 0,
rtol=0, atol=1e-9)
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_scale(self, table_types):
"""
Test handling of 'GPS' and 'LOCAL' time scales which are
recognized by the FITS standard but are not native to astropy.
"""
# GPS scale column
gps_time = np.array([630720013, 630720014])
c = fits.Column(name='gps_time', format='D', unit='s', coord_type='GPS',
coord_unit='s', time_ref_pos='TOPOCENTER', array=gps_time)
cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)]
bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with catch_warnings() as w:
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 1
assert 'FITS recognized time scale value "GPS"' in str(w[0].message)
assert isinstance(tm['gps_time'], Time)
assert tm['gps_time'].format == 'gps'
assert tm['gps_time'].scale == 'tai'
assert (tm['gps_time'].value == gps_time).all()
# LOCAL scale column
local_time = np.array([1, 2])
c = fits.Column(name='local_time', format='D', unit='d',
coord_type='LOCAL', coord_unit='d',
time_ref_pos='RELOCATABLE', array=local_time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert isinstance(tm['local_time'], Time)
assert tm['local_time'].format == 'mjd'
assert tm['local_time'].scale == 'local'
assert (tm['local_time'].value == local_time).all()
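    # Note: astropy has no 'gps' time scale, so a FITS 'GPS' column is read
    # back with scale 'tai' and format 'gps' (GPS time is a constant 19 s
    # offset from TAI), which is why the warning above is expected.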
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_read_fits_location_warnings(self, table_types):
"""
Test warnings for time column reference position.
"""
# Time reference position "TOPOCENTER" without corresponding
# observatory position.
c = fits.Column(name='datetime', format='A29', coord_type='TT',
time_ref_pos='TOPOCENTER', array=self.time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with catch_warnings() as w:
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 1
assert ('observatory position is not properly specified' in
str(w[0].message))
# Default value for time reference position is "TOPOCENTER"
c = fits.Column(name='datetime', format='A29', coord_type='TT',
array=self.time)
bhdu = fits.BinTableHDU.from_columns([c])
bhdu.writeto(self.temp('time.fits'), overwrite=True)
with catch_warnings() as w:
tm = table_types.read(self.temp('time.fits'), astropy_native=True)
assert len(w) == 1
assert ('"TRPOSn" is not specified. The default value for '
'it is "TOPOCENTER"' in str(w[0].message))
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import math
import os
import platform
import re
import time
import warnings
import pytest
import numpy as np
from numpy.testing import assert_equal
from ....io import fits
from ....utils.exceptions import AstropyPendingDeprecationWarning
from ....tests.helper import raises, catch_warnings, ignore_warnings
from ..hdu.compressed import SUBTRACTIVE_DITHER_1, DITHER_SEED_CHECKSUM
from .test_table import comparerecords
from . import FitsTestCase
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
class TestImageFunctions(FitsTestCase):
def test_constructor_name_arg(self):
"""Like the test of the same name in test_table.py"""
hdu = fits.ImageHDU()
assert hdu.name == ''
assert 'EXTNAME' not in hdu.header
hdu.name = 'FOO'
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# Passing name to constructor
hdu = fits.ImageHDU(name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
# And overriding a header with a different extname
hdr = fits.Header()
hdr['EXTNAME'] = 'EVENTS'
hdu = fits.ImageHDU(header=hdr, name='FOO')
assert hdu.name == 'FOO'
assert hdu.header['EXTNAME'] == 'FOO'
def test_constructor_ver_arg(self):
def assert_ver_is(hdu, reference_ver):
assert hdu.ver == reference_ver
assert hdu.header['EXTVER'] == reference_ver
hdu = fits.ImageHDU()
assert hdu.ver == 1 # defaults to 1
assert 'EXTVER' not in hdu.header
hdu.ver = 1
assert_ver_is(hdu, 1)
# Passing name to constructor
hdu = fits.ImageHDU(ver=2)
assert_ver_is(hdu, 2)
# And overriding a header with a different extver
hdr = fits.Header()
hdr['EXTVER'] = 3
hdu = fits.ImageHDU(header=hdr, ver=4)
assert_ver_is(hdu, 4)
# The header card is not overridden if ver is None or not passed in
hdr = fits.Header()
hdr['EXTVER'] = 5
hdu = fits.ImageHDU(header=hdr, ver=None)
assert_ver_is(hdu, 5)
hdu = fits.ImageHDU(header=hdr)
assert_ver_is(hdu, 5)
def test_constructor_copies_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/153
Ensure that a header from one HDU is copied when used to initialize new
HDU.
"""
ifd = fits.HDUList(fits.PrimaryHDU())
phdr = ifd[0].header
phdr['FILENAME'] = 'labq01i3q_rawtag.fits'
primary_hdu = fits.PrimaryHDU(header=phdr)
ofd = fits.HDUList(primary_hdu)
ofd[0].header['FILENAME'] = 'labq01i3q_flt.fits'
# Original header should be unchanged
assert phdr['FILENAME'] == 'labq01i3q_rawtag.fits'
def test_open(self):
# The function "open" reads a FITS file into an HDUList object. There
# are three modes to open: "readonly" (the default), "append", and
# "update".
        # Open a file read-only (the default mode); the contents of the
        # FITS file are read into memory.
r = fits.open(self.data('test0.fits')) # readonly
        # data parts are instantiated lazily, so if we close the HDUList
        # without touching the data, the data cannot be accessed.
r.close()
with pytest.raises(IndexError) as exc_info:
r[1].data[:2, :2]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
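    # Illustrative sketch (not exercised above): the usual way to avoid the
    # closed-file IndexError is the context-manager form, e.g.
    #
    #     with fits.open(filename) as hdul:
    #         data = hdul[1].data  # loaded while the file is still open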
def test_open_2(self):
r = fits.open(self.data('test0.fits'))
info = ([(0, 'PRIMARY', 1, 'PrimaryHDU', 138, (), '', '')] +
[(x, 'SCI', x, 'ImageHDU', 61, (40, 40), 'int16', '')
for x in range(1, 5)])
try:
assert r.info(output=False) == info
finally:
r.close()
def test_open_3(self):
# Test that HDUs cannot be accessed after the file was closed
r = fits.open(self.data('test0.fits'))
r.close()
with pytest.raises(IndexError) as exc_info:
r[1]
# Check that the exception message is the enhanced version, not the
# default message from list.__getitem__
assert str(exc_info.value) == ('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
# Test that HDUs can be accessed with lazy_load_hdus=False
r = fits.open(self.data('test0.fits'), lazy_load_hdus=False)
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
with pytest.raises(IndexError) as exc_info:
r[6]
assert str(exc_info.value) == 'list index out of range'
# And the same with the global config item
assert fits.conf.lazy_load_hdus # True by default
fits.conf.lazy_load_hdus = False
try:
r = fits.open(self.data('test0.fits'))
r.close()
assert isinstance(r[1], fits.ImageHDU)
assert len(r) == 5
finally:
fits.conf.lazy_load_hdus = True
def test_primary_with_extname(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/151
Tests that the EXTNAME keyword works with Primary HDUs as well, and
interacts properly with the .name attribute. For convenience
hdulist['PRIMARY'] will still refer to the first HDU even if it has an
EXTNAME not equal to 'PRIMARY'.
"""
prihdr = fits.Header([('EXTNAME', 'XPRIMARY'), ('EXTVER', 1)])
hdul = fits.HDUList([fits.PrimaryHDU(header=prihdr)])
assert 'EXTNAME' in hdul[0].header
assert hdul[0].name == 'XPRIMARY'
assert hdul[0].name == hdul[0].header['EXTNAME']
info = [(0, 'XPRIMARY', 1, 'PrimaryHDU', 5, (), '', '')]
assert hdul.info(output=False) == info
assert hdul['PRIMARY'] is hdul['XPRIMARY']
assert hdul['PRIMARY'] is hdul[('XPRIMARY', 1)]
hdul[0].name = 'XPRIMARY2'
assert hdul[0].header['EXTNAME'] == 'XPRIMARY2'
hdul.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[0].name == 'XPRIMARY2'
@pytest.mark.xfail(platform.system() == 'Windows',
reason='https://github.com/astropy/astropy/issues/5797')
def test_io_manipulation(self):
# Get a keyword value. An extension can be referred by name or by
# number. Both extension and keyword names are case insensitive.
with fits.open(self.data('test0.fits')) as r:
assert r['primary'].header['naxis'] == 0
assert r[0].header['naxis'] == 0
            # If there is more than one extension with the same EXTNAME
            # value, EXTVER can be used (as the second argument) to
            # distinguish between them.
assert r['sci', 1].header['detector'] == 1
# append (using "update()") a new card
r[0].header['xxx'] = 1.234e56
assert ('\n'.join(str(x) for x in r[0].header.cards[-3:]) ==
"EXPFLAG = 'NORMAL ' / Exposure interruption indicator \n"
"FILENAME= 'vtest3.fits' / File name \n"
"XXX = 1.234E+56 ")
# rename a keyword
r[0].header.rename_keyword('filename', 'fname')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'history')
pytest.raises(ValueError, r[0].header.rename_keyword, 'fname',
'simple')
r[0].header.rename_keyword('fname', 'filename')
# get a subsection of data
assert np.array_equal(r[2].data[:3, :3],
np.array([[349, 349, 348],
[349, 349, 347],
[347, 350, 349]], dtype=np.int16))
# We can create a new FITS file by opening a new file with "append"
# mode.
with fits.open(self.temp('test_new.fits'), mode='append') as n:
# Append the primary header and the 2nd extension to the new
# file.
n.append(r[0])
n.append(r[2])
                # The flush method will write the current HDUList object
                # back to the newly created file on disk. The HDUList is
                # still open and can be operated on further.
n.flush()
assert n[1].data[1, 1] == 349
# modify a data point
n[1].data[1, 1] = 99
                # When the file is closed, any extensions appended since the
                # last flush() will be written out, but any HDU that already
                # existed at the last flush will not be modified
del n
            # If an existing file is opened with "append" mode, then, as in
            # readonly mode, the HDUs will be read into the HDUList, which
            # can be modified in memory but cannot be written back to the
            # original file. A file opened in append mode can only have new
            # HDUs appended to it.
os.rename(self.temp('test_new.fits'),
self.temp('test_append.fits'))
with fits.open(self.temp('test_append.fits'), mode='append') as a:
# The above change did not take effect since this was made
# after the flush().
assert a[1].data[1, 1] == 349
a.append(r[1])
del a
# When changes are made to an HDUList which was opened with
# "update" mode, they will be written back to the original file
# when a flush/close is called.
os.rename(self.temp('test_append.fits'),
self.temp('test_update.fits'))
with fits.open(self.temp('test_update.fits'), mode='update') as u:
# When the changes do not alter the size structures of the
# original (or since last flush) HDUList, the changes are
# written back "in place".
assert u[0].header['rootname'] == 'U2EQ0201T'
u[0].header['rootname'] = 'abc'
assert u[1].data[1, 1] == 349
u[1].data[1, 1] = 99
u.flush()
                # If the changes affect the size structure (e.g. adding or
                # deleting HDUs, expanding or reducing a header beyond the
                # existing number of 2880-byte blocks, or changing the data
                # size), the HDUList is written to a temporary file, the
                # original file is deleted, and the temporary file is
                # renamed to the original file name and reopened in update
                # mode. To a user these two kinds of write-back look the
                # same, unless the optional argument to flush or close is
                # set to 1.
del u[2]
u.flush()
                # The write method of the HDUList class writes the current
                # HDUList, with all changes made up to now, to a new file.
                # This method works the same regardless of the mode the
                # HDUList was opened with.
u.append(r[3])
u.writeto(self.temp('test_new.fits'))
del u
            # Another useful HDUList method is readall. It will "touch" the
# data parts in all HDUs, so even if the HDUList is closed, we can
# still operate on the data.
with fits.open(self.data('test0.fits')) as r:
r.readall()
assert r[1].data[1, 1] == 315
# create an HDU with data only
data = np.ones((3, 5), dtype=np.float32)
hdu = fits.ImageHDU(data=data, name='SCI')
assert np.array_equal(hdu.data,
np.array([[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.]],
dtype=np.float32))
# create an HDU with header and data
        # notice that the header has the right NAXISn keywords since it is
        # constructed with ImageHDU
hdu2 = fits.ImageHDU(header=r[1].header, data=np.array([1, 2],
dtype='int32'))
assert ('\n'.join(str(x) for x in hdu2.header.cards[1:5]) ==
"BITPIX = 32 / array data type \n"
"NAXIS = 1 / number of array dimensions \n"
"NAXIS1 = 2 \n"
"PCOUNT = 0 / number of parameters ")
def test_memory_mapping(self):
# memory mapping
f1 = fits.open(self.data('test0.fits'), memmap=1)
f1.close()
def test_verification_on_output(self):
# verification on output
        # make a defective HDUList first
x = fits.ImageHDU()
hdu = fits.HDUList(x) # HDUList can take a list or one single HDU
with catch_warnings() as w:
hdu.verify()
text = "HDUList's 0th element is not a primary HDU."
assert len(w) == 3
assert text in str(w[1].message)
with catch_warnings() as w:
hdu.writeto(self.temp('test_new2.fits'), 'fix')
text = ("HDUList's 0th element is not a primary HDU. "
"Fixed by inserting one as 0th HDU.")
assert len(w) == 3
assert text in str(w[1].message)
def test_section(self):
# section testing
fs = fits.open(self.data('arange.fits'))
assert np.array_equal(fs[0].section[3, 2, 5], 357)
assert np.array_equal(
fs[0].section[3, 2, :],
np.array([352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, 4:],
np.array([356, 357, 358, 359, 360, 361, 362]))
assert np.array_equal(fs[0].section[3, 2, :8],
np.array([352, 353, 354, 355, 356, 357, 358, 359]))
assert np.array_equal(fs[0].section[3, 2, -8:8],
np.array([355, 356, 357, 358, 359]))
assert np.array_equal(
fs[0].section[3, 2:5, :],
np.array([[352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362],
[363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373],
[374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384]]))
assert np.array_equal(fs[0].section[3, :, :][:3, :3],
np.array([[330, 331, 332],
[341, 342, 343],
[352, 353, 354]]))
dat = fs[0].data
assert np.array_equal(fs[0].section[3, 2:5, :8], dat[3, 2:5, :8])
assert np.array_equal(fs[0].section[3, 2:5, 3], dat[3, 2:5, 3])
assert np.array_equal(fs[0].section[3:6, :, :][:3, :3, :3],
np.array([[[330, 331, 332],
[341, 342, 343],
[352, 353, 354]],
[[440, 441, 442],
[451, 452, 453],
[462, 463, 464]],
[[550, 551, 552],
[561, 562, 563],
[572, 573, 574]]]))
assert np.array_equal(fs[0].section[:, :, :][:3, :2, :2],
np.array([[[0, 1],
[11, 12]],
[[110, 111],
[121, 122]],
[[220, 221],
[231, 232]]]))
assert np.array_equal(fs[0].section[:, 2, :], dat[:, 2, :])
assert np.array_equal(fs[0].section[:, 2:5, :], dat[:, 2:5, :])
assert np.array_equal(fs[0].section[3:6, 3, :], dat[3:6, 3, :])
assert np.array_equal(fs[0].section[3:6, 3:7, :], dat[3:6, 3:7, :])
assert np.array_equal(fs[0].section[:, ::2], dat[:, ::2])
assert np.array_equal(fs[0].section[:, [1, 2, 4], 3],
dat[:, [1, 2, 4], 3])
bool_index = np.array([True, False, True, True, False,
False, True, True, False, True])
assert np.array_equal(fs[0].section[:, bool_index, :],
dat[:, bool_index, :])
assert np.array_equal(
fs[0].section[3:6, 3, :, ...], dat[3:6, 3, :, ...])
assert np.array_equal(fs[0].section[..., ::2], dat[..., ::2])
assert np.array_equal(fs[0].section[..., [1, 2, 4], 3],
dat[..., [1, 2, 4], 3])
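    # The point of ``section`` (sketched here for a hypothetical large
    # file): it reads only the bytes needed for the requested cutout from
    # disk, rather than loading the full array first, e.g.
    #
    #     with fits.open('big_image.fits') as hdul:
    #         cutout = hdul[0].section[100:200, 100:200]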
def test_section_data_single(self):
a = np.array([1])
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
sec = hdul[0].section
dat = hdul[0].data
assert np.array_equal(sec[0], dat[0])
assert np.array_equal(sec[...], dat[...])
assert np.array_equal(sec[..., 0], dat[..., 0])
assert np.array_equal(sec[0, ...], dat[0, ...])
def test_section_data_square(self):
a = np.arange(4).reshape(2, 2)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
def test_section_data_cube(self):
a = np.arange(18).reshape(2, 3, 3)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
        # TODO: Generate these permutations instead of having them all
        # written out, yeesh!
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :] == dat[0, :, :]).all()
assert (d.section[1, :, :] == dat[1, :, :]).all()
assert (d.section[0, 0, :] == dat[0, 0, :]).all()
assert (d.section[0, 1, :] == dat[0, 1, :]).all()
assert (d.section[0, 2, :] == dat[0, 2, :]).all()
assert (d.section[1, 0, :] == dat[1, 0, :]).all()
assert (d.section[1, 1, :] == dat[1, 1, :]).all()
assert (d.section[1, 2, :] == dat[1, 2, :]).all()
assert (d.section[0, 0, 0] == dat[0, 0, 0]).all()
assert (d.section[0, 0, 1] == dat[0, 0, 1]).all()
assert (d.section[0, 0, 2] == dat[0, 0, 2]).all()
assert (d.section[0, 1, 0] == dat[0, 1, 0]).all()
assert (d.section[0, 1, 1] == dat[0, 1, 1]).all()
assert (d.section[0, 1, 2] == dat[0, 1, 2]).all()
assert (d.section[0, 2, 0] == dat[0, 2, 0]).all()
assert (d.section[0, 2, 1] == dat[0, 2, 1]).all()
assert (d.section[0, 2, 2] == dat[0, 2, 2]).all()
assert (d.section[1, 0, 0] == dat[1, 0, 0]).all()
assert (d.section[1, 0, 1] == dat[1, 0, 1]).all()
assert (d.section[1, 0, 2] == dat[1, 0, 2]).all()
assert (d.section[1, 1, 0] == dat[1, 1, 0]).all()
assert (d.section[1, 1, 1] == dat[1, 1, 1]).all()
assert (d.section[1, 1, 2] == dat[1, 1, 2]).all()
assert (d.section[1, 2, 0] == dat[1, 2, 0]).all()
assert (d.section[1, 2, 1] == dat[1, 2, 1]).all()
assert (d.section[1, 2, 2] == dat[1, 2, 2]).all()
assert (d.section[:, 0, 0] == dat[:, 0, 0]).all()
assert (d.section[:, 0, 1] == dat[:, 0, 1]).all()
assert (d.section[:, 0, 2] == dat[:, 0, 2]).all()
assert (d.section[:, 1, 0] == dat[:, 1, 0]).all()
assert (d.section[:, 1, 1] == dat[:, 1, 1]).all()
assert (d.section[:, 1, 2] == dat[:, 1, 2]).all()
assert (d.section[:, 2, 0] == dat[:, 2, 0]).all()
assert (d.section[:, 2, 1] == dat[:, 2, 1]).all()
assert (d.section[:, 2, 2] == dat[:, 2, 2]).all()
assert (d.section[0, :, 0] == dat[0, :, 0]).all()
assert (d.section[0, :, 1] == dat[0, :, 1]).all()
assert (d.section[0, :, 2] == dat[0, :, 2]).all()
assert (d.section[1, :, 0] == dat[1, :, 0]).all()
assert (d.section[1, :, 1] == dat[1, :, 1]).all()
assert (d.section[1, :, 2] == dat[1, :, 2]).all()
assert (d.section[:, :, 0] == dat[:, :, 0]).all()
assert (d.section[:, :, 1] == dat[:, :, 1]).all()
assert (d.section[:, :, 2] == dat[:, :, 2]).all()
assert (d.section[:, 0, :] == dat[:, 0, :]).all()
assert (d.section[:, 1, :] == dat[:, 1, :]).all()
assert (d.section[:, 2, :] == dat[:, 2, :]).all()
assert (d.section[:, :, 0:1] == dat[:, :, 0:1]).all()
assert (d.section[:, :, 0:2] == dat[:, :, 0:2]).all()
assert (d.section[:, :, 0:3] == dat[:, :, 0:3]).all()
assert (d.section[:, :, 1:2] == dat[:, :, 1:2]).all()
assert (d.section[:, :, 1:3] == dat[:, :, 1:3]).all()
assert (d.section[:, :, 2:3] == dat[:, :, 2:3]).all()
assert (d.section[0:1, 0:1, 0:1] == dat[0:1, 0:1, 0:1]).all()
assert (d.section[0:1, 0:1, 0:2] == dat[0:1, 0:1, 0:2]).all()
assert (d.section[0:1, 0:1, 0:3] == dat[0:1, 0:1, 0:3]).all()
assert (d.section[0:1, 0:1, 1:2] == dat[0:1, 0:1, 1:2]).all()
assert (d.section[0:1, 0:1, 1:3] == dat[0:1, 0:1, 1:3]).all()
assert (d.section[0:1, 0:1, 2:3] == dat[0:1, 0:1, 2:3]).all()
assert (d.section[0:1, 0:2, 0:1] == dat[0:1, 0:2, 0:1]).all()
assert (d.section[0:1, 0:2, 0:2] == dat[0:1, 0:2, 0:2]).all()
assert (d.section[0:1, 0:2, 0:3] == dat[0:1, 0:2, 0:3]).all()
assert (d.section[0:1, 0:2, 1:2] == dat[0:1, 0:2, 1:2]).all()
assert (d.section[0:1, 0:2, 1:3] == dat[0:1, 0:2, 1:3]).all()
assert (d.section[0:1, 0:2, 2:3] == dat[0:1, 0:2, 2:3]).all()
assert (d.section[0:1, 0:3, 0:1] == dat[0:1, 0:3, 0:1]).all()
assert (d.section[0:1, 0:3, 0:2] == dat[0:1, 0:3, 0:2]).all()
assert (d.section[0:1, 0:3, 0:3] == dat[0:1, 0:3, 0:3]).all()
assert (d.section[0:1, 0:3, 1:2] == dat[0:1, 0:3, 1:2]).all()
assert (d.section[0:1, 0:3, 1:3] == dat[0:1, 0:3, 1:3]).all()
assert (d.section[0:1, 0:3, 2:3] == dat[0:1, 0:3, 2:3]).all()
assert (d.section[0:1, 1:2, 0:1] == dat[0:1, 1:2, 0:1]).all()
assert (d.section[0:1, 1:2, 0:2] == dat[0:1, 1:2, 0:2]).all()
assert (d.section[0:1, 1:2, 0:3] == dat[0:1, 1:2, 0:3]).all()
assert (d.section[0:1, 1:2, 1:2] == dat[0:1, 1:2, 1:2]).all()
assert (d.section[0:1, 1:2, 1:3] == dat[0:1, 1:2, 1:3]).all()
assert (d.section[0:1, 1:2, 2:3] == dat[0:1, 1:2, 2:3]).all()
assert (d.section[0:1, 1:3, 0:1] == dat[0:1, 1:3, 0:1]).all()
assert (d.section[0:1, 1:3, 0:2] == dat[0:1, 1:3, 0:2]).all()
assert (d.section[0:1, 1:3, 0:3] == dat[0:1, 1:3, 0:3]).all()
assert (d.section[0:1, 1:3, 1:2] == dat[0:1, 1:3, 1:2]).all()
assert (d.section[0:1, 1:3, 1:3] == dat[0:1, 1:3, 1:3]).all()
assert (d.section[0:1, 1:3, 2:3] == dat[0:1, 1:3, 2:3]).all()
assert (d.section[1:2, 0:1, 0:1] == dat[1:2, 0:1, 0:1]).all()
assert (d.section[1:2, 0:1, 0:2] == dat[1:2, 0:1, 0:2]).all()
assert (d.section[1:2, 0:1, 0:3] == dat[1:2, 0:1, 0:3]).all()
assert (d.section[1:2, 0:1, 1:2] == dat[1:2, 0:1, 1:2]).all()
assert (d.section[1:2, 0:1, 1:3] == dat[1:2, 0:1, 1:3]).all()
assert (d.section[1:2, 0:1, 2:3] == dat[1:2, 0:1, 2:3]).all()
assert (d.section[1:2, 0:2, 0:1] == dat[1:2, 0:2, 0:1]).all()
assert (d.section[1:2, 0:2, 0:2] == dat[1:2, 0:2, 0:2]).all()
assert (d.section[1:2, 0:2, 0:3] == dat[1:2, 0:2, 0:3]).all()
assert (d.section[1:2, 0:2, 1:2] == dat[1:2, 0:2, 1:2]).all()
assert (d.section[1:2, 0:2, 1:3] == dat[1:2, 0:2, 1:3]).all()
assert (d.section[1:2, 0:2, 2:3] == dat[1:2, 0:2, 2:3]).all()
assert (d.section[1:2, 0:3, 0:1] == dat[1:2, 0:3, 0:1]).all()
assert (d.section[1:2, 0:3, 0:2] == dat[1:2, 0:3, 0:2]).all()
assert (d.section[1:2, 0:3, 0:3] == dat[1:2, 0:3, 0:3]).all()
assert (d.section[1:2, 0:3, 1:2] == dat[1:2, 0:3, 1:2]).all()
assert (d.section[1:2, 0:3, 1:3] == dat[1:2, 0:3, 1:3]).all()
assert (d.section[1:2, 0:3, 2:3] == dat[1:2, 0:3, 2:3]).all()
assert (d.section[1:2, 1:2, 0:1] == dat[1:2, 1:2, 0:1]).all()
assert (d.section[1:2, 1:2, 0:2] == dat[1:2, 1:2, 0:2]).all()
assert (d.section[1:2, 1:2, 0:3] == dat[1:2, 1:2, 0:3]).all()
assert (d.section[1:2, 1:2, 1:2] == dat[1:2, 1:2, 1:2]).all()
assert (d.section[1:2, 1:2, 1:3] == dat[1:2, 1:2, 1:3]).all()
assert (d.section[1:2, 1:2, 2:3] == dat[1:2, 1:2, 2:3]).all()
assert (d.section[1:2, 1:3, 0:1] == dat[1:2, 1:3, 0:1]).all()
assert (d.section[1:2, 1:3, 0:2] == dat[1:2, 1:3, 0:2]).all()
assert (d.section[1:2, 1:3, 0:3] == dat[1:2, 1:3, 0:3]).all()
assert (d.section[1:2, 1:3, 1:2] == dat[1:2, 1:3, 1:2]).all()
assert (d.section[1:2, 1:3, 1:3] == dat[1:2, 1:3, 1:3]).all()
assert (d.section[1:2, 1:3, 2:3] == dat[1:2, 1:3, 2:3]).all()
def test_section_data_four(self):
a = np.arange(256).reshape(4, 4, 4, 4)
hdu = fits.PrimaryHDU(a)
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :, :, :] == dat[:, :, :, :]).all()
assert (d.section[:, :, :] == dat[:, :, :]).all()
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[:] == dat[:]).all()
assert (d.section[0, :, :, :] == dat[0, :, :, :]).all()
assert (d.section[0, :, 0, :] == dat[0, :, 0, :]).all()
assert (d.section[:, :, 0, :] == dat[:, :, 0, :]).all()
assert (d.section[:, 1, 0, :] == dat[:, 1, 0, :]).all()
assert (d.section[:, :, :, 1] == dat[:, :, :, 1]).all()
def test_section_data_scaled(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/143
This is like test_section_data_square but uses a file containing scaled
image data, to test that sections can work correctly with scaled data.
"""
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
dat = hdul[0].data
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
# Test without having accessed the full data first
hdul = fits.open(self.data('scale.fits'))
d = hdul[0]
assert (d.section[:, :] == dat[:, :]).all()
assert (d.section[0, :] == dat[0, :]).all()
assert (d.section[1, :] == dat[1, :]).all()
assert (d.section[:, 0] == dat[:, 0]).all()
assert (d.section[:, 1] == dat[:, 1]).all()
assert (d.section[0, 0] == dat[0, 0]).all()
assert (d.section[0, 1] == dat[0, 1]).all()
assert (d.section[1, 0] == dat[1, 0]).all()
assert (d.section[1, 1] == dat[1, 1]).all()
assert (d.section[0:1, 0:1] == dat[0:1, 0:1]).all()
assert (d.section[0:2, 0:1] == dat[0:2, 0:1]).all()
assert (d.section[0:1, 0:2] == dat[0:1, 0:2]).all()
assert (d.section[0:2, 0:2] == dat[0:2, 0:2]).all()
assert not d._data_loaded
def test_do_not_scale_image_data(self):
hdul = fits.open(self.data('scale.fits'), do_not_scale_image_data=True)
assert hdul[0].data.dtype == np.dtype('>i2')
hdul = fits.open(self.data('scale.fits'))
assert hdul[0].data.dtype == np.dtype('float32')
def test_append_uint_data(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/56
(BZERO and BSCALE added in the wrong location when appending scaled
data)
"""
        fits.writeto(self.temp('test_new.fits'),
                     data=np.array([], dtype='uint8'))
d = np.zeros([100, 100]).astype('uint16')
fits.append(self.temp('test_new.fits'), data=d)
f = fits.open(self.temp('test_new.fits'), uint=True)
assert f[1].data.dtype == 'uint16'
def test_scale_with_explicit_bzero_bscale(self):
"""
Regression test for https://github.com/astropy/astropy/issues/6399
"""
hdu1 = fits.PrimaryHDU()
        hdu2 = fits.ImageHDU(np.random.rand(100, 100))
# The line below raised an exception in astropy 2.0, so if it does not
# raise an error here, that is progress.
hdu2.scale(type='uint8', bscale=1, bzero=0)
def test_uint_header_consistency(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2305
This ensures that an HDU containing unsigned integer data always has
        the appropriate BZERO value in its header.
"""
for int_size in (16, 32, 64):
# Just make an array of some unsigned ints that wouldn't fit in a
# signed int array of the same bit width
max_uint = (2 ** int_size) - 1
            if int_size == 64:
                # Use a numpy scalar so the value stays an exact uint64
                max_uint = np.uint64(max_uint)
dtype = 'uint{}'.format(int_size)
arr = np.empty(100, dtype=dtype)
arr.fill(max_uint)
arr -= np.arange(100, dtype=dtype)
uint_hdu = fits.PrimaryHDU(data=arr)
assert np.all(uint_hdu.data == arr)
assert uint_hdu.data.dtype.name == 'uint{}'.format(int_size)
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
filename = 'uint{}.fits'.format(int_size)
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename), uint=True) as hdul:
new_uint_hdu = hdul[0]
assert np.all(new_uint_hdu.data == arr)
assert new_uint_hdu.data.dtype.name == 'uint{}'.format(int_size)
assert 'BZERO' in new_uint_hdu.header
assert new_uint_hdu.header['BZERO'] == (2 ** (int_size - 1))
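    # For reference, the unsigned convention verified above: FITS stores
    # signed integers and physical = stored + BZERO, so with
    # BZERO = 2**(n-1) a uint16 value v round-trips as
    #
    #     stored = int(v) - 32768     # in [-32768, 32767]
    #     physical = stored + 32768   # == v, back in [0, 65535]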
@pytest.mark.parametrize(('from_file'), (False, True))
@pytest.mark.parametrize(('do_not_scale'), (False,))
def test_uint_header_keywords_removed_after_bitpix_change(self,
from_file,
do_not_scale):
"""
Regression test for https://github.com/astropy/astropy/issues/4974
BZERO/BSCALE should be removed if data is converted to a floating
point type.
Currently excluding the case where do_not_scale_image_data=True
because it is not clear what the expectation should be.
"""
arr = np.zeros(100, dtype='uint16')
if from_file:
# To generate the proper input file we always want to scale the
# data before writing it...otherwise when we open it will be
# regular (signed) int data.
tmp_uint = fits.PrimaryHDU(arr)
filename = 'unsigned_int.fits'
tmp_uint.writeto(self.temp(filename))
with fits.open(self.temp(filename),
do_not_scale_image_data=do_not_scale) as f:
uint_hdu = f[0]
# Force a read before we close.
_ = uint_hdu.data
else:
uint_hdu = fits.PrimaryHDU(arr,
do_not_scale_image_data=do_not_scale)
# Make sure appropriate keywords are in the header. See
# https://github.com/astropy/astropy/pull/3916#issuecomment-122414532
# for discussion.
assert 'BSCALE' in uint_hdu.header
assert 'BZERO' in uint_hdu.header
assert uint_hdu.header['BSCALE'] == 1
assert uint_hdu.header['BZERO'] == 32768
# Convert data to floating point...
uint_hdu.data = uint_hdu.data * 1.0
# ...bitpix should be negative.
assert uint_hdu.header['BITPIX'] < 0
# BSCALE and BZERO should NOT be in header any more.
assert 'BSCALE' not in uint_hdu.header
assert 'BZERO' not in uint_hdu.header
# This is the main test...the data values should round trip
# as zero.
filename = 'test_uint_to_float.fits'
uint_hdu.writeto(self.temp(filename))
with fits.open(self.temp(filename)) as hdul:
assert (hdul[0].data == 0).all()
def test_blanks(self):
"""Test image data with blank spots in it (which should show up as
NaNs in the data array.
"""
arr = np.zeros((10, 10), dtype=np.int32)
# One row will be blanks
arr[1] = 999
hdu = fits.ImageHDU(data=arr)
hdu.header['BLANK'] = 999
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
assert np.isnan(hdul[1].data[1]).all()
def test_invalid_blanks(self):
"""
Test that invalid use of the BLANK keyword leads to an appropriate
warning, and that the BLANK keyword is ignored when returning the
HDU data.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
arr = np.arange(5, dtype=np.float64)
hdu = fits.PrimaryHDU(data=arr)
hdu.header['BLANK'] = 2
with catch_warnings() as w:
hdu.writeto(self.temp('test_new.fits'))
            # Allow the HDU to be written, but there should be a warning
            # when writing a header with BLANK when the data is not
            # integer-typed
assert len(w) == 1
assert "Invalid 'BLANK' keyword in header" in str(w[0].message)
# Should also get a warning when opening the file, and the BLANK
# value should not be applied
with catch_warnings() as w:
with fits.open(self.temp('test_new.fits')) as h:
assert len(w) == 1
assert "Invalid 'BLANK' keyword in header" in str(w[0].message)
assert np.all(arr == h[0].data)
def test_scale_back_with_blanks(self):
"""
Test that when auto-rescaling integer data with "blank" values (where
the blanks are replaced by NaN in the float data), that the "BLANK"
keyword is removed from the header.
Further, test that when using the ``scale_back=True`` option the blank
values are restored properly.
Regression test for https://github.com/astropy/astropy/issues/3865
"""
# Make the sample file
arr = np.arange(5, dtype=np.int32)
hdu = fits.PrimaryHDU(data=arr)
hdu.scale('int16', bscale=1.23)
# Creating data that uses BLANK is currently kludgy--a separate issue
# TODO: Rewrite this test when scaling with blank support is better
# supported
# Let's just add a value to the data that should be converted to NaN
# when it is read back in:
hdu.data[0] = 9999
hdu.header['BLANK'] = 9999
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
data = hdul[0].data
assert np.isnan(data[0])
hdul.writeto(self.temp('test2.fits'))
# Now reopen the newly written file. It should not have a 'BLANK'
# keyword
with catch_warnings() as w:
with fits.open(self.temp('test2.fits')) as hdul2:
assert len(w) == 0
assert 'BLANK' not in hdul2[0].header
data = hdul2[0].data
assert np.isnan(data[0])
# Finally, test that scale_back keeps the BLANKs correctly
with fits.open(self.temp('test.fits'), scale_back=True,
mode='update') as hdul3:
data = hdul3[0].data
assert np.isnan(data[0])
with fits.open(self.temp('test.fits'),
do_not_scale_image_data=True) as hdul4:
assert hdul4[0].header['BLANK'] == 9999
assert hdul4[0].header['BSCALE'] == 1.23
assert hdul4[0].data[0] == 9999
def test_bzero_with_floats(self):
"""Test use of the BZERO keyword in an image HDU containing float
data.
"""
arr = np.zeros((10, 10)) - 1
hdu = fits.ImageHDU(data=arr)
hdu.header['BZERO'] = 1.0
hdu.writeto(self.temp('test_new.fits'))
hdul = fits.open(self.temp('test_new.fits'))
arr += 1
assert (hdul[1].data == arr).all()
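    # BZERO applies to float data as well: physical = BSCALE * raw + BZERO,
    # so the array of -1 values written above reads back shifted up by the
    # BZERO of 1.0, matching the incremented comparison array.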
def test_rewriting_large_scaled_image(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84 and
https://aeon.stsci.edu/ssb/trac/pyfits/ticket/101
"""
hdul = fits.open(self.data('fixed-1890.fits'))
orig_data = hdul[0].data
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.data('fixed-1890.fits'))
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[0].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[0].data == orig_data).all()
hdul = fits.open(self.temp('test_new.fits'))
hdul.close()
def test_image_update_header(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/105
Replacing the original header to an image HDU and saving should update
the NAXISn keywords appropriately and save the image data correctly.
"""
# Copy the original file before saving to it
self.copy_file('test0.fits')
with fits.open(self.temp('test0.fits'), mode='update') as hdul:
orig_data = hdul[1].data.copy()
hdr_copy = hdul[1].header.copy()
del hdr_copy['NAXIS*']
hdul[1].header = hdr_copy
with fits.open(self.temp('test0.fits')) as hdul:
assert (orig_data == hdul[1].data).all()
def test_open_scaled_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/119
(Don't update scaled image data if the data is not read)
This ensures that merely opening and closing a file containing scaled
image data does not cause any change to the data (or the header).
Changes should only occur if the data is accessed.
"""
# Copy the original file before making any possible changes to it
self.copy_file('scale.fits')
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
orig_data = hdul[0].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
assert (orig_data == hdul[0].data).all()
# Try reshaping the data, then closing and reopening the file; let's
        # see if all the changes are preserved properly
hdul[0].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[0].shape == (42, 10)
assert hdul[0].data.dtype == np.dtype('>f4')
assert hdul[0].header['BITPIX'] == -32
assert 'BZERO' not in hdul[0].header
assert 'BSCALE' not in hdul[0].header
def test_scale_back(self):
"""A simple test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/120
The scale_back feature for image HDUs.
"""
self.copy_file('scale.fits')
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[0].header['BITPIX']
orig_bzero = hdul[0].header['BZERO']
orig_bscale = hdul[0].header['BSCALE']
orig_data = hdul[0].data.copy()
hdul[0].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[0].header['BITPIX'] == orig_bitpix
assert hdul[0].header['BZERO'] == orig_bzero
assert hdul[0].header['BSCALE'] == orig_bscale
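            # Inverting physical = BSCALE * raw + BZERO: the physical zeros
            # assigned above map back to raw = -BZERO / BSCALE (floored).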
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[0].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[0].data[1:] == orig_data[1:]).all()
def test_image_none(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/27
"""
with fits.open(self.data('test0.fits')) as h:
h[1].data
h[1].data = None
h[1].writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert h[1].data is None
assert h[1].header['NAXIS'] == 0
assert 'NAXIS1' not in h[1].header
assert 'NAXIS2' not in h[1].header
def test_invalid_blank(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2711
If the BLANK keyword contains an invalid value it should be ignored for
any calculations (though a warning should be issued).
"""
data = np.arange(100, dtype=np.float64)
hdu = fits.PrimaryHDU(data)
hdu.header['BLANK'] = 'nan'
hdu.writeto(self.temp('test.fits'))
with catch_warnings() as w:
with fits.open(self.temp('test.fits')) as hdul:
assert np.all(hdul[0].data == data)
assert len(w) == 2
msg = "Invalid value for 'BLANK' keyword in header"
assert msg in str(w[0].message)
msg = "Invalid 'BLANK' keyword"
assert msg in str(w[1].message)
def test_scaled_image_fromfile(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2710
"""
# Make some sample data
a = np.arange(100, dtype=np.float32)
hdu = fits.PrimaryHDU(data=a.copy())
hdu.scale(bscale=1.1)
hdu.writeto(self.temp('test.fits'))
with open(self.temp('test.fits'), 'rb') as f:
file_data = f.read()
hdul = fits.HDUList.fromstring(file_data)
assert np.allclose(hdul[0].data, a)
def test_set_data(self):
"""
Test data assignment - issue #5087
"""
im = fits.ImageHDU()
ar = np.arange(12)
im.data = ar
def test_scale_bzero_with_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.PrimaryHDU(data=a.copy())
hdu2 = fits.PrimaryHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.PrimaryHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as (hdu,):
hdu.data[:] = 0
assert np.allclose(hdu.data, 0)
class TestCompressedImage(FitsTestCase):
def test_empty(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2595
"""
hdu = fits.CompImageHDU()
assert hdu.data is None
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode='update') as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert hdul[1].data is None
# Now test replacing the empty data with an array and see what
# happens
hdul[1].data = np.arange(100, dtype=np.int32)
with fits.open(self.temp('test.fits')) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
assert np.all(hdul[1].data == np.arange(100, dtype=np.int32))
@pytest.mark.parametrize(
('data', 'compression_type', 'quantize_level'),
[(np.zeros((2, 10, 10), dtype=np.float32), 'RICE_1', 16),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_1', -0.01),
(np.zeros((2, 10, 10), dtype=np.float32), 'GZIP_2', -0.01),
(np.zeros((100, 100)) + 1, 'HCOMPRESS_1', 16),
(np.zeros((10, 10)), 'PLIO_1', 16)])
@pytest.mark.parametrize('byte_order', ['<', '>'])
def test_comp_image(self, data, compression_type, quantize_level,
byte_order):
data = data.newbyteorder(byte_order)
primary_hdu = fits.PrimaryHDU()
ofd = fits.HDUList(primary_hdu)
chdu = fits.CompImageHDU(data, name='SCI',
compression_type=compression_type,
quantize_level=quantize_level)
ofd.append(chdu)
ofd.writeto(self.temp('test_new.fits'), overwrite=True)
ofd.close()
with fits.open(self.temp('test_new.fits')) as fd:
assert (fd[1].data == data).all()
assert fd[1].header['NAXIS'] == chdu.header['NAXIS']
assert fd[1].header['NAXIS1'] == chdu.header['NAXIS1']
assert fd[1].header['NAXIS2'] == chdu.header['NAXIS2']
assert fd[1].header['BITPIX'] == chdu.header['BITPIX']
@pytest.mark.skipif('not HAS_SCIPY')
def test_comp_image_quantize_level(self):
"""
Regression test for https://github.com/astropy/astropy/issues/5969
Test that quantize_level is used.
"""
import scipy.misc
np.random.seed(42)
data = scipy.misc.ascent() + np.random.randn(512, 512)*10
fits.ImageHDU(data).writeto(self.temp('im1.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-1, dither_seed=5)\
.writeto(self.temp('im2.fits'))
fits.CompImageHDU(data, compression_type='RICE_1', quantize_method=1,
quantize_level=-100, dither_seed=5)\
.writeto(self.temp('im3.fits'))
im1 = fits.getdata(self.temp('im1.fits'))
im2 = fits.getdata(self.temp('im2.fits'))
im3 = fits.getdata(self.temp('im3.fits'))
assert not np.array_equal(im2, im3)
assert np.isclose(np.min(im1 - im2), -0.5, atol=1e-3)
assert np.isclose(np.max(im1 - im2), 0.5, atol=1e-3)
assert np.isclose(np.min(im1 - im3), -50, atol=1e-1)
assert np.isclose(np.max(im1 - im3), 50, atol=1e-1)
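    # Interpretation of the bounds above (per the cfitsio convention): a
    # negative quantize_level requests an absolute quantization step of
    # |quantize_level|, so the maximum round-trip error is half a step
    # (0.5 for -1, 50 for -100).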
def test_comp_image_hcompression_1_invalid_data(self):
"""
Tests compression with the HCOMPRESS_1 algorithm with data that is
not 2D and has a non-2D tile size.
"""
pytest.raises(ValueError, fits.CompImageHDU,
np.zeros((2, 10, 10), dtype=np.float32), name='SCI',
compression_type='HCOMPRESS_1', quantize_level=16,
tile_size=[2, 10, 10])
def test_comp_image_hcompress_image_stack(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/171
Tests that data containing more than two dimensions can be
compressed with HCOMPRESS_1 so long as the user-supplied tile size can
be flattened to two dimensions.
"""
cube = np.arange(300, dtype=np.float32).reshape(3, 10, 10)
hdu = fits.CompImageHDU(data=cube, name='SCI',
compression_type='HCOMPRESS_1',
quantize_level=16, tile_size=[5, 5, 1])
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
# HCOMPRESSed images are allowed to deviate from the original by
# about 1/quantize_level of the RMS in each tile.
assert np.abs(hdul['SCI'].data - cube).max() < 1./15.
def test_subtractive_dither_seed(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/32
Ensure that when floating point data is compressed with the
SUBTRACTIVE_DITHER_1 quantization method that the correct ZDITHER0 seed
is added to the header, and that the data can be correctly
decompressed.
"""
array = np.arange(100.0).reshape(10, 10)
csum = (array[0].view('uint8').sum() % 10000) + 1
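        # DITHER_SEED_CHECKSUM asks the compression code to derive ZDITHER0
        # from a checksum of the first tile's bytes; csum above mirrors that
        # computation so the header value can be predicted.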
hdu = fits.CompImageHDU(data=array,
quantize_method=SUBTRACTIVE_DITHER_1,
dither_seed=DITHER_SEED_CHECKSUM)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
assert 'ZQUANTIZ' in hdul[1]._header
assert hdul[1]._header['ZQUANTIZ'] == 'SUBTRACTIVE_DITHER_1'
assert 'ZDITHER0' in hdul[1]._header
assert hdul[1]._header['ZDITHER0'] == csum
assert np.all(hdul[1].data == array)
def test_disable_image_compression(self):
with catch_warnings():
# No warnings should be displayed in this case
warnings.simplefilter('error')
with fits.open(self.data('comp.fits'),
disable_image_compression=True) as hdul:
# The compressed image HDU should show up as a BinTableHDU, but
# *not* a CompImageHDU
assert isinstance(hdul[1], fits.BinTableHDU)
assert not isinstance(hdul[1], fits.CompImageHDU)
with fits.open(self.data('comp.fits')) as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
def test_open_comp_image_in_update_mode(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/167
Similar to test_open_scaled_in_update_mode(), but specifically for
compressed images.
"""
# Copy the original file before making any possible changes to it
self.copy_file('comp.fits')
mtime = os.stat(self.temp('comp.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('comp.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('comp.fits')).st_mtime
def test_open_scaled_in_update_mode_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 2
Identical to test_open_scaled_in_update_mode() but with a compressed
version of the scaled image.
"""
# Copy+compress the original file before making any possible changes to
# it
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
mtime = os.stat(self.temp('scale.fits')).st_mtime
time.sleep(1)
fits.open(self.temp('scale.fits'), mode='update').close()
# Ensure that no changes were made to the file merely by immediately
# opening and closing it.
assert mtime == os.stat(self.temp('scale.fits')).st_mtime
# Insert a slight delay to ensure the mtime does change when the file
# is changed
time.sleep(1)
hdul = fits.open(self.temp('scale.fits'), 'update')
hdul[1].data
hdul.close()
# Now the file should be updated with the rescaled data
assert mtime != os.stat(self.temp('scale.fits')).st_mtime
hdul = fits.open(self.temp('scale.fits'), mode='update')
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
# Try reshaping the data, then closing and reopening the file; let's
        # see if all the changes are preserved properly
hdul[1].data.shape = (42, 10)
hdul.close()
hdul = fits.open(self.temp('scale.fits'))
assert hdul[1].shape == (42, 10)
assert hdul[1].data.dtype == np.dtype('float32')
assert hdul[1].header['BITPIX'] == -32
assert 'BZERO' not in hdul[1].header
assert 'BSCALE' not in hdul[1].header
def test_write_comp_hdu_direct_from_existing(self):
with fits.open(self.data('comp.fits')) as hdul:
hdul[1].writeto(self.temp('test.fits'))
with fits.open(self.data('comp.fits')) as hdul1:
with fits.open(self.temp('test.fits')) as hdul2:
assert np.all(hdul1[1].data == hdul2[1].data)
assert comparerecords(hdul1[1].compressed_data,
hdul2[1].compressed_data)
def test_rewriting_large_scaled_image_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 1
Identical to test_rewriting_large_scaled_image() but with a compressed
image.
"""
with fits.open(self.data('fixed-1890.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('fixed-1890-z.fits'))
hdul = fits.open(self.temp('fixed-1890-z.fits'))
orig_data = hdul[1].data
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Just as before, but this time don't touch hdul[0].data before writing
# back out--this is the case that failed in
# https://aeon.stsci.edu/ssb/trac/pyfits/ticket/84
hdul = fits.open(self.temp('fixed-1890-z.fits'))
with ignore_warnings():
hdul.writeto(self.temp('test_new.fits'), overwrite=True)
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul.close()
# Test opening/closing/reopening a scaled file in update mode
hdul = fits.open(self.temp('fixed-1890-z.fits'),
do_not_scale_image_data=True)
hdul.writeto(self.temp('test_new.fits'), overwrite=True,
output_verify='silentfix')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
orig_data = hdul[1].data
hdul.close()
hdul = fits.open(self.temp('test_new.fits'), mode='update')
hdul.close()
hdul = fits.open(self.temp('test_new.fits'))
assert (hdul[1].data == orig_data).all()
hdul = fits.open(self.temp('test_new.fits'))
hdul.close()
def test_scale_back_compressed(self):
"""
Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/88 3
Identical to test_scale_back() but uses a compressed image.
"""
# Create a compressed version of the scaled image
with fits.open(self.data('scale.fits'),
do_not_scale_image_data=True) as hdul:
chdu = fits.CompImageHDU(data=hdul[0].data,
header=hdul[0].header)
chdu.writeto(self.temp('scale.fits'))
with fits.open(self.temp('scale.fits'), mode='update',
scale_back=True) as hdul:
orig_bitpix = hdul[1].header['BITPIX']
orig_bzero = hdul[1].header['BZERO']
orig_bscale = hdul[1].header['BSCALE']
orig_data = hdul[1].data.copy()
hdul[1].data[0] = 0
with fits.open(self.temp('scale.fits'),
do_not_scale_image_data=True) as hdul:
assert hdul[1].header['BITPIX'] == orig_bitpix
assert hdul[1].header['BZERO'] == orig_bzero
assert hdul[1].header['BSCALE'] == orig_bscale
zero_point = int(math.floor(-orig_bzero / orig_bscale))
assert (hdul[1].data[0] == zero_point).all()
with fits.open(self.temp('scale.fits')) as hdul:
assert (hdul[1].data[1:] == orig_data[1:]).all()
# Extra test to ensure that after everything the data is still the
# same as in the original uncompressed version of the image
with fits.open(self.data('scale.fits')) as hdul2:
# Recall we made the same modification to the data in hdul
# above
hdul2[0].data[0] = 0
assert (hdul[1].data == hdul2[0].data).all()
def test_lossless_gzip_compression(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/198"""
noise = np.random.normal(size=(1000, 1000))
chdu1 = fits.CompImageHDU(data=noise, compression_type='GZIP_1')
# First make a test image with lossy compression and make sure it
        # wasn't compressed perfectly. This shouldn't ever happen, but just
        # to make sure the test is non-trivial.
chdu1.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as h:
assert np.abs(noise - h[1].data).max() > 0.0
del h
chdu2 = fits.CompImageHDU(data=noise, compression_type='GZIP_1',
quantize_level=0.0) # No quantization
with ignore_warnings():
chdu2.writeto(self.temp('test.fits'), overwrite=True)
with fits.open(self.temp('test.fits')) as h:
assert (noise == h[1].data).all()
def test_compression_column_tforms(self):
"""Regression test for https://aeon.stsci.edu/ssb/trac/pyfits/ticket/199"""
# Some interestingly tiled data so that some of it is quantized and
# some of it ends up just getting gzip-compressed
data2 = ((np.arange(1, 8, dtype=np.float32) * 10)[:, np.newaxis] +
np.arange(1, 7))
np.random.seed(1337)
data1 = np.random.uniform(size=(6 * 4, 7 * 4))
data1[:data2.shape[0], :data2.shape[1]] = data2
chdu = fits.CompImageHDU(data1, compression_type='RICE_1',
tile_size=(6, 7))
chdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'),
disable_image_compression=True) as h:
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM1'])
assert re.match(r'^1PB\(\d+\)$', h[1].header['TFORM2'])
def test_compression_update_header(self):
"""Regression test for
https://github.com/spacetelescope/PyFITS/issues/23
"""
self.copy_file('comp.fits')
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
assert isinstance(hdul[1], fits.CompImageHDU)
hdul[1].header['test1'] = 'test'
hdul[1]._header['test2'] = 'test2'
with fits.open(self.temp('comp.fits')) as hdul:
assert 'test1' in hdul[1].header
assert hdul[1].header['test1'] == 'test'
assert 'test2' in hdul[1].header
assert hdul[1].header['test2'] == 'test2'
# Test update via index now:
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
hdr[hdr.index('TEST1')] = 'foo'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['TEST1'] == 'foo'
# Test slice updates
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header['TEST*'] = 'qux'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['qux', 'qux']
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
idx = hdr.index('TEST1')
hdr[idx:idx + 2] = 'bar'
with fits.open(self.temp('comp.fits')) as hdul:
assert list(hdul[1].header['TEST*'].values()) == ['bar', 'bar']
# Test updating a specific COMMENT card duplicate
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdul[1].header[('COMMENT', 1)] = 'I am fire. I am death!'
with fits.open(self.temp('comp.fits')) as hdul:
assert hdul[1].header['COMMENT'][1] == 'I am fire. I am death!'
assert hdul[1]._header['COMMENT'][1] == 'I am fire. I am death!'
# Test deleting by keyword and by slice
with fits.open(self.temp('comp.fits'), mode='update') as hdul:
hdr = hdul[1].header
del hdr['COMMENT']
idx = hdr.index('TEST1')
del hdr[idx:idx + 2]
with fits.open(self.temp('comp.fits')) as hdul:
assert 'COMMENT' not in hdul[1].header
assert 'COMMENT' not in hdul[1]._header
assert 'TEST1' not in hdul[1].header
assert 'TEST1' not in hdul[1]._header
assert 'TEST2' not in hdul[1].header
assert 'TEST2' not in hdul[1]._header
def test_compression_update_header_with_reserved(self):
"""
Ensure that setting reserved keywords related to the table data
structure on CompImageHDU image headers fails.
"""
def test_set_keyword(hdr, keyword, value):
with catch_warnings() as w:
hdr[keyword] = value
assert len(w) == 1
assert str(w[0].message).startswith(
"Keyword {!r} is reserved".format(keyword))
assert keyword not in hdr
with fits.open(self.data('comp.fits')) as hdul:
hdr = hdul[1].header
test_set_keyword(hdr, 'TFIELDS', 8)
test_set_keyword(hdr, 'TTYPE1', 'Foo')
test_set_keyword(hdr, 'ZCMPTYPE', 'ASDF')
test_set_keyword(hdr, 'ZVAL1', 'Foo')
def test_compression_header_append(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with catch_warnings() as w:
imghdr.append('TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
imghdr.append(('FOO', 'bar', 'qux'), end=True)
assert 'FOO' in imghdr
assert imghdr[-1] == 'bar'
assert 'FOO' in tblhdr
assert tblhdr[-1] == 'bar'
imghdr.append(('CHECKSUM', 'abcd1234'))
assert 'CHECKSUM' in imghdr
assert imghdr['CHECKSUM'] == 'abcd1234'
assert 'CHECKSUM' not in tblhdr
assert 'ZHECKSUM' in tblhdr
assert tblhdr['ZHECKSUM'] == 'abcd1234'
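    # Note for readers (added for clarity): ``hdul[1].header`` is the
    # reconstructed *image* header, while ``hdul[1]._header`` is the actual
    # header of the compressed binary table extension; image keywords that
    # would clash with table keywords are remapped to ``Z``-prefixed
    # equivalents, e.g. CHECKSUM -> ZHECKSUM above and GCOUNT -> ZGCOUNT in
    # the set_before_after test below.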
def test_compression_header_append2(self):
"""
        Regression test for issue https://github.com/astropy/astropy/issues/5827
"""
with fits.open(self.data('comp.fits')) as hdul:
header = hdul[1].header
            while len(header) < 1000:
header.append() # pad with grow room
# Append stats to header:
header.append(("Q1_OSAVG", 1, "[adu] quadrant 1 overscan mean"))
header.append(("Q1_OSSTD", 1, "[adu] quadrant 1 overscan stddev"))
header.append(("Q1_OSMED", 1, "[adu] quadrant 1 overscan median"))
def test_compression_header_insert(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
# First try inserting a restricted keyword
with catch_warnings() as w:
imghdr.insert(1000, 'TFIELDS')
assert len(w) == 1
assert 'TFIELDS' not in imghdr
assert tblhdr.count('TFIELDS') == 1
# First try keyword-relative insert
imghdr.insert('TELESCOP', ('OBSERVER', 'Phil Plait'))
assert 'OBSERVER' in imghdr
assert imghdr.index('OBSERVER') == imghdr.index('TELESCOP') - 1
assert 'OBSERVER' in tblhdr
assert tblhdr.index('OBSERVER') == tblhdr.index('TELESCOP') - 1
# Next let's see if an index-relative insert winds up being
# sensible
idx = imghdr.index('OBSERVER')
imghdr.insert('OBSERVER', ('FOO',))
assert 'FOO' in imghdr
assert imghdr.index('FOO') == idx
assert 'FOO' in tblhdr
assert tblhdr.index('FOO') == tblhdr.index('OBSERVER') - 1
def test_compression_header_set_before_after(self):
with fits.open(self.data('comp.fits')) as hdul:
imghdr = hdul[1].header
tblhdr = hdul[1]._header
with catch_warnings() as w:
imghdr.set('ZBITPIX', 77, 'asdf', after='XTENSION')
assert len(w) == 1
assert 'ZBITPIX' not in imghdr
assert tblhdr.count('ZBITPIX') == 1
assert tblhdr['ZBITPIX'] != 77
# Move GCOUNT before PCOUNT (not that there's any reason you'd
# *want* to do that, but it's just a test...)
imghdr.set('GCOUNT', 99, before='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') - 1
assert imghdr['GCOUNT'] == 99
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') - 1
assert tblhdr['ZGCOUNT'] == 99
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
imghdr.set('GCOUNT', 2, after='PCOUNT')
assert imghdr.index('GCOUNT') == imghdr.index('PCOUNT') + 1
assert imghdr['GCOUNT'] == 2
assert tblhdr.index('ZGCOUNT') == tblhdr.index('ZPCOUNT') + 1
assert tblhdr['ZGCOUNT'] == 2
assert tblhdr.index('PCOUNT') == 5
assert tblhdr.index('GCOUNT') == 6
assert tblhdr['GCOUNT'] == 1
def test_compression_header_append_commentary(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2363
"""
hdu = fits.CompImageHDU(np.array([0], dtype=np.int32))
hdu.header['COMMENT'] = 'hello world'
assert hdu.header['COMMENT'] == ['hello world']
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert hdul[1].header['COMMENT'] == ['hello world']
def test_compression_with_gzip_column(self):
"""
Regression test for https://github.com/spacetelescope/PyFITS/issues/71
"""
arr = np.zeros((2, 7000), dtype='float32')
# The first row (which will be the first compressed tile) has a very
# wide range of values that will be difficult to quantize, and should
# result in use of a GZIP_COMPRESSED_DATA column
arr[0] = np.linspace(0, 1, 7000)
arr[1] = np.random.normal(size=7000)
hdu = fits.CompImageHDU(data=arr)
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
comp_hdu = hdul[1]
# GZIP-compressed tile should compare exactly
assert np.all(comp_hdu.data[0] == arr[0])
# The second tile uses lossy compression and may be somewhat off,
# so we don't bother comparing it exactly
def test_duplicate_compression_header_keywords(self):
"""
Regression test for https://github.com/astropy/astropy/issues/2750
Tests that the fake header (for the compressed image) can still be read
even if the real header contained a duplicate ZTENSION keyword (the
issue applies to any keyword specific to the compression convention,
however).
"""
arr = np.arange(100, dtype=np.int32)
hdu = fits.CompImageHDU(data=arr)
header = hdu._header
# append the duplicate keyword
hdu._header.append(('ZTENSION', 'IMAGE'))
hdu.writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits')) as hdul:
assert header == hdul[1]._header
# There's no good reason to have a duplicate keyword, but
# technically it isn't invalid either :/
assert hdul[1]._header.count('ZTENSION') == 2
def test_scale_bzero_with_compressed_int_data(self):
"""
Regression test for https://github.com/astropy/astropy/issues/4600
and https://github.com/astropy/astropy/issues/4588
Identical to test_scale_bzero_with_int_data() but uses a compressed
image.
"""
a = np.arange(100, 200, dtype=np.int16)
hdu1 = fits.CompImageHDU(data=a.copy())
hdu2 = fits.CompImageHDU(data=a.copy())
# Previously the following line would throw a TypeError,
# now it should be identical to the integer bzero case
hdu1.scale('int16', bzero=99.0)
hdu2.scale('int16', bzero=99)
assert np.allclose(hdu1.data, hdu2.data)
def test_scale_back_compressed_uint_assignment(self):
"""
Extend fix for #4600 to assignment to data
Identical to test_scale_back_uint_assignment() but uses a compressed
image.
Suggested by:
https://github.com/astropy/astropy/pull/4602#issuecomment-208713748
"""
a = np.arange(100, 200, dtype=np.uint16)
fits.CompImageHDU(a).writeto(self.temp('test.fits'))
with fits.open(self.temp('test.fits'), mode="update",
scale_back=True) as hdul:
hdul[1].data[:] = 0
assert np.allclose(hdul[1].data, 0)
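    # Note (added for clarity): ``scale_back=True`` asks fits.open() to
    # convert modified data back to the original on-disk storage type (here
    # uint16, stored as int16 with BZERO=32768) when the file is flushed, so
    # the zero assignment above has to survive a round trip through the
    # scaling machinery.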
def test_compressed_header_missing_znaxis(self):
a = np.arange(100, 200, dtype=np.uint16)
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZNAXIS')
with pytest.raises(KeyError):
comp_hdu.compressed_data
comp_hdu = fits.CompImageHDU(a)
comp_hdu._header.pop('ZBITPIX')
with pytest.raises(KeyError):
comp_hdu.compressed_data
@pytest.mark.parametrize(
('keyword', 'dtype', 'expected'),
[('BSCALE', np.uint8, np.float32), ('BSCALE', np.int16, np.float32),
('BSCALE', np.int32, np.float64), ('BZERO', np.uint8, np.float32),
('BZERO', np.int16, np.float32), ('BZERO', np.int32, np.float64)])
def test_compressed_scaled_float(self, keyword, dtype, expected):
"""
        If BSCALE or BZERO is set to a floating-point value, the
        uncompressed image should be floating-point.
https://github.com/astropy/astropy/pull/6492
Parameters
----------
keyword : `str`
Keyword to set to a floating-point value to trigger
floating-point pixels.
dtype : `numpy.dtype`
Type of original array.
expected : `numpy.dtype`
Expected type of uncompressed array.
"""
value = 1.23345 # A floating-point value
hdu = fits.CompImageHDU(np.arange(0, 10, dtype=dtype))
hdu.header[keyword] = value
hdu.writeto(self.temp('test.fits'))
del hdu
with fits.open(self.temp('test.fits')) as hdu:
assert hdu[1].header[keyword] == value
assert hdu[1].data.dtype == expected
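# The scaling tests above all rely on the standard FITS relation between
# stored and physical pixel values (not new behaviour, just a reminder):
#
#     physical_value = BZERO + BSCALE * stored_value
#
# so a floating-point BSCALE or BZERO forces the unscaled data to a float
# dtype wide enough to hold the result, which is what
# test_compressed_scaled_float parametrizes over.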
def test_comphdu_bscale(tmpdir):
"""
Regression test for a bug that caused extensions that used BZERO and BSCALE
that got turned into CompImageHDU to end up with BZERO/BSCALE before the
TFIELDS.
"""
filename1 = tmpdir.join('3hdus.fits').strpath
filename2 = tmpdir.join('3hdus_comp.fits').strpath
x = np.random.random((100, 100))*100
x0 = fits.PrimaryHDU()
x1 = fits.ImageHDU(np.array(x-50, dtype=int), uint=True)
x1.header['BZERO'] = 20331
x1.header['BSCALE'] = 2.3
hdus = fits.HDUList([x0, x1])
hdus.writeto(filename1)
# fitsverify (based on cfitsio) should fail on this file, only seeing the
# first HDU.
hdus = fits.open(filename1)
hdus[1] = fits.CompImageHDU(data=hdus[1].data.astype(np.uint32),
header=hdus[1].header)
hdus.writeto(filename2)
# open again and verify
hdus = fits.open(filename2)
hdus[1].verify('exception')
def test_scale_implicit_casting():
    # Regression test for an issue that occurred because NumPy no longer
    # allows implicit type casting during in-place operations.
hdu = fits.ImageHDU(np.array([1], dtype=np.int32))
hdu.scale(bzero=1.3)
def test_bzero_implicit_casting_compressed():
    # Regression test for an issue that occurred because NumPy no longer
    # allows implicit type casting during in-place operations.  Astropy is
    # actually not able to produce a file that triggers the failure - the
    # issue occurs when using unsigned integer types in the FITS file, in
    # which case BZERO should be 32768.  But if the keyword is stored as
    # 32768.0, then it was possible to trigger the implicit casting error.
filename = os.path.join(os.path.dirname(__file__),
'data', 'compressed_float_bzero.fits')
hdu = fits.open(filename)[1]
hdu.data
def test_bzero_mishandled_info(tmpdir):
# Regression test for #5507:
# Calling HDUList.info() on a dataset which applies a zeropoint
# from BZERO but which astropy.io.fits does not think it needs
# to resize to a new dtype results in an AttributeError.
filename = tmpdir.join('floatimg_with_bzero.fits').strpath
hdu = fits.ImageHDU(np.zeros((10, 10)))
hdu.header['BZERO'] = 10
hdu.writeto(filename, overwrite=True)
hdul = fits.open(filename)
hdul.info()
def test_image_write_readonly(tmpdir):
# Regression test to make sure that we can write out read-only arrays (#5512)
x = np.array([1, 2, 3])
x.setflags(write=False)
ghdu = fits.ImageHDU(data=x)
ghdu.add_datasum()
filename = tmpdir.join('test.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1, 2, 3])
# Same for compressed HDU
x = np.array([1.0, 2.0, 3.0])
x.setflags(write=False)
ghdu = fits.CompImageHDU(data=x)
# add_datasum does not work for CompImageHDU
# ghdu.add_datasum()
filename = tmpdir.join('test2.fits').strpath
ghdu.writeto(filename)
with fits.open(filename) as hdulist:
assert_equal(hdulist[1].data, [1.0, 2.0, 3.0])
|
256fc7ed98ace2412ebe3d17e4aa96d1af01fe85e29b9a124be4238422aab946 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import functools
from io import BytesIO
from textwrap import dedent
import pytest
import numpy as np
from numpy import ma
from ....table import Table, MaskedColumn
from ... import ascii
from ...ascii.core import ParameterError, FastOptionsError, InconsistentTableError
from ...ascii.cparser import CParserError
from ..fastbasic import (
FastBasic, FastCsv, FastTab, FastCommentedHeader, FastRdb, FastNoHeader)
from .common import assert_equal, assert_almost_equal, assert_true
StringIO = lambda x: BytesIO(x.encode('ascii'))
TRAVIS = os.environ.get('TRAVIS', False)
def assert_table_equal(t1, t2, check_meta=False, rtol=1.e-15, atol=1.e-300):
"""
Test equality of all columns in a table, with stricter tolerances for
float columns than the np.allclose default.
"""
assert_equal(len(t1), len(t2))
assert_equal(t1.colnames, t2.colnames)
if check_meta:
assert_equal(t1.meta, t2.meta)
for name in t1.colnames:
if len(t1) != 0:
assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)
if not isinstance(t1[name], MaskedColumn):
for i, el in enumerate(t1[name]):
try:
if not isinstance(el, str) and np.isnan(el):
assert_true(not isinstance(t2[name][i], str) and np.isnan(t2[name][i]))
elif isinstance(el, str):
assert_equal(el, t2[name][i])
else:
assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol)
except (TypeError, NotImplementedError):
pass # ignore for now
# Use this counter to create a unique filename for each file created in a test
# if this function is called more than once in a single test
_filename_counter = 0
def _read(tmpdir, table, Reader=None, format=None, parallel=False, check_meta=False, **kwargs):
# make sure we have a newline so table can't be misinterpreted as a filename
global _filename_counter
table += '\n'
reader = Reader(**kwargs)
t1 = reader.read(table)
t2 = reader.read(StringIO(table))
t3 = reader.read(table.splitlines())
t4 = ascii.read(table, format=format, guess=False, **kwargs)
t5 = ascii.read(table, format=format, guess=False, fast_reader=False, **kwargs)
assert_table_equal(t1, t2, check_meta=check_meta)
assert_table_equal(t2, t3, check_meta=check_meta)
assert_table_equal(t3, t4, check_meta=check_meta)
assert_table_equal(t4, t5, check_meta=check_meta)
if parallel:
if TRAVIS:
pytest.xfail("Multiprocessing can sometimes fail on Travis CI")
elif os.name == 'nt':
pytest.xfail("Multiprocessing is currently unsupported on Windows")
t6 = ascii.read(table, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t6, check_meta=check_meta)
filename = str(tmpdir.join('table{0}.txt'.format(_filename_counter)))
_filename_counter += 1
with open(filename, 'wb') as f:
f.write(table.encode('ascii'))
f.flush()
t7 = ascii.read(filename, format=format, guess=False, **kwargs)
if parallel:
t8 = ascii.read(filename, format=format, guess=False, fast_reader={
'parallel': True}, **kwargs)
assert_table_equal(t1, t7, check_meta=check_meta)
if parallel:
assert_table_equal(t1, t8, check_meta=check_meta)
return t1
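# Summary of the code paths exercised by _read() above (comment added for
# clarity): t1-t3 feed the Reader class a string, a file-like object and a
# list of lines; t4/t5 go through ascii.read() with the fast and the pure
# Python reader; t6/t8 repeat the fast path with multiprocessing enabled;
# t7 reads the same content back from an actual file on disk.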
@pytest.fixture(scope='function')
def read_basic(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastBasic, format='basic')
@pytest.fixture(scope='function')
def read_csv(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCsv, format='csv')
@pytest.fixture(scope='function')
def read_tab(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastTab, format='tab')
@pytest.fixture(scope='function')
def read_commented_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastCommentedHeader,
format='commented_header')
@pytest.fixture(scope='function')
def read_rdb(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastRdb, format='rdb')
@pytest.fixture(scope='function')
def read_no_header(tmpdir, request):
return functools.partial(_read, tmpdir, Reader=FastNoHeader,
format='no_header')
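def _fast_reader_usage_sketch():
    """
    Minimal usage sketch (illustrative helper, not collected by pytest):
    the fixtures above are partial applications of _read(); the direct
    equivalent of the fast basic reader is a plain ascii.read() call.
    """
    return ascii.read('a b c\n1 2 3', format='fast_basic', guess=False)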
@pytest.mark.parametrize("parallel", [True, False])
def test_simple_data(parallel, read_basic):
"""
Make sure the fast reader works with basic input data.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
def test_read_types():
"""
Make sure that the read() function takes filenames,
strings, and lists of strings in addition to file-like objects.
"""
t1 = ascii.read("a b c\n1 2 3\n4 5 6", format='fast_basic', guess=False)
# TODO: also read from file
t2 = ascii.read(StringIO("a b c\n1 2 3\n4 5 6"), format='fast_basic', guess=False)
t3 = ascii.read(["a b c", "1 2 3", "4 5 6"], format='fast_basic', guess=False)
assert_table_equal(t1, t2)
assert_table_equal(t2, t3)
@pytest.mark.parametrize("parallel", [True, False])
def test_supplied_names(parallel, read_basic):
"""
If passed as a parameter, names should replace any
column names found in the header.
"""
table = read_basic("A B C\n1 2 3\n4 5 6", names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header(parallel, read_basic, read_no_header):
"""
The header should not be read when header_start=None. Unless names is
passed, the column names should be auto-generated.
"""
# Cannot set header_start=None for basic format
with pytest.raises(ValueError):
read_basic("A B C\n1 2 3\n4 5 6", header_start=None, data_start=0, parallel=parallel)
t2 = read_no_header("A B C\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('col1', 'col2', 'col3'))
assert_table_equal(t2, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_header_supplied_names(parallel, read_basic, read_no_header):
"""
If header_start=None and names is passed as a parameter, header
data should not be read and names should be used instead.
"""
table = read_no_header("A B C\n1 2 3\n4 5 6",
names=('X', 'Y', 'Z'), parallel=parallel)
expected = Table([['A', '1', '4'], ['B', '2', '5'], ['C', '3', '6']], names=('X', 'Y', 'Z'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_comment(parallel, read_basic):
"""
Make sure that line comments are ignored by the C reader.
"""
table = read_basic("# comment\nA B C\n # another comment\n1 2 3\n4 5 6", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_lines(parallel, read_basic):
"""
Make sure that empty lines are ignored by the C reader.
"""
table = read_basic("\n\nA B C\n1 2 3\n\n\n4 5 6\n\n\n\n", parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_lstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the beginning of fields.
"""
text = """
1, 2, \t3
A,\t\t B, C
a, b, c
""" + ' \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_rstrip_whitespace(parallel, read_basic):
"""
Test to make sure the reader ignores whitespace at the end of fields.
"""
text = ' 1 ,2 \t,3 \nA\t,B ,C\t \t \n \ta ,b , c \n'
table = read_basic(text, delimiter=',', parallel=parallel)
expected = Table([['A', 'a'], ['B', 'b'], ['C', 'c']], names=('1', '2', '3'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_conversion(parallel, read_basic):
"""
The reader should try to convert each column to ints. If this fails, the
reader should try to convert to floats. Failing this, it should fall back
to strings.
"""
text = """
A B C D E
1 a 3 4 5
2. 1 9 10 -5.3e4
4 2 -12 .4 six
"""
table = read_basic(text, parallel=parallel)
assert_equal(table['A'].dtype.kind, 'f')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'i')
assert_equal(table['D'].dtype.kind, 'f')
assert table['E'].dtype.kind in ('S', 'U')
@pytest.mark.parametrize("parallel", [True, False])
def test_delimiter(parallel, read_basic):
"""
Make sure that different delimiters work as expected.
"""
text = """
COL1 COL2 COL3
1 A -1
2 B -2
"""
expected = Table([[1, 2], ['A', 'B'], [-1, -2]], names=('COL1', 'COL2', 'COL3'))
for sep in ' ,\t#;':
table = read_basic(text.replace(' ', sep), delimiter=sep, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_names(parallel, read_basic):
"""
If include_names is not None, the parser should read only those columns in include_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", include_names=['A', 'D'], parallel=parallel)
expected = Table([[1, 5], [4, 8]], names=('A', 'D'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_exclude_names(parallel, read_basic):
"""
If exclude_names is not None, the parser should exclude the columns in exclude_names.
"""
table = read_basic("A B C D\n1 2 3 4\n5 6 7 8", exclude_names=['A', 'D'], parallel=parallel)
expected = Table([[2, 6], [3, 7]], names=('B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_include_exclude_names(parallel, read_basic):
"""
Make sure that include_names is applied before exclude_names if both are specified.
"""
text = """
A B C D E F G H
1 2 3 4 5 6 7 8
9 10 11 12 13 14 15 16
"""
table = read_basic(text, include_names=['A', 'B', 'D', 'F', 'H'],
exclude_names=['B', 'F'], parallel=parallel)
expected = Table([[1, 9], [4, 12], [8, 16]], names=('A', 'D', 'H'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_fields(parallel, read_basic):
"""
The character quotechar (default '"') should denote the start of a field which can
contain the field delimiter and newlines.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = """
"A B" C D
1.5 2.1 -37.1
a b " c
d"
"""
table = read_basic(text, parallel=parallel)
expected = Table([['1.5', 'a'], ['2.1', 'b'], ['-37.1', 'cd']], names=('A B', 'C', 'D'))
assert_table_equal(table, expected)
table = read_basic(text.replace('"', "'"), quotechar="'", parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("key,val", [
('delimiter', ',,'), # multi-char delimiter
('comment', '##'), # multi-char comment
('data_start', None), # data_start=None
('data_start', -1), # data_start negative
('quotechar', '##'), # multi-char quote signifier
('header_start', -1), # negative header_start
('converters', dict((i + 1, ascii.convert_numpy(np.uint)) for i in range(3))), # passing converters
('Inputter', ascii.ContinuationLinesInputter), # passing Inputter
('header_Splitter', ascii.DefaultSplitter), # passing Splitter
('data_Splitter', ascii.DefaultSplitter)])
def test_invalid_parameters(key, val):
"""
Make sure the C reader raises an error if passed parameters it can't handle.
"""
with pytest.raises(ParameterError):
FastBasic(**{key: val}).read('1 2 3\n4 5 6')
with pytest.raises(ParameterError):
ascii.read('1 2 3\n4 5 6',
format='fast_basic', guess=False, **{key: val})
def test_invalid_parameters_other():
with pytest.raises(TypeError):
FastBasic(foo=7).read('1 2 3\n4 5 6') # unexpected argument
with pytest.raises(FastOptionsError): # don't fall back on the slow reader
ascii.read('1 2 3\n4 5 6', format='basic', fast_reader={'foo': 7})
with pytest.raises(ParameterError):
# Outputter cannot be specified in constructor
FastBasic(Outputter=ascii.TableOutputter).read('1 2 3\n4 5 6')
def test_too_many_cols1():
"""
If a row contains too many columns, the C reader should raise an error.
"""
text = """
A B C
1 2 3
4 5 6
7 8 9 10
11 12 13
"""
with pytest.raises(InconsistentTableError) as e:
table = FastBasic().read(text)
assert 'InconsistentTableError: Number of header columns (3) ' \
'inconsistent with data columns in data line 2' in str(e)
def test_too_many_cols2():
text = """\
aaa,bbb
1,2,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
table = FastCsv().read(text)
assert 'InconsistentTableError: Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e)
def test_too_many_cols3():
text = """\
aaa,bbb
1,2,,
3,4,
"""
with pytest.raises(InconsistentTableError) as e:
table = FastCsv().read(text)
assert 'InconsistentTableError: Number of header columns (2) ' \
'inconsistent with data columns in data line 0' in str(e)
@pytest.mark.parametrize("parallel", [True, False])
def test_not_enough_cols(parallel, read_csv):
"""
If a row does not have enough columns, the FastCsv reader should add empty
fields while the FastBasic reader should raise an error.
"""
text = """
A,B,C
1,2,3
4,5
6,7,8
"""
table = read_csv(text, parallel=parallel)
assert table['B'][1] is not ma.masked
assert table['C'][1] is ma.masked
with pytest.raises(InconsistentTableError) as e:
table = FastBasic(delimiter=',').read(text)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_end(parallel, read_basic, read_rdb):
"""
The parameter data_end should specify where data reading ends.
"""
text = """
A B C
1 2 3
4 5 6
7 8 9
10 11 12
"""
table = read_basic(text, data_end=3, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# data_end supports negative indexing
table = read_basic(text, data_end=-2, parallel=parallel)
assert_table_equal(table, expected)
text = """
A\tB\tC
N\tN\tS
1\t2\ta
3\t4\tb
5\t6\tc
"""
# make sure data_end works with RDB
table = read_rdb(text, data_end=-1, parallel=parallel)
expected = Table([[1, 3], [2, 4], ['a', 'b']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# positive index
table = read_rdb(text, data_end=3, parallel=parallel)
expected = Table([[1], [2], ['a']], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
# empty table if data_end is too small
table = read_rdb(text, data_end=1, parallel=parallel)
expected = Table([[], [], []], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_inf_nan(parallel, read_basic):
"""
Test that inf and nan-like values are correctly parsed on all platforms.
Regression test for https://github.com/astropy/astropy/pull/3525
"""
text = dedent("""\
A
nan
+nan
-nan
inf
infinity
+inf
+infinity
-inf
-infinity
""")
expected = Table({'A': [np.nan, np.nan, np.nan,
np.inf, np.inf, np.inf, np.inf,
-np.inf, -np.inf]})
table = read_basic(text, parallel=parallel)
assert table['A'].dtype.kind == 'f'
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_values(parallel, read_basic):
"""
Make sure that the parameter fill_values works as intended. If fill_values
is not specified, the default behavior should be to convert '' to 0.
"""
text = """
A, B, C
, 2, nan
a, -999, -3.4
nan, 5, -9999
8, nan, 7.6e12
"""
table = read_basic(text, delimiter=',', parallel=parallel)
# The empty value in row A should become a masked '0'
assert isinstance(table['A'], MaskedColumn)
assert table['A'][0] is ma.masked
# '0' rather than 0 because there is a string in the column
assert_equal(table['A'].data.data[0], '0')
assert table['A'][1] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=('-999', '0'), parallel=parallel)
assert isinstance(table['B'], MaskedColumn)
assert table['A'][0] is not ma.masked # empty value unaffected
assert table['C'][2] is not ma.masked # -9999 is not an exact match
assert table['B'][1] is ma.masked
# Numeric because the rest of the column contains numeric data
assert_equal(table['B'].data.data[1], 0.0)
assert table['B'][0] is not ma.masked
table = read_basic(text, delimiter=',', fill_values=[], parallel=parallel)
# None of the columns should be masked
for name in 'ABC':
assert not isinstance(table[name], MaskedColumn)
table = read_basic(text, delimiter=',', fill_values=[('', '0', 'A'),
('nan', '999', 'A', 'C')], parallel=parallel)
assert np.isnan(table['B'][3]) # nan filling skips column B
assert table['B'][3] is not ma.masked # should skip masking as well as replacing nan
assert table['A'][0] is ma.masked
assert table['A'][2] is ma.masked
assert_equal(table['A'].data.data[0], '0')
assert_equal(table['A'].data.data[2], '999')
assert table['C'][0] is ma.masked
assert_almost_equal(table['C'].data.data[0], 999.0)
assert_almost_equal(table['C'][1], -3.4) # column is still of type float
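# For reference (comment added for clarity): each ``fill_values`` entry has
# the form (<string to match>, <replacement string>, <optional column
# names...>).  With no column names the rule applies to every column, and
# passing an empty list disables the default rule that masks '' as '0'.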
@pytest.mark.parametrize("parallel", [True, False])
def test_fill_include_exclude_names(parallel, read_csv):
"""
fill_include_names and fill_exclude_names should filter missing/empty value handling
in the same way that include_names and exclude_names filter output columns.
"""
text = """
A, B, C
, 1, 2
3, , 4
5, 5,
"""
table = read_csv(text, fill_include_names=['A', 'B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is ma.masked
assert table['C'][2] is not ma.masked # C not in fill_include_names
table = read_csv(text, fill_exclude_names=['A', 'B'], parallel=parallel)
assert table['C'][2] is ma.masked
assert table['A'][0] is not ma.masked
assert table['B'][1] is not ma.masked # A and B excluded from fill handling
table = read_csv(text, fill_include_names=['A', 'B'], fill_exclude_names=['B'], parallel=parallel)
assert table['A'][0] is ma.masked
assert table['B'][1] is not ma.masked # fill_exclude_names applies after fill_include_names
assert table['C'][2] is not ma.masked
@pytest.mark.parametrize("parallel", [True, False])
def test_many_rows(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of rows
is large (so that each column string is longer than INITIAL_COL_SIZE).
"""
text = 'A B C\n'
for i in range(500): # create 500 rows
text += ' '.join([str(i) for i in range(3)])
text += '\n'
table = read_basic(text, parallel=parallel)
expected = Table([[0] * 500, [1] * 500, [2] * 500], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_many_columns(parallel, read_basic):
"""
Make sure memory reallocation works okay when the number of columns
is large (so that each header string is longer than INITIAL_HEADER_SIZE).
"""
# create a string with 500 columns and two data rows
text = ' '.join([str(i) for i in range(500)])
text += ('\n' + text + '\n' + text)
table = read_basic(text, parallel=parallel)
expected = Table([[i, i] for i in range(500)], names=[str(i) for i in range(500)])
assert_table_equal(table, expected)
def test_fast_reader():
"""
Make sure that ascii.read() works as expected by default and with
fast_reader specified.
"""
text = 'a b c\n1 2 3\n4 5 6'
with pytest.raises(ParameterError): # C reader can't handle regex comment
ascii.read(text, format='fast_basic', guess=False, comment='##')
# Enable multiprocessing and the fast converter
try:
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': True, 'use_fast_converter': True})
except NotImplementedError:
# Might get this on Windows, try without parallel...
if os.name == 'nt':
ascii.read(text, format='basic', guess=False,
fast_reader={'parallel': False,
'use_fast_converter': True})
else:
raise
# Should raise an error if fast_reader has an invalid key
with pytest.raises(FastOptionsError):
ascii.read(text, format='fast_basic', guess=False, fast_reader={'foo': True})
# Use the slow reader instead
ascii.read(text, format='basic', guess=False, comment='##', fast_reader=False)
# Will try the slow reader afterwards by default
ascii.read(text, format='basic', guess=False, comment='##')
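# The ``fast_reader`` argument to ascii.read() is either a bool or a dict of
# options; the keys exercised in this module are (illustrative summary,
# comment added for clarity):
#
#     fast_reader={'parallel': True,            # read with multiprocessing
#                  'use_fast_converter': True,  # fast float conversion
#                  'exponent_style': 'D'}       # Fortran-style exponents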
@pytest.mark.parametrize("parallel", [True, False])
def test_read_tab(parallel, read_tab):
"""
The fast reader for tab-separated values should not strip whitespace, unlike
the basic reader.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = '1\t2\t3\n a\t b \t\n c\t" d\n e"\t '
table = read_tab(text, parallel=parallel)
assert_equal(table['1'][0], ' a') # preserve line whitespace
assert_equal(table['2'][0], ' b ') # preserve field whitespace
assert table['3'][0] is ma.masked # empty value should be masked
assert_equal(table['2'][1], ' d e') # preserve whitespace in quoted fields
assert_equal(table['3'][1], ' ') # preserve end-of-line whitespace
@pytest.mark.parametrize("parallel", [True, False])
def test_default_data_start(parallel, read_basic):
"""
If data_start is not explicitly passed to read(), data processing should
    begin right after the header.
"""
text = 'ignore this line\na b c\n1 2 3\n4 5 6'
table = read_basic(text, header_start=1, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_commented_header(parallel, read_commented_header):
"""
The FastCommentedHeader reader should mimic the behavior of the
CommentedHeader by overriding the default header behavior of FastBasic.
"""
text = """
# A B C
1 2 3
4 5 6
"""
t1 = read_commented_header(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('A', 'B', 'C'))
assert_table_equal(t1, expected)
text = '# first commented line\n # second commented line\n\n' + text
t2 = read_commented_header(text, header_start=2, data_start=0, parallel=parallel)
assert_table_equal(t2, expected)
t3 = read_commented_header(text, header_start=-1, data_start=0, parallel=parallel) # negative indexing allowed
assert_table_equal(t3, expected)
text += '7 8 9'
t4 = read_commented_header(text, header_start=2, data_start=2, parallel=parallel)
expected = Table([[7], [8], [9]], names=('A', 'B', 'C'))
assert_table_equal(t4, expected)
with pytest.raises(ParameterError):
read_commented_header(text, header_start=-1, data_start=-1, parallel=parallel) # data_start cannot be negative
@pytest.mark.parametrize("parallel", [True, False])
def test_rdb(parallel, read_rdb):
"""
Make sure the FastRdb reader works as expected.
"""
text = """
A\tB\tC
1n\tS\t4N
1\t 9\t4.3
"""
table = read_rdb(text, parallel=parallel)
expected = Table([[1], [' 9'], [4.3]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
assert_equal(table['A'].dtype.kind, 'i')
assert table['B'].dtype.kind in ('S', 'U')
assert_equal(table['C'].dtype.kind, 'f')
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tS\tN\n4\tb\ta' # C column contains non-numeric data
read_rdb(text, parallel=parallel)
assert 'Column C failed to convert' in str(e)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\n1\t2\t3' # not enough types specified
read_rdb(text, parallel=parallel)
assert 'mismatch between number of column names and column types' in str(e)
with pytest.raises(ValueError) as e:
text = 'A\tB\tC\nN\tN\t5\n1\t2\t3' # invalid type for column C
read_rdb(text, parallel=parallel)
assert 'type definitions do not all match [num](N|S)' in str(e)
@pytest.mark.parametrize("parallel", [True, False])
def test_data_start(parallel, read_basic):
"""
Make sure that data parsing begins at data_start (ignoring empty and
commented lines but not taking quoted values into account).
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = """
A B C
1 2 3
4 5 6
7 8 "9
\t1"
# comment
10 11 12
"""
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 91, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
table = read_basic(text, data_start=3, parallel=parallel)
# ignore empty line
expected = Table([[7, 10], [8, 11], [91, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
with pytest.raises(InconsistentTableError) as e:
# tries to begin in the middle of quoted field
read_basic(text, data_start=4, parallel=parallel)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e)
table = read_basic(text, data_start=5, parallel=parallel)
# ignore commented line
expected = Table([[10], [11], [12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
text = """
A B C
1 2 3
4 5 6
7 8 9
# comment
10 11 12
"""
# make sure reading works as expected in parallel
table = read_basic(text, data_start=2, parallel=parallel)
expected = Table([[4, 7, 10], [5, 8, 11], [6, 9, 12]], names=('A', 'B', 'C'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_quoted_empty_values(parallel, read_basic):
"""
Quoted empty values spanning multiple lines should be treated correctly.
"""
if parallel:
pytest.xfail("Multiprocessing can fail with quoted fields")
text = 'a b c\n1 2 " \n "'
table = read_basic(text, parallel=parallel)
assert table['c'][0] is ma.masked # empty value masked by default
@pytest.mark.parametrize("parallel", [True, False])
def test_csv_comment_default(parallel, read_csv):
"""
Unless the comment parameter is specified, the CSV reader should
not treat any lines as comments.
"""
text = 'a,b,c\n#1,2,3\n4,5,6'
table = read_csv(text, parallel=parallel)
expected = Table([['#1', '4'], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_whitespace_before_comment(parallel, read_tab):
"""
Readers that don't strip whitespace from data (Tab, RDB)
should still treat lines with leading whitespace and then
the comment char as comment lines.
"""
text = 'a\tb\tc\n # comment line\n1\t2\t3'
table = read_tab(text, parallel=parallel)
expected = Table([[1], [2], [3]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_strip_line_trailing_whitespace(parallel, read_basic):
"""
Readers that strip whitespace from lines should ignore
trailing whitespace after the last data value of each
row.
"""
text = 'a b c\n1 2 \n3 4 5'
with pytest.raises(InconsistentTableError) as e:
ascii.read(StringIO(text), format='fast_basic', guess=False)
assert 'header columns (3) inconsistent with data columns in data line 0' \
in str(e)
text = 'a b c\n 1 2 3 \t \n 4 5 6 '
table = read_basic(text, parallel=parallel)
expected = Table([[1, 4], [2, 5], [3, 6]], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_no_data(parallel, read_basic):
"""
As long as column names are supplied, the C reader
should return an empty table in the absence of data.
"""
table = read_basic('a b c', parallel=parallel)
expected = Table([[], [], []], names=('a', 'b', 'c'))
assert_table_equal(table, expected)
table = read_basic('a b c\n1 2 3', data_start=2, parallel=parallel)
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_line_endings(parallel, read_basic, read_commented_header, read_rdb):
"""
Make sure the fast reader accepts CR and CR+LF
as newlines.
"""
text = 'a b c\n1 2 3\n4 5 6\n7 8 9\n'
expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'))
for newline in ('\r\n', '\r'):
table = read_basic(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
# Make sure the splitlines() method of FileString
# works with CR/CR+LF line endings
text = '#' + text
for newline in ('\r\n', '\r'):
table = read_commented_header(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
expected = Table([[1, 4, 7], [2, 5, 8], [3, 6, 9]], names=('a', 'b', 'c'), masked=True)
expected['a'][0] = np.ma.masked
expected['c'][0] = np.ma.masked
text = 'a\tb\tc\nN\tN\tN\n\t2\t\n4\t5\t6\n7\t8\t9\n'
for newline in ('\r\n', '\r'):
table = read_rdb(text.replace('\n', newline), parallel=parallel)
assert_table_equal(table, expected)
assert np.all(table == expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_store_comments(parallel, read_basic):
"""
Make sure that the output Table produced by the fast
reader stores any comment lines in its meta attribute.
"""
text = """
# header comment
a b c
# comment 2
# comment 3
1 2 3
4 5 6
"""
table = read_basic(text, parallel=parallel, check_meta=True)
assert_equal(table.meta['comments'],
['header comment', 'comment 2', 'comment 3'])
@pytest.mark.parametrize("parallel", [True, False])
def test_empty_quotes(parallel, read_basic):
"""
Make sure the C reader doesn't segfault when the
input data contains empty quotes. [#3407]
"""
table = read_basic('a b\n1 ""\n2 ""', parallel=parallel)
expected = Table([[1, 2], [0, 0]], names=('a', 'b'))
assert_table_equal(table, expected)
@pytest.mark.parametrize("parallel", [True, False])
def test_fast_tab_with_names(parallel, read_tab):
"""
Make sure the C reader doesn't segfault when the header for the
first column is missing [#3545]
"""
content = """#
\tdecDeg\tRate_pn_offAxis\tRate_mos2_offAxis\tObsID\tSourceID\tRADeg\tversion\tCounts_pn\tRate_pn\trun\tRate_mos1\tRate_mos2\tInserted_pn\tInserted_mos2\tbeta\tRate_mos1_offAxis\trcArcsec\tname\tInserted\tCounts_mos1\tInserted_mos1\tCounts_mos2\ty\tx\tCounts\toffAxis\tRot
-3.007559\t0.0000\t0.0010\t0013140201\t0\t213.462574\t0\t2\t0.0002\t0\t0.0001\t0.0001\t0\t1\t0.66\t0.0217\t3.0\tfakeXMMXCS J1413.8-0300\t3\t1\t2\t1\t398.000\t127.000\t5\t13.9\t72.3\t"""
head = ['A{0}'.format(i) for i in range(28)]
table = read_tab(content, data_start=1,
parallel=parallel, names=head)
@pytest.mark.skipif(not os.getenv('TEST_READ_HUGE_FILE'),
reason='Environment variable TEST_READ_HUGE_FILE must be '
'defined to run this test')
def test_read_big_table(tmpdir):
"""Test reading of a huge file.
    This test generates a huge CSV file (~2.3 GB) before reading it (see
    https://github.com/astropy/astropy/pull/5319). The test is run only if the
    environment variable ``TEST_READ_HUGE_FILE`` is defined. Note that running
    the test requires quite a lot of memory (~18 GB when reading the file)!
"""
NB_ROWS = 250000
NB_COLS = 500
filename = str(tmpdir.join("big_table.csv"))
print("Creating a {} rows table ({} columns).".format(NB_ROWS, NB_COLS))
data = np.random.random(NB_ROWS)
t = Table(data=[data]*NB_COLS, names=[str(i) for i in range(NB_COLS)])
data = None
print("Saving the table to {}".format(filename))
t.write(filename, format='ascii.csv', overwrite=True)
t = None
print("Counting the number of lines in the csv, it should be {}"
" + 1 (header).".format(NB_ROWS))
assert sum(1 for line in open(filename)) == NB_ROWS + 1
print("Reading the file with astropy.")
t = Table.read(filename, format='ascii.csv', fast_reader=True)
assert len(t) == NB_ROWS
# Test these both with guessing turned on and off
@pytest.mark.parametrize("guess", [True, False])
# fast_reader configurations: False| 'use_fast_converter'=False|True
@pytest.mark.parametrize('fast_reader', [False, dict(use_fast_converter=False),
dict(use_fast_converter=True)])
# Catch Windows environment since we cannot use _read() with custom fast_reader
@pytest.mark.parametrize("parallel", [False,
pytest.param(True, marks=pytest.mark.xfail(os.name == 'nt', reason="Multiprocessing is currently unsupported on Windows"))])
def test_data_out_of_range(parallel, fast_reader, guess):
"""
    Numbers with exponents beyond the float64 range (roughly 4.94e-324 to
    1.7977e+308 in absolute value) shall be returned as 0 and +-inf
    respectively by the C parser, just like the Python parser.
Test fast converter only to nominal accuracy.
"""
# Python reader and strtod() are expected to return precise results
rtol = 1.e-30
# Update fast_reader dict
if fast_reader:
fast_reader['parallel'] = parallel
if fast_reader.get('use_fast_converter'):
rtol = 1.e-15
elif np.iinfo(np.int).dtype == np.dtype(np.int32):
# On 32bit the standard C parser (strtod) returns strings for these
pytest.xfail("C parser cannot handle float64 on 32bit systems")
if parallel:
if not fast_reader:
pytest.skip("Multiprocessing only available in fast reader")
elif TRAVIS:
pytest.xfail("Multiprocessing can sometimes fail on Travis CI")
fields = ['10.1E+199', '3.14e+313', '2048e+306', '0.6E-325', '-2.e345']
values = np.array([1.01e200, np.inf, np.inf, 0.0, -np.inf])
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test some additional corner cases
fields = ['.0101E202', '0.000000314E+314', '1777E+305', '-1799E+305',
'0.2e-323', '5200e-327', ' 0.0000000000000000000001024E+330']
values = np.array([1.01e200, 3.14e307, 1.777e308, -np.inf, 0.0, 4.94e-324, 1.024e308])
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
# Test corner cases again with non-standard exponent_style (auto-detection)
if fast_reader and fast_reader.get('use_fast_converter'):
fast_reader.update({'exponent_style': 'A'})
else:
pytest.skip("Fortran exponent style only available in fast converter")
fields = ['.0101D202', '0.000000314d+314', '1777+305', '-1799E+305',
'0.2e-323', '2500-327', ' 0.0000000000000000000001024Q+330']
t = ascii.read(StringIO(' '.join(fields)), format='no_header',
guess=guess, fast_reader=fast_reader)
read_values = np.array([col[0] for col in t.itercols()])
assert_almost_equal(read_values, values, rtol=rtol, atol=1.e-324)
@pytest.mark.parametrize("guess", [True, False])
# catch Windows environment since we cannot use _read() with custom fast_reader
@pytest.mark.parametrize("parallel", [
pytest.param(True, marks=pytest.mark.xfail(os.name == 'nt', reason="Multiprocessing is currently unsupported on Windows")),
False])
def test_int_out_of_range(parallel, guess):
"""
Integer numbers outside int range shall be returned as string columns
consistent with the standard (Python) parser (no 'upcasting' to float).
"""
imin = np.iinfo(int).min+1
imax = np.iinfo(int).max-1
huge = '{:d}'.format(imax+2)
text = 'P M S\n {:d} {:d} {:s}'.format(imax, imin, huge)
expected = Table([[imax], [imin], [huge]], names=('P', 'M', 'S'))
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
assert_table_equal(table, expected)
# check with leading zeroes to make sure strtol does not read them as octal
text = 'P M S\n000{:d} -0{:d} 00{:s}'.format(imax, -imin, huge)
expected = Table([[imax], [imin], ['00'+huge]], names=('P', 'M', 'S'))
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
assert_table_equal(table, expected)
# Mixed columns should be returned as float, but if the out-of-range integer
# shows up first, it will produce a string column - with both readers
pytest.xfail("Integer fallback depends on order of rows")
text = 'A B\n 12.3 {0:d}9\n {0:d}9 45.6e7'.format(imax)
expected = Table([[12.3, 10.*imax], [10.*imax, 4.56e8]],
names=('A', 'B'))
table = ascii.read(text, format='basic', guess=guess,
fast_reader={'parallel': parallel})
assert_table_equal(table, expected)
table = ascii.read(text, format='basic', guess=guess, fast_reader=False)
assert_table_equal(table, expected)
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [
pytest.param(True, marks=pytest.mark.xfail(os.name == 'nt', reason="Multiprocessing is currently unsupported on Windows")),
False])
def test_fortran_reader(parallel, guess):
"""
Make sure that ascii.read() can read Fortran-style exponential notation
using the fast_reader.
"""
# check for nominal np.float64 precision
rtol = 1.e-15
atol = 0.0
text = 'A B C D\n100.01{:s}99 2.0 2.0{:s}-103 3\n' + \
' 4.2{:s}-1 5.0{:s}-1 0.6{:s}4 .017{:s}+309'
expc = Table([[1.0001e101, 0.42], [2, 0.5], [2.e-103, 6.e3], [3, 1.7e307]],
names=('A', 'B', 'C', 'D'))
expstyles = {'e': 6*('E'),
'D': ('D', 'd', 'd', 'D', 'd', 'D'),
'Q': 3*('q', 'Q'),
'Fortran': ('E', '0', 'D', 'Q', 'd', '0')}
# C strtod (not-fast converter) can't handle Fortran exp
with pytest.raises(FastOptionsError) as e:
ascii.read(text.format(*(6*('D'))), format='basic', guess=guess,
fast_reader={'use_fast_converter': False,
'parallel': parallel, 'exponent_style': 'D'})
assert 'fast_reader: exponent_style requires use_fast_converter' in str(e)
# Enable multiprocessing and the fast converter iterate over
# all style-exponent combinations, with auto-detection
for s, c in expstyles.items():
table = ascii.read(text.format(*c), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': s})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
# Additional corner-case checks including triple-exponents without
# any character and mixed whitespace separators
text = 'A B\t\t C D\n1.0001+101 2.0+000\t 0.0002-099 3\n ' + \
'0.42-000 \t 0.5 6.+003 0.000000000000000000000017+330'
table = ascii.read(text, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(table, expc, rtol=rtol, atol=atol)
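# Quick reference for ``exponent_style`` (comment added for clarity): a
# single character such as 'E', 'D' or 'Q' selects that exponent letter
# (case-insensitively), while 'A' or 'Fortran' auto-detects any of them,
# including the bare "triple-exponent" form like ``0.0002-099`` used above.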
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize("parallel", [
pytest.param(True, marks=pytest.mark.xfail(os.name == 'nt', reason="Multiprocessing is currently unsupported on Windows")),
False])
def test_fortran_invalid_exp(parallel, guess):
"""
Test Fortran-style exponential notation in the fast_reader with invalid
exponent-like patterns (no triple-digits) to make sure they are returned
as strings instead, as with the standard C parser.
"""
if parallel and TRAVIS:
pytest.xfail("Multiprocessing can sometimes fail on Travis CI")
formats = {'basic': ' ', 'tab': '\t', 'csv': ','}
header = ['S1', 'F2', 'S2', 'F3', 'S3', 'F4', 'F5', 'S4', 'I1', 'F6', 'F7']
# Tested entries and expected returns, first for auto-detect,
# then for different specified exponents
fields = ['1.0001+1', '.42d1', '2.3+10', '0.5', '3+1001', '3000.',
'2', '4.56e-2.3', '8000', '4.2-022', '.00000145e314']
vals_e = ['1.0001+1', '.42d1', '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
vals_d = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', '.00000145e314']
vals_a = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, 4.2e-22, 1.45e308]
vals_v = ['1.0001+1', 4.2, '2.3+10', 0.5, '3+1001', 3.e3,
2, '4.56e-2.3', 8000, '4.2-022', 1.45e308]
# Iterate over supported format types and separators
for f, s in formats.items():
t1 = ascii.read(StringIO(s.join(header)+'\n'+s.join(fields)),
format=f, guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'A'})
assert_table_equal(t1, Table([[col] for col in vals_a], names=header))
# Non-basic separators require guessing enabled to be detected
if guess:
formats['bar'] = '|'
else:
formats = {'basic': ' '}
for s in formats.values():
t2 = ascii.read(StringIO(s.join(header)+'\n'+s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'a'})
assert_table_equal(t2, Table([[col] for col in vals_a], names=header))
# Iterate for (default) expchar 'E'
for s in formats.values():
t3 = ascii.read(StringIO(s.join(header)+'\n'+s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': True})
assert_table_equal(t3, Table([[col] for col in vals_e], names=header))
# Iterate for expchar 'D'
for s in formats.values():
t4 = ascii.read(StringIO(s.join(header)+'\n'+s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'exponent_style': 'D'})
assert_table_equal(t4, Table([[col] for col in vals_d], names=header))
# Iterate for regular converter (strtod)
for s in formats.values():
t5 = ascii.read(StringIO(s.join(header)+'\n'+s.join(fields)), guess=guess,
fast_reader={'parallel': parallel, 'use_fast_converter': False})
read_values = [col[0] for col in t5.itercols()]
if os.name == 'nt':
# Apparently C strtod() on (some?) MSVC recognises 'd' exponents!
assert read_values == vals_v or read_values == vals_e
else:
assert read_values == vals_e
def test_fortran_reader_notbasic():
"""
Check if readers without a fast option raise a value error when a
fast_reader is asked for (implies the default 'guess=True').
"""
tabstr = dedent("""
a b
1 1.23D4
2 5.67D-8
""")[1:-1]
t1 = ascii.read(tabstr.split('\n'), fast_reader=dict(exponent_style='D'))
assert t1['b'].dtype.kind == 'f'
tabrdb = dedent("""
a\tb
# A simple RDB table
N\tN
1\t 1.23D4
2\t 5.67-008
""")[1:-1]
t2 = ascii.read(tabrdb.split('\n'), format='rdb',
fast_reader=dict(exponent_style='fortran'))
assert t2['b'].dtype.kind == 'f'
tabrst = dedent("""
= =======
a b
= =======
1 1.23E4
2 5.67E-8
= =======
""")[1:-1]
t3 = ascii.read(tabrst.split('\n'), format='rst')
assert t3['b'].dtype.kind == 'f'
t4 = ascii.read(tabrst.split('\n'), guess=True)
assert t4['b'].dtype.kind == 'f'
    # In the special case of fast_reader=True (the default), the
    # incompatibility is silently ignored
t5 = ascii.read(tabrst.split('\n'), format='rst', fast_reader=True)
assert t5['b'].dtype.kind == 'f'
with pytest.raises(ParameterError):
t6 = ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader='force')
with pytest.raises(ParameterError):
t7 = ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(use_fast_converter=False))
tabrst = tabrst.replace('E', 'D')
with pytest.raises(ParameterError):
t8 = ascii.read(tabrst.split('\n'), format='rst', guess=False,
fast_reader=dict(exponent_style='D'))
@pytest.mark.parametrize("guess", [True, False])
@pytest.mark.parametrize('fast_reader', [dict(exponent_style='D'),
dict(exponent_style='A')])
def test_dict_kwarg_integrity(fast_reader, guess):
"""
Check if dictionaries passed as kwargs (fast_reader in this test) are
left intact by ascii.read()
"""
expstyle = fast_reader.get('exponent_style', 'E')
fields = ['10.1D+199', '3.14d+313', '2048d+306', '0.6D-325', '-2.d345']
t = ascii.read(StringIO(' '.join(fields)), guess=guess,
fast_reader=fast_reader)
assert fast_reader.get('exponent_style', None) == expstyle
|
23067c5bca277cc0a2a9527a512f5b4148bf06c410b629d9b158041c7e51cf3e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``ECSV``
reader/writer.
Requires `pyyaml <http://pyyaml.org/>`_ to be installed.
"""
import os
import copy
import sys
from io import StringIO
import pytest
import numpy as np
from ....table import Table, Column, QTable, NdarrayMixin
from ....table.table_helpers import simple_table
from ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation
from ....time import Time, TimeDelta
from ....units import allclose as quantity_allclose
from ....units import QuantityInfo
from ....tests.helper import catch_warnings
from ..ecsv import DELIMITERS
from ... import ascii
from .... import units as u
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
DTYPES = ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
'uint64', 'float16', 'float32', 'float64', 'float128',
'str']
if os.name == 'nt' or sys.maxsize <= 2**32:
DTYPES.remove('float128')
T_DTYPES = Table()
for dtype in DTYPES:
if dtype == 'bool':
data = np.array([False, True, False])
elif dtype == 'str':
data = np.array(['ab 0', 'ab, 1', 'ab2'])
else:
data = np.arange(3, dtype=dtype)
c = Column(data, unit='m / s', description='descr_' + dtype,
meta={'meta ' + dtype: 1})
T_DTYPES[dtype] = c
T_DTYPES.meta['comments'] = ['comment1', 'comment2']
# Corresponds to simple_table()
SIMPLE_LINES = ['# %ECSV 0.9',
'# ---',
'# datatype:',
'# - {name: a, datatype: int64}',
'# - {name: b, datatype: float64}',
'# - {name: c, datatype: string}',
'# schema: astropy-2.0',
'a b c',
'1 1.0 c',
'2 2.0 d',
'3 3.0 e']
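# Comment added for clarity: an ECSV file is an ordinary character-separated
# table preceded by a commented YAML header -- the ``%ECSV <version>``
# marker, the per-column ``datatype`` list and the ``schema`` line above --
# followed by the usual header row and data rows.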
@pytest.mark.skipif('not HAS_YAML')
def test_write_simple():
"""
Write a simple table with common types. This shows the compact version
of serialization with one line per column.
"""
t = simple_table()
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == SIMPLE_LINES
@pytest.mark.skipif('not HAS_YAML')
def test_write_full():
"""
    Write a full-featured table with common types and explicitly check the output.
"""
t = T_DTYPES['bool', 'int64', 'float64', 'str']
lines = ['# %ECSV 0.9',
'# ---',
'# datatype:',
'# - name: bool',
'# unit: m / s',
'# datatype: bool',
'# description: descr_bool',
'# meta: {meta bool: 1}',
'# - name: int64',
'# unit: m / s',
'# datatype: int64',
'# description: descr_int64',
'# meta: {meta int64: 1}',
'# - name: float64',
'# unit: m / s',
'# datatype: float64',
'# description: descr_float64',
'# meta: {meta float64: 1}',
'# - name: str',
'# unit: m / s',
'# datatype: string',
'# description: descr_str',
'# meta: {meta str: 1}',
'# meta: !!omap',
'# - comments: [comment1, comment2]',
'# schema: astropy-2.0',
'bool int64 float64 str',
'False 0 0.0 "ab 0"',
'True 1 1.0 "ab, 1"',
'False 2 2.0 ab2']
out = StringIO()
t.write(out, format='ascii.ecsv')
assert out.getvalue().splitlines() == lines
@pytest.mark.skipif('not HAS_YAML')
def test_write_read_roundtrip():
"""
Write a full-featured table with all types and see that it round-trips on
readback. Use both space and comma delimiters.
"""
t = T_DTYPES
for delimiter in DELIMITERS:
out = StringIO()
t.write(out, format='ascii.ecsv', delimiter=delimiter)
t2s = [Table.read(out.getvalue(), format='ascii.ecsv'),
Table.read(out.getvalue(), format='ascii'),
ascii.read(out.getvalue()),
ascii.read(out.getvalue(), format='ecsv', guess=False),
ascii.read(out.getvalue(), format='ecsv')]
for t2 in t2s:
assert t.meta == t2.meta
for name in t.colnames:
assert t[name].attrs_equal(t2[name])
assert np.all(t[name] == t2[name])
@pytest.mark.skipif('not HAS_YAML')
def test_bad_delimiter():
"""
Passing a delimiter other than space or comma gives an exception
"""
out = StringIO()
with pytest.raises(ValueError) as err:
T_DTYPES.write(out, format='ascii.ecsv', delimiter='|')
assert 'only space and comma are allowed' in str(err.value)
@pytest.mark.skipif('not HAS_YAML')
def test_bad_header_start():
"""
Bad header without initial # %ECSV x.x
"""
lines = copy.copy(SIMPLE_LINES)
lines[0] = '# %ECV 0.9'
with pytest.raises(ascii.InconsistentTableError):
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
@pytest.mark.skipif('not HAS_YAML')
def test_bad_delimiter_input():
"""
Illegal delimiter in input
"""
lines = copy.copy(SIMPLE_LINES)
lines.insert(2, '# delimiter: |')
with pytest.raises(ValueError) as err:
Table.read('\n'.join(lines), format='ascii.ecsv', guess=False)
assert 'only space and comma are allowed' in str(err.value)
@pytest.mark.skipif('not HAS_YAML')
def test_multidim_input():
"""
Multi-dimensional column in input
"""
t = Table([np.arange(4).reshape(2, 2)], names=['a'])
out = StringIO()
with pytest.raises(ValueError) as err:
t.write(out, format='ascii.ecsv')
assert 'ECSV format does not support multidimensional column' in str(err.value)
@pytest.mark.skipif('not HAS_YAML')
def test_round_trip_empty_table():
"""Test fix in #5010 for issue #5009 (ECSV fails for empty type with bool type)"""
t = Table(dtype=[bool, 'i', 'f'], names=['a', 'b', 'c'])
out = StringIO()
t.write(out, format='ascii.ecsv')
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.dtype == t2.dtype
assert len(t2) == 0
@pytest.mark.skipif('not HAS_YAML')
def test_csv_ecsv_colnames_mismatch():
"""
Test that mismatch in column names from normal CSV header vs.
ECSV YAML header raises the expected exception.
"""
lines = copy.copy(SIMPLE_LINES)
header_index = lines.index('a b c')
lines[header_index] = 'a b d'
with pytest.raises(ValueError) as err:
ascii.read(lines, format='ecsv')
assert "column names from ECSV header ['a', 'b', 'c']" in str(err)
@pytest.mark.skipif('not HAS_YAML')
def test_regression_5604():
"""
See https://github.com/astropy/astropy/issues/5604 for more.
"""
t = Table()
t.meta = {"foo": 5*u.km, "foo2": u.s}
t["bar"] = [7]*u.km
out = StringIO()
t.write(out, format="ascii.ecsv")
assert '!astropy.units.Unit' in out.getvalue()
assert '!astropy.units.Quantity' in out.getvalue()
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
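"""Compare obj1 and obj2: optionally require the same class, then check each
attribute named in ``attrs`` plus the standard ``info`` attributes, descending
through dotted sub-attribute names (e.g. 'frame.name')."""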
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
if isinstance(a1, np.ndarray) and a1.dtype.kind == 'f':
assert quantity_allclose(a1, a2, rtol=1e-10)
else:
assert np.all(a1 == a2)
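# Module-level mixin columns shared by the ECSV mixin round-trip tests below.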
el = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4',
obstime='J1990.5')
scc = sc.copy()
scc.representation = 'cartesian'
tm = Time([51000.5, 51001.5], format='mjd', scale='tai', precision=5, location=el[0])
tm2 = Time(tm, format='iso')
tm3 = Time(tm, location=el)
tm3.info.serialize_method['ecsv'] = 'jd1_jd2'
mixin_cols = {
'tm': tm,
'tm2': tm2,
'tm3': tm3,
'dt': TimeDelta([1, 2] * u.day),
'sc': sc,
'scc': scc,
'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
obstime=['J1990.5'] * 2),
'q': [1, 2] * u.m,
'lat': Latitude([1, 2] * u.deg),
'lon': Longitude([1, 2] * u.deg, wrap_angle=180.*u.deg),
'ang': Angle([1, 2] * u.deg),
'el': el,
# 'nd': NdarrayMixin(el) # not supported yet
}
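# Attribute names to compare for each column after a round-trip (consumed by
# assert_objects_equal in the mixin tests below).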
time_attrs = ['value', 'shape', 'format', 'scale', 'precision',
'in_subfmt', 'out_subfmt', 'location']
compare_attrs = {
'c1': ['data'],
'c2': ['data'],
'tm': time_attrs,
'tm2': time_attrs,
'tm3': time_attrs,
'dt': ['shape', 'value', 'format', 'scale'],
'sc': ['ra', 'dec', 'representation', 'frame.name'],
'scc': ['x', 'y', 'z', 'representation', 'frame.name'],
'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'],
'q': ['value', 'unit'],
'lon': ['value', 'unit', 'wrap_angle'],
'lat': ['value', 'unit'],
'ang': ['value', 'unit'],
'el': ['x', 'y', 'z', 'ellipsoid'],
'nd': ['x', 'y', 'z'],
}
@pytest.mark.skipif('not HAS_YAML')
def test_ecsv_mixins_ascii_read_class():
"""Ensure that ascii.read(ecsv_file) returns the correct class
(QTable if any Quantity subclasses, Table otherwise).
"""
# Make a table with every mixin type except Quantities
t = QTable({name: col for name, col in mixin_cols.items()
if not isinstance(col.info, QuantityInfo)})
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is Table
# Add a single quantity column
t['lon'] = mixin_cols['lon']
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = ascii.read(out.getvalue(), format='ecsv')
assert type(t2) is QTable
@pytest.mark.skipif('not HAS_YAML')
def test_ecsv_mixins_qtable_to_table():
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = Table.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.allclose(col.value, col2, rtol=1e-10)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.skipif('not HAS_YAML')
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_mixins_as_one(table_cls):
"""Test write/read all cols at once and validate intermediate column names"""
names = sorted(mixin_cols)
serialized_names = ['ang',
'dt',
'el.x', 'el.y', 'el.z',
'lat',
'lon',
'q',
'sc.ra', 'sc.dec',
'scc.x', 'scc.y', 'scc.z',
'scd.ra', 'scd.dec', 'scd.distance',
'scd.obstime',
'tm', # serialize_method is formatted_value
'tm2', # serialize_method is formatted_value
'tm3.jd1', 'tm3.jd2', # serialize is jd1_jd2
'tm3.location.x', 'tm3.location.y', 'tm3.location.z']
t = table_cls([mixin_cols[name] for name in names], names=names)
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
# Read as an ascii.basic table (skip all the ECSV junk)
t3 = table_cls.read(out.getvalue(), format='ascii.basic')
assert t3.colnames == serialized_names
@pytest.mark.skipif('not HAS_YAML')
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_ecsv_mixins_per_column(table_cls, name_col):
"""Test write/read one col at a time and do detailed validation"""
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'description'
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
if isinstance(t[name], NdarrayMixin):
pytest.xfail('NdarrayMixin not supported')
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = table_cls.read(out.getvalue(), format='ascii.ecsv')
assert t.colnames == t2.colnames
for colname in t.colnames:
assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.skipif('HAS_YAML')
def test_ecsv_but_no_yaml_warning():
"""
Test that reading an ECSV file without PyYAML installed emits a warning
when guessing, but raises an exception when the ECSV format is given explicitly.
"""
with catch_warnings() as w:
ascii.read(SIMPLE_LINES)
assert len(w) == 1
assert "file looks like ECSV format but PyYAML is not installed" in str(w[0].message)
with pytest.raises(ascii.InconsistentTableError) as exc:
ascii.read(SIMPLE_LINES, format='ecsv')
assert "PyYAML package is required" in str(exc)
@pytest.mark.skipif('not HAS_YAML')
def test_round_trip_masked_table_default(tmpdir):
"""Test (mostly) round-trip of MaskedColumn through ECSV using default serialization
that uses an empty string "" to mark NULL values. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = str(tmpdir.join('test.ecsv'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t.write(filename)
t2 = Table.read(filename)
assert t2.masked is True
assert t2.colnames == t.colnames
for name in t2.colnames:
# From a formal perspective the round-trip columns are the "same"
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# But peeking under the mask shows that the underlying data are changed
# because by default ECSV uses "" to represent masked elements.
t[name].mask = False
t2[name].mask = False
assert not np.all(t2[name] == t[name]) # Expected diff
@pytest.mark.skipif('not HAS_YAML')
def test_round_trip_masked_table_serialize_mask(tmpdir):
"""Same as prev but set the serialize_method to 'data_mask' so mask is written out"""
filename = str(tmpdir.join('test.ecsv'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t['c'][0] = '' # This would come back as masked for default "" NULL marker
t.write(filename, serialize_method='data_mask')
t2 = Table.read(filename)
assert t2.masked is True
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
|
cdad2bd64be02bbb07721e64a9b3f85e0fc29017c36405c7f07a86152f52df2b | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from io import BytesIO, open
from collections import OrderedDict
import locale
import platform
from io import StringIO
import pathlib
import pytest
import numpy as np
from ... import ascii
from ....table import Table
from .... import table
from ....units import Unit
from ....table.table_helpers import simple_table
from .common import (raises, assert_equal, assert_almost_equal,
assert_true)
from .. import core
from ..ui import _probably_html, get_read_trace, cparser
# setup/teardown function to have the tests run in the correct directory
from .common import setup_function, teardown_function
try:
import bz2 # pylint: disable=W0611
except ImportError:
HAS_BZ2 = False
else:
HAS_BZ2 = True
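# Convenience helper: wrap an ASCII string in a bytes file-like object.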
asciiIO = lambda x: BytesIO(x.encode('ascii'))
@pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False},
{'use_fast_converter': True}, 'force'])
def test_convert_overflow(fast_reader):
"""
Test reading an extremely large integer, which falls through to
string due to an overflow error (#2234). The C parsers used to
return inf (kind 'f') for this.
"""
expected_kind = 'U'
dat = ascii.read(['a', '1' * 10000], format='basic',
fast_reader=fast_reader, guess=False)
assert dat['a'].dtype.kind == expected_kind
def test_guess_with_names_arg():
"""
Make sure reading a table with guess=True gives the expected result when
the names arg is specified.
"""
# This is a NoHeader format table and so `names` should replace
# the default col0, col1 names. It fails as a Basic format
# table when guessing because the column names would be '1', '2'.
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'))
assert len(dat) == 2
assert dat.colnames == ['a', 'b']
# This is a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(['c,d', '3,4'], names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
# This is also a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(['c d', 'e f'], names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
def test_guess_with_format_arg():
"""
When the format or Reader is explicitly given then disable the
strict column name checking in guessing.
"""
dat = ascii.read(['1,2', '3,4'], format='basic')
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic')
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
# For good measure check the same in the unified I/O interface
dat = Table.read(['1,2', '3,4'], format='ascii.basic')
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
def test_guess_with_delimiter_arg():
"""
When the delimiter is explicitly given then do not try others in guessing.
"""
fields = ['10.1E+19', '3.14', '2048', '-23']
values = [1.01e20, 3.14, 2048, -23]
# Default guess should recognise CSV with optional spaces
t0 = ascii.read(asciiIO(', '.join(fields)), guess=True)
for n, v in zip(t0.colnames, values):
assert t0[n][0] == v
# Forcing space as delimiter produces type str columns ('10.1E+19,')
t1 = ascii.read(asciiIO(', '.join(fields)), guess=True, delimiter=' ')
for n, v in zip(t1.colnames[:-1], fields[:-1]):
assert t1[n][0] == v+','
def test_reading_mixed_delimiter_tabs_spaces():
# Regression test for https://github.com/astropy/astropy/issues/6770
dat = ascii.read('1 2\t3\n1 2\t3', format='no_header', names=list('abc'))
assert len(dat) == 2
Table.read(['1 2\t3', '1 2\t3'], format='ascii.no_header',
names=['a', 'b', 'c'])
assert len(dat) == 2
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_with_names_arg(fast_reader):
"""
Test that a bad value of `names` raises an exception.
"""
# CParser only uses columns in `names` and thus reports a mismatch in num_col
with pytest.raises(ascii.InconsistentTableError):
ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_all_files(fast_reader):
for testfile in get_testfiles():
if testfile.get('skip'):
print('\n\n******** SKIPPING {}'.format(testfile['name']))
continue
print('\n\n******** READING {}'.format(testfile['name']))
for guess in (True, False):
test_opts = testfile['opts'].copy()
if 'guess' not in test_opts:
test_opts['guess'] = guess
if ('Reader' in test_opts and 'fast_{0}'.format(test_opts['Reader']._format_name)
in core.FAST_CLASSES): # has fast version
if 'Inputter' not in test_opts: # fast reader doesn't allow this
test_opts['fast_reader'] = fast_reader
table = ascii.read(testfile['name'], **test_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_all_files_via_table(fast_reader):
for testfile in get_testfiles():
if testfile.get('skip'):
print('\n\n******** SKIPPING {}'.format(testfile['name']))
continue
print('\n\n******** READING {}'.format(testfile['name']))
for guess in (True, False):
test_opts = testfile['opts'].copy()
if 'guess' not in test_opts:
test_opts['guess'] = guess
if 'Reader' in test_opts:
format = 'ascii.{0}'.format(test_opts['Reader']._format_name)
del test_opts['Reader']
else:
format = 'ascii'
if 'fast_{0}'.format(format) in core.FAST_CLASSES:
test_opts['fast_reader'] = fast_reader
table = Table.read(testfile['name'], format=format, **test_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
def test_guess_all_files():
for testfile in get_testfiles():
if testfile.get('skip'):
print('\n\n******** SKIPPING {}'.format(testfile['name']))
continue
if not testfile['opts'].get('guess', True):
continue
print('\n\n******** READING {}'.format(testfile['name']))
for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []):
# Copy read options except for those in filter_read_opts
guess_opts = dict((k, v) for k, v in testfile['opts'].items()
if k not in filter_read_opts)
table = ascii.read(testfile['name'], guess=True, **guess_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
def test_daophot_indef():
"""Test that INDEF is correctly interpreted as a missing value"""
table = ascii.read('t/daophot2.dat', Reader=ascii.Daophot)
for colname in table.colnames:
# Three columns have all INDEF values and are masked
mask_value = colname in ('OTIME', 'MAG', 'MERR', 'XAIRMASS')
assert np.all(table[colname].mask == mask_value)
def test_daophot_types():
"""
Test specific data types which are different from what would be
inferred automatically based only on data values. The DAOphot reader uses
the header information to assign types.
"""
table = ascii.read('t/daophot2.dat', Reader=ascii.Daophot)
assert table['LID'].dtype.char in 'fd' # float or double
assert table['MAG'].dtype.char in 'fd' # even without any data values
assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int)
assert table['ID'].dtype.char in 'il' # int or long
def test_daophot_header_keywords():
table = ascii.read('t/daophot.dat', Reader=ascii.Daophot)
expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'),
('REJFILE', '"hello world"', 'filename', '%-23s'),
('SCALE', '1.', 'units/pix', '%-23.7g'),)
keywords = table.meta['keywords'] # Ordered dict of keyword structures
for name, value, units, format_ in expected_keywords:
keyword = keywords[name]
assert_equal(keyword['value'], value)
assert_equal(keyword['units'], units)
assert_equal(keyword['format'], format_)
def test_daophot_multiple_aperture():
table = ascii.read('t/daophot3.dat', Reader=ascii.Daophot)
assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names
assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file
assert table['MERR2'][0] == 1.171
assert np.all(table['RAPERT5'] == 23.3) # assert all the 5th apertures are same 23.3
def test_daophot_multiple_aperture2():
table = ascii.read('t/daophot4.dat', Reader=ascii.Daophot)
assert 'MAG15' in table.colnames # MAG15 is one of the newly created column name
assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file
assert table['MERR2'][0] == 0.049
assert np.all(table['RAPERT5'] == 5.) # assert all the 5th apertures are same 5.0
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_empty_table_no_header(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('t/no_data_without_header.dat', Reader=ascii.NoHeader,
guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_wrong_quote(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('t/simple.txt', guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('t/bad.txt', fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col2(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
ascii.read('t/simple5.txt', delimiter='|', fast_reader=fast_reader)
@raises(OSError)
def test_missing_file():
ascii.read('does_not_exist')
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_names(fast_reader):
names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
data = ascii.read('t/simple3.txt', names=names, delimiter='|',
fast_reader=fast_reader)
assert_equal(data.dtype.names, names)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_include_names(fast_reader):
names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
include_names = ('c1', 'c3')
data = ascii.read('t/simple3.txt', names=names, include_names=include_names,
delimiter='|', fast_reader=fast_reader)
assert_equal(data.dtype.names, include_names)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_exclude_names(fast_reader):
exclude_names = ('Y', 'object')
data = ascii.read('t/simple3.txt', exclude_names=exclude_names, delimiter='|',
fast_reader=fast_reader)
assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad'))
def test_include_names_daophot():
include_names = ('ID', 'MAG', 'PIER')
data = ascii.read('t/daophot.dat', include_names=include_names)
assert_equal(data.dtype.names, include_names)
def test_exclude_names_daophot():
exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR')
data = ascii.read('t/daophot.dat', exclude_names=exclude_names)
assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER'))
def test_custom_process_lines():
def process_lines(lines):
bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE)
striplines = (x.strip() for x in lines)
return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0]
reader = ascii.get_reader(delimiter='|')
reader.inputter.process_lines = process_lines
data = reader.read('t/bars_at_ends.txt')
assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'))
assert_equal(len(data), 3)
def test_custom_process_line():
def process_line(line):
line_out = re.sub(r'^\|\s*', '', line.strip())
return line_out
reader = ascii.get_reader(data_start=2, delimiter='|')
reader.header.splitter.process_line = process_line
reader.data.splitter.process_line = process_line
data = reader.read('t/nls1_stackinfo.dbout')
cols = get_testfiles('t/nls1_stackinfo.dbout')['cols']
assert_equal(data.dtype.names, cols[1:])
def test_custom_splitters():
reader = ascii.get_reader()
reader.header.splitter = ascii.BaseSplitter()
reader.data.splitter = ascii.BaseSplitter()
f = 't/test4.dat'
data = reader.read(f)
testfile = get_testfiles(f)
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091)
assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704)
assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148)
assert_equal(data.field('statname')[2], 'chi2modvar')
assert_almost_equal(data.field('statval')[2], 497.56468441)
def test_start_end():
data = ascii.read('t/test5.dat', header_start=1, data_start=3, data_end=-5)
assert_equal(len(data), 13)
assert_equal(data.field('statname')[0], 'chi2xspecvar')
assert_equal(data.field('statname')[-1], 'chi2gehrels')
def test_set_converters():
converters = {'zabs1.nh': [ascii.convert_numpy('int32'),
ascii.convert_numpy('float32')],
'p1.gamma': [ascii.convert_numpy('str')]
}
data = ascii.read('t/test4.dat', converters=converters)
assert_equal(str(data['zabs1.nh'].dtype), 'float32')
assert_equal(data['p1.gamma'][0], '1.26764500000')
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_string(fast_reader):
f = 't/simple.txt'
with open(f) as fd:
table = fd.read()
testfile = get_testfiles(f)
data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_filelike(fast_reader):
f = 't/simple.txt'
testfile = get_testfiles(f)
with open(f, 'rb') as fd:
data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_lines(fast_reader):
f = 't/simple.txt'
with open(f) as fd:
table = fd.readlines()
testfile = get_testfiles(f)
data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
def test_comment_lines():
table = ascii.get_reader(Reader=ascii.Rdb)
data = table.read('t/apostrophe.rdb')
assert_equal(table.comment_lines, ['# first comment', ' # second comment'])
assert_equal(data.meta['comments'], ['first comment', 'second comment'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values(fast_reader):
f = 't/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
**testfile['opts'])
assert_true((data['a'].mask == [False, True]).all())
assert_true((data['a'] == [1, 1]).all())
assert_true((data['b'].mask == [False, True]).all())
assert_true((data['b'] == [2, 1]).all())
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_col(fast_reader):
f = 't/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader,
**testfile['opts'])
check_fill_values(data)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_include_names(fast_reader):
f = 't/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
fill_include_names=['b'], **testfile['opts'])
check_fill_values(data)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_exclude_names(fast_reader):
f = 't/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
fill_exclude_names=['a'], **testfile['opts'])
check_fill_values(data)
def check_fill_values(data):
"""compare array column by column with expectation """
assert_true((data['a'].mask == [False, False]).all())
assert_true((data['a'] == ['1', 'a']).all())
assert_true((data['b'].mask == [False, True]).all())
# Check that masked value is "do not care" in comparison
assert_true((data['b'] == [2, -999]).all())
data['b'].mask = False # explicitly unmask for comparison
assert_true((data['b'] == [2, 1]).all())
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_list(fast_reader):
f = 't/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')],
fast_reader=fast_reader, **testfile['opts'])
data['a'].mask = False # explicitly unmask for comparison
assert_true((data['a'] == [42, 42]).all())
def test_masking_Cds():
f = 't/cds.dat'
testfile = get_testfiles(f)
data = ascii.read(f,
**testfile['opts'])
assert_true(data['AK'].mask[0])
assert_true(not data['Fit'].mask[0])
def test_null_Ipac():
f = 't/ipac.dat'
testfile = get_testfiles(f)
data = ascii.read(f, **testfile['opts'])
mask = np.array([(True, False, True, False, True),
(False, False, False, False, False)],
dtype=[(str('ra'), '|b1'),
(str('dec'), '|b1'),
(str('sai'), '|b1'),
(str('v2'), '|b1'),
(str('sptype'), '|b1')])
assert np.all(data.mask == mask)
def test_Ipac_meta():
keywords = OrderedDict((('intval', 1),
('floatval', 2.3e3),
('date', "Wed Sp 20 09:48:36 1995"),
('key_continue', 'IPAC keywords can continue across lines')))
comments = ['This is an example of a valid comment']
f = 't/ipac.dat'
testfile = get_testfiles(f)
data = ascii.read(f, **testfile['opts'])
assert data.meta['keywords'].keys() == keywords.keys()
for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()):
assert data_kv['value'] == kv
assert data.meta['comments'] == comments
def test_set_guess_kwarg():
"""Read a file using guess with one of the typical guess_kwargs explicitly set."""
data = ascii.read('t/space_delim_no_header.dat',
delimiter=',', guess=True)
assert data.dtype.names == ('1 3.4 hello',)
assert len(data) == 1
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_rdb_wrong_type(fast_reader):
"""Read RDB data with inconstent data type (except failure)"""
table = """col1\tcol2
N\tN
1\tHello"""
with pytest.raises(ValueError):
ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_default_missing(fast_reader):
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = '\n'.join(['a,b,c,d',
'1,3,,',
'2, , 4.0 , ss '])
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.masked is True
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 -- --',
' 2 -- 4.0 ss']
# Single row table with a single missing element
table = """ a \n "" """
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.pformat() == [' a ',
'---',
' --']
assert dat['a'].dtype.kind == 'i'
# Same test with a fixed width reader
table = '\n'.join([' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss'])
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
assert dat.masked is True
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 -- --',
' 2 -- 4.0 ss']
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss']
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[])
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss']
def get_testfiles(name=None):
"""Set up information about the columns, number of rows, and reader params to
read a bunch of test files and verify columns and number of rows."""
testfiles = [
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 't/apostrophe.rdb',
'nrows': 2,
'opts': {'Reader': ascii.Rdb}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 't/apostrophe.tab',
'nrows': 2,
'opts': {'Reader': ascii.Tab}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 't/cds.dat',
'nrows': 1,
'opts': {'Reader': ascii.Cds}},
# Test malformed CDS file (issues #2241 #467)
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 't/cds_malformed.dat',
'nrows': 1,
'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}},
{'cols': ('a', 'b', 'c'),
'name': 't/commented_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.CommentedHeader}},
{'cols': ('a', 'b', 'c'),
'name': 't/commented_header2.dat',
'nrows': 2,
'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}},
{'cols': ('col1', 'col2', 'col3', 'col4', 'col5'),
'name': 't/continuation.dat',
'nrows': 2,
'opts': {'Inputter': ascii.ContinuationLinesInputter,
'Reader': ascii.NoHeader}},
{'cols': ('ID',
'XCENTER',
'YCENTER',
'MAG',
'MERR',
'MSKY',
'NITER',
'SHARPNESS',
'CHI',
'PIER',
'PERROR'),
'name': 't/daophot.dat',
'nrows': 2,
'opts': {'Reader': ascii.Daophot}},
{'cols': ('NUMBER',
'FLUX_ISO',
'FLUXERR_ISO',
'VALU-ES',
'VALU-ES_1',
'FLAG'),
'name': 't/sextractor.dat',
'nrows': 3,
'opts': {'Reader': ascii.SExtractor}},
{'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
'name': 't/ipac.dat',
'nrows': 2,
'opts': {'Reader': ascii.Ipac}},
{'cols': ('col0',
'objID',
'osrcid',
'xsrcid',
'SpecObjID',
'ra',
'dec',
'obsid',
'ccdid',
'z',
'modelMag_i',
'modelMagErr_i',
'modelMag_r',
'modelMagErr_r',
'expo',
'theta',
'rad_ecf_39',
'detlim90',
'fBlim90'),
'name': 't/nls1_stackinfo.dbout',
'nrows': 58,
'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 't/no_data_cds.dat',
'nrows': 0,
'opts': {'Reader': ascii.Cds}},
{'cols': ('ID',
'XCENTER',
'YCENTER',
'MAG',
'MERR',
'MSKY',
'NITER',
'SHARPNESS',
'CHI',
'PIER',
'PERROR'),
'name': 't/no_data_daophot.dat',
'nrows': 0,
'opts': {'Reader': ascii.Daophot}},
{'cols': ('NUMBER',
'FLUX_ISO',
'FLUXERR_ISO',
'VALUES',
'VALUES_1',
'FLAG'),
'name': 't/no_data_sextractor.dat',
'nrows': 0,
'opts': {'Reader': ascii.SExtractor}},
{'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
'name': 't/no_data_ipac.dat',
'nrows': 0,
'opts': {'Reader': ascii.Ipac}},
{'cols': ('ra', 'v2'),
'name': 't/ipac.dat',
'nrows': 2,
'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}},
{'cols': ('a', 'b', 'c'),
'name': 't/no_data_with_header.dat',
'nrows': 0,
'opts': {}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 't/short.rdb',
'nrows': 7,
'opts': {'Reader': ascii.Rdb}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 't/short.tab',
'nrows': 7,
'opts': {'Reader': ascii.Tab}},
{'cols': ('test 1a', 'test2', 'test3', 'test4'),
'name': 't/simple.txt',
'nrows': 2,
'opts': {'quotechar': "'"}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 't/simple.txt',
'nrows': 1,
'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 't/simple.txt',
'nrows': 1,
'opts': {'quotechar': "'", 'header_start': 1}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 't/simple.txt',
'nrows': 2,
'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}},
{'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
'name': 't/simple2.txt',
'nrows': 3,
'opts': {'delimiter': '|'}},
{'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
'name': 't/simple3.txt',
'nrows': 2,
'opts': {'delimiter': '|'}},
{'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'),
'name': 't/simple4.txt',
'nrows': 3,
'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}},
{'cols': ('col1', 'col2', 'col3'),
'name': 't/space_delim_no_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.NoHeader}},
{'cols': ('col1', 'col2', 'col3'),
'name': 't/space_delim_no_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.NoHeader, 'header_start': None}},
{'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'),
'name': 't/space_delim_blank_lines.txt',
'nrows': 3,
'opts': {}},
{'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'),
'name': 't/test4.dat',
'nrows': 9,
'opts': {}},
{'cols': ('a', 'b', 'c'),
'name': 't/fill_values.txt',
'nrows': 2,
'opts': {'delimiter': ','}},
{'name': 't/whitespace.dat',
'cols': ('quoted colname with tab\tinside', 'col2', 'col3'),
'nrows': 2,
'opts': {'delimiter': r'\s'}},
{'name': 't/simple_csv.csv',
'cols': ('a', 'b', 'c'),
'nrows': 2,
'opts': {'Reader': ascii.Csv}},
{'name': 't/simple_csv_missing.csv',
'cols': ('a', 'b', 'c'),
'nrows': 2,
'skip': True,
'opts': {'Reader': ascii.Csv}},
{'cols': ('cola', 'colb', 'colc'),
'name': 't/latex1.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Facility', 'Id', 'exposure', 'date'),
'name': 't/latex2.tex',
'nrows': 3,
'opts': {'Reader': ascii.AASTex}},
{'cols': ('cola', 'colb', 'colc'),
'name': 't/latex3.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Col1', 'Col2', 'Col3', 'Col4'),
'name': 't/fixed_width_2_line.txt',
'nrows': 2,
'opts': {'Reader': ascii.FixedWidthTwoLine}},
]
try:
import bs4 # pylint: disable=W0611
testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'),
'name': 't/html.html',
'nrows': 3,
'opts': {'Reader': ascii.HTML}})
except ImportError:
pass
if name is not None:
return [x for x in testfiles if x['name'] == name][0]
else:
return testfiles
def test_header_start_exception():
'''Check that certain Readers throw an exception if ``header_start`` is set.
For certain Readers it does not make sense to set ``header_start``; they
throw an exception if you try.
This was implemented in response to issue #885.
'''
for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac,
ascii.BaseReader, ascii.FixedWidthNoHeader,
ascii.Cds, ascii.Daophot]:
with pytest.raises(ValueError):
reader = ascii.core._get_reader(readerclass, header_start=5)
def test_csv_table_read():
"""
Check for a regression introduced by #1935. Pseudo-CSV file with
commented header line.
"""
lines = ['# a, b',
'1, 2',
'3, 4']
t = ascii.read(lines)
assert t.colnames == ['a', 'b']
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_overlapping_names(fast_reader):
"""
Check that the names argument list can overlap with the existing column names.
This tests the issue in #1991.
"""
t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader)
assert t.colnames == ['b', 'a']
def test_sextractor_units():
"""
Make sure that the SExtractor reader correctly reads in descriptions and units.
"""
table = ascii.read('t/sextractor2.dat', Reader=ascii.SExtractor, guess=False)
expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'),
Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'),
Unit('mag * arcsec**(-2)')]
expected_descrs = ['Running object number',
'Windowed position estimate along x',
'Windowed position estimate along y',
'Kron-like elliptical aperture magnitude',
'RMS error for AUTO magnitude',
'Extraction flags',
None,
'Barycenter position along MAMA x axis',
'Peak surface brightness above background']
for i, colname in enumerate(table.colnames):
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_sextractor_last_column_array():
"""
Make sure that the SExtractor reader handles the last column correctly when it is array-like.
"""
table = ascii.read('t/sextractor3.dat', Reader=ascii.SExtractor, guess=False)
expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000',
'MAG_AUTO', 'MAGERR_AUTO',
'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3', 'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6',
'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3', 'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6']
expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'),
Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag')]
expected_descrs = ['Object position along x', None,
'Right ascension of barycenter (J2000)',
'Declination of barycenter (J2000)',
'Kron-like elliptical aperture magnitude',
'RMS error for AUTO magnitude', ] + [
'Fixed aperture magnitude vector'] * 7 + [
'RMS error vector for fixed aperture mag.'] * 7
for i, colname in enumerate(table.colnames):
assert table[colname].name == expected_columns[i]
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_list_with_newlines():
"""
Check that lists of strings where some strings consist of just a newline
("\n") are parsed correctly.
"""
t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"])
assert t.colnames == ['abc']
assert len(t) == 2
assert t[0][0] == 123
assert t[1][0] == 456
def test_commented_csv():
"""
Check that the Csv reader does not ignore lines with the # comment
character, which is defined for most Basic readers.
"""
t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv')
assert t.colnames == ['#a', 'b']
assert len(t) == 2
assert t['#a'][1] == '#3'
def test_meta_comments():
"""
Make sure that line comments are included in the ``meta`` attribute
of the output Table.
"""
t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3'])
assert t.colnames == ['a', 'b', 'c']
assert t.meta['comments'] == ['comment1', 'comment2']
def test_guess_fail():
"""
Check the error message when guess fails
"""
with pytest.raises(ascii.InconsistentTableError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic')
assert "** To figure out why the table did not read, use guess=False and" in str(err.value)
# Test the case with guessing enabled but for a format that has no free params
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='ipac')
assert 'At least one header line beginning and ending with delimiter required' in str(err.value)
# Test the case with guessing enabled but with all params specified
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic', quotechar='"', delimiter=' ', fast_reader=False)
assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value)
@pytest.mark.xfail('not HAS_BZ2')
def test_guessing_file_object():
"""
Test guessing a file object. Fixes #3013 and a similar issue noted in #3019.
"""
t = ascii.read(open('t/ipac.dat.bz2', 'rb'))
assert t.colnames == ['ra', 'dec', 'sai', 'v2', 'sptype']
def test_pformat_roundtrip():
"""Check that the screen output of ``print tab`` can be read. See #3025."""
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = '\n'.join(['a,b,c,d',
'1,3,1.11,1',
'2, 2, 4.0 , ss '])
dat = ascii.read(table)
out = ascii.read(dat.pformat())
assert len(dat) == len(out)
assert dat.colnames == out.colnames
for c in dat.colnames:
assert np.all(dat[c] == out[c])
def test_ipac_abbrev():
lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|',
'| r | rE | rea | real | D | do | dou | f | i | l | da| c |',
' 1 2 3 4 5 6 7 8 9 10 11 12 ']
dat = ascii.read(lines, format='ipac')
for name in dat.columns[0:8]:
assert dat[name].dtype.kind == 'f'
for name in dat.columns[8:10]:
assert dat[name].dtype.kind == 'i'
for name in dat.columns[10:12]:
assert dat[name].dtype.kind in ('U', 'S')
def test_almost_but_not_quite_daophot():
'''Regression test for #3319.
This table looks so close to a daophot table that the daophot reader gets
quite far before it fails with an AttributeError.
Note that this table will actually be read as Commented Header table with
the columns ['some', 'header', 'info'].
'''
lines = ["# some header info",
"#F header info beginning with 'F'",
"1 2 3",
"4 5 6",
"7 8 9"]
dat = ascii.read(lines)
assert len(dat) == 3
@pytest.mark.parametrize('fast', [False, 'force'])
def test_commented_header_comments(fast):
"""
Test that comments in commented_header are as expected with header_start
at different positions, and that the table round-trips.
"""
comments = ['comment 1', 'comment 2', 'comment 3']
lines = ['# a b',
'# comment 1',
'# comment 2',
'# comment 3',
'1 2',
'3 4']
dat = ascii.read(lines, format='commented_header', fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
out = StringIO()
ascii.write(dat, out, format='commented_header', fast_writer=fast)
assert out.getvalue().splitlines() == lines
lines.insert(1, lines.pop(0))
dat = ascii.read(lines, format='commented_header', header_start=1, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines.insert(2, lines.pop(1))
dat = ascii.read(lines, format='commented_header', header_start=2, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
dat = ascii.read(lines, format='commented_header', header_start=-2, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines.insert(3, lines.pop(2))
dat = ascii.read(lines, format='commented_header', header_start=-1, fast_reader=fast)
assert dat.meta['comments'] == comments
assert dat.colnames == ['a', 'b']
lines = ['# a b',
'1 2',
'3 4']
dat = ascii.read(lines, format='commented_header', fast_reader=fast)
assert 'comments' not in dat.meta
assert dat.colnames == ['a', 'b']
def test_probably_html():
"""
Test the routine for guessing if a table input to ascii.read is probably HTML
"""
for table in ('t/html.html',
'http://blah.com/table.html',
'https://blah.com/table.html',
'file://blah/table.htm',
'ftp://blah.com/table.html',
'file://blah.com/table.htm',
' <! doctype html > hello world',
'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk',
['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],
(' <! doctype html > ', ' hello world'),
):
assert _probably_html(table) is True
for table in ('t/html.htms',
'Xhttp://blah.com/table.html',
' https://blah.com/table.htm',
'fole://blah/table.htm',
' < doctype html > hello world',
'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk',
['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],
(' <! doctype htm > ', ' hello world'),
[[1, 2, 3]],
):
assert _probably_html(table) is False
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_data_header_start(fast_reader):
tests = [(['# comment',
'',
' ',
'skip this line', # line 0
'a b', # line 1
'1 2'], # line 2
[{'header_start': 1},
{'header_start': 1, 'data_start': 2}
]
),
(['# comment',
'',
' \t',
'skip this line', # line 0
'a b', # line 1
'',
' \t',
'skip this line', # line 2
'1 2'], # line 3
[{'header_start': 1, 'data_start': 3}]),
(['# comment',
'',
' ',
'a b', # line 0
'',
' ',
'skip this line', # line 1
'1 2'], # line 2
[{'header_start': 0, 'data_start': 2},
{'data_start': 2}])]
for lines, kwargs_list in tests:
for kwargs in kwargs_list:
t = ascii.read(lines, format='basic', fast_reader=fast_reader,
guess=True, **kwargs)
assert t.colnames == ['a', 'b']
assert len(t) == 1
assert np.all(t['a'] == [1])
# Sanity check that the expected Reader is being used
assert get_read_trace()[-1]['kwargs']['Reader'] is (
ascii.Basic if (fast_reader is False) else ascii.FastBasic)
def test_table_with_no_newline():
"""
Test that an input file which is completely empty fails in the expected way.
Test that an input file with one line but no newline succeeds.
"""
# With guessing
table = BytesIO()
with pytest.raises(ascii.InconsistentTableError):
ascii.read(table)
# Without guessing
table = BytesIO()
with pytest.raises(ValueError) as err:
ascii.read(table, guess=False, fast_reader=False, format='basic')
assert 'No header line found' in str(err.value)
table = BytesIO()
with pytest.raises(ValueError) as err:
ascii.read(table, guess=False, fast_reader=True, format='fast_basic')
assert 'Inconsistent data column lengths' in str(err.value)
# Put a single line of column names but with no newline
for kwargs in [dict(),
dict(guess=False, fast_reader=False, format='basic'),
dict(guess=False, fast_reader=True, format='fast_basic')]:
table = BytesIO()
table.write(b'a b')
t = ascii.read(table, **kwargs)
assert t.colnames == ['a', 'b']
assert len(t) == 0
def test_path_object():
fpath = pathlib.Path('t/simple.txt')
data = ascii.read(fpath)
assert len(data) == 2
assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4']
assert data['test2'][1] == 'hat2'
def test_column_conversion_error():
"""
Test that context information (upstream exception message) from a column
conversion error is provided.
"""
ipac = """\
| col0 |
| double |
1 2
"""
with pytest.raises(ValueError) as err:
ascii.read(ipac, guess=False, format='ipac')
assert 'Column col0 failed to convert:' in str(err.value)
with pytest.raises(ValueError) as err:
ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []})
assert 'no converters' in str(err.value)
def test_non_C_locale_with_fast_reader():
"""Test code that forces "C" locale while calling fast reader (#4364)"""
current = locale.setlocale(locale.LC_ALL)
try:
if platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, str('de_DE'))
else:
locale.setlocale(locale.LC_ALL, str('de_DE.utf8'))
for fast_reader in (True, False, {'use_fast_converter': False}, {'use_fast_converter': True}):
t = ascii.read(['a b', '1.5 2'], format='basic', guess=False,
fast_reader=fast_reader)
assert t['a'].dtype.kind == 'f'
except locale.Error as e:
pytest.skip('Locale error: {}'.format(e))
finally:
locale.setlocale(locale.LC_ALL, current)
def test_no_units_for_char_columns():
'''Test that a char column of a Table is assigned no unit and not
a dimensionless unit.'''
t1 = Table([["A"]], names="B")
out = StringIO()
ascii.write(t1, out, format="ipac")
t2 = ascii.read(out.getvalue(), format="ipac", guess=False)
assert t2["B"].unit is None
def test_initial_column_fill_values():
"""Regression test for #5336, #5338."""
class TestHeader(ascii.BasicHeader):
def _set_cols_from_names(self):
self.cols = [ascii.Column(name=x) for x in self.names]
# Set some initial fill values
for col in self.cols:
col.fill_values = {'--': '0'}
class Tester(ascii.Basic):
header_class = TestHeader
reader = ascii.get_reader(Reader=Tester)
assert reader.read("""# Column definition is the first uncommented line
# Default delimiter is the space character.
a b c
# Data starts after the header column definition, blank lines ignored
-- 2 3
4 5 6 """)['a'][0] is np.ma.masked
def test_latex_no_trailing_backslash():
"""
Test that latex/aastex file with no trailing backslash can be read.
"""
lines = r"""
\begin{table}
\begin{tabular}{ccc}
a & b & c \\
1 & 1.0 & c \\ % comment
3\% & 3.0 & e % comment
\end{tabular}
\end{table}
"""
dat = ascii.read(lines, format='latex')
assert dat.colnames == ['a', 'b', 'c']
assert np.all(dat['a'] == ['1', r'3\%'])
assert np.all(dat['c'] == ['c', 'e'])
def test_aastex_no_trailing_backslash():
lines = r"""
\begin{deluxetable}{ccc}
\tablehead{\colhead{a} & \colhead{b} & \colhead{c}}
\startdata
1 & 1.0 & c \\
2 & 2.0 & d \\ % comment
3\% & 3.0 & e % comment
\enddata
\end{deluxetable}
"""
dat = ascii.read(lines, format='aastex')
assert dat.colnames == ['a', 'b', 'c']
assert np.all(dat['a'] == ['1', r'3\%'])
assert np.all(dat['c'] == ['c', 'e'])
@pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252'])
def test_read_with_encoding(tmpdir, encoding):
data = {
'commented_header': u'# à b è \n 1 2 héllo',
'csv': u'à,b,è\n1,2,héllo'
}
testfile = str(tmpdir.join('test.txt'))
for fmt, content in data.items():
with open(testfile, 'w', encoding=encoding) as f:
f.write(content)
table = ascii.read(testfile, encoding=encoding)
assert table.pformat() == [' à b è ',
'--- --- -----',
' 1 2 héllo']
for guess in (True, False):
table = ascii.read(testfile, format=fmt, fast_reader=False,
encoding=encoding, guess=guess)
assert table['è'].dtype.kind == 'U'
assert table.pformat() == [' à b è ',
'--- --- -----',
' 1 2 héllo']
def test_unsupported_read_with_encoding(tmpdir):
# Fast reader is not supported, make sure it raises an exception
with pytest.raises(ascii.ParameterError):
ascii.read('t/simple3.txt', guess=False, fast_reader='force',
encoding='latin1', format='fast_csv')
def test_read_chunks_input_types():
"""
Test chunked reading for different input types: file path, file object,
and string input.
"""
fpath = 't/test5.dat'
t1 = ascii.read(fpath, header_start=1, data_start=3, )
for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()):
t_gen = ascii.read(fp, header_start=1, data_start=3,
guess=False, format='fast_basic',
fast_reader={'chunk_size': 400, 'chunk_generator': True})
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) == 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
for fp in (fpath, open(fpath, 'r'), open(fpath, 'r').read()):
# Now read the full table in chunks
t3 = ascii.read(fp, header_start=1, data_start=3,
fast_reader={'chunk_size': 300})
assert np.all(t1 == t3)
@pytest.mark.parametrize('masked', [True, False])
def test_read_chunks_formats(masked):
"""
Test different supported formats for chunked reading.
"""
t1 = simple_table(size=102, cols=10, kinds='fS', masked=masked)
for i, name in enumerate(t1.colnames):
t1.rename_column(name, 'col{}'.format(i + 1))
# TODO: commented_header does not currently work due to the special-cased
# implementation of header parsing.
for format in 'tab', 'csv', 'no_header', 'rdb', 'basic':
out = StringIO()
ascii.write(t1, out, format=format)
t_gen = ascii.read(out.getvalue(), format=format,
fast_reader={'chunk_size': 400, 'chunk_generator': True})
ts = list(t_gen)
for t in ts:
for col, col1 in zip(t.columns.values(), t1.columns.values()):
assert col.name == col1.name
assert col.dtype.kind == col1.dtype.kind
assert len(ts) > 4
t2 = table.vstack(ts)
assert np.all(t1 == t2)
# Now read the full table in chunks
t3 = ascii.read(out.getvalue(), format=format, fast_reader={'chunk_size': 400})
assert np.all(t1 == t3)
def test_read_chunks_chunk_size_too_small():
fpath = 't/test5.dat'
with pytest.raises(ValueError) as err:
ascii.read(fpath, header_start=1, data_start=3,
fast_reader={'chunk_size': 10})
assert 'no newline found in chunk (chunk_size too small?)' in str(err)
def test_read_chunks_table_changes():
"""Column changes type or size between chunks. This also tests the case with
no final newline.
"""
col = ['a b c'] + ['1.12334 xyz a'] * 50 + ['abcdefg 555 abc'] * 50
table = '\n'.join(col)
t1 = ascii.read(table, guess=False)
t2 = ascii.read(table, fast_reader={'chunk_size': 100})
# This also confirms that the dtypes are exactly the same, i.e.
# the string itemsizes are the same.
assert np.all(t1 == t2)
def test_read_non_ascii():
"""Test that pure-Python reader is used in case the file contains non-ASCII characters
in it.
"""
table = Table.read(['col1, col2', '\u2119, \u01b4', '1, 2'], format='csv')
assert np.all(table['col1'] == ['\u2119', '1'])
assert np.all(table['col2'] == ['\u01b4', '2'])
@pytest.mark.parametrize('enable', [True, False, 'force'])
def test_kwargs_dict_guess(enable):
"""Test that fast_reader dictionary is preserved through guessing sequence.
"""
# Fails for enable=(True, 'force') - #5578
ascii.read('a\tb\n 1\t2\n3\t 4.0', fast_reader=dict(enable=enable))
assert get_read_trace()[-1]['kwargs']['Reader'] is (
ascii.Tab if (enable is False) else ascii.FastTab)
for k in get_read_trace():
if not k.get('status', 'Disabled').startswith('Disabled'):
assert k.get('kwargs').get('fast_reader').get('enable') is enable
|
70f7eaec7174c3f698f5f0e62beefb5925216ee89ecee7cc729f6ca863b7b674 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
The **asdf** subpackage contains code that is used to serialize astropy types
so that they can be represented and stored using the Advanced Scientific Data
Format (ASDF). This subpackage defines classes, referred to as **tags**, that
implement the logic for serialization and deserialization.
ASDF makes use of abstract data type definitions called **schemas**. The tag
classes provided here are specific implementations of particular schemas. Some
of the tags in Astropy (e.g., those related to transforms) implement schemas
that are defined by the ASDF Standard. In other cases, both the tags and
schemas are defined within Astropy (e.g., those related to many of the
coordinate frames).
Astropy currently has no ability to read or write ASDF files itself. In order
to process ASDF files it is necessary to make use of the standalone **asdf**
package. Users should never need to refer to tag implementations directly.
Their presence should be entirely transparent when processing ASDF files.
If both **asdf** and **astropy** are installed, no further configuration is
required in order to process ASDF files. The **asdf** package has been designed
to automatically detect the presence of the tags defined by **astropy**.
Documentation on the ASDF Standard can be found `here
<https://asdf-standard.readthedocs.io>`__. Documentation on the ASDF Python
module can be found `here <https://asdf.readthedocs.io>`__.
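As a minimal, illustrative sketch (the standalone **asdf** package is assumed
to be installed and the filename is arbitrary), an astropy object can be
stored to and read back from an ASDF file without referencing any of the tag
classes defined here::
    import asdf
    from astropy import units as u
    # The astropy tags are detected automatically by asdf.
    af = asdf.AsdfFile({'distance': 5 * u.kpc})
    af.write_to('example.asdf')
    with asdf.open('example.asdf') as af:
        assert af['distance'] == 5 * u.kpc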
"""
|
052f33541476c5f910087804d230b716b560a0bfd2277bffc682a21450c92cdf | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from ....tests.helper import catch_warnings
from ....table import Table, QTable, NdarrayMixin, Column
from ....table.table_helpers import simple_table
from .... import units as u
from ....coordinates import SkyCoord, Latitude, Longitude, Angle, EarthLocation
from ....time import Time, TimeDelta
from ....units.quantity import QuantityInfo
try:
import h5py
except ImportError:
HAS_H5PY = False
else:
HAS_H5PY = True
try:
import yaml
except ImportError:
HAS_YAML = False
else:
HAS_YAML = True
ALL_DTYPES = [np.uint8, np.uint16, np.uint32, np.uint64, np.int8,
np.int16, np.int32, np.int64, np.float32, np.float64,
np.bool_, '|S3']
def _default_values(dtype):
if dtype == np.bool_:
return [0, 1, 1]
elif dtype == '|S3':
return [b'abc', b'def', b'ghi']
else:
return [1, 2, 3]
@pytest.mark.skipif('not HAS_H5PY')
def test_write_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(ValueError) as exc:
t1.write(test_file)
assert exc.value.args[0] == "table path should be set via the path= argument"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_notable_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
with pytest.raises(ValueError) as exc:
t1 = Table.read(test_file, path='/', format='hdf5')
assert exc.value.args[0] == 'no table found in HDF5 group /'
@pytest.mark.skipif('not HAS_H5PY')
def test_read_nopath(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file)
assert np.all(t1['a'] == t2['a'])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_invalid_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(ValueError) as exc:
t1.write(test_file, path='test/')
assert exc.value.args[0] == "table path should end with table name, not /"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_invalid_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/')
assert exc.value.args[0] == "Path test/ does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_table(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
f.create_group('test').create_group('path')
with pytest.raises(OSError) as exc:
Table.read(test_file, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_missing_group_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
with pytest.raises(OSError) as exc:
Table.read(f, path='test/path/table')
assert exc.value.args[0] == "Path test/path/table does not exist"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_simple(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_table(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
with pytest.raises(OSError) as exc:
t1.write(test_file, path='the_table', append=True)
assert exc.value.args[0] == "Table the_table already exists"
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_memory(tmpdir):
with h5py.File('test', 'w', driver='core', backing_store=False) as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
t2 = Table.read(output_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(OSError) as exc:
t1.write(test_file, path='the_table')
assert exc.value.args[0].startswith("File exists:")
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_overwrite(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table', overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
h5py.File(test_file, 'w').close() # create empty file
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table_1', append=True)
t1.write(test_file, path='the_table_2', append=True)
t2 = Table.read(test_file, path='the_table_1')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(test_file, path='the_table_2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_groups(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
with h5py.File(test_file, 'w') as f:
f.create_group('test_1')
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='test_1/the_table_1', append=True)
t1.write(test_file, path='test_2/the_table_2', append=True)
t2 = Table.read(test_file, path='test_1/the_table_1')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(test_file, path='test_2/the_table_2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_write_existing_append_overwrite(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='table1')
t1.write(test_file, path='table2', append=True)
t1v2 = Table()
t1v2.add_column(Column(name='a', data=[4, 5, 6]))
with pytest.raises(OSError) as exc:
t1v2.write(test_file, path='table1', append=True)
assert exc.value.args[0] == 'Table table1 already exists'
t1v2.write(test_file, path='table1', append=True, overwrite=True)
t2 = Table.read(test_file, path='table1')
assert np.all(t2['a'] == [4, 5, 6])
t3 = Table.read(test_file, path='table2')
assert np.all(t3['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_fileobj_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='path/to/data/the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file, path='path/to/data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_fileobj_group_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(test_file, path='path/to/data/the_table')
import h5py
with h5py.File(test_file, 'r') as input_file:
t2 = Table.read(input_file['path/to'], path='data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_read_wrong_fileobj():
class FakeFile:
def read(self):
pass
f = FakeFile()
with pytest.raises(TypeError) as exc:
t1 = Table.read(f, format='hdf5')
assert exc.value.args[0] == 'h5py can only open regular files'
@pytest.mark.skipif('not HAS_H5PY')
def test_write_fileobj(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_fileobj_group(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='path/to/data/the_table')
t2 = Table.read(test_file, path='path/to/data/the_table')
assert np.all(t2['a'] == [1, 2, 3])
@pytest.mark.skipif('not HAS_H5PY')
def test_write_wrong_type():
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
with pytest.raises(TypeError) as exc:
t1.write(1212, path='path/to/data/the_table', format='hdf5')
assert exc.value.args[0] == ('output should be a string '
'or an h5py File or Group object')
@pytest.mark.skipif('not HAS_H5PY')
@pytest.mark.parametrize(('dtype'), ALL_DTYPES)
def test_preserve_single_dtypes(tmpdir, dtype):
test_file = str(tmpdir.join('test.hdf5'))
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name='a', data=np.array(values, dtype=dtype)))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
assert np.all(t2['a'] == values)
assert t2['a'].dtype == dtype
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_all_dtypes(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
@pytest.mark.skipif('not HAS_H5PY')
def test_preserve_meta(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['a'] = 1
t1.meta['b'] = 'hello'
t1.meta['c'] = 3.14159
t1.meta['d'] = True
t1.meta['e'] = np.array([1, 2, 3])
t1.write(test_file, path='the_table')
t2 = Table.read(test_file, path='the_table')
for key in t1.meta:
assert np.all(t1.meta[key] == t2.meta[key])
@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_preserve_serialized(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_preserve_serialized_compatibility_mode(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
with catch_warnings() as w:
t1.write(test_file, path='the_table', serialize_meta=True,
overwrite=True, compatibility_mode=True)
assert str(w[0].message).startswith(
"compatibility mode for writing is deprecated")
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_preserve_serialized_in_complicated_path(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.write(test_file, path='the_table/complicated/path', serialize_meta=True,
overwrite=True)
t2 = Table.read(test_file, path='the_table/complicated/path')
assert t1['a'].format == t2['a'].format
assert t1['a'].unit == t2['a'].unit
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_metadata_very_large(tmpdir):
"""Test that very large datasets work, now!"""
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3], unit="s")
t1['a'].meta['a0'] = "A0"
t1['a'].meta['a1'] = {"a1": [0, 1]}
t1['a'].format = '7.3f'
t1['a'].description = 'A column'
t1.meta['b'] = 1
t1.meta['c'] = {"c0": [0, 1]}
t1.meta["meta_big"] = "0" * (2 ** 16 + 1)
t1.meta["meta_biggerstill"] = "0" * (2 ** 18)
t1.write(test_file, path='the_table', serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path='the_table')
assert t1['a'].unit == t2['a'].unit
assert t1['a'].format == t2['a'].format
assert t1['a'].description == t2['a'].description
assert t1['a'].meta == t2['a'].meta
assert t1.meta == t2.meta
@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_metadata_very_large_fails_compatibility_mode(tmpdir):
"""Test that very large metadata do not work in compatibility mode."""
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1['a'] = Column(data=[1, 2, 3])
t1.meta["meta"] = "0" * (2 ** 16 + 1)
with catch_warnings() as w:
t1.write(test_file, path='the_table', serialize_meta=True,
overwrite=True, compatibility_mode=True)
assert len(w) == 2
    # The error message changed slightly in h5py 2.7.1, hence the two-part
    # assert below (it deliberately drops the leading letter of "[Oo]bject").
assert str(w[1].message).startswith(
"Attributes could not be written to the output HDF5 "
"file: Unable to create attribute ")
assert "bject header message is too large" in str(w[1].message)
@pytest.mark.skipif('not HAS_H5PY')
def test_skip_meta(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['a'] = 1
t1.meta['b'] = 'hello'
t1.meta['c'] = 3.14159
t1.meta['d'] = True
t1.meta['e'] = np.array([1, 2, 3])
t1.meta['f'] = str
with catch_warnings() as w:
t1.write(test_file, path='the_table')
assert len(w) == 1
assert str(w[0].message).startswith(
"Attribute `f` of type {0} cannot be written to HDF5 files - skipping".format(type(t1.meta['f'])))
@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_fail_meta_serialize(tmpdir):
test_file = str(tmpdir.join('test.hdf5'))
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.meta['f'] = str
with pytest.raises(Exception) as err:
t1.write(test_file, path='the_table', serialize_meta=True)
assert "cannot represent an object: <class 'str'>" in str(err)
@pytest.mark.skipif('not HAS_H5PY')
def test_read_h5py_objects(tmpdir):
# Regression test - ensure that Datasets are recognized automatically
test_file = str(tmpdir.join('test.hdf5'))
import h5py
with h5py.File(test_file, 'w') as output_file:
t1 = Table()
t1.add_column(Column(name='a', data=[1, 2, 3]))
t1.write(output_file, path='the_table')
    f = h5py.File(test_file, 'r')  # open read-only
t2 = Table.read(f, path='the_table')
assert np.all(t2['a'] == [1, 2, 3])
t3 = Table.read(f['/'], path='the_table')
assert np.all(t3['a'] == [1, 2, 3])
t4 = Table.read(f['the_table'])
assert np.all(t4['a'] == [1, 2, 3])
f.close() # don't raise an error in 'test --open-files'
def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
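    # Compare two objects attribute-by-attribute; dotted names in `attrs`
    # (e.g. 'frame.name' or 'info.meta') are resolved by walking getattr,
    # falling back to item access where an attribute lookup fails.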
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = ['info.name', 'info.format', 'info.unit', 'info.description', 'info.meta']
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split('.'):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
        # Mixin info.meta can be None instead of an empty OrderedDict();
        # #6720 would fix this.
if attr == 'info.meta':
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
assert np.all(a1 == a2)
# Testing HDF5 table read/write with mixins. This is mostly
# copied from FITS mixin testing.
el = EarthLocation(x=1 * u.km, y=3 * u.km, z=5 * u.km)
el2 = EarthLocation(x=[1, 2] * u.km, y=[3, 4] * u.km, z=[5, 6] * u.km)
sc = SkyCoord([1, 2], [3, 4], unit='deg,deg', frame='fk4',
obstime='J1990.5')
scc = sc.copy()
scc.representation = 'cartesian'
tm = Time([2450814.5, 2450815.5], format='jd', scale='tai', location=el)
mixin_cols = {
'tm': tm,
'dt': TimeDelta([1, 2] * u.day),
'sc': sc,
'scc': scc,
'scd': SkyCoord([1, 2], [3, 4], [5, 6], unit='deg,deg,m', frame='fk4',
obstime=['J1990.5', 'J1991.5']),
'q': [1, 2] * u.m,
'lat': Latitude([1, 2] * u.deg),
'lon': Longitude([1, 2] * u.deg, wrap_angle=180. * u.deg),
'ang': Angle([1, 2] * u.deg),
'el2': el2,
}
time_attrs = ['value', 'shape', 'format', 'scale', 'location']
compare_attrs = {
'c1': ['data'],
'c2': ['data'],
'tm': time_attrs,
'dt': ['shape', 'value', 'format', 'scale'],
'sc': ['ra', 'dec', 'representation', 'frame.name'],
'scc': ['x', 'y', 'z', 'representation', 'frame.name'],
'scd': ['ra', 'dec', 'distance', 'representation', 'frame.name'],
'q': ['value', 'unit'],
'lon': ['value', 'unit', 'wrap_angle'],
'lat': ['value', 'unit'],
'ang': ['value', 'unit'],
'el2': ['x', 'y', 'z', 'ellipsoid'],
'nd': ['x', 'y', 'z'],
}
@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
def test_hdf5_mixins_qtable_to_table(tmpdir):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = str(tmpdir.join('test_simple.hdf5'))
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(filename, format='hdf5', path='root')
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ['unit']
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class)
@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_as_one(table_cls, tmpdir):
"""Test write/read all cols at once and validate intermediate column names"""
filename = str(tmpdir.join('test_simple.hdf5'))
names = sorted(mixin_cols)
serialized_names = ['ang',
'dt.jd1', 'dt.jd2',
'el2.x', 'el2.y', 'el2.z',
'lat',
'lon',
'q',
'sc.ra', 'sc.dec',
'scc.x', 'scc.y', 'scc.z',
'scd.ra', 'scd.dec', 'scd.distance',
'scd.obstime.jd1', 'scd.obstime.jd2',
'tm.jd1', 'tm.jd2',
]
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta['C'] = 'spam'
t.meta['comments'] = ['this', 'is', 'a', 'comment']
t.meta['history'] = ['first', 'second', 'third']
t.write(filename, format="hdf5", path='root', serialize_meta=True)
t2 = table_cls.read(filename, format='hdf5', path='root')
assert t2.meta['C'] == 'spam'
assert t2.meta['comments'] == ['this', 'is', 'a', 'comment']
assert t2.meta['history'] == ['first', 'second', 'third']
assert t.colnames == t2.colnames
# Read directly via hdf5 and confirm column names
h5 = h5py.File(filename, 'r')
assert list(h5['root'].dtype.names) == serialized_names
h5.close()
@pytest.mark.skipif('not HAS_H5PY or not HAS_YAML')
@pytest.mark.parametrize('name_col', list(mixin_cols.items()))
@pytest.mark.parametrize('table_cls', (Table, QTable))
def test_hdf5_mixins_per_column(table_cls, name_col, tmpdir):
"""Test write/read one col at a time and do detailed validation"""
filename = str(tmpdir.join('test_simple.hdf5'))
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=['c1', name, 'c2'])
t[name].info.description = 'my description'
t[name].info.meta = {'list': list(range(50)), 'dict': {'a': 'b' * 200}}
if not t.has_mixin_columns:
pytest.skip('column is not a mixin (e.g. Quantity subclass in Table)')
if isinstance(t[name], NdarrayMixin):
pytest.xfail('NdarrayMixin not supported')
t.write(filename, format="hdf5", path='root', serialize_meta=True)
t2 = table_cls.read(filename, format='hdf5', path='root')
assert t.colnames == t2.colnames
for colname in t.colnames:
assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith('tm'):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray
@pytest.mark.skipif('HAS_YAML or not HAS_H5PY')
def test_warn_for_dropped_info_attributes(tmpdir):
filename = str(tmpdir.join('test.hdf5'))
t = Table([[1, 2]])
t['col0'].info.description = 'hello'
with catch_warnings() as warns:
t.write(filename, path='root', serialize_meta=False)
assert len(warns) == 1
assert str(warns[0].message).startswith(
"table contains column(s) with defined 'unit'")
@pytest.mark.skipif('HAS_YAML or not HAS_H5PY')
def test_error_for_mixins_but_no_yaml(tmpdir):
filename = str(tmpdir.join('test.hdf5'))
t = Table([mixin_cols['sc']])
with pytest.raises(TypeError) as err:
t.write(filename, path='root', serialize_meta=True)
assert "cannot write type SkyCoord column 'col0' to HDF5 without PyYAML" in str(err)
@pytest.mark.skipif('not HAS_YAML or not HAS_H5PY')
def test_round_trip_masked_table_default(tmpdir):
"""Test round-trip of MaskedColumn through HDF5 using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = str(tmpdir.join('test.h5'))
t = simple_table(masked=True) # int, float, and str cols with one masked element
t['c'] = [b'c', b'd', b'e']
t['c'].mask[1] = True
t.write(filename, format='hdf5', path='root', serialize_meta=True)
t2 = Table.read(filename)
assert t2.masked is True
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name])
|
1993c93f6b152162c42fcef08b83034cf8b501be6eb5f0a5922a137d4c46ba2e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from numpy.testing import assert_array_equal
from asdf import yamlutil
import astropy.units as u
from astropy import modeling
from .basic import TransformType
from . import _parameter_to_value
__all__ = ['ShiftType', 'ScaleType', 'MultiplyType', 'PolynomialType',
           'Linear1DType']
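# As a sketch, a Shift model serializes to an ASDF node along these lines
# (the tag name and version follow the class attributes below):
#
#     !transform/shift-1.2.0
#       offset: 2.0
#
# The from_tree_transform/to_tree_transform methods convert between such
# nodes and astropy.modeling instances.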
class ShiftType(TransformType):
name = "transform/shift"
version = '1.2.0'
types = ['astropy.modeling.models.Shift']
@classmethod
def from_tree_transform(cls, node, ctx):
offset = node['offset']
if not isinstance(offset, u.Quantity) and not np.isscalar(offset):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Shift transform.")
return modeling.models.Shift(offset)
@classmethod
def to_tree_transform(cls, model, ctx):
offset = model.offset
node = {'offset': _parameter_to_value(offset)}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Shift) and
isinstance(b, modeling.models.Shift))
assert_array_equal(a.offset.value, b.offset.value)
class ScaleType(TransformType):
name = "transform/scale"
version = '1.2.0'
types = ['astropy.modeling.models.Scale']
@classmethod
def from_tree_transform(cls, node, ctx):
factor = node['factor']
if not isinstance(factor, u.Quantity) and not np.isscalar(factor):
raise NotImplementedError(
"Asdf currently only supports scalar inputs to Scale transform.")
return modeling.models.Scale(factor)
@classmethod
def to_tree_transform(cls, model, ctx):
factor = model.factor
node = {'factor': _parameter_to_value(factor)}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Scale) and
isinstance(b, modeling.models.Scale))
assert_array_equal(a.factor, b.factor)
class MultiplyType(TransformType):
name = "transform/multiplyscale"
version = '1.0.0'
types = ['astropy.modeling.models.Multiply']
@classmethod
def from_tree_transform(cls, node, ctx):
factor = node['factor']
return modeling.models.Multiply(factor)
@classmethod
def to_tree_transform(cls, model, ctx):
factor = model.factor
node = {'factor': _parameter_to_value(factor)}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Multiply) and
isinstance(b, modeling.models.Multiply))
assert_array_equal(a.factor, b.factor)
class PolynomialType(TransformType):
name = "transform/polynomial"
types = ['astropy.modeling.models.Polynomial1D',
'astropy.modeling.models.Polynomial2D']
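    # Polynomial1D coefficients are stored as a flat array; Polynomial2D
    # coefficients as an (n+1, n+1) matrix for degree n, where entry (i, j)
    # holds c{i}_{j} and only entries with i + j <= n are used, e.g. for
    # degree 1:
    #
    #     [[c0_0, c0_1],
    #      [c1_0,    0]]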
@classmethod
def from_tree_transform(cls, node, ctx):
coefficients = np.asarray(node['coefficients'])
n_dim = coefficients.ndim
if n_dim == 1:
model = modeling.models.Polynomial1D(coefficients.size - 1)
model.parameters = coefficients
elif n_dim == 2:
shape = coefficients.shape
degree = shape[0] - 1
if shape[0] != shape[1]:
raise TypeError("Coefficients must be an (n+1, n+1) matrix")
coeffs = {}
for i in range(shape[0]):
for j in range(shape[0]):
if i + j < degree + 1:
                    name = 'c' + str(i) + '_' + str(j)
coeffs[name] = coefficients[i, j]
model = modeling.models.Polynomial2D(degree, **coeffs)
else:
raise NotImplementedError(
"Asdf currently only supports 1D or 2D polynomial transform.")
return model
@classmethod
def to_tree_transform(cls, model, ctx):
if isinstance(model, modeling.models.Polynomial1D):
coefficients = np.array(model.parameters)
elif isinstance(model, modeling.models.Polynomial2D):
degree = model.degree
coefficients = np.zeros((degree + 1, degree + 1))
for i in range(degree + 1):
for j in range(degree + 1):
if i + j < degree + 1:
name = 'c' + str(i) + '_' + str(j)
coefficients[i, j] = getattr(model, name).value
node = {'coefficients': coefficients}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)) and
isinstance(b, (modeling.models.Polynomial1D, modeling.models.Polynomial2D)))
assert_array_equal(a.parameters, b.parameters)
class Linear1DType(TransformType):
name = "transform/linear1d"
version = '1.0.0'
types = ['astropy.modeling.models.Linear1D']
@classmethod
def from_tree_transform(cls, node, ctx):
slope = node.get('slope', None)
intercept = node.get('intercept', None)
return modeling.models.Linear1D(slope=slope, intercept=intercept)
@classmethod
def to_tree_transform(cls, model, ctx):
node = {
'slope': _parameter_to_value(model.slope),
'intercept': _parameter_to_value(model.intercept),
}
return yamlutil.custom_tree_to_tagged_tree(node, ctx)
@classmethod
def assert_equal(cls, a, b):
# TODO: If models become comparable themselves, remove this.
TransformType.assert_equal(a, b)
assert (isinstance(a, modeling.models.Linear1D) and
isinstance(b, modeling.models.Linear1D))
assert_array_equal(a.slope, b.slope)
assert_array_equal(a.intercept, b.intercept)
|
cdb36ba7a8820f4d0fc812ec6e31391f506bae115e8208d25141302dbb585266 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from astropy import table
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf.tests import helpers
def test_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 3
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_array_columns(tmpdir):
a = np.array([([[1, 2], [3, 4]], 2.0, 'x'),
([[5, 6], [7, 8]], 5.0, 'y'),
([[9, 10], [11, 12]], 8.2, 'z')],
dtype=[(str('a'), str('<i4'), (2, 2)),
(str('b'), str('<f8')),
(str('c'), str('|S1'))])
t = table.Table(a, copy=False)
assert t.columns['a'].shape == (3, 2, 2)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_structured_array_columns(tmpdir):
a = np.array([((1, 'a'), 2.0, 'x'),
((4, 'b'), 5.0, 'y'),
((5, 'c'), 8.2, 'z')],
dtype=[(str('a'), [(str('a0'), str('<i4')),
(str('a1'), str('|S1'))]),
(str('b'), str('<f8')),
(str('c'), str('|S1'))])
t = table.Table(a, copy=False)
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_row_order(tmpdir):
a = np.array([(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')],
dtype=[(str('a'), str('<i4')),
(str('b'), str('<f8')),
(str('c'), str('|S1'))])
t = table.Table(a, copy=False)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 1
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
def test_table_inline(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'))
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(list(ff.blocks.internal_blocks)) == 0
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check,
write_options={'auto_inline': 64})
def test_mismatched_columns():
yaml = """
table: !core/table
columns:
- !core/column
data: !core/ndarray
data: [0, 1, 2]
name: a
- !core/column
data: !core/ndarray
data: [0, 1, 2, 3]
name: b
"""
buff = helpers.yaml_to_asdf(yaml)
with pytest.raises(ValueError):
with asdf.AsdfFile.open(buff) as ff:
pass
def test_masked_table(tmpdir):
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
t = table.Table(rows=data_rows, names=('a', 'b', 'c'),
dtype=('i4', 'f8', 'S1'), masked=True)
t.columns['a'].description = 'RA'
t.columns['a'].unit = 'degree'
t.columns['a'].meta = {'foo': 'bar'}
t.columns['a'].mask = [True, False, True]
t.columns['c'].description = 'Some description of some sort'
def check(ff):
assert len(ff.blocks) == 4
helpers.assert_roundtrip_tree({'table': t}, tmpdir, asdf_check_func=check)
|
26a2ef4602cb5792b3e1c5e59b9f6f459348c53387bdde2e7cefd4bbbede69b3 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import os
import pytest
import numpy as np
from astropy.io import fits
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf.tests import helpers
def test_complex_structure(tmpdir):
with fits.open(os.path.join(
os.path.dirname(__file__), 'data', 'complex.fits'), memmap=False) as hdulist:
tree = {
'fits': hdulist
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_fits_table(tmpdir):
a = np.array(
[(0, 1), (2, 3)],
dtype=[(str('A'), int), (str('B'), int)])
h = fits.HDUList()
h.append(fits.BinTableHDU.from_columns(a))
tree = {'fits': h}
def check_yaml(content):
assert b'!core/table' in content
helpers.assert_roundtrip_tree(tree, tmpdir, raw_yaml_check_func=check_yaml)
|
e3e7e8295f907fd19b7e7ba5c61e072e0ff85660c942b48c5bd86e9837b38b37 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
import numpy as np
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf import util
from asdf.tests import helpers
import astropy.units as u
from astropy.modeling import models as astmodels
test_models = [
astmodels.Identity(2), astmodels.Polynomial1D(2, c0=1, c1=2, c2=3),
astmodels.Polynomial2D(1, c0_0=1, c0_1=2, c1_0=3), astmodels.Shift(2.),
astmodels.Scale(3.4), astmodels.RotateNative2Celestial(5.63, -72.5, 180),
astmodels.Multiply(3), astmodels.Multiply(10*u.m),
astmodels.RotateCelestial2Native(5.63, -72.5, 180),
astmodels.EulerAngleRotation(23, 14, 2.3, axes_order='xzx'),
astmodels.Mapping((0, 1), n_inputs=3),
astmodels.Shift(2.*u.deg),
astmodels.Scale(3.4*u.deg),
astmodels.RotateNative2Celestial(5.63*u.deg, -72.5*u.deg, 180*u.deg),
astmodels.RotateCelestial2Native(5.63*u.deg, -72.5*u.deg, 180*u.deg),
]
def test_transforms_compound(tmpdir):
tree = {
'compound':
astmodels.Shift(1) & astmodels.Shift(2) |
astmodels.Sky2Pix_TAN() |
astmodels.Rotation2D() |
astmodels.AffineTransformation2D([[2, 0], [0, 2]], [42, 32]) +
astmodels.Rotation2D(32)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_inverse_transforms(tmpdir):
rotation = astmodels.Rotation2D(32)
rotation.inverse = astmodels.Rotation2D(45)
real_rotation = astmodels.Rotation2D(32)
tree = {
'rotation': rotation,
'real_rotation': real_rotation
}
def check(ff):
assert ff.tree['rotation'].inverse.angle == 45
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
@pytest.mark.parametrize(('model'), test_models)
def test_single_model(tmpdir, model):
tree = {'single_model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_name(tmpdir):
def check(ff):
assert ff.tree['rot'].name == 'foo'
tree = {'rot': astmodels.Rotation2D(23, name='foo')}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=check)
def test_zenithal_with_arguments(tmpdir):
tree = {
'azp': astmodels.Sky2Pix_AZP(0.5, 0.3)
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_naming_of_compound_model(tmpdir):
"""Issue #87"""
def asdf_check(ff):
assert ff.tree['model'].name == 'compound_model'
offx = astmodels.Shift(1)
scl = astmodels.Scale(2)
model = (offx | scl).rename('compound_model')
tree = {
'model': model
}
helpers.assert_roundtrip_tree(tree, tmpdir, asdf_check_func=asdf_check)
def test_generic_projections(tmpdir):
from astropy.io.misc.asdf.tags.transform import projections
for tag_name, (name, params, version) in projections._generic_projections.items():
tree = {
'forward': util.resolve_name(
'astropy.modeling.projections.Sky2Pix_{0}'.format(name))(),
'backward': util.resolve_name(
'astropy.modeling.projections.Pix2Sky_{0}'.format(name))()
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_tabular_model(tmpdir):
points = np.arange(0, 5)
values = [1., 10, 2, 45, -3]
model = astmodels.Tabular1D(points=points, lookup_table=values)
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
table = np.array([[ 3., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 0.]])
points = ([1, 2, 3], [1, 2, 3])
model2 = astmodels.Tabular2D(points, lookup_table=table, bounds_error=False,
fill_value=None, method='nearest')
tree = {'model': model2}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_bounding_box(tmpdir):
model = astmodels.Shift(1) & astmodels.Shift(2)
model.bounding_box = ((1, 3), (2, 4))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_linear1d(tmpdir):
model = astmodels.Linear1D()
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_linear1d_quantity(tmpdir):
model = astmodels.Linear1D(1*u.nm, 1*(u.nm/u.pixel))
tree = {'model': model}
helpers.assert_roundtrip_tree(tree, tmpdir)
|
bce1c610df4b8c262dbc10ab8d1e5923150c1989a227d230272583c6d5eaa58a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import datetime
from collections import OrderedDict
import pytest
import numpy as np
from astropy import time
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf import AsdfFile, yamlutil, tagged
from asdf.tests import helpers
import asdf.schema as asdf_schema
def _flatten_combiners(schema):
newschema = OrderedDict()
def add_entry(path, schema, combiner):
# TODO: Simplify?
cursor = newschema
for i in range(len(path)):
part = path[i]
if isinstance(part, int):
cursor = cursor.setdefault('items', [])
while len(cursor) <= part:
cursor.append({})
cursor = cursor[part]
elif part == 'items':
cursor = cursor.setdefault('items', OrderedDict())
else:
cursor = cursor.setdefault('properties', OrderedDict())
if i < len(path) - 1 and isinstance(path[i+1], int):
cursor = cursor.setdefault(part, [])
else:
cursor = cursor.setdefault(part, OrderedDict())
        cursor.update(schema)
    def process_node(schema, path):
        # Merge the entries of any allOf/anyOf combiners into the flattened
        # schema. NOTE: simplified flattening -- only top-level combiners
        # are merged, which is enough for the time schema validated below.
        if not isinstance(schema, dict):
            return
        for combiner in ('allOf', 'anyOf'):
            for entry in schema.get(combiner, []):
                process_node(entry, path)
        plain = {key: value for key, value in schema.items()
                 if key not in ('allOf', 'anyOf')}
        if plain:
            add_entry(path, plain, None)
    process_node(schema, [])
    return newschema
def test_time(tmpdir):
time_array = time.Time(
np.arange(100), format="unix")
tree = {
'large_time_array': time_array
}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_time_with_location(tmpdir):
# See https://github.com/spacetelescope/asdf/issues/341
from astropy import units as u
from astropy.coordinates.earth import EarthLocation
    location = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
    t = time.Time([1, 2], location=location, format='cxcsec')
tree = {'time': t}
helpers.assert_roundtrip_tree(tree, tmpdir)
def test_isot(tmpdir):
tree = {
'time': time.Time('2000-01-01T00:00:00.000')
}
helpers.assert_roundtrip_tree(tree, tmpdir)
ff = asdf.AsdfFile(tree)
tree = yamlutil.custom_tree_to_tagged_tree(ff.tree, ff)
assert isinstance(tree['time'], str)
def test_time_tag():
schema = asdf_schema.load_schema(
'http://stsci.edu/schemas/asdf/time/time-1.1.0',
resolve_references=True)
schema = _flatten_combiners(schema)
date = time.Time(datetime.datetime.now())
tree = {'date': date}
    ff = AsdfFile(tree=tree)  # use a local name that does not shadow asdf
    instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], ff)
asdf_schema.validate(instance, schema=schema)
tag = 'tag:stsci.edu:asdf/time/time-1.1.0'
date = tagged.tag_object(tag, date)
tree = {'date': date}
    ff = AsdfFile(tree=tree)
    instance = yamlutil.custom_tree_to_tagged_tree(tree['date'], ff)
asdf_schema.validate(instance, schema=schema)
|
83098a639cda53e09093381364f250ccfcbe052e46fd55528f0ab16fd5e0aac2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf.tests.helpers import assert_roundtrip_tree
from astropy import units
from astropy.coordinates import ICRS, FK5, Longitude, Latitude, Angle
from ....extension import AstropyExtension
def test_hcrs_basic(tmpdir):
ra = Longitude(25, unit=units.deg)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
def test_icrs_basic(tmpdir):
wrap_angle = Angle(1.5, unit=units.rad)
ra = Longitude(25, unit=units.deg, wrap_angle=wrap_angle)
dec = Latitude(45, unit=units.deg)
tree = {'coord': ICRS(ra=ra, dec=dec)}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
def test_icrs_nodata(tmpdir):
tree = {'coord': ICRS()}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
def test_icrs_compound(tmpdir):
icrs = ICRS(ra=[0, 1, 2]*units.deg, dec=[3, 4, 5]*units.deg)
tree = {'coord': icrs}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
def test_fk5_time(tmpdir):
tree = {'coord': FK5(equinox="2011-01-01T00:00:00")}
assert_roundtrip_tree(tree, tmpdir, extensions=AstropyExtension())
|
f9bfaaefef442ae457ddc23823e3885f98a15b874c18e3719fd22dd0e30439d9 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from astropy import units as u
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf.tests import helpers
# TODO: Implement defunit
def test_unit():
yaml = """
unit: !unit/unit-1.0.0 "2.1798721 10-18kg m2 s-2"
"""
buff = helpers.yaml_to_asdf(yaml)
with asdf.AsdfFile.open(buff) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.AsdfFile.open(buff2) as ff:
assert ff.tree['unit'].is_equivalent(u.Ry)
|
e2539b2fd74092cdd97199dd945059adc13b932aac95de36724ee9456255fb6e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import io
import pytest
from astropy import units
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf.tests import helpers
def roundtrip_quantity(yaml, quantity):
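    # Parse the YAML snippet into an in-memory ASDF file, verify the
    # quantity, then write the file back out and re-read it to confirm a
    # full round trip.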
buff = helpers.yaml_to_asdf(yaml)
with asdf.AsdfFile.open(buff) as ff:
assert (ff.tree['quantity'] == quantity).all()
buff2 = io.BytesIO()
ff.write_to(buff2)
buff2.seek(0)
with asdf.AsdfFile.open(buff2) as ff:
assert (ff.tree['quantity'] == quantity).all()
def test_value_scalar(tmpdir):
testval = 2.71828
testunit = units.kpc
yaml = """
quantity: !unit/quantity-1.1.0
value: {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_array(tmpdir):
testval = [3.14159]
testunit = units.kg
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_multiarray(tmpdir):
testval = [x*2.3081 for x in range(10)]
testunit = units.ampere
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0 {}
unit: {}
""".format(testval, testunit)
quantity = units.Quantity(testval, unit=testunit)
roundtrip_quantity(yaml, quantity)
def test_value_ndarray(tmpdir):
from numpy import array, float64
    testval = [[1, 2, 3], [4, 5, 6]]
testunit = units.km
yaml = """
quantity: !unit/quantity-1.1.0
value: !core/ndarray-1.0.0
datatype: float64
data:
{}
unit: {}
""".format(testval, testunit)
data = array(testval, float64)
quantity = units.Quantity(data, unit=testunit)
roundtrip_quantity(yaml, quantity)
|
a6d20f4a2b4a77e19a4b12045cf415ba8d14eb6131e64949c0bb16fb56d2fc0e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This plugin provides customization of the header displayed by pytest for
reporting purposes.
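Affiliated packages can customize what gets reported by mutating this
module's state from their ``conftest.py``; a minimal sketch (``mypackage``
is a hypothetical package name)::

    from astropy.tests.plugins.display import (PYTEST_HEADER_MODULES,
                                               TESTED_VERSIONS)

    PYTEST_HEADER_MODULES['Cython'] = 'cython'  # report one more module
    TESTED_VERSIONS['mypackage'] = '0.1'        # listed in the test header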
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import datetime
import locale
import math
from collections import OrderedDict
from ..helper import ignore_warnings
from ...utils.introspection import resolve_name
PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'),
('Scipy', 'scipy'),
('Matplotlib', 'matplotlib'),
('h5py', 'h5py'),
('Pandas', 'pandas')])
# TESTED_VERSIONS always includes Astropy's own version
from ... import __version__
TESTED_VERSIONS = OrderedDict([('Astropy', __version__)])
def pytest_report_header(config):
try:
stdoutencoding = sys.stdout.encoding or 'ascii'
except AttributeError:
stdoutencoding = 'ascii'
args = config.args
# TESTED_VERSIONS can contain the affiliated package version, too
if len(TESTED_VERSIONS) > 1:
for pkg, version in TESTED_VERSIONS.items():
if pkg != 'Astropy':
s = "\nRunning tests with {0} version {1}.\n".format(
pkg, version)
else:
s = "\nRunning tests with Astropy version {0}.\n".format(
TESTED_VERSIONS['Astropy'])
# Per https://github.com/astropy/astropy/pull/4204, strip the rootdir from
# each directory argument
if hasattr(config, 'rootdir'):
rootdir = str(config.rootdir)
if not rootdir.endswith(os.sep):
rootdir += os.sep
dirs = [arg[len(rootdir):] if arg.startswith(rootdir) else arg
for arg in args]
else:
dirs = args
s += "Running tests in {0}.\n\n".format(" ".join(dirs))
s += "Date: {0}\n\n".format(datetime.datetime.now().isoformat()[:19])
from platform import platform
plat = platform()
if isinstance(plat, bytes):
plat = plat.decode(stdoutencoding, 'replace')
s += "Platform: {0}\n\n".format(plat)
s += "Executable: {0}\n\n".format(sys.executable)
s += "Full Python Version: \n{0}\n\n".format(sys.version)
s += "encodings: sys: {0}, locale: {1}, filesystem: {2}".format(
sys.getdefaultencoding(),
locale.getpreferredencoding(),
sys.getfilesystemencoding())
s += '\n'
s += "byteorder: {0}\n".format(sys.byteorder)
s += "float info: dig: {0.dig}, mant_dig: {0.dig}\n\n".format(
sys.float_info)
for module_display, module_name in PYTEST_HEADER_MODULES.items():
try:
with ignore_warnings(DeprecationWarning):
module = resolve_name(module_name)
except ImportError:
s += "{0}: not available\n".format(module_display)
else:
try:
version = module.__version__
except AttributeError:
version = 'unknown (no __version__ attribute)'
s += "{0}: {1}\n".format(module_display, version)
# Helpers version
try:
from ...version import astropy_helpers_version
except ImportError:
pass
else:
s += "astropy_helpers: {0}\n".format(astropy_helpers_version)
special_opts = ["remote_data", "pep8"]
opts = []
for op in special_opts:
op_value = getattr(config.option, op, None)
if op_value:
if isinstance(op_value, str):
op = ': '.join((op, op_value))
opts.append(op)
if opts:
s += "Using Astropy options: {0}.\n".format(", ".join(opts))
return s
def pytest_terminal_summary(terminalreporter):
"""Output a warning to IPython users in case any tests failed."""
try:
get_ipython()
except NameError:
return
if not terminalreporter.stats.get('failed'):
# Only issue the warning when there are actually failures
return
terminalreporter.ensure_newline()
terminalreporter.write_line(
'Some tests are known to fail when run from the IPython prompt; '
'especially, but not limited to tests involving logging and warning '
'handling. Unless you are certain as to the cause of the failure, '
'please check that the failure occurs outside IPython as well. See '
'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
'tests-when-running-the-tests-in-ipython for more information.',
yellow=True, bold=True)
|
12b5a0a0864c28d45febcca8bd8eb19053b226e56d7b8101fd5bebddba711c1a | from ... import units as u
from ..helper import assert_quantity_allclose, pytest
def test_assert_quantity_allclose():
assert_quantity_allclose([1, 2], [1, 2])
assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm)
assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=2 * u.cm)
with pytest.raises(AssertionError) as exc:
assert_quantity_allclose([1, 2] * u.m, [90, 200] * u.cm)
assert exc.value.args[0].startswith("\nNot equal to tolerance")
with pytest.raises(AssertionError):
assert_quantity_allclose([1, 2] * u.m, [101, 201] * u.cm, atol=0.5 * u.cm)
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2] * u.m, [100, 200])
assert exc.value.args[0] == "Units for 'desired' () and 'actual' (m) are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [100, 200] * u.cm)
assert exc.value.args[0] == "Units for 'desired' (cm) and 'actual' () are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2] * u.m, [100, 200] * u.cm, atol=0.3)
assert exc.value.args[0] == "Units for 'atol' () and 'actual' (m) are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [1, 2], atol=0.3 * u.m)
assert exc.value.args[0] == "Units for 'atol' (m) and 'actual' () are not convertible"
with pytest.raises(u.UnitsError) as exc:
assert_quantity_allclose([1, 2], [1, 2], rtol=0.3 * u.m)
assert exc.value.args[0] == "`rtol` should be dimensionless"
|
de29ee883391ae1440adce55ce09b0fb525e67fd2e36042a858d8b6aa7148e55 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
import pytest
import numpy as np
from .. import core, funcs
from ...units import allclose
from ...utils.compat import NUMPY_LT_1_14
from ... import units as u
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
def test_init():
""" Tests to make sure the code refuses inputs it is supposed to"""
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=-0.27)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Neff=-1)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27,
Tcmb0=u.Quantity([0.0, 2], u.K))
with pytest.raises(ValueError):
h0bad = u.Quantity([70, 100], u.km / u.s / u.Mpc)
cosmo = core.FlatLambdaCDM(H0=h0bad, Om0=0.27)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=0.5)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([-0.3, 0.2, 0.1], u.eV)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([0.15, 0.2, 0.1], u.eV)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, Neff=2, m_nu=bad_mnu)
with pytest.raises(ValueError):
bad_mnu = u.Quantity([-0.3, 0.2], u.eV) # 2, expecting 3
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Tcmb0=3, m_nu=bad_mnu)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=-0.04)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=0.4)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27)
cosmo.Ob(1)
with pytest.raises(ValueError):
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27)
cosmo.Odm(1)
with pytest.raises(TypeError):
core.default_cosmology.validate(4)
def test_basic():
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0, Neff=3.04,
Ob0=0.05)
assert allclose(cosmo.Om0, 0.27)
assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
assert allclose(cosmo.Ob0, 0.05)
assert allclose(cosmo.Odm0, 0.27 - 0.05)
# This next test will fail if astropy.const starts returning non-mks
# units by default; see the comment at the top of core.py
assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
assert allclose(cosmo.Ok0, 0.0)
assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
1.0, rtol=1e-6)
assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
cosmo.Onu(1), 1.0, rtol=1e-6)
assert allclose(cosmo.Tcmb0, 2.0 * u.K)
assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
assert allclose(cosmo.Neff, 3.04)
assert allclose(cosmo.h, 0.7)
assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
# Make sure setting them as quantities gives the same results
H0 = u.Quantity(70, u.km / (u.s * u.Mpc))
T = u.Quantity(2.0, u.K)
cosmo = core.FlatLambdaCDM(H0=H0, Om0=0.27, Tcmb0=T, Neff=3.04, Ob0=0.05)
assert allclose(cosmo.Om0, 0.27)
assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
assert allclose(cosmo.Ob0, 0.05)
assert allclose(cosmo.Odm0, 0.27 - 0.05)
assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
assert allclose(cosmo.Ok0, 0.0)
assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
1.0, rtol=1e-6)
assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
cosmo.Onu(1), 1.0, rtol=1e-6)
assert allclose(cosmo.Tcmb0, 2.0 * u.K)
assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
assert allclose(cosmo.Neff, 3.04)
assert allclose(cosmo.h, 0.7)
assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_units():
""" Test if the right units are being returned"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)
assert cosmo.comoving_distance(1.0).unit == u.Mpc
assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
assert cosmo.luminosity_distance(1.0).unit == u.Mpc
assert cosmo.lookback_time(1.0).unit == u.Gyr
assert cosmo.lookback_distance(1.0).unit == u.Mpc
assert cosmo.H0.unit == u.km / u.Mpc / u.s
assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
assert cosmo.Tcmb0.unit == u.K
assert cosmo.Tcmb(1.0).unit == u.K
assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
assert cosmo.Tnu0.unit == u.K
assert cosmo.Tnu(1.0).unit == u.K
assert cosmo.Tnu([0.0, 1.0]).unit == u.K
assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3
assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3
assert cosmo.age(1.0).unit == u.Gyr
assert cosmo.distmod(1.0).unit == u.mag
@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_broadcast():
""" Test array shape broadcasting for functions with single
redshift inputs"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27,
m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))
z = np.linspace(0.1, 1, 6)
z_reshape2d = z.reshape(2, 3)
z_reshape3d = z.reshape(3, 2, 1)
# Things with units
methods = ['comoving_distance', 'luminosity_distance',
'comoving_transverse_distance', 'angular_diameter_distance',
'distmod', 'lookback_time', 'age', 'comoving_volume',
'differential_comoving_volume', 'kpc_comoving_per_arcmin']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert value_flat.unit == value_2d.unit
assert value_flat.unit == value_3d.unit
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Also test unitless ones
methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H',
'w', 'de_density_scale', 'Onu', 'Ogamma',
'nu_relative_density']
for method in methods:
g = getattr(cosmo, method)
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
# Test some dark energy models
methods = ['Om', 'Ode', 'w', 'de_density_scale']
for tcosmo in [core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),
core.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),
core.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),
core.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5,
wp=-1.2, wa=-0.2, zp=0.9),
core.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]:
for method in methods:
            g = getattr(tcosmo, method)  # use the dark energy model under test
value_flat = g(z)
assert value_flat.shape == z.shape
value_2d = g(z_reshape2d)
assert value_2d.shape == z_reshape2d.shape
value_3d = g(z_reshape3d)
assert value_3d.shape == z_reshape3d.shape
assert allclose(value_flat, value_2d.flatten())
assert allclose(value_flat, value_3d.flatten())
@pytest.mark.skipif('not HAS_SCIPY')
def test_clone():
""" Test clone operation"""
cosmo = core.FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.27,
Tcmb0=3.0 * u.K)
z = np.linspace(0.1, 3, 15)
# First, test with no changes, which should return same object
newclone = cosmo.clone()
assert newclone is cosmo
# Now change H0
# Note that H0 affects Ode0 because it changes Ogamma0
newclone = cosmo.clone(H0=60 * u.km / u.s / u.Mpc)
assert newclone is not cosmo
assert newclone.__class__ == cosmo.__class__
assert newclone.name == cosmo.name
assert not allclose(newclone.H0.value, cosmo.H0.value)
assert allclose(newclone.H0, 60.0 * u.km / u.s / u.Mpc)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
assert not allclose(newclone.Onu0, cosmo.Onu0)
assert allclose(newclone.Tcmb0, cosmo.Tcmb0)
assert allclose(newclone.m_nu, cosmo.m_nu)
assert allclose(newclone.Neff, cosmo.Neff)
# Compare modified version with directly instantiated one
cmp = core.FlatLambdaCDM(H0=60 * u.km / u.s / u.Mpc, Om0=0.27,
Tcmb0=3.0 * u.K)
assert newclone.__class__ == cmp.__class__
assert newclone.name == cmp.name
assert allclose(newclone.H0, cmp.H0)
assert allclose(newclone.Om0, cmp.Om0)
assert allclose(newclone.Ode0, cmp.Ode0)
assert allclose(newclone.Ok0, cmp.Ok0)
assert allclose(newclone.Ogamma0, cmp.Ogamma0)
assert allclose(newclone.Onu0, cmp.Onu0)
assert allclose(newclone.Tcmb0, cmp.Tcmb0)
assert allclose(newclone.m_nu, cmp.m_nu)
assert allclose(newclone.Neff, cmp.Neff)
assert allclose(newclone.Om(z), cmp.Om(z))
assert allclose(newclone.H(z), cmp.H(z))
assert allclose(newclone.luminosity_distance(z),
cmp.luminosity_distance(z))
# Now try changing multiple things
newclone = cosmo.clone(name="New name", H0=65 * u.km / u.s / u.Mpc,
Tcmb0=2.8 * u.K)
assert newclone.__class__ == cosmo.__class__
assert not newclone.name == cosmo.name
assert not allclose(newclone.H0.value, cosmo.H0.value)
assert allclose(newclone.H0, 65.0 * u.km / u.s / u.Mpc)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
assert not allclose(newclone.Onu0, cosmo.Onu0)
assert not allclose(newclone.Tcmb0.value, cosmo.Tcmb0.value)
assert allclose(newclone.Tcmb0, 2.8 * u.K)
assert allclose(newclone.m_nu, cosmo.m_nu)
assert allclose(newclone.Neff, cosmo.Neff)
# And direct comparison
cmp = core.FlatLambdaCDM(name="New name", H0=65 * u.km / u.s / u.Mpc,
Om0=0.27, Tcmb0=2.8 * u.K)
assert newclone.__class__ == cmp.__class__
assert newclone.name == cmp.name
assert allclose(newclone.H0, cmp.H0)
assert allclose(newclone.Om0, cmp.Om0)
assert allclose(newclone.Ode0, cmp.Ode0)
assert allclose(newclone.Ok0, cmp.Ok0)
assert allclose(newclone.Ogamma0, cmp.Ogamma0)
assert allclose(newclone.Onu0, cmp.Onu0)
assert allclose(newclone.Tcmb0, cmp.Tcmb0)
assert allclose(newclone.m_nu, cmp.m_nu)
assert allclose(newclone.Neff, cmp.Neff)
assert allclose(newclone.Om(z), cmp.Om(z))
assert allclose(newclone.H(z), cmp.H(z))
assert allclose(newclone.luminosity_distance(z),
cmp.luminosity_distance(z))
# Try a dark energy class, make sure it can handle w params
cosmo = core.w0waCDM(name="test w0wa", H0=70 * u.km / u.s / u.Mpc,
Om0=0.27, Ode0=0.5, wa=0.1, Tcmb0=4.0 * u.K)
newclone = cosmo.clone(w0=-1.1, wa=0.2)
assert newclone.__class__ == cosmo.__class__
assert newclone.name == cosmo.name
assert allclose(newclone.H0, cosmo.H0)
assert allclose(newclone.Om0, cosmo.Om0)
assert allclose(newclone.Ode0, cosmo.Ode0)
assert allclose(newclone.Ok0, cosmo.Ok0)
assert not allclose(newclone.w0, cosmo.w0)
assert allclose(newclone.w0, -1.1)
assert not allclose(newclone.wa, cosmo.wa)
assert allclose(newclone.wa, 0.2)
    # Now test that an exception is raised if the user passes a
    # non-parameter argument
with pytest.raises(AttributeError):
newclone = cosmo.clone(not_an_arg=4)
def test_xtfuncs():
""" Test of absorption and lookback integrand"""
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
z = np.array([2.0, 3.2])
assert allclose(cosmo.lookback_time_integrand(3), 0.052218976654969378,
rtol=1e-4)
assert allclose(cosmo.lookback_time_integrand(z),
[0.10333179, 0.04644541], rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(3), 3.3420145059180402,
rtol=1e-4)
assert allclose(cosmo.abs_distance_integrand(z),
[2.7899584, 3.44104758], rtol=1e-4)
def test_repr():
""" Test string representation of built in classes"""
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725)
expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, '
'Ode0=0.5, Tcmb0=2.725 K, Neff=3.04, m_nu=[{}] eV, '
'Ob0=None)').format(' 0. 0. 0.' if NUMPY_LT_1_14 else
'0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.LambdaCDM(70, 0.3, 0.5, Tcmb0=2.725, m_nu=u.Quantity(0.01, u.eV))
expected = ('LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Ode0=0.5, '
'Tcmb0=2.725 K, Neff=3.04, m_nu=[{}] eV, '
'Ob0=None)').format(' 0.01 0.01 0.01' if NUMPY_LT_1_14 else
'0.01 0.01 0.01')
assert str(cosmo) == expected
cosmo = core.FlatLambdaCDM(50.0, 0.27, Tcmb0=3, Ob0=0.05)
expected = ('FlatLambdaCDM(H0=50 km / (Mpc s), Om0=0.27, '
'Tcmb0=3 K, Neff=3.04, m_nu=[{}] eV, Ob0=0.05)').format(
' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.wCDM(60.0, 0.27, 0.6, Tcmb0=2.725, w0=-0.8, name='test1')
expected = ('wCDM(name="test1", H0=60 km / (Mpc s), Om0=0.27, '
'Ode0=0.6, w0=-0.8, Tcmb0=2.725 K, Neff=3.04, '
'm_nu=[{}] eV, Ob0=None)').format(
' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6, name='test2')
expected = ('FlatwCDM(name="test2", H0=65 km / (Mpc s), Om0=0.27, '
'w0=-0.6, Tcmb0=0 K, Neff=3.04, m_nu=None, Ob0=None)')
assert str(cosmo) == expected
cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, Tcmb0=2.725, wa=0.1, name='test3')
expected = ('w0waCDM(name="test3", H0=60 km / (Mpc s), Om0=0.25, '
'Ode0=0.4, w0=-0.6, wa=0.1, Tcmb0=2.725 K, Neff=3.04, '
'm_nu=[{}] eV, Ob0=None)').format(
' 0. 0. 0.' if NUMPY_LT_1_14 else '0. 0. 0.')
assert str(cosmo) == expected
cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2, name='test4',
Ob0=0.0456789)
expected = ('Flatw0waCDM(name="test4", H0=55 km / (Mpc s), Om0=0.35, '
'w0=-0.9, Tcmb0=0 K, Neff=3.04, m_nu=None, '
'Ob0=0.0457)')
assert str(cosmo) == expected
cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2,
zp=0.3, name='test5')
expected = ('wpwaCDM(name="test5", H0=50 km / (Mpc s), Om0=0.3, '
'Ode0=0.3, wp=-0.9, wa=-0.2, zp=0.3, Tcmb0=0 K, '
'Neff=3.04, m_nu=None, Ob0=None)')
assert str(cosmo) == expected
cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2, Tcmb0=2.725,
m_nu=u.Quantity([0.001, 0.01, 0.015], u.eV))
expected = ('w0wzCDM(H0=55 km / (Mpc s), Om0=0.4, Ode0=0.8, w0=-1.05, '
'wz=-0.2 Tcmb0=2.725 K, Neff=3.04, '
'm_nu=[{}] eV, Ob0=None)').format(
' 0.001 0.01 0.015' if NUMPY_LT_1_14 else
'0.001 0.01 0.015')
assert str(cosmo) == expected
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_z1():
""" Test a flat cosmology at z=1 against several other on-line
calculators.
"""
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
z = 1
# Test values were taken from the following web cosmology
# calculators on 27th Feb 2012:
# Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
# (http://adsabs.harvard.edu/abs/2006PASP..118.1711W)
# Kempner: http://www.kempner.net/cosmic.php
# iCosmos: http://www.icosmos.co.uk/index.html
# The order of values below is Wright, Kempner, iCosmos'
assert allclose(cosmo.comoving_distance(z),
[3364.5, 3364.8, 3364.7988] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.angular_diameter_distance(z),
[1682.3, 1682.4, 1682.3994] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.luminosity_distance(z),
[6729.2, 6729.6, 6729.5976] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.lookback_time(z),
[7.841, 7.84178, 7.843] * u.Gyr, rtol=1e-3)
assert allclose(cosmo.lookback_distance(z),
[2404.0, 2404.24, 2404.4] * u.Mpc, rtol=1e-3)
def test_zeroing():
""" Tests if setting params to 0s always respects that"""
# Make sure Ode = 0 behaves that way
cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0)
assert allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
assert allclose(cosmo.Ode(1), 0)
# Ogamma0 and Onu
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
    assert allclose(cosmo.Ogamma(1.5), 0)
assert allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
    assert allclose(cosmo.Onu(1.5), 0)
assert allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
# Obaryon
cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Ob0=0.0)
assert allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# This class is to test whether the routines work correctly
# if one only overloads w(z)
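# (w(z) is the abstract method of core.FLRW; the base class derives
# quantities such as efunc and de_density_scale from it, the latter by
# numerically integrating w, so overriding w is sufficient)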
class test_cos_sub(core.FLRW):
def __init__(self):
core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=0.0,
name="test_cos")
self._w0 = -0.9
def w(self, z):
return self._w0 * np.ones_like(z)
# Similar, but with neutrinos
class test_cos_subnu(core.FLRW):
def __init__(self):
core.FLRW.__init__(self, 70.0, 0.27, 0.73, Tcmb0=3.0,
m_nu=0.1 * u.eV, name="test_cos_nu")
self._w0 = -0.8
def w(self, z):
return self._w0 * np.ones_like(z)
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_subclass():
# This is the comparison object
z = [0.2, 0.4, 0.6, 0.9]
cosmo = core.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)
    # Values taken from Ned Wright's advanced cosmology calculator, Aug 17 2012
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Now try the subclass that only gives w(z)
cosmo = test_cos_sub()
assert allclose(cosmo.luminosity_distance(z),
[975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
# Test efunc
assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
assert allclose(cosmo.efunc([0.5, 1.0]),
[1.31744953, 1.7489240754], rtol=1e-5)
assert allclose(cosmo.inv_efunc([0.5, 1.0]),
[0.75904236, 0.57178011], rtol=1e-5)
# Test de_density_scale
assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
assert allclose(cosmo.de_density_scale([0.5, 1.0]),
[1.12934694, 1.23114444], rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_varyde_lumdist_mathematica():
"""Tests a few varying dark energy EOS models against a mathematica
computation"""
# w0wa models
z = np.array([0.2, 0.4, 0.9, 1.2])
cosmo = core.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)
assert allclose(cosmo.w0, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.luminosity_distance(z),
[1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4)
assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),
[1.0, 0.9246310669529021, 0.9184087000251957])
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5,
Tcmb0=0.0)
assert allclose(cosmo.luminosity_distance(z),
[974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4)
# wpwa models
cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5,
Tcmb0=0.0)
assert allclose(cosmo.wp, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.5)
assert allclose(cosmo.luminosity_distance(z),
[1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4)
cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9,
Tcmb0=0.0)
assert allclose(cosmo.wp, -1.1)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.9)
assert allclose(cosmo.luminosity_distance(z),
[1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_matter():
# Test non-relativistic matter evolution
tcos = core.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
assert allclose(tcos.Om0, 0.3)
assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
assert allclose(tcos.Om(0), 0.3)
assert allclose(tcos.Ob(0), 0.045)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Om(z), [0.3, 0.59124088, 0.77419355, 0.92045455],
rtol=1e-4)
assert allclose(tcos.Ob(z),
[0.045, 0.08868613, 0.11612903, 0.13806818], rtol=1e-4)
assert allclose(tcos.Odm(z), [0.255, 0.50255474, 0.65806452, 0.78238636],
rtol=1e-4)
# Consistency of dark and baryonic matter evolution with all
# non-relativistic matter
assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_ocurv():
# Test Ok evolution
# Flat, boring case
tcos = core.FlatLambdaCDM(70.0, 0.3)
assert allclose(tcos.Ok0, 0.0)
assert allclose(tcos.Ok(0), 0.0)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],
rtol=1e-6)
# Not flat
tcos = core.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
assert allclose(tcos.Ok0, 0.2)
assert allclose(tcos.Ok(0), 0.2)
assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
rtol=1e-4)
# Test the sum; note that Ogamma/Onu are 0
assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
[1.0, 1.0, 1.0, 1.0], rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ode():
# Test Ode evolution, turn off neutrinos, cmb
tcos = core.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
assert allclose(tcos.Ode0, 0.7)
assert allclose(tcos.Ode(0), 0.7)
z = np.array([0.0, 0.5, 1.0, 2.0])
assert allclose(tcos.Ode(z), [0.7, 0.408759, 0.2258065, 0.07954545],
rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ogamma():
"""Tests the effects of changing the temperature of the CMB"""
    # Tested against Ned Wright's advanced cosmology calculator,
    # Sep 7 2012. The accuracy of our comparison is limited by how
    # many digits the calculator outputs, which restricts our test to
    # about 0.2% accuracy. The NWACC does not allow one to change the
    # number of neutrino species, fixing that at 3. Also, inspection
    # of the NWACC code shows it uses inaccurate constants at the
    # 0.2% level (specifically, a_B), so we shouldn't expect to match
    # it that well. The integral is also done rather crudely.
    # Therefore, we should not expect the NWACC to be accurate to
    # better than about 0.5%, which is unfortunate, but reflects a
    # problem with it rather than with this code. More accurate tests
    # using Mathematica follow below.
z = np.array([1.0, 10.0, 500.0, 1000.0])
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4)
# Next compare with doing the integral numerically in Mathematica,
# which allows more precision in the test. It is at least as
# good as 0.01%, possibly better
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5)
cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
assert allclose(cosmo.angular_diameter_distance(z),
[1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5)
# Just to be really sure, we also do a version where the integral
# is analytic, which is a Ode = 0 flat universe. In this case
# Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
# Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
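    # In the expression below, 5.670373e-8 W m^-2 K^-4 is the
    # Stefan-Boltzmann constant and 1.87837e-26 kg m^-3 is the critical
    # density for h = 1, so Ogamma0h2 = (a_B Tcmb0^4 / c^2) / rho_crit
    # with radiation constant a_B = 4 sigma_SB / c.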
Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26
Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2
Om0 = 1.0 - Or0
hubdis = (299792.458 / 70.0) * u.Mpc
cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
# And integers for z
assert allclose(cosmo.comoving_distance(z.astype(int)),
targvals, rtol=1e-5)
# Try Tcmb0 = 4
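    # (Ogamma0 and Onu0 both scale as Tcmb0**4, so the 2.725 K value
    # of Or0 can simply be rescaled)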
Or0 *= (4.0 / 2.725) ** 4
Om0 = 1.0 - Or0
cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
targvals = 2.0 * hubdis * \
(np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tcmb():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)
assert allclose(cosmo.Tcmb0, 2.5 * u.K)
assert allclose(cosmo.Tcmb(2), 7.5 * u.K)
z = [0.0, 1.0, 2.0, 3.0, 9.0]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
# Make sure it's the same for integers
z = [0, 1, 2, 3, 9]
assert allclose(cosmo.Tcmb(z),
[2.5, 5.0, 7.5, 10.0, 25.0] * u.K, rtol=1e-6)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tnu():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)
assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)
z = [0.0, 1.0, 2.0, 3.0]
expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
# Test for integers
z = [0, 1, 2, 3]
assert allclose(cosmo.Tnu(z), expected, rtol=1e-6)
def test_efunc_vs_invefunc():
""" Test that efunc and inv_efunc give inverse values"""
# Note that all of the subclasses here don't need
# scipy because they don't need to call de_density_scale
# The test following this tests the case where that is needed.
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# Below are the 'standard' included cosmologies
# We do the non-standard case in test_efunc_vs_invefunc_flrw,
# since it requires scipy
cosmo = core.LambdaCDM(70, 0.3, 0.5)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.LambdaCDM(70, 0.3, 0.5, m_nu=u.Quantity(0.01, u.eV))
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.FlatLambdaCDM(50.0, 0.27)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.wCDM(60.0, 0.27, 0.6, w0=-0.8)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, wa=0.1)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2, zp=0.3)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2)
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_efunc_vs_invefunc_flrw():
""" Test that efunc and inv_efunc give inverse values"""
z0 = 0.5
z = np.array([0.5, 1.0, 2.0, 5.0])
# FLRW is abstract, so requires test_cos_sub defined earlier
# This requires scipy, unlike the built-ins, because it
# calls de_density_scale, which has an integral in it
cosmo = test_cos_sub()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
# Add neutrinos
cosmo = test_cos_subnu()
assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_kpc_methods():
cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(cosmo.arcsec_per_kpc_comoving(3),
0.0317179167 * u.arcsec / u.kpc)
assert allclose(cosmo.arcsec_per_kpc_proper(3),
0.1268716668 * u.arcsec / u.kpc)
assert allclose(cosmo.kpc_comoving_per_arcmin(3),
1891.6753126 * u.kpc / u.arcmin)
assert allclose(cosmo.kpc_proper_per_arcmin(3),
472.918828 * u.kpc / u.arcmin)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_volume():
c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
    # Test against Ned Wright's calculator (cubic Gpc)
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
    # The Wright calculator isn't very accurate, so we use a rather
# modest precision
assert allclose(c_flat.comoving_volume(redshifts), wright_flat,
rtol=1e-2)
assert allclose(c_open.comoving_volume(redshifts),
wright_open, rtol=1e-2)
assert allclose(c_closed.comoving_volume(redshifts),
wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_differential_comoving_volume():
from scipy.integrate import quad
c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
# test that integration of differential_comoving_volume()
# yields same as comoving_volume()
redshifts = np.array([0.5, 1, 2, 3, 5, 9])
wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
3654.802]) * u.Gpc**3
wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
3123.814]) * u.Gpc**3
wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
358.992]) * u.Gpc**3
    # The Wright calculator isn't very accurate, so we use a rather
# modest precision.
ftemp = lambda x: c_flat.differential_comoving_volume(x).value
otemp = lambda x: c_open.differential_comoving_volume(x).value
ctemp = lambda x: c_closed.differential_comoving_volume(x).value
# Multiply by solid_angle (4 * pi)
assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_flat, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_open, rtol=1e-2)
assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0]
for redshift in redshifts]) * u.Mpc**3,
wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_open_closed_icosmo():
""" Test against the tabulated values generated from icosmo.org
with three example cosmologies (flat, open and closed).
"""
cosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 669.77536 576.15085 778.61386
0.32500000 1285.5964 970.26143 1703.4152
0.50000000 1888.6254 1259.0836 2832.9381
0.66250000 2395.5489 1440.9317 3982.6000
0.82500000 2855.5732 1564.6976 5211.4210
1.0000000 3303.8288 1651.9144 6607.6577
1.1625000 3681.1867 1702.2829 7960.5663
1.3250000 4025.5229 1731.4077 9359.3408
1.5000000 4363.8558 1745.5423 10909.640
1.6625000 4651.4830 1747.0359 12384.573
1.8250000 4916.5970 1740.3883 13889.387
2.0000000 5179.8621 1726.6207 15539.586
2.1625000 5406.0204 1709.4136 17096.540
2.3250000 5616.5075 1689.1752 18674.888
2.5000000 5827.5418 1665.0120 20396.396
2.6625000 6010.4886 1641.0890 22013.414
2.8250000 6182.1688 1616.2533 23646.796
3.0000000 6355.6855 1588.9214 25422.742
3.1625000 6507.2491 1563.3031 27086.425
3.3250000 6650.4520 1537.6768 28763.205
3.5000000 6796.1499 1510.2555 30582.674
3.6625000 6924.2096 1485.0852 32284.127
3.8250000 7045.8876 1460.2876 33996.408
4.0000000 7170.3664 1434.0733 35851.832
4.1625000 7280.3423 1410.2358 37584.767
4.3250000 7385.3277 1386.9160 39326.870
4.5000000 7493.2222 1362.4040 41212.722
4.6625000 7588.9589 1340.2135 42972.480
"""
cosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 643.08185 553.18868 747.58265
0.32500000 1200.9858 906.40441 1591.3062
0.50000000 1731.6262 1154.4175 2597.4393
0.66250000 2174.3252 1307.8648 3614.8157
0.82500000 2578.7616 1413.0201 4706.2399
1.0000000 2979.3460 1489.6730 5958.6920
1.1625000 3324.2002 1537.2024 7188.5829
1.3250000 3646.8432 1568.5347 8478.9104
1.5000000 3972.8407 1589.1363 9932.1017
1.6625000 4258.1131 1599.2913 11337.226
1.8250000 4528.5346 1603.0211 12793.110
2.0000000 4804.9314 1601.6438 14414.794
2.1625000 5049.2007 1596.5852 15968.097
2.3250000 5282.6693 1588.7727 17564.875
2.5000000 5523.0914 1578.0261 19330.820
2.6625000 5736.9813 1566.4113 21011.694
2.8250000 5942.5803 1553.6158 22730.370
3.0000000 6155.4289 1538.8572 24621.716
3.1625000 6345.6997 1524.4924 26413.975
3.3250000 6529.3655 1509.6799 28239.506
3.5000000 6720.2676 1493.3928 30241.204
3.6625000 6891.5474 1478.0799 32131.840
3.8250000 7057.4213 1462.6780 34052.058
4.0000000 7230.3723 1446.0745 36151.862
4.1625000 7385.9998 1430.7021 38130.224
4.3250000 7537.1112 1415.4199 40135.117
4.5000000 7695.0718 1399.1040 42322.895
4.6625000 7837.5510 1384.1150 44380.133
"""
cosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 601.80160 517.67879 699.59436
0.32500000 1057.9502 798.45297 1401.7840
0.50000000 1438.2161 958.81076 2157.3242
0.66250000 1718.6778 1033.7912 2857.3019
0.82500000 1948.2400 1067.5288 3555.5381
1.0000000 2152.7954 1076.3977 4305.5908
1.1625000 2312.3427 1069.2914 5000.4410
1.3250000 2448.9755 1053.3228 5693.8681
1.5000000 2575.6795 1030.2718 6439.1988
1.6625000 2677.9671 1005.8092 7130.0873
1.8250000 2768.1157 979.86398 7819.9270
2.0000000 2853.9222 951.30739 8561.7665
2.1625000 2924.8116 924.84161 9249.7167
2.3250000 2988.5333 898.80701 9936.8732
2.5000000 3050.3065 871.51614 10676.073
2.6625000 3102.1909 847.01459 11361.774
2.8250000 3149.5043 823.39982 12046.854
3.0000000 3195.9966 798.99915 12783.986
3.1625000 3235.5334 777.30533 13467.908
3.3250000 3271.9832 756.52790 14151.327
3.5000000 3308.1758 735.15017 14886.791
3.6625000 3339.2521 716.19347 15569.263
3.8250000 3368.1489 698.06195 16251.319
4.0000000 3397.0803 679.41605 16985.401
4.1625000 3422.1142 662.87926 17666.664
4.3250000 3445.5542 647.05243 18347.576
4.5000000 3469.1805 630.76008 19080.493
4.6625000 3489.7534 616.29199 19760.729
"""
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=True)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=True)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=True)
dm = dm * u.Mpc
da = da * u.Mpc
dl = dl * u.Mpc
cosmo = core.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0)
assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
assert allclose(cosmo.angular_diameter_distance(redshifts), da)
assert allclose(cosmo.luminosity_distance(redshifts), dl)
@pytest.mark.skipif('not HAS_SCIPY')
def test_integral():
# Test integer vs. floating point inputs
cosmo = core.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)
assert allclose(cosmo.comoving_distance(3),
cosmo.comoving_distance(3.0), rtol=1e-7)
assert allclose(cosmo.comoving_distance([1, 2, 3, 5]),
cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),
rtol=1e-7)
assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
assert allclose(cosmo.efunc([1, 2, 6]),
cosmo.efunc([1.0, 2.0, 6.0]), rtol=1e-7)
assert allclose(cosmo.inv_efunc([1, 2, 6]),
cosmo.inv_efunc([1.0, 2.0, 6.0]), rtol=1e-7)
def test_wz():
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
assert allclose(cosmo.w(1.0), -1.)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-1., -1, -1, -1, -1, -1])
cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-0.5)
assert allclose(cosmo.w(1.0), -0.5)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.5, -0.5, -0.5, -0.5, -0.5, -0.5])
assert allclose(cosmo.w0, -0.5)
cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wz=0.5)
assert allclose(cosmo.w(1.0), -0.5)
assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1.0, -0.75, -0.5, -0.25, 0.15])
assert allclose(cosmo.w0, -1.0)
assert allclose(cosmo.wz, 0.5)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
assert allclose(cosmo.w0, -1.0)
assert allclose(cosmo.wa, -0.5)
assert allclose(cosmo.w(1.0), -1.25)
assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
[-1, -1.16666667, -1.25, -1.3, -1.34848485])
cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
assert allclose(cosmo.wp, -0.9)
assert allclose(cosmo.wa, 0.2)
assert allclose(cosmo.zp, 0.5)
assert allclose(cosmo.w(0.5), -0.9)
assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.94848485, -0.93333333, -0.9, -0.84666667,
-0.82380952, -0.78266667])
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_densityscale():
cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
assert allclose(cosmo.de_density_scale(z),
[1.0, 1.0, 1.0, 1.0, 1.0])
# Integer check
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)
assert allclose(cosmo.de_density_scale(z),
[1.15369, 1.31453, 1.83712, 3.95285, 6.5479],
rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
assert allclose(cosmo.de_density_scale(z),
[0.746048, 0.5635595, 0.25712378, 0.026664129,
0.0035916468], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
assert allclose(cosmo.de_density_scale(z),
[0.9934201, 0.9767912, 0.897450,
0.622236, 0.4458753], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
wa=0.2, zp=0.5)
assert allclose(cosmo.de_density_scale(z),
[1.012246048, 1.0280102, 1.087439,
1.324988, 1.565746], rtol=1e-4)
assert allclose(cosmo.de_density_scale(3),
cosmo.de_density_scale(3.0), rtol=1e-7)
assert allclose(cosmo.de_density_scale([1, 2, 3]),
cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
@pytest.mark.skipif('not HAS_SCIPY')
def test_age():
    # WMAP7 but with Omega_relativistic = 0
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
assert allclose(tcos.age([1., 5.]),
[5.97113193, 1.20553129] * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
# Add relativistic species
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)
# And massive neutrinos
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0,
m_nu=0.1 * u.eV)
assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distmod():
    # WMAP7 but with Omega_relativistic = 0
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
assert allclose(tcos.distmod([1, 5]),
[44.124857, 48.40167258] * u.mag)
assert allclose(tcos.distmod([1., 5.]),
[44.124857, 48.40167258] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_neg_distmod():
# Cosmology with negative luminosity distances (perfectly okay,
# if obscure)
tcos = core.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
assert allclose(tcos.luminosity_distance([50, 100]),
[16612.44047622, -46890.79092244] * u.Mpc)
assert allclose(tcos.distmod([50, 100]),
[46.102167189, 48.355437790944] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_critical_density():
# WMAP7 but with Omega_relativistic = 0
    # These tests will fail if astropy.constants starts returning non-mks
# units by default; see the comment at the top of core.py
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.critical_density0,
9.309668456020899e-30 * u.g / u.cm**3)
assert allclose(tcos.critical_density0,
tcos.critical_density(0))
assert allclose(tcos.critical_density([1, 5]),
[2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3)
assert allclose(tcos.critical_density([1., 5.]),
[2.70352772e-29, 5.53739080e-28] * u.g / u.cm**3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_distance_z1z2():
tcos = core.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))
# Comoving distances are invertible
assert allclose(tcos._comoving_distance_z1z2(1, 2),
-tcos._comoving_distance_z1z2(2, 1))
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (3767.90579253,
2386.25591391,
-1381.64987862,
2893.11776663,
174.1524683) * u.Mpc
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_age_in_special_cosmologies():
"""Check that age in de Sitter and Einstein-de Sitter Universes work.
Some analytic solutions fail at these critical points.
"""
c_dS = core.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.age(z=0), np.inf * u.Gyr)
assert allclose(c_dS.age(z=1), np.inf * u.Gyr)
assert allclose(c_dS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_dS.lookback_time(z=1), 6.777539216261741 * u.Gyr)
c_EdS = core.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.age(z=0), 6.518614811154189 * u.Gyr)
assert allclose(c_EdS.age(z=1), 2.3046783684542738 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=0), 0 * u.Gyr)
assert allclose(c_EdS.lookback_time(z=1), 4.213936442699092 * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_in_special_cosmologies():
"""Check that de Sitter and Einstein-de Sitter Universes both work.
Some analytic solutions fail at these critical points.
"""
c_dS = core.FlatLambdaCDM(100, 0, Tcmb0=0)
assert allclose(c_dS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_dS.comoving_distance(z=1), 2997.92458 * u.Mpc)
c_EdS = core.FlatLambdaCDM(100, 1, Tcmb0=0)
assert allclose(c_EdS.comoving_distance(z=0), 0 * u.Mpc)
assert allclose(c_EdS.comoving_distance(z=1), 1756.1435599923348 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_transverse_distance_z1z2():
tcos = core.FlatLambdaCDM(100, 0.3, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))
# Tests that should actually work, target values computed with
# http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
# Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2),
1313.2232194828466 * u.Mpc)
# In a flat universe comoving distance and comoving transverse
# distance are identical
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test Flat Universe with Omega_M > 1. Rarely used, but perfectly valid.
tcos = core.FlatLambdaCDM(100, 1.5, Tcmb0=0.0)
results = (2202.72682564,
1559.51679971,
-643.21002593,
1408.36365679,
85.09286258) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
    # The flat identity between comoving distance and comoving
    # transverse distance should also hold for this cosmology
    # (same z1, z2 as above)
assert allclose(tcos._comoving_distance_z1z2(z1, z2),
tcos._comoving_transverse_distance_z1z2(z1, z2))
# Test non-flat cases to avoid simply testing
# comoving_distance_z1z2. Test array, array case.
tcos = core.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0)
results = (3535.931375645655,
2226.430046551708,
-1208.6817970036532,
2595.567367601969,
151.36592003406884) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
# Test positive curvature with scalar, array combination.
tcos = core.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0)
z1 = 0.1
z2 = 0, 0.1, 0.2, 0.5, 1.1, 2
results = (-281.31602666724865,
0.,
248.58093707820436,
843.9331377460543,
1618.6104987686672,
2287.5626543279927) * u.Mpc
assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_angular_diameter_distance_z1z2():
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
with pytest.raises(ValueError): # test diff size z1, z2 fail
tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5])
# Tests that should actually work
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
646.22968662822018 * u.Mpc)
z1 = 0, 0, 2, 0.5, 1
z2 = 2, 1, 1, 2.5, 1.1
results = (1760.0628637762106,
1670.7497657219858,
-969.34452994,
1159.0970895962193,
115.72768186186921) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2),
results)
z1 = 0.1
z2 = 0.1, 0.2, 0.5, 1.1, 2
results = (0.,
332.09893173,
986.35635069,
1508.37010062,
1621.07937976) * u.Mpc
assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2),
results)
# Non-flat (positive Ok0) test
tcos = core.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
620.1175337852428 * u.Mpc)
# Non-flat (negative Ok0) test
tcos = core.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0)
assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
228.42914659246014 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_absorption_distance():
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
assert allclose(tcos.absorption_distance([1, 3]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance([1., 3.]),
[1.72576635, 7.98685853])
assert allclose(tcos.absorption_distance(3), 7.98685853)
assert allclose(tcos.absorption_distance(3.), 7.98685853)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_basic():
# Test no neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Neff=4.05,
Tcmb0=2.725 * u.K, m_nu=u.Quantity(0, u.eV))
assert allclose(tcos.Neff, 4.05)
assert not tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 4
assert mnu.unit == u.eV
assert allclose(mnu, [0.0, 0.0, 0.0, 0.0] * u.eV)
assert allclose(tcos.nu_relative_density(1.), 0.22710731766 * 4.05,
rtol=1e-6)
assert allclose(tcos.nu_relative_density(1), 0.22710731766 * 4.05,
rtol=1e-6)
# Alternative no neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0 * u.K,
m_nu=u.Quantity(0.4, u.eV))
assert not tcos.has_massive_nu
assert tcos.m_nu is None
# Test basic setting, retrieval of values
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725 * u.K,
m_nu=u.Quantity([0.0, 0.01, 0.02], u.eV))
assert tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 3
assert mnu.unit == u.eV
assert allclose(mnu, [0.0, 0.01, 0.02] * u.eV)
# All massive neutrinos case
tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.725,
m_nu=u.Quantity(0.1, u.eV), Neff=3.1)
assert allclose(tcos.Neff, 3.1)
assert tcos.has_massive_nu
mnu = tcos.m_nu
assert len(mnu) == 3
assert mnu.unit == u.eV
assert allclose(mnu, [0.1, 0.1, 0.1] * u.eV)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distances():
# Test distance calculations for various special case
# scenarios (no relativistic species, normal, massive neutrinos)
# These do not come from external codes -- they are just internal
# checks to make sure nothing changes if we muck with the distance
# calculators
z = np.array([1.0, 2.0, 3.0, 4.0])
# The pattern here is: no relativistic species, the relativistic
# species with massless neutrinos, then massive neutrinos
cos = core.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2953.93001902, 4616.7134253, 5685.07765971,
6440.80611897] * u.Mpc, rtol=1e-4)
cos = core.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3037.12620424, 4776.86236327, 5889.55164479,
6671.85418235] * u.Mpc, rtol=1e-4)
cos = core.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2471.80626824, 3567.1902565, 4207.15995626,
4638.20476018] * u.Mpc, rtol=1e-4)
# Flat
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3180.83488552, 5060.82054204, 6253.6721173,
7083.5374303] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3180.42662867, 5059.60529655, 6251.62766102,
7080.71698117] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.54183142, 3371.91131264, 3988.40711188,
4409.09346922] * u.Mpc, rtol=1e-4)
# Add w
cos = core.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3216.8296894, 5117.2097601, 6317.05995437,
7149.68648536] * u.Mpc, rtol=1e-4)
cos = core.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3143.56537758, 5000.32196494, 6184.11444601,
7009.80166062] * u.Mpc, rtol=1e-4)
cos = core.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.76035371, 3372.1971387, 3988.71362289,
4409.40817174] * u.Mpc, rtol=1e-4)
# Non-flat w
cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2849.6163356, 4428.71661565, 5450.97862778,
6179.37072324] * u.Mpc, rtol=1e-4)
cos = core.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2904.35580229, 4511.11471267, 5543.43643353,
6275.9206788] * u.Mpc, rtol=1e-4)
cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2473.32522734, 3581.54519631, 4232.41674426,
4671.83818117] * u.Mpc, rtol=1e-4)
# w0wa
cos = core.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2937.7807638, 4572.59950903, 5611.52821924,
6339.8549956] * u.Mpc, rtol=1e-4)
cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2907.34722624, 4539.01723198, 5593.51611281,
6342.3228444] * u.Mpc, rtol=1e-4)
cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2507.18336722, 3633.33231695, 4292.44746919,
4736.35404638] * u.Mpc, rtol=1e-4)
# Flatw0wa
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3123.29892781, 4956.15204302, 6128.15563818,
6948.26480378] * u.Mpc, rtol=1e-4)
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[3122.92671907, 4955.03768936, 6126.25719576,
6945.61856513] * u.Mpc, rtol=1e-4)
cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(10.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2337.70072701, 3372.13719963, 3988.6571093,
4409.35399673] * u.Mpc, rtol=1e-4)
# wpwa
cos = core.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[2954.68975298, 4599.83254834, 5643.04013201,
6373.36147627] * u.Mpc, rtol=1e-4)
cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2919.00656215, 4558.0218123, 5615.73412391,
6366.10224229] * u.Mpc, rtol=1e-4)
cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2629.48489827, 3874.13392319, 4614.31562397,
5116.51184842] * u.Mpc, rtol=1e-4)
# w0wz
cos = core.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0)
assert allclose(cos.comoving_distance(z),
[3051.68786716, 4756.17714818, 5822.38084257,
6562.70873734] * u.Mpc, rtol=1e-4)
cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1,
Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2997.8115653, 4686.45599916, 5764.54388557,
6524.17408738] * u.Mpc, rtol=1e-4)
cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0,
Neff=4, m_nu=u.Quantity(5.0, u.eV))
assert allclose(cos.comoving_distance(z),
[2676.73467639, 3940.57967585, 4686.90810278,
5191.54178243] * u.Mpc, rtol=1e-4)
# Also test different numbers of massive neutrinos
# for FlatLambdaCDM to give the scalar nu density functions a
# work out
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 0, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2777.71589173, 4186.91111666, 5046.0300719,
5636.10397302] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([10.0, 5, 0], u.eV))
assert allclose(cos.comoving_distance(z),
[2636.48149391, 3913.14102091, 4684.59108974,
5213.07557084] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
m_nu=u.Quantity([4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2563.5093049, 3776.63362071, 4506.83448243,
5006.50158829] * u.Mpc, rtol=1e-4)
cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2,
m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV))
assert allclose(cos.comoving_distance(z),
[2525.58017482, 3706.87633298, 4416.58398847,
4901.96669755] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_density():
# Testing neutrino density calculation
# Simple test cosmology, where we compare rho_nu and rho_gamma
# against the exact formula (eq 24/25 of Komatsu et al. 2011)
# computed using Mathematica. The approximation we use for f(y)
# is only good to ~ 0.5% (with some redshift dependence), so that's
# what we test to.
ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
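    # The standard relativistic neutrino-to-photon energy density ratio
    # per species, (7/8) * (4/11)**(4/3)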
nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
# First try 3 massive neutrinos, all 100 eV -- note this is a universe
# seriously dominated by neutrinos!
tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(100.0, u.eV))
assert tcos.has_massive_nu
assert tcos.Neff == 3
nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323,
15633.5, 171.801])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
# Next, slightly less massive
tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.25, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312,
39.1005, 1.11086])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
# For this one also test Onu directly
onu_exp = np.array([0.01890217, 0.05244681, 0.0638236,
0.06999286, 0.1344951])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# And fairly light
tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3,
m_nu=u.Quantity(0.01, u.eV))
nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348,
1.90671, 1.00021])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00066599, 0.00172677, 0.0020732,
0.00268404, 0.0978313])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],
rtol=1e-4)
assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],
rtol=1e-4)
# Now a mixture of neutrino masses, with non-integer Neff
tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04,
m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV))
nurel_exp = nuprefac * tcos.Neff * \
np.array([149.386233, 74.87915, 50.0518,
14.002403, 1.03702333])
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
onu_exp = np.array([0.00584959, 0.01493142, 0.01772291,
0.01963451, 0.10227728])
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
# Integer redshifts
ztest = ztest.astype(int)
assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
rtol=5e-3)
assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value():
    # These are tests of expected values, and hence have less precision
    # than the roundtrip tests below (test_z_at_value_roundtrip): here
    # we have to worry about the cosmological calculations giving
    # slightly different values on different architectures, whereas
    # there we are checking internal consistency on the same
    # architecture and so can be more demanding.
z_at_value = funcs.z_at_value
cosmo = core.Planck13
d = cosmo.luminosity_distance(3)
assert allclose(z_at_value(cosmo.luminosity_distance, d), 3,
rtol=1e-8)
assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.198122684356,
rtol=1e-6)
assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc),
1.3685790653802761, rtol=1e-6)
assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr),
0.7951983674601507, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
zmax=2), 0.68127769625288614, rtol=1e-6)
assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
zmin=2.5), 3.7914908028272083, rtol=1e-6)
assert allclose(z_at_value(cosmo.distmod, 46 * u.mag),
1.9913891680278133, rtol=1e-6)
# test behaviour when the solution is outside z limits (should
# raise a CosmologyError)
with pytest.raises(core.CosmologyError):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5)
with pytest.raises(core.CosmologyError):
z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_roundtrip():
"""
Calculate values from a known redshift, and then check that
z_at_value returns the right answer.
"""
z = 0.5
    # Skip Ok, w, and de_density_scale because in the Planck13
    # cosmology they are redshift independent and hence not invertible.
    # The *_distance_z1z2 methods take multiple arguments, so they
    # require special handling, and clone isn't a redshift-dependent
    # method at all.
skip = ('Ok',
'angular_diameter_distance_z1z2',
'clone',
'de_density_scale', 'w')
import inspect
methods = inspect.getmembers(core.Planck13, predicate=inspect.ismethod)
for name, func in methods:
if name.startswith('_') or name in skip:
continue
print('Round-trip testing {0}'.format(name))
fval = func(z)
# we need zmax here to pick the right solution for
# angular_diameter_distance and related methods.
# Be slightly more generous with rtol than the default 1e-8
# used in z_at_value
assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
rtol=2e-8)
# Test distance functions between two redshifts
z2 = 2.0
func_z1z2 = [lambda z1: core.Planck13._comoving_distance_z1z2(z1, z2),
lambda z1:
core.Planck13._comoving_transverse_distance_z1z2(z1, z2),
lambda z1:
core.Planck13.angular_diameter_distance_z1z2(z1, z2)]
for func in func_z1z2:
fval = func(z)
assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
rtol=2e-8)
|
0f06bdc342e48479d6b6de2653c290d9e022246c930b22373eacc3e4729f8d24 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The Index class can use several implementations as its
engine. Any implementation should implement the following:
__init__(data, row_index) : initialize index based on key/row list pairs
add(key, row) -> None : add (key, row) to existing data
remove(key, data=None) -> boolean : remove data from self[key], or all of
self[key] if data is None
shift_left(row) -> None : decrement row numbers after row
shift_right(row) -> None : increment row numbers >= row
find(key) -> list : list of rows corresponding to key
range(lower, upper, bounds) -> list : rows in self[k] where k is between
lower and upper (<= or < based on bounds)
sort() -> None : make row order align with key order
sorted_data() -> list : rows in sorted order (by key)
replace_rows(row_map) -> None : replace row numbers based on slice
items() -> list : tuples of the form (key, data)
Notes
-----
When a Table is initialized from another Table, indices are
(deep) copied and their columns are set to the columns of the new Table.
Column creation:
Column(c) -> deep copy of indices
c[[1, 2]] -> deep copy and reordering of indices
c[1:2] -> reference
array.view(Column) -> no indices
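
For illustration only, a minimal sketch of a conforming engine is shown
below. It is a naive dict of row lists implementing just a few of the
methods listed above, not one of the shipped engines::

    class DictEngine:
        def __init__(self, data, row_index, unique=False):
            # data: table of key columns in sorted order
            # row_index: row numbers aligned with data
            self._rows = {}
            for key, row in zip(zip(*data.columns.values()), row_index):
                self._rows.setdefault(tuple(key), []).append(row)

        def add(self, key, row):
            self._rows.setdefault(key, []).append(row)

        def find(self, key):
            return sorted(self._rows.get(key, []))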
"""
from copy import deepcopy
import numpy as np
from .bst import MinValue, MaxValue
from .sorted_array import SortedArray
from ..time import Time
class QueryError(ValueError):
'''
Indicates that a given index cannot handle the supplied query.
'''
pass
class Index:
'''
The Index class makes it possible to maintain indices
on columns of a Table, so that column values can be queried
quickly and efficiently. Column values are stored in lexicographic
sorted order, which allows for binary searching in O(log n).
Parameters
----------
columns : list or None
List of columns on which to create an index. If None,
create an empty index for purposes of deep copying.
engine : type, instance, or None
Indexing engine class to use (from among SortedArray, BST,
FastBST, FastRBT, and SCEngine) or actual engine instance.
If the supplied argument is None (by default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
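
    Examples
    --------
    Indices are normally created through the Table interface rather
    than by instantiating this class directly; a minimal sketch::

        >>> from astropy.table import Table
        >>> t = Table({'a': [3, 1, 2], 'b': ['x', 'y', 'z']})
        >>> t.add_index('a')
        >>> print(t.loc[2]['b'])  # look up the row with key a == 2
        z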
'''
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
# If (and only if) unpickling for protocol >= 2, then args and kwargs
# are both empty. The class __init__ requires at least the `columns`
# arg. In this case return a bare `Index` object which is then morphed
# by the unpickling magic into the correct SlicedIndex object.
if not args and not kwargs:
return self
self.__init__(*args, **kwargs)
return SlicedIndex(self, slice(0, 0, None), original=True)
def __init__(self, columns, engine=None, unique=False):
from .table import Table, Column
if engine is not None and not isinstance(engine, type):
# create from data
self.engine = engine.__class__
self.data = engine
self.columns = columns
return
# by default, use SortedArray
self.engine = engine or SortedArray
        if columns is None:  # this creates a special case for deep copying
columns = []
data = []
row_index = []
elif len(columns) == 0:
raise ValueError("Cannot create index without at least one column")
elif len(columns) == 1:
col = columns[0]
row_index = Column(col.argsort())
data = Table([col[row_index]])
else:
num_rows = len(columns[0])
# replace Time columns with approximate form and remainder
new_columns = []
for col in columns:
if isinstance(col, Time):
new_columns.append(col.jd)
remainder = col - col.__class__(col.jd, format='jd')
new_columns.append(remainder.jd)
else:
new_columns.append(col)
# sort the table lexicographically and keep row numbers
table = Table(columns + [np.arange(num_rows)], copy_indices=False)
sort_columns = new_columns[::-1]
try:
lines = table[np.lexsort(sort_columns)]
except TypeError: # arbitrary mixins might not work with lexsort
lines = table[table.argsort()]
data = lines[lines.colnames[:-1]]
row_index = lines[lines.colnames[-1]]
self.data = self.engine(data, row_index, unique=unique)
self.columns = columns
def __len__(self):
'''
Number of rows in index.
'''
return len(self.columns[0])
def replace_col(self, prev_col, new_col):
'''
Replace an indexed column with an updated reference.
Parameters
----------
prev_col : Column
Column reference to replace
new_col : Column
New column reference
'''
self.columns[self.col_position(prev_col.info.name)] = new_col
def reload(self):
'''
Recreate the index based on data in self.columns.
'''
self.__init__(self.columns, engine=self.engine)
def col_position(self, col_name):
'''
Return the position of col_name in self.columns.
Parameters
----------
col_name : str
Name of column to look up
'''
for i, c in enumerate(self.columns):
if c.info.name == col_name:
return i
raise ValueError("Column does not belong to index: {0}".format(col_name))
def insert_row(self, pos, vals, columns):
'''
Insert a new row from the given values.
Parameters
----------
pos : int
Position at which to insert row
vals : list or tuple
List of values to insert into a new row
columns : list
Table column references
'''
key = [None] * len(self.columns)
for i, col in enumerate(columns):
try:
key[i] = vals[self.col_position(col.info.name)]
except ValueError: # not a member of index
continue
num_rows = len(self.columns[0])
if pos < num_rows:
# shift all rows >= pos to the right
self.data.shift_right(pos)
self.data.add(tuple(key), pos)
def get_row_specifier(self, row_specifier):
'''
Return an iterable corresponding to the
input row specifier.
Parameters
----------
row_specifier : int, list, ndarray, or slice
'''
if isinstance(row_specifier, (int, np.integer)):
# single row
return (row_specifier,)
elif isinstance(row_specifier, (list, np.ndarray)):
return row_specifier
elif isinstance(row_specifier, slice):
col_len = len(self.columns[0])
return range(*row_specifier.indices(col_len))
raise ValueError("Expected int, array of ints, or slice but "
"got {0} in remove_rows".format(row_specifier))
def remove_rows(self, row_specifier):
'''
Remove the given rows from the index.
Parameters
----------
row_specifier : int, list, ndarray, or slice
Indicates which row(s) to remove
'''
rows = []
# To maintain the correct row order, we loop twice,
# deleting rows first and then reordering the remaining rows
for row in self.get_row_specifier(row_specifier):
self.remove_row(row, reorder=False)
rows.append(row)
# second pass - row order is reversed to maintain
# correct row numbers
for row in reversed(sorted(rows)):
self.data.shift_left(row)
def remove_row(self, row, reorder=True):
'''
Remove the given row from the index.
Parameters
----------
row : int
Position of row to remove
reorder : bool
Whether to reorder indices after removal
'''
# for removal, form a key consisting of column values in this row
if not self.data.remove(tuple([col[row] for col in self.columns]), row):
raise ValueError("Could not remove row {0} from index".format(row))
# decrement the row number of all later rows
if reorder:
self.data.shift_left(row)
def find(self, key):
'''
Return the row values corresponding to key, in sorted order.
Parameters
----------
key : tuple
Values to search for in each column
'''
return self.data.find(key)
def same_prefix(self, key):
'''
Return rows whose keys contain the supplied key as a prefix.
Parameters
----------
key : tuple
Prefix for which to search
'''
return self.same_prefix_range(key, key, (True, True))
def same_prefix_range(self, lower, upper, bounds=(True, True)):
'''
Return rows whose keys have a prefix in the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
n = len(lower)
ncols = len(self.columns)
a = MinValue() if bounds[0] else MaxValue()
b = MaxValue() if bounds[1] else MinValue()
# [x, y] search corresponds to [(x, min), (y, max)]
        # (x, y) search corresponds to ((x, max), (y, min))
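        # e.g. with ncols=2, an inclusive search for the prefix ('a',)
        # pads the range to ('a', MinValue()) .. ('a', MaxValue())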
lower = lower + tuple((ncols - n) * [a])
upper = upper + tuple((ncols - n) * [b])
return self.data.range(lower, upper, bounds)
def range(self, lower, upper, bounds=(True, True)):
'''
Return rows within the given range.
Parameters
----------
        lower : tuple
            Lower bound on key values
        upper : tuple
            Upper bound on key values
        bounds : tuple (x, y) of bools
            Indicates whether the search should be inclusive or
            exclusive with respect to the endpoints. If x is True
            the lower bound is inclusive, and if y is True the
            upper bound is inclusive; otherwise the corresponding
            endpoint is exclusive.
'''
return self.data.range(lower, upper, bounds)
def replace(self, row, col_name, val):
'''
Replace the value of a column at a given position.
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
'''
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
'''
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
        col_slice : list
            Row numbers in the original column that make up the slice
'''
row_map = dict((row, i) for i, row in enumerate(col_slice))
self.data.replace_rows(row_map)
def sort(self):
'''
Make row numbers follow the same sort order as the keys
of the index.
'''
self.data.sort()
def sorted_data(self):
'''
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
'''
return self.data.sorted_data()
def __getitem__(self, item):
'''
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
'''
return SlicedIndex(self, item)
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self)
def __deepcopy__(self, memo):
'''
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
'''
# Bypass Index.__new__ to create an actual Index, not a SlicedIndex.
index = super().__new__(self.__class__)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
class SlicedIndex:
'''
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
'''
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
else: # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
@property
def length(self):
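        # Number of rows in the slice, equivalent to len(range(start, stop, step))
        # for a positive step; e.g. start=0, stop=5, step=2 gives
        # 1 + (5 - 0 - 1) // 2 = 3.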
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
'''
The stopping position of the slice, or the end of the
index if this is an original slice.
'''
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
'''
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
'''
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
'''
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
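        Examples
        --------
        For a slice with ``start=2`` and ``step=3``, original rows
        [2, 5, 8] map to sliced rows [0, 1, 2]; original rows that do
        not lie on the slice are dropped.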
'''
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
'''
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
'''
return row if self.original else self.start + row * self.step
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.copy().insert_row(self.orig_coords(pos), vals,
columns)
def get_row_specifier(self, row_specifier):
return [self.orig_coords(x) for x in
self.index.get_row_specifier(row_specifier)]
def remove_rows(self, row_specifier):
if not self._frozen:
self.copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.copy().sort()
def __repr__(self):
if self.original:
return repr(self.index)
return 'Index slice {0} of\n{1}'.format(
(self.start, self.stop, self.step), self.index)
def __str__(self):
return repr(self)
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
'''
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
'''
from .table import Table
if len(self.columns) == 1:
return Index([col_slice], engine=self.data.__class__)
t = Table(self.columns, copy_indices=False)
with t.index_mode('discard_on_copy'):
new_cols = t[item].columns.values()
return Index(new_cols, engine=self.data.__class__)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
def get_index(table, table_copy):
'''
    Return the index of ``table`` whose columns match the set of
    columns in ``table_copy``, or None if no such index exists.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`
Subset of the columns in the table argument
'''
cols = set(table_copy.columns)
for column in cols:
for index in table[column].info.indices:
if set([x.info.name for x in index.columns]) == cols:
return index
return None
class _IndexModeContext:
'''
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
'''
_col_subclasses = {}
def __init__(self, table, mode):
'''
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ('freeze', 'discard_on_copy', 'copy_on_getitem'):
raise ValueError("Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{0}'".format(mode))
def __enter__(self):
if self.mode == 'discard_on_copy':
self.table._copy_indices = False
elif self.mode == 'copy_on_getitem':
for col in self.table.columns.values():
self._orig_classes.append(col.__class__)
col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
else:
for index in self.table.indices:
index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == 'discard_on_copy':
self.table._copy_indices = True
elif self.mode == 'copy_on_getitem':
for col in reversed(self.table.columns.values()):
col.__class__ = self._orig_classes.pop()
else:
for index in self.table.indices:
index._frozen = False
index.reload()
def _get_copy_on_getitem_shim(self, cls):
"""
This creates a subclass of the column's class which overrides that
class's ``__getitem__``, such that when returning a slice of the
column, the relevant indices are also copied over to the slice.
Ideally, rather than shimming in a new ``__class__`` we would be able
to just flip a flag that is checked by the base class's
``__getitem__``. Unfortunately, since the flag needs to be a Python
variable, this slows down ``__getitem__`` too much in the more common
case where a copy of the indices is not needed. See the docstring for
``astropy.table._column_mixins`` for more information on that.
"""
if cls in self._col_subclasses:
return self._col_subclasses[cls]
def __getitem__(self, item):
value = cls.__getitem__(self, item)
if type(value) is type(self):
value = self.info.slice_indices(value, item, len(self))
return value
clsname = '_{0}WithIndexCopy'.format(cls.__name__)
new_cls = type(str(clsname), (cls,), {'__getitem__': __getitem__})
self._col_subclasses[cls] = new_cls
return new_cls
class TableIndices(list):
'''
A special list of table indices allowing
for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
'''
def __init__(self, lst):
super().__init__(lst)
def __getitem__(self, item):
'''
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
'''
if isinstance(item, str):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError("No index found for {0}".format(item))
return super().__getitem__(item)
class TableLoc:
"""
A pseudo-list of Table rows allowing for retrieval
of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
"""
def __init__(self, table):
self.table = table
self.indices = table.indices
if len(self.indices) == 0:
raise ValueError("Cannot create TableLoc object with no indices")
def _get_rows(self, item):
"""
        Retrieve the table row indices matching the input key value(s) or slice.
"""
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
if len(index.columns) > 1:
raise ValueError("Cannot use .loc on multi-column indices")
if isinstance(item, slice):
# None signifies no upper/lower bound
start = MinValue() if item.start is None else item.start
stop = MaxValue() if item.stop is None else item.stop
rows = index.range((start,), (stop,))
else:
if not isinstance(item, (list, np.ndarray)): # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for key in item:
p = index.find((key,))
if len(p) == 0:
raise KeyError('No matches found for key {0}'.format(key))
else:
rows.extend(p)
return rows
def __getitem__(self, item):
"""
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
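        For example, ``t.loc['b', 5]`` would look up the value 5 in the
        index on column 'b' rather than in the primary key.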
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(item))
elif len(rows) == 1: # single row
return self.table[rows[0]]
return self.table[rows]
def __setitem__(self, key, value):
"""
        Assign Table rows by value slice.
Parameters
----------
key : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
        value : object
            New values for the selected row(s). For multiple matched rows,
            a list of tuples/lists with one entry per row.
"""
rows = self._get_rows(key)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(key))
elif len(rows) == 1: # single row
self.table[rows[0]] = value
else: # multiple rows
if len(rows) == len(value):
for row, val in zip(rows, value):
self.table[row] = val
else:
raise ValueError('Right side should contain {0} values'.format(len(rows)))
class TableLocIndices(TableLoc):
def __getitem__(self, item):
"""
        Retrieve Table row indices by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(item))
elif len(rows) == 1: # single row
return rows[0]
return rows
class TableILoc(TableLoc):
'''
A variant of TableLoc allowing for row retrieval by
indexed order rather than data values.
Parameters
----------
table : Table
Indexed table to use
'''
def __init__(self, table):
super().__init__(table)
def __getitem__(self, item):
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
rows = index.sorted_data()[item]
table_slice = self.table[rows]
if len(table_slice) == 0: # no matches found
raise IndexError('Invalid index for iloc: {0}'.format(item))
return table_slice
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
import sys
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from copy import deepcopy
import numpy as np
from numpy import ma
from .. import log
from ..io import registry as io_registry
from ..units import Quantity, QuantityInfo
from ..utils import isiterable, ShapedLikeNDArray
from ..utils.console import color_print
from ..utils.metadata import MetaData
from ..utils.data_info import BaseColumnInfo, MixinInfo, ParentDtypeInfo, DataInfo
from ..utils.exceptions import AstropyDeprecationWarning, NoValue
from . import groups
from .pprint import TableFormatter
from .column import (BaseColumn, Column, MaskedColumn, _auto_names, FalseArray,
col_copy)
from .row import Row
from .np_utils import fix_column_name, recarray_fromrecords
from .info import TableInfo, serialize_method_as
from .index import (Index, TableIndices, TableLoc, TableILoc,
                    TableLocIndices, _IndexModeContext, get_index)
from . import conf
__doctest_skip__ = ['Table.read', 'Table.write',
'Table.convert_bytestring_to_unicode',
'Table.convert_unicode_to_bytestring',
]
class TableReplaceWarning(UserWarning):
"""
Warning class for cases when a table column is replaced via the
Table.__setitem__ syntax e.g. t['a'] = val.
This does not inherit from AstropyWarning because we want to use
stacklevel=3 to show the user where the issue occurred in their code.
"""
pass
def descr(col):
"""Array-interface compliant full description of a column.
This returns a 3-tuple (name, type, shape) that can always be
used in a structured array dtype definition.
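    For example, a scalar float64 column named 'a' yields the tuple
    ``('a', dtype('float64'), ())``.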
"""
col_dtype = 'O' if (col.info.dtype is None) else col.info.dtype
col_shape = col.shape[1:] if hasattr(col, 'shape') else ()
return (col.info.name, col_dtype, col_shape)
def has_info_class(obj, cls):
return hasattr(obj, 'info') and isinstance(obj.info, cls)
class TableColumns(OrderedDict):
"""OrderedDict subclass for a set of columns.
This class enhances item access to provide convenient access to columns
by name or index, including slice access. It also handles renaming
of columns.
The initialization argument ``cols`` can be a list of ``Column`` objects
or any structure that is valid for initializing a Python dict. This
includes a dict, list of (key, val) tuples or [key, val] lists, etc.
Parameters
----------
cols : dict, list, tuple; optional
Column objects as data structure that can init dict (see above)
"""
def __init__(self, cols={}):
if isinstance(cols, (list, tuple)):
# `cols` should be a list of two-tuples, but it is allowed to have
# columns (BaseColumn or mixins) in the list.
newcols = []
for col in cols:
if has_info_class(col, BaseColumnInfo):
newcols.append((col.info.name, col))
else:
newcols.append(col)
cols = newcols
super().__init__(cols)
def __getitem__(self, item):
"""Get items from a TableColumns object.
::
tc = TableColumns(cols=[Column(name='a'), Column(name='b'), Column(name='c')])
tc['a'] # Column('a')
tc[1] # Column('b')
tc['a', 'b'] # <TableColumns names=('a', 'b')>
tc[1:3] # <TableColumns names=('b', 'c')>
"""
if isinstance(item, str):
return OrderedDict.__getitem__(self, item)
elif isinstance(item, (int, np.integer)):
return self.values()[item]
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.values()[item.item()]
elif isinstance(item, tuple):
return self.__class__([self[x] for x in item])
elif isinstance(item, slice):
return self.__class__([self[x] for x in list(self)[item]])
else:
raise IndexError('Illegal key or index value for {} object'
.format(self.__class__.__name__))
def __setitem__(self, item, value):
if item in self:
raise ValueError("Cannot replace column '{0}'. Use Table.replace_column() instead."
.format(item))
super().__setitem__(item, value)
def __repr__(self):
names = ("'{0}'".format(x) for x in self.keys())
return "<{1} names=({0})>".format(",".join(names), self.__class__.__name__)
def _rename_column(self, name, new_name):
if name == new_name:
return
if new_name in self:
raise KeyError("Column {0} already exists".format(new_name))
mapper = {name: new_name}
new_names = [mapper.get(name, name) for name in self]
cols = list(self.values())
self.clear()
self.update(list(zip(new_names, cols)))
    # Override keys() and values() to return lists so that positional
    # (integer and slice) access in __getitem__ works consistently.
def keys(self):
return list(OrderedDict.keys(self))
def values(self):
return list(OrderedDict.values(self))
def isinstance(self, cls):
"""
Return a list of columns which are instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are instances of given classes.
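        Examples
        --------
        For example, ``t.columns.isinstance(Column)`` would return only
        the plain (non-mixin) columns of a table ``t``.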
"""
cols = [col for col in self.values() if isinstance(col, cls)]
return cols
def not_isinstance(self, cls):
"""
Return a list of columns which are not instances of the specified classes.
Parameters
----------
cls : class or tuple of classes
Column class (including mixin) or tuple of Column classes.
Returns
-------
col_list : list of Columns
List of Column objects which are not instances of given classes.
"""
cols = [col for col in self.values() if not isinstance(col, cls)]
return cols
class Table:
"""A class to represent tables of heterogeneous data.
`Table` provides a class for heterogeneous tabular data, making use of a
`numpy` structured array internally to store the data values. A key
enhancement provided by the `Table` class is the ability to easily modify
the structure of the table by adding or removing columns, or adding new
rows of data. In addition table and column metadata are fully supported.
`Table` differs from `~astropy.nddata.NDData` by the assumption that the
input data consists of columns of homogeneous data, where each column
has a unique identifier and may contain additional metadata such as the
data unit, format, and description.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. If the input is a Table the ``meta`` is always
copied regardless of the ``copy`` parameter.
Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
"""
meta = MetaData()
# Define class attributes for core container objects to allow for subclass
# customization.
Row = Row
Column = Column
MaskedColumn = MaskedColumn
TableColumns = TableColumns
TableFormatter = TableFormatter
def as_array(self, keep_byteorder=False):
"""
Return a new copy of the table in the form of a structured np.ndarray or
np.ma.MaskedArray object (as appropriate).
Parameters
----------
keep_byteorder : bool, optional
By default the returned array has all columns in native byte
order. However, if this option is `True` this preserves the
byte order of all columns (if any are non-native).
Returns
-------
table_array : np.ndarray (unmasked) or np.ma.MaskedArray (masked)
Copy of table as a numpy structured array
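        Examples
        --------
        >>> t = Table([[1, 2], [3.0, 4.0]], names=('a', 'b'))
        >>> t.as_array().dtype.names
        ('a', 'b')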
"""
if len(self.columns) == 0:
return None
sys_byteorder = ('>', '<')[sys.byteorder == 'little']
native_order = ('=', sys_byteorder)
dtype = []
cols = self.columns.values()
for col in cols:
col_descr = descr(col)
byteorder = col.info.dtype.byteorder
if not keep_byteorder and byteorder not in native_order:
new_dt = np.dtype(col_descr[1]).newbyteorder('=')
col_descr = (col_descr[0], new_dt, col_descr[2])
dtype.append(col_descr)
empty_init = ma.empty if self.masked else np.empty
data = empty_init(len(self), dtype=dtype)
for col in cols:
# When assigning from one array into a field of a structured array,
# Numpy will automatically swap those columns to their destination
# byte order where applicable
data[col.info.name] = col
return data
def __init__(self, data=None, masked=None, names=None, dtype=None,
meta=None, copy=True, rows=None, copy_indices=True,
**kwargs):
# Set up a placeholder empty table
self._set_masked(masked)
self.columns = self.TableColumns()
self.meta = meta
self.formatter = self.TableFormatter()
self._copy_indices = True # copy indices from this Table by default
self._init_indices = copy_indices # whether to copy indices in init
self.primary_key = None
        # Must copy if dtype is changing
if not copy and dtype is not None:
raise ValueError('Cannot specify dtype when copy=False')
# Row-oriented input, e.g. list of lists or list of tuples, list of
# dict, Row instance. Set data to something that the subsequent code
# will parse correctly.
is_list_of_dict = False
if rows is not None:
if data is not None:
raise ValueError('Cannot supply both `data` and `rows` values')
if all(isinstance(row, dict) for row in rows):
is_list_of_dict = True # Avoid doing the all(...) test twice.
data = rows
elif isinstance(rows, self.Row):
data = rows
else:
rec_data = recarray_fromrecords(rows)
data = [rec_data[name] for name in rec_data.dtype.names]
# Infer the type of the input data and set up the initialization
# function, number of columns, and potentially the default col names
default_names = None
if hasattr(data, '__astropy_table__'):
# Data object implements the __astropy_table__ interface method.
# Calling that method returns an appropriate instance of
# self.__class__ and respects the `copy` arg. The returned
# Table object should NOT then be copied (though the meta
# will be deep-copied anyway).
data = data.__astropy_table__(self.__class__, copy, **kwargs)
copy = False
elif kwargs:
raise TypeError('__init__() got unexpected keyword argument {!r}'
.format(list(kwargs.keys())[0]))
if (isinstance(data, np.ndarray) and
data.shape == (0,) and
not data.dtype.names):
data = None
if isinstance(data, self.Row):
data = data._table[data._index:data._index + 1]
if isinstance(data, (list, tuple)):
init_func = self._init_from_list
if data and (is_list_of_dict or all(isinstance(row, dict) for row in data)):
n_cols = len(data[0])
else:
n_cols = len(data)
elif isinstance(data, np.ndarray):
if data.dtype.names:
init_func = self._init_from_ndarray # _struct
n_cols = len(data.dtype.names)
default_names = data.dtype.names
else:
init_func = self._init_from_ndarray # _homog
if data.shape == ():
raise ValueError('Can not initialize a Table with a scalar')
elif len(data.shape) == 1:
data = data[np.newaxis, :]
n_cols = data.shape[1]
elif isinstance(data, Mapping):
init_func = self._init_from_dict
default_names = list(data)
n_cols = len(default_names)
elif isinstance(data, Table):
init_func = self._init_from_table
n_cols = len(data.colnames)
default_names = data.colnames
# don't copy indices if the input Table is in non-copy mode
self._init_indices = self._init_indices and data._copy_indices
elif data is None:
if names is None:
if dtype is None:
return # Empty table
try:
# No data nor names but dtype is available. This must be
# valid to initialize a structured array.
dtype = np.dtype(dtype)
names = dtype.names
dtype = [dtype[name] for name in names]
except Exception:
raise ValueError('dtype was specified but could not be '
'parsed for column names')
# names is guaranteed to be set at this point
init_func = self._init_from_list
n_cols = len(names)
data = [[]] * n_cols
else:
raise ValueError('Data type {0} not allowed to init Table'
.format(type(data)))
# Set up defaults if names and/or dtype are not specified.
# A value of None means the actual value will be inferred
# within the appropriate initialization routine, either from
# existing specification or auto-generated.
if names is None:
names = default_names or [None] * n_cols
if dtype is None:
dtype = [None] * n_cols
# Numpy does not support bytes column names on Python 3, so fix them
# up now.
names = [fix_column_name(name) for name in names]
self._check_names_dtype(names, dtype, n_cols)
# Finally do the real initialization
init_func(data, names, dtype, n_cols, copy)
# Whatever happens above, the masked property should be set to a boolean
if type(self.masked) is not bool:
raise TypeError("masked property has not been set to True or False")
def __getstate__(self):
columns = OrderedDict((key, col if isinstance(col, BaseColumn) else col_copy(col))
for key, col in self.columns.items())
return (columns, self.meta)
def __setstate__(self, state):
columns, meta = state
self.__init__(columns, meta=meta)
@property
def mask(self):
# Dynamic view of available masks
if self.masked:
mask_table = Table([col.mask for col in self.columns.values()],
names=self.colnames, copy=False)
# Set hidden attribute to force inplace setitem so that code like
# t.mask['a'] = [1, 0, 1] will correctly set the underlying mask.
# See #5556 for discussion.
mask_table._setitem_inplace = True
else:
mask_table = None
return mask_table
@mask.setter
def mask(self, val):
self.mask[:] = val
@property
def _mask(self):
"""This is needed so that comparison of a masked Table and a
MaskedArray works. The requirement comes from numpy.ma.core
so don't remove this property."""
return self.as_array().mask
def filled(self, fill_value=None):
"""Return a copy of self, with masked values filled.
If input ``fill_value`` supplied then that value is used for all
masked entries in the table. Otherwise the individual
``fill_value`` defined for each table column is used.
Parameters
----------
        fill_value : object, optional
If supplied, this ``fill_value`` is used for all masked entries
in the entire table.
Returns
-------
filled_table : Table
New table with masked values filled
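        Examples
        --------
        A minimal sketch::
            >>> t = Table([[1, 2], [3, 4]], names=('a', 'b'), masked=True)
            >>> t['a'].mask = [False, True]
            >>> filled_t = t.filled(-1)  # the masked entry in 'a' becomes -1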
"""
if self.masked:
data = [col.filled(fill_value) for col in self.columns.values()]
else:
data = self
return self.__class__(data, meta=deepcopy(self.meta))
@property
def indices(self):
'''
Return the indices associated with columns of the table
as a TableIndices object.
'''
lst = []
for column in self.columns.values():
for index in column.info.indices:
if sum([index is x for x in lst]) == 0: # ensure uniqueness
lst.append(index)
return TableIndices(lst)
@property
def loc(self):
'''
Return a TableLoc object that can be used for retrieving
rows by index in a given data range. Note that both loc
and iloc work only with single-column indices.
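        Examples
        --------
        A minimal sketch, assuming integer index values::
            >>> t = Table([[1, 2, 3], [10, 20, 30]], names=('a', 'b'))
            >>> t.add_index('a')
            >>> row = t.loc[2]       # row with t['a'] == 2
            >>> rows = t.loc[1:2]    # value slice; both endpoints included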
'''
return TableLoc(self)
@property
def loc_indices(self):
"""
Return a TableLocIndices object that can be used for retrieving
the row indices corresponding to given table index key value or values.
"""
return TableLocIndices(self)
@property
def iloc(self):
'''
Return a TableILoc object that can be used for retrieving
indexed rows in the order they appear in the index.
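        For instance, ``t.iloc[0]`` returns the first row in index-sorted
        order and ``t.iloc[:2]`` the first two such rows.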
'''
return TableILoc(self)
def add_index(self, colnames, engine=None, unique=False):
'''
Insert a new index among one or more columns.
If there are no indices, make this index the
primary table index.
Parameters
----------
colnames : str or list
List of column names (or a single column name) to index
engine : type or None
Indexing engine class to use, from among SortedArray, BST,
FastBST, FastRBT, and SCEngine. If the supplied argument is None
(by default), use SortedArray.
unique : bool
Whether the values of the index must be unique. Default is False.
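        Examples
        --------
        For example, to index a table on column 'a' and then on the
        composite key ('b', 'c')::
            >>> t = Table([[2, 1], [4, 3], [6, 5]], names=('a', 'b', 'c'))
            >>> t.add_index('a')
            >>> t.add_index(['b', 'c'])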
'''
if isinstance(colnames, str):
colnames = (colnames,)
columns = self.columns[tuple(colnames)].values()
# make sure all columns support indexing
for col in columns:
if not getattr(col.info, '_supports_indexing', False):
raise ValueError('Cannot create an index on column "{0}", of '
'type "{1}"'.format(col.info.name, type(col)))
index = Index(columns, engine=engine, unique=unique)
if not self.indices:
self.primary_key = colnames
for col in columns:
col.info.indices.append(index)
def remove_indices(self, colname):
'''
Remove all indices involving the given column.
If the primary index is removed, the new primary
index will be the most recently added remaining
index.
Parameters
----------
colname : str
Name of column
'''
col = self.columns[colname]
for index in self.indices:
try:
index.col_position(col.info.name)
except ValueError:
pass
else:
for c in index.columns:
c.info.indices.remove(index)
def index_mode(self, mode):
'''
Return a context manager for an indexing mode.
Parameters
----------
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications in an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
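        Examples
        --------
        A sketch of the 'freeze' mode, assuming a table ``t`` with an
        indexed column 'a'::
            with t.index_mode('freeze'):
                t['a'][0] = 0  # the index refreshes when the context exits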
'''
return _IndexModeContext(self, mode)
def __array__(self, dtype=None):
"""Support converting Table to np.array via np.array(table).
Coercion to a different dtype via np.array(table, dtype) is not
supported and will raise a ValueError.
"""
if dtype is not None:
raise ValueError('Datatype coercion is not allowed')
# This limitation is because of the following unexpected result that
# should have made a table copy while changing the column names.
#
# >>> d = astropy.table.Table([[1,2],[3,4]])
# >>> np.array(d, dtype=[('a', 'i8'), ('b', 'i8')])
# array([(0, 0), (0, 0)],
# dtype=[('a', '<i8'), ('b', '<i8')])
return self.as_array().data if self.masked else self.as_array()
def _check_names_dtype(self, names, dtype, n_cols):
"""Make sure that names and dtype are both iterable and have
the same length as data.
"""
for inp_list, inp_str in ((dtype, 'dtype'), (names, 'names')):
if not isiterable(inp_list):
raise ValueError('{0} must be a list or None'.format(inp_str))
        if len(names) != n_cols or len(dtype) != n_cols:
            raise ValueError(
                'Arguments "names" and "dtype" must match number of columns')
def _set_masked_from_cols(self, cols):
if self.masked is None:
if any(isinstance(col, (MaskedColumn, ma.MaskedArray)) for col in cols):
self._set_masked(True)
else:
self._set_masked(False)
elif not self.masked:
if any(np.any(col.mask) for col in cols if isinstance(col, (MaskedColumn, ma.MaskedArray))):
self._set_masked(True)
def _init_from_list_of_dicts(self, data, names, dtype, n_cols, copy):
names_from_data = set()
for row in data:
names_from_data.update(row)
cols = {}
for name in names_from_data:
cols[name] = []
for i, row in enumerate(data):
try:
cols[name].append(row[name])
except KeyError:
raise ValueError('Row {0} has no value for column {1}'.format(i, name))
if all(name is None for name in names):
names = sorted(names_from_data)
self._init_from_dict(cols, names, dtype, n_cols, copy)
return
def _init_from_list(self, data, names, dtype, n_cols, copy):
"""Initialize table from a list of columns. A column can be a
Column object, np.ndarray, mixin, or any other iterable object.
"""
if data and all(isinstance(row, dict) for row in data):
self._init_from_list_of_dicts(data, names, dtype, n_cols, copy)
return
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(data)
cols = []
def_names = _auto_names(n_cols)
for col, name, def_name, dtype in zip(data, names, def_names, dtype):
# Structured ndarray gets viewed as a mixin unless already a valid
# mixin class
if (isinstance(col, np.ndarray) and len(col.dtype) > 1 and
not self._add_as_mixin_column(col)):
col = col.view(NdarrayMixin)
if isinstance(col, (Column, MaskedColumn)):
col = self.ColumnClass(name=(name or col.info.name or def_name),
data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
elif self._add_as_mixin_column(col):
# Copy the mixin column attributes if they exist since the copy below
# may not get this attribute.
if copy:
col = col_copy(col, copy_indices=self._init_indices)
col.info.name = name or col.info.name or def_name
elif isinstance(col, np.ndarray) or isiterable(col):
col = self.ColumnClass(name=(name or def_name), data=col, dtype=dtype,
copy=copy, copy_indices=self._init_indices)
else:
raise ValueError('Elements in list initialization must be '
'either Column or list-like')
cols.append(col)
self._init_from_cols(cols)
def _init_from_ndarray(self, data, names, dtype, n_cols, copy):
"""Initialize table from an ndarray structured array"""
data_names = data.dtype.names or _auto_names(n_cols)
struct = data.dtype.names is not None
names = [name or data_names[i] for i, name in enumerate(names)]
cols = ([data[name] for name in data_names] if struct else
[data[:, i] for i in range(n_cols)])
# Set self.masked appropriately, then get class to create column instances.
self._set_masked_from_cols(cols)
if copy:
self._init_from_list(cols, names, dtype, n_cols, copy)
else:
dtype = [(name, col.dtype, col.shape[1:]) for name, col in zip(names, cols)]
newdata = data.view(dtype).ravel()
columns = self.TableColumns()
for name in names:
columns[name] = self.ColumnClass(name=name, data=newdata[name])
columns[name].info.parent_table = self
self.columns = columns
def _init_from_dict(self, data, names, dtype, n_cols, copy):
"""Initialize table from a dictionary of columns"""
# TODO: is this restriction still needed with no ndarray?
if not copy:
raise ValueError('Cannot use copy=False with a dict data input')
data_list = [data[name] for name in names]
self._init_from_list(data_list, names, dtype, n_cols, copy)
def _init_from_table(self, data, names, dtype, n_cols, copy):
"""Initialize table from an existing Table object """
table = data # data is really a Table, rename for clarity
self.meta.clear()
self.meta.update(deepcopy(table.meta))
self.primary_key = table.primary_key
cols = list(table.columns.values())
self._init_from_list(cols, names, dtype, n_cols, copy)
def _convert_col_for_table(self, col):
"""
Make sure that all Column objects have correct class for this type of
Table. For a base Table this most commonly means setting to
MaskedColumn if the table is masked. Table subclasses like QTable
override this method.
"""
if col.__class__ is not self.ColumnClass and isinstance(col, Column):
col = self.ColumnClass(col) # copy attributes and reference data
return col
def _init_from_cols(self, cols):
"""Initialize table from a list of Column or mixin objects"""
lengths = set(len(col) for col in cols)
if len(lengths) != 1:
raise ValueError('Inconsistent data column lengths: {0}'
.format(lengths))
# Set the table masking
self._set_masked_from_cols(cols)
# Make sure that all Column-based objects have correct class. For
# plain Table this is self.ColumnClass, but for instance QTable will
# convert columns with units to a Quantity mixin.
newcols = [self._convert_col_for_table(col) for col in cols]
self._make_table_from_cols(self, newcols)
# Deduplicate indices. It may happen that after pickling or when
# initing from an existing table that column indices which had been
# references to a single index object got *copied* into an independent
# object. This results in duplicates which will cause downstream problems.
index_dict = {}
for col in self.itercols():
for i, index in enumerate(col.info.indices or []):
names = tuple(ind_col.info.name for ind_col in index.columns)
if names in index_dict:
col.info.indices[i] = index_dict[names]
else:
index_dict[names] = index
def _new_from_slice(self, slice_):
"""Create a new table as a referenced slice from self."""
table = self.__class__(masked=self.masked)
table.meta.clear()
table.meta.update(deepcopy(self.meta))
table.primary_key = self.primary_key
cols = self.columns.values()
newcols = []
for col in cols:
col.info._copy_indices = self._copy_indices
newcol = col[slice_]
if col.info.indices:
newcol = col.info.slice_indices(newcol, slice_, len(col))
newcols.append(newcol)
col.info._copy_indices = True
self._make_table_from_cols(table, newcols)
return table
@staticmethod
def _make_table_from_cols(table, cols):
"""
Make ``table`` in-place so that it represents the given list of ``cols``.
"""
colnames = set(col.info.name for col in cols)
if None in colnames:
raise TypeError('Cannot have None for column name')
if len(colnames) != len(cols):
raise ValueError('Duplicate column names')
columns = table.TableColumns((col.info.name, col) for col in cols)
for col in cols:
col.info.parent_table = table
if table.masked and not hasattr(col, 'mask'):
col.mask = FalseArray(col.shape)
table.columns = columns
def itercols(self):
"""
Iterate over the columns of this table.
Examples
--------
To iterate over the columns of a table::
>>> t = Table([[1], [2]])
>>> for col in t.itercols():
... print(col)
col0
----
1
col1
----
2
Using ``itercols()`` is similar to ``for col in t.columns.values()``
but is syntactically preferred.
"""
for colname in self.columns:
yield self[colname]
def _base_repr_(self, html=False, descr_vals=None, max_width=None,
tableid=None, show_dtype=True, max_lines=None,
tableclass=None):
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(self)))
descr = ' '.join(descr_vals)
if html:
from ..utils.xml.writer import xml_escape
descr = '<i>{0}</i>\n'.format(xml_escape(descr))
else:
descr = '<{0}>\n'.format(descr)
if tableid is None:
tableid = 'table{id}'.format(id=id(self))
data_lines, outs = self.formatter._pformat_table(
self, tableid=tableid, html=html, max_width=max_width,
show_name=True, show_unit=None, show_dtype=show_dtype,
max_lines=max_lines, tableclass=tableclass)
out = descr + '\n'.join(data_lines)
return out
def _repr_html_(self):
return self._base_repr_(html=True, max_width=-1,
tableclass=conf.default_notebook_table_class)
def __repr__(self):
return self._base_repr_(html=False, max_width=None)
def __str__(self):
return '\n'.join(self.pformat())
def __bytes__(self):
return str(self).encode('utf-8')
@property
def has_mixin_columns(self):
"""
True if table has any mixin columns (defined as columns that are not Column
subclasses).
"""
return any(has_info_class(col, MixinInfo) for col in self.columns.values())
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
if isinstance(col, BaseColumn):
return False
        # Is it a mixin but not a Quantity (which gets converted to Column
        # with unit set).
return has_info_class(col, MixinInfo) and not has_info_class(col, QuantityInfo)
def pprint(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, align=None):
"""Print a formatted string representation of the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for max_width except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int
Maximum number of lines in table output.
max_width : int or `None`
Maximum character width of output.
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
        show_dtype : bool
            Include a header row for column dtypes. Default is False.
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
"""
lines, outs = self.formatter._pformat_table(self, max_lines, max_width,
show_name=show_name, show_unit=show_unit,
show_dtype=show_dtype, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
n_header = outs['n_header']
for i, line in enumerate(lines):
if i < n_header:
color_print(line, 'red')
else:
print(line)
def _make_index_row_display_table(self, index_row_name):
if index_row_name not in self.columns:
idx_col = self.ColumnClass(name=index_row_name, data=np.arange(len(self)))
return self.__class__([idx_col] + self.columns.values(),
copy=False)
else:
return self
def show_in_notebook(self, tableid=None, css=None, display_length=50,
table_class='astropy-default', show_row_index='idx'):
"""Render the table in HTML and show it in the IPython notebook.
Parameters
----------
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}-XXX``, where
id is the unique integer id of the table object, id(self), and XXX
is a random number to avoid conflicts when printing the same table
multiple times.
table_class : str or `None`
A string with a list of HTML classes used to style the table.
The special default string ('astropy-default') means that the string
will be retrieved from the configuration item
``astropy.table.default_notebook_table_class``. Note that these
table classes may make use of bootstrap, as this is loaded with the
notebook. See `this page <http://getbootstrap.com/css/#tables>`_
for the list of classes.
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS_NB``.
        display_length : int, optional
            Number of rows to show. Defaults to 50.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
Notes
-----
Currently, unlike `show_in_browser` (with ``jsviewer=True``), this
method needs to access online javascript code repositories. This is due
to modern browsers' limitations on accessing local files. Hence, if you
call this method while offline (and don't have a cached version of
jquery and jquery.dataTables), you will not get the jsviewer features.
"""
from .jsviewer import JSViewer
from IPython.display import HTML
if tableid is None:
tableid = 'table{0}-{1}'.format(id(self),
np.random.randint(1, 1e6))
jsv = JSViewer(display_length=display_length)
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
if table_class == 'astropy-default':
table_class = conf.default_notebook_table_class
html = display_table._base_repr_(html=True, max_width=-1, tableid=tableid,
max_lines=-1, show_dtype=False,
tableclass=table_class)
columns = display_table.columns.values()
sortable_columns = [i for i, col in enumerate(columns)
if col.dtype.kind in 'iufc']
html += jsv.ipynb(tableid, css=css, sort_columns=sortable_columns)
return HTML(html)
def show_in_browser(self, max_lines=5000, jsviewer=False,
browser='default', jskwargs={'use_local_files': True},
tableid=None, table_class="display compact",
css=None, show_row_index='idx'):
"""Render the table in HTML and show it in a web browser.
Parameters
----------
max_lines : int
Maximum number of rows to export to the table (set low by default
to avoid memory issues, since the browser view requires duplicating
the table in memory). A negative value of ``max_lines`` indicates
no row limit.
jsviewer : bool
If `True`, prepends some javascript headers so that the table is
rendered as a `DataTables <https://datatables.net>`_ data table.
This allows in-browser searching & sorting.
browser : str
Any legal browser name, e.g. ``'firefox'``, ``'chrome'``,
``'safari'`` (for mac, you may need to use ``'open -a
"/Applications/Google Chrome.app" {}'`` for Chrome). If
``'default'``, will use the system default browser.
jskwargs : dict
Passed to the `astropy.table.JSViewer` init. Defaults to
``{'use_local_files': True}`` which means that the JavaScript
libraries will be served from local copies.
tableid : str or `None`
An html ID tag for the table. Default is ``table{id}``, where id
is the unique integer id of the table object, id(self).
table_class : str or `None`
A string with a list of HTML classes used to style the table.
Default is "display compact", and other possible values can be
found in https://www.datatables.net/manual/styling/classes
css : string
A valid CSS string declaring the formatting for the table. Defaults
to ``astropy.table.jsviewer.DEFAULT_CSS``.
show_row_index : str or False
If this does not evaluate to False, a column with the given name
will be added to the version of the table that gets displayed.
This new column shows the index of the row in the table itself,
even when the displayed table is re-sorted by another column. Note
that if a column with this name already exists, this option will be
ignored. Defaults to "idx".
"""
import os
import webbrowser
import tempfile
from .jsviewer import DEFAULT_CSS
from urllib.parse import urljoin
from urllib.request import pathname2url
if css is None:
css = DEFAULT_CSS
# We can't use NamedTemporaryFile here because it gets deleted as
# soon as it gets garbage collected.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'table.html')
with open(path, 'w') as tmp:
if jsviewer:
if show_row_index:
display_table = self._make_index_row_display_table(show_row_index)
else:
display_table = self
display_table.write(tmp, format='jsviewer', css=css,
max_lines=max_lines, jskwargs=jskwargs,
table_id=tableid, table_class=table_class)
else:
self.write(tmp, format='html')
try:
br = webbrowser.get(None if browser == 'default' else browser)
except webbrowser.Error:
log.error("Browser '{}' not found.".format(browser))
else:
br.open(urljoin('file:', pathname2url(path)))
def pformat(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False, html=False, tableid=None,
align=None, tableclass=None):
"""Return a list of lines for the formatted string representation of
the table.
If no value of ``max_lines`` is supplied then the height of the
screen terminal is used to set ``max_lines``. If the terminal
height cannot be determined then the default is taken from the
configuration item ``astropy.conf.max_lines``. If a negative
value of ``max_lines`` is supplied then there is no line limit
applied.
The same applies for ``max_width`` except the configuration item is
``astropy.conf.max_width``.
Parameters
----------
max_lines : int or `None`
Maximum number of rows to output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
        show_dtype : bool
            Include a header row for column dtypes. Default is False.
html : bool
Format the output as an HTML table. Default is False.
tableid : str or `None`
An ID tag for the table; only used if html is set. Default is
"table{id}", where id is the unique integer id of the table object,
id(self)
align : str or list or tuple or `None`
Left/right alignment of columns. Default is right (None) for all
columns. Other allowed values are '>', '<', '^', and '0=' for
right, left, centered, and 0-padded, respectively. A list of
strings can be provided for alignment of tables with multiple
columns.
tableclass : str or list of str or `None`
CSS classes for the table; only used if html is set. Default is
None.
Returns
-------
lines : list
Formatted table as a list of strings.
"""
lines, outs = self.formatter._pformat_table(
self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype, html=html,
tableid=tableid, tableclass=tableclass, align=align)
if outs['show_length']:
lines.append('Length = {0} rows'.format(len(self)))
return lines
def more(self, max_lines=None, max_width=None, show_name=True,
show_unit=None, show_dtype=False):
"""Interactively browse table with a paging interface.
Supported keys::
f, <space> : forward one page
b : back one page
r : refresh same page
n : next row
p : previous row
< : go to beginning
> : go to end
q : quit browsing
h : print this help
Parameters
----------
max_lines : int
Maximum number of lines in table output
max_width : int or `None`
Maximum character width of output
show_name : bool
Include a header row for column names. Default is True.
show_unit : bool
Include a header row for unit. Default is to show a row
for units only if one or more columns has a defined value
for the unit.
        show_dtype : bool
            Include a header row for column dtypes. Default is False.
"""
self.formatter._more_tabcol(self, max_lines, max_width, show_name=show_name,
show_unit=show_unit, show_dtype=show_dtype)
def __getitem__(self, item):
if isinstance(item, str):
return self.columns[item]
elif isinstance(item, (int, np.integer)):
return self.Row(self, item)
elif (isinstance(item, np.ndarray) and item.shape == () and item.dtype.kind == 'i'):
return self.Row(self, item.item())
elif self._is_list_or_tuple_of_str(item):
out = self.__class__([self[x] for x in item],
meta=deepcopy(self.meta),
copy_indices=self._copy_indices)
out._groups = groups.TableGroups(out, indices=self.groups._indices,
keys=self.groups._keys)
return out
elif ((isinstance(item, np.ndarray) and item.size == 0) or
(isinstance(item, (tuple, list)) and not item)):
# If item is an empty array/list/tuple then return the table with no rows
return self._new_from_slice([])
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
isinstance(item, tuple) and all(isinstance(x, np.ndarray)
for x in item)):
# here for the many ways to give a slice; a tuple of ndarray
# is produced by np.where, as in t[np.where(t['a'] > 2)]
# For all, a new table is constructed with slice of all columns
return self._new_from_slice(item)
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __setitem__(self, item, value):
# If the item is a string then it must be the name of a column.
# If that column doesn't already exist then create it now.
if isinstance(item, str) and item not in self.colnames:
NewColumn = self.MaskedColumn if self.masked else self.Column
# If value doesn't have a dtype and won't be added as a mixin then
# convert to a numpy array.
if not hasattr(value, 'dtype') and not self._add_as_mixin_column(value):
value = np.asarray(value)
# Structured ndarray gets viewed as a mixin (unless already a valid
# mixin class).
if (isinstance(value, np.ndarray) and len(value.dtype) > 1 and
not self._add_as_mixin_column(value)):
value = value.view(NdarrayMixin)
# Make new column and assign the value. If the table currently
            # has no rows (len=0) or the value is already a Column then
# define new column directly from value. In the latter case
# this allows for propagation of Column metadata. Otherwise
# define a new column with the right length and shape and then
# set it from value. This allows for broadcasting, e.g. t['a']
# = 1.
name = item
# If this is a column-like object that could be added directly to table
if isinstance(value, BaseColumn) or self._add_as_mixin_column(value):
# If we're setting a new column to a scalar, broadcast it.
# (things will fail in _init_from_cols if this doesn't work)
if (len(self) > 0 and (getattr(value, 'isscalar', False) or
getattr(value, 'shape', None) == () or
len(value) == 1)):
new_shape = (len(self),) + getattr(value, 'shape', ())[1:]
if isinstance(value, np.ndarray):
value = np.broadcast_to(value, shape=new_shape,
subok=True)
elif isinstance(value, ShapedLikeNDArray):
value = value._apply(np.broadcast_to, shape=new_shape,
subok=True)
new_column = col_copy(value)
new_column.info.name = name
elif len(self) == 0:
new_column = NewColumn(value, name=name)
else:
new_column = NewColumn(name=name, length=len(self), dtype=value.dtype,
shape=value.shape[1:],
unit=getattr(value, 'unit', None))
new_column[:] = value
# Now add new column to the table
self.add_columns([new_column], copy=False)
else:
n_cols = len(self.columns)
if isinstance(item, str):
# Set an existing column by first trying to replace, and if
# this fails do an in-place update. See definition of mask
# property for discussion of the _setitem_inplace attribute.
if (not getattr(self, '_setitem_inplace', False)
and not conf.replace_inplace):
try:
self._replace_column_warnings(item, value)
return
except Exception:
pass
self.columns[item][:] = value
elif isinstance(item, (int, np.integer)):
self._set_row(idx=item, colnames=self.colnames, vals=value)
elif (isinstance(item, slice) or
isinstance(item, np.ndarray) or
isinstance(item, list) or
(isinstance(item, tuple) and # output from np.where
all(isinstance(x, np.ndarray) for x in item))):
if isinstance(value, Table):
vals = (col for col in value.columns.values())
elif isinstance(value, np.ndarray) and value.dtype.names:
vals = (value[name] for name in value.dtype.names)
elif np.isscalar(value):
import itertools
vals = itertools.repeat(value, n_cols)
else: # Assume this is an iterable that will work
if len(value) != n_cols:
raise ValueError('Right side value needs {0} elements (one for each column)'
.format(n_cols))
vals = value
for col, val in zip(self.columns.values(), vals):
col[item] = val
else:
raise ValueError('Illegal type {0} for table item access'
.format(type(item)))
def __delitem__(self, item):
if isinstance(item, str):
self.remove_column(item)
elif isinstance(item, (int, np.integer)):
self.remove_row(item)
elif (isinstance(item, (list, tuple, np.ndarray)) and
all(isinstance(x, str) for x in item)):
self.remove_columns(item)
elif (isinstance(item, (list, np.ndarray)) and
np.asarray(item).dtype.kind == 'i'):
self.remove_rows(item)
elif isinstance(item, slice):
self.remove_rows(item)
else:
raise IndexError('illegal key or index value')
def _ipython_key_completions_(self):
return self.colnames
def field(self, item):
"""Return column[item] for recarray compatibility."""
return self.columns[item]
@property
def masked(self):
return self._masked
@masked.setter
def masked(self, masked):
raise Exception('Masked attribute is read-only (use t = Table(t, masked=True)'
' to convert to a masked table)')
def _set_masked(self, masked):
"""
Set the table masked property.
Parameters
----------
masked : bool
State of table masking (`True` or `False`)
"""
if hasattr(self, '_masked'):
# The only allowed change is from None to False or True, or False to True
if self._masked is None and masked in [False, True]:
self._masked = masked
elif self._masked is False and masked is True:
log.info("Upgrading Table to masked Table. Use Table.filled() to convert to unmasked table.")
self._masked = masked
elif self._masked is masked:
raise Exception("Masked attribute is already set to {0}".format(masked))
else:
raise Exception("Cannot change masked attribute to {0} once it is set to {1}"
.format(masked, self._masked))
else:
if masked in [True, False, None]:
self._masked = masked
else:
raise ValueError("masked should be one of True, False, None")
if self._masked:
self._column_class = self.MaskedColumn
else:
self._column_class = self.Column
@property
def ColumnClass(self):
if self._column_class is None:
return self.Column
else:
return self._column_class
@property
def dtype(self):
return np.dtype([descr(col) for col in self.columns.values()])
@property
def colnames(self):
return list(self.columns.keys())
@staticmethod
def _is_list_or_tuple_of_str(names):
"""Check that ``names`` is a tuple or list of strings"""
return (isinstance(names, (tuple, list)) and names and
all(isinstance(x, str) for x in names))
def keys(self):
return list(self.columns.keys())
def __len__(self):
if len(self.columns) == 0:
return 0
lengths = set(len(col) for col in self.columns.values())
if len(lengths) != 1:
len_strs = [' {0} : {1}'.format(name, len(col)) for name, col in self.columns.items()]
raise ValueError('Column length mismatch:\n{0}'.format('\n'.join(len_strs)))
return lengths.pop()
def index_column(self, name):
"""
Return the positional index of column ``name``.
Parameters
----------
name : str
column name
Returns
-------
index : int
Positional index of column ``name``.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Get index of column 'b' of the table::
>>> t.index_column('b')
1
"""
try:
return self.colnames.index(name)
except ValueError:
raise ValueError("Column {0} does not exist".format(name))
def add_column(self, col, index=None, name=None, rename_duplicate=False, copy=True):
"""
Add a new Column object ``col`` to the table. If ``index``
is supplied then insert column before ``index`` position
in the list of columns, otherwise append column to the end
of the list.
Parameters
----------
col : Column
Column object to add.
index : int or `None`
Insert column before this position or at end (default).
name : str
Column name
rename_duplicate : bool
            Uniquify column name if it already exists. Default is False.
copy : bool
Make a copy of the new column. Default is True.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create a third column 'c' and append it to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_column(col_c)
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Add column 'd' at position 1. Note that the column is inserted
before the given index::
>>> col_d = Column(name='d', data=['a', 'b', 'c'])
>>> t.add_column(col_d, 1)
>>> print(t)
a d b c
--- --- --- ---
1 a 0.1 x
2 b 0.2 y
3 c 0.3 z
Add second column named 'b' with rename_duplicate::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> t.add_column(col_b, rename_duplicate=True)
>>> print(t)
a b b_1
--- --- ---
1 0.1 1.1
2 0.2 1.2
3 0.3 1.3
Add an unnamed column or mixin object in the table using a default name
or by specifying an explicit name with ``name``. Name can also be overridden::
>>> t = Table([[1, 2], [0.1, 0.2]], names=('a', 'b'))
>>> col_c = Column(data=['x', 'y'])
>>> t.add_column(col_c)
>>> t.add_column(col_c, name='c')
>>> col_b = Column(name='b', data=[1.1, 1.2])
>>> t.add_column(col_b, name='d')
>>> print(t)
a b col2 c d
--- --- ---- --- ---
1 0.1 x x 1.1
2 0.2 y y 1.2
To add several columns use add_columns.
"""
if index is None:
index = len(self.columns)
if name is not None:
name = (name,)
self.add_columns([col], [index], name, copy=copy, rename_duplicate=rename_duplicate)
def add_columns(self, cols, indexes=None, names=None, copy=True, rename_duplicate=False):
"""
Add a list of new Column objects ``cols`` to the table. If a
corresponding list of ``indexes`` is supplied then insert column
before each ``index`` position in the *original* list of columns,
otherwise append columns to the end of the list.
Parameters
----------
cols : list of Columns
Column objects to add.
indexes : list of ints or `None`
Insert column before this position or at end (default).
names : list of str
Column names
copy : bool
Make a copy of the new columns. Default is True.
rename_duplicate : bool
Uniquify new column names if they duplicate the existing ones.
Default is False.
Examples
--------
Create a table with two columns 'a' and 'b'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> print(t)
a b
--- ---
1 0.1
2 0.2
3 0.3
Create column 'c' and 'd' and append them to the end of the table::
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d])
>>> print(t)
a b c d
--- --- --- ---
1 0.1 x u
2 0.2 y v
3 0.3 z w
Add column 'c' at position 0 and column 'd' at position 1. Note that
the columns are inserted before the given position::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> col_d = Column(name='d', data=['u', 'v', 'w'])
>>> t.add_columns([col_c, col_d], [0, 1])
>>> print(t)
c a d b
--- --- --- ---
x 1 u 0.1
y 2 v 0.2
z 3 w 0.3
Add second column 'b' and column 'c' with ``rename_duplicate``::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> col_b = Column(name='b', data=[1.1, 1.2, 1.3])
>>> col_c = Column(name='c', data=['x', 'y', 'z'])
>>> t.add_columns([col_b, col_c], rename_duplicate=True)
>>> print(t)
a b b_1 c
--- --- --- ---
1 0.1 1.1 x
2 0.2 1.2 y
3 0.3 1.3 z
Add unnamed columns or mixin objects in the table using default names
or by specifying explicit names with ``names``. Names can also be overridden::
>>> t = Table()
>>> col_a = Column(data=['x', 'y'])
>>> col_b = Column(name='b', data=['u', 'v'])
>>> t.add_columns([col_a, col_b])
>>> t.add_columns([col_a, col_b], names=['c', 'd'])
>>> print(t)
col0 b c d
---- --- --- ---
x u x u
y v y v
"""
if indexes is None:
indexes = [len(self.columns)] * len(cols)
elif len(indexes) != len(cols):
raise ValueError('Number of indexes must match number of cols')
if copy:
cols = [col_copy(col) for col in cols]
if len(self.columns) == 0:
# No existing table data, init from cols
newcols = cols
else:
newcols = list(self.columns.values())
new_indexes = list(range(len(newcols) + 1))
for col, index in zip(cols, indexes):
i = new_indexes.index(index)
new_indexes.insert(i, None)
newcols.insert(i, col)
if names is None:
names = (None,) * len(cols)
elif len(names) != len(cols):
raise ValueError('Number of names must match number of cols')
for i, (col, name) in enumerate(zip(cols, names)):
if name is None:
if col.info.name is not None:
continue
name = 'col{}'.format(i + len(self.columns))
if col.info.parent_table is not None:
col = col_copy(col)
col.info.name = name
if rename_duplicate:
existing_names = set(self.colnames)
for col in cols:
i = 1
orig_name = col.info.name
if col.info.name in existing_names:
# If the column belongs to another table then copy it
# before renaming
while col.info.name in existing_names:
# Iterate until a unique name is found
if col.info.parent_table is not None:
col = col_copy(col)
new_name = '{0}_{1}'.format(orig_name, i)
col.info.name = new_name
i += 1
existing_names.add(new_name)
self._init_from_cols(newcols)
def _replace_column_warnings(self, name, col):
"""
Same as replace_column but issues warnings under various circumstances.
"""
warns = conf.replace_warnings
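        # conf.replace_warnings is a list of options; the checks below handle
        # the 'always', 'slice', 'refcount' and 'attributes' entries.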
if 'refcount' in warns and name in self.colnames:
refcount = sys.getrefcount(self[name])
if name in self.colnames:
old_col = self[name]
# This may raise an exception (e.g. t['a'] = 1) in which case none of
# the downstream code runs.
self.replace_column(name, col)
if 'always' in warns:
warnings.warn("replaced column '{}'".format(name),
TableReplaceWarning, stacklevel=3)
if 'slice' in warns:
try:
# Check for ndarray-subclass slice. An unsliced instance
# has an ndarray for the base while sliced has the same class
# as parent.
if isinstance(old_col.base, old_col.__class__):
msg = ("replaced column '{}' which looks like an array slice. "
"The new column no longer shares memory with the "
"original array.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
except AttributeError:
pass
if 'refcount' in warns:
# Did reference count change?
new_refcount = sys.getrefcount(self[name])
if refcount != new_refcount:
msg = ("replaced column '{}' and the number of references "
"to the column changed.".format(name))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
if 'attributes' in warns:
# Any of the standard column attributes changed?
changed_attrs = []
new_col = self[name]
# Check base DataInfo attributes that any column will have
for attr in DataInfo.attr_names:
if getattr(old_col.info, attr) != getattr(new_col.info, attr):
changed_attrs.append(attr)
if changed_attrs:
msg = ("replaced column '{}' and column attributes {} changed."
.format(name, changed_attrs))
warnings.warn(msg, TableReplaceWarning, stacklevel=3)
def replace_column(self, name, col):
"""
Replace column ``name`` with the new ``col`` object.
Parameters
----------
name : str
Name of column to replace
col : column object (list, ndarray, Column, etc)
New column object to replace the existing column
Examples
--------
Replace column 'a' with a float version of itself::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3]], names=('a', 'b'))
>>> float_a = t['a'].astype(float)
>>> t.replace_column('a', float_a)
"""
if name not in self.colnames:
raise ValueError('column name {0} is not in the table'.format(name))
if self[name].info.indices:
raise ValueError('cannot replace a table index column')
t = self.__class__([col], names=[name])
cols = OrderedDict(self.columns)
cols[name] = t[name]
self._init_from_cols(cols.values())
def remove_row(self, index):
"""
Remove a row from the table.
Parameters
----------
index : int
Index of row to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove row 1 from the table::
>>> t.remove_row(1)
>>> print(t)
a b c
--- --- ---
1 0.1 x
3 0.3 z
To remove several rows at the same time use remove_rows.
"""
# check the index against the types that work with np.delete
if not isinstance(index, (int, np.integer)):
raise TypeError("Row index must be an integer")
self.remove_rows(index)
def remove_rows(self, row_specifier):
"""
Remove rows from the table.
Parameters
----------
row_specifier : slice, int, or array of ints
Specification for rows to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove rows 0 and 2 from the table::
>>> t.remove_rows([0, 2])
>>> print(t)
a b c
--- --- ---
2 0.2 y
Note that there are no warnings if the slice operator extends
outside the data::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_rows(slice(10, 20, 1))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
"""
# Update indices
for index in self.indices:
index.remove_rows(row_specifier)
keep_mask = np.ones(len(self), dtype=bool)
keep_mask[row_specifier] = False
columns = self.TableColumns()
for name, col in self.columns.items():
newcol = col[keep_mask]
newcol.info.parent_table = self
columns[name] = newcol
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def remove_column(self, name):
"""
Remove a column from the table.
This can also be done with::
del table[name]
Parameters
----------
name : str
Name of column to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove column 'b' from the table::
>>> t.remove_column('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
To remove several columns at the same time use remove_columns.
"""
self.remove_columns([name])
def remove_columns(self, names):
'''
Remove several columns from the table.
Parameters
----------
names : list
A list containing the names of the columns to remove
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Remove columns 'b' and 'c' from the table::
>>> t.remove_columns(['b', 'c'])
>>> print(t)
a
---
1
2
3
Specifying only a single column also works. Remove column 'b' from the table::
>>> t = Table([[1, 2, 3], [0.1, 0.2, 0.3], ['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.remove_columns('b')
>>> print(t)
a c
--- ---
1 x
2 y
3 z
        This gives the same result as using remove_column.
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
for name in names:
self.columns.pop(name)
def _convert_string_dtype(self, in_kind, out_kind):
"""
Convert string-like columns to/from bytestring and unicode (internal only).
Parameters
----------
in_kind : str
Input dtype.kind
out_kind : str
Output dtype.kind
"""
# If there are no `in_kind` columns then do nothing
cols = self.columns.values()
if not any(col.dtype.kind == in_kind for col in cols):
return
newcols = []
for col in cols:
if col.dtype.kind == in_kind:
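                # e.g. a bytestring dtype.str of '|S3' becomes '|U3' when
                # converting 'S' -> 'U' (illustrative values).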
newdtype = re.sub(in_kind, out_kind, col.dtype.str)
newcol = col.__class__(col, dtype=newdtype)
else:
newcol = col
newcols.append(newcol)
self._init_from_cols(newcols)
def convert_bytestring_to_unicode(self, python3_only=NoValue):
"""
Convert bytestring columns (dtype.kind='S') to unicode (dtype.kind='U') assuming
ASCII encoding.
Internally this changes string columns to represent each character
in the string with a 4-byte UCS-4 equivalent, so it is inefficient
for memory but allows scripts to manipulate string arrays with
natural syntax.
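        Examples
        --------
        A minimal illustrative example (hypothetical values)::
        >>> t = Table([[b'a', b'bc']], names=['x'])
        >>> t['x'].dtype.kind
        'S'
        >>> t.convert_bytestring_to_unicode()
        >>> t['x'].dtype.kind
        'U'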
"""
if python3_only is not NoValue:
warnings.warn('The "python3_only" keyword is now deprecated.',
AstropyDeprecationWarning)
self._convert_string_dtype('S', 'U')
def convert_unicode_to_bytestring(self, python3_only=NoValue):
"""
Convert ASCII-only unicode columns (dtype.kind='U') to bytestring (dtype.kind='S').
When exporting a unicode string array to a file, it may be desirable
to encode unicode columns as bytestrings. This routine takes
advantage of numpy automated conversion which works for strings that
are pure ASCII.
"""
if python3_only is not NoValue:
warnings.warn('The "python3_only" keyword is now deprecated.',
AstropyDeprecationWarning)
self._convert_string_dtype('U', 'S')
def keep_columns(self, names):
'''
Keep only the columns specified (remove the others).
Parameters
----------
names : list
A list containing the names of the columns to keep. All other
columns will be removed.
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> print(t)
a b c
--- --- ---
1 0.1 x
2 0.2 y
3 0.3 z
Specifying only a single column name keeps only this column.
Keep only column 'a' of the table::
>>> t.keep_columns('a')
>>> print(t)
a
---
1
2
3
        Specifying a list of column names is also possible.
Keep columns 'a' and 'c' of the table::
>>> t = Table([[1, 2, 3],[0.1, 0.2, 0.3],['x', 'y', 'z']],
... names=('a', 'b', 'c'))
>>> t.keep_columns(['a', 'c'])
>>> print(t)
a c
--- ---
1 x
2 y
3 z
'''
if isinstance(names, str):
names = [names]
for name in names:
if name not in self.columns:
raise KeyError("Column {0} does not exist".format(name))
remove = list(set(self.keys()) - set(names))
self.remove_columns(remove)
def rename_column(self, name, new_name):
'''
Rename a column.
        This can also be done directly by setting the ``name`` attribute
for a column::
table[name].name = new_name
TODO: this won't work for mixins
Parameters
----------
name : str
The current name of the column.
new_name : str
The new name for the column
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[3,4],[5,6]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 3 5
2 4 6
Renaming column 'a' to 'aa'::
        >>> t.rename_column('a', 'aa')
>>> print(t)
aa b c
--- --- ---
1 3 5
2 4 6
'''
if name not in self.keys():
raise KeyError("Column {0} does not exist".format(name))
self.columns[name].info.name = new_name
def _set_row(self, idx, colnames, vals):
try:
assert len(vals) == len(colnames)
except Exception:
raise ValueError('right hand side must be a sequence of values with '
'the same length as the number of selected columns')
# Keep track of original values before setting each column so that
# setting row can be transactional.
orig_vals = []
cols = self.columns
try:
for name, val in zip(colnames, vals):
orig_vals.append(cols[name][idx])
cols[name][idx] = val
except Exception:
# If anything went wrong first revert the row update then raise
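            # Note that orig_vals[:-1] skips the last appended original value:
            # the column whose assignment failed was never actually modified.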
for name, val in zip(colnames, orig_vals[:-1]):
cols[name][idx] = val
raise
def add_row(self, vals=None, mask=None):
"""Add a new row to the end of the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
This method requires that the Table object "owns" the underlying array
data. In particular one cannot add a row to a Table that was
initialized with copy=False from an existing array.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
Examples
--------
Create a table with three columns 'a', 'b' and 'c'::
>>> t = Table([[1,2],[4,5],[7,8]], names=('a','b','c'))
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
Adding a new row with entries '3' in 'a', '6' in 'b' and '9' in 'c'::
>>> t.add_row([3,6,9])
>>> print(t)
a b c
--- --- ---
1 4 7
2 5 8
3 6 9
"""
self.insert_row(len(self), vals, mask)
def insert_row(self, index, vals=None, mask=None):
"""Add a new row before the given ``index`` position in the table.
The ``vals`` argument can be:
sequence (e.g. tuple or list)
Column values in the same order as table columns.
mapping (e.g. dict)
Keys corresponding to column names. Missing values will be
filled with np.zeros for the column dtype.
`None`
All values filled with np.zeros for the column dtype.
The ``mask`` attribute should give (if desired) the mask for the
values. The type of the mask should match that of the values, i.e. if
``vals`` is an iterable, then ``mask`` should also be an iterable
with the same length, and if ``vals`` is a mapping, then ``mask``
should be a dictionary.
Parameters
----------
vals : tuple, list, dict or `None`
Use the specified values in the new row
mask : tuple, list, dict or `None`
Use the specified mask values in the new row
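        Examples
        --------
        A minimal illustrative example (hypothetical values)::
        >>> t = Table([[1, 3], [4, 6]], names=('a', 'b'))
        >>> t.insert_row(1, [2, 5])
        >>> print(t)
         a   b
        --- ---
          1   4
          2   5
          3   6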
"""
colnames = self.colnames
N = len(self)
if index < -N or index > N:
raise IndexError("Index {0} is out of bounds for table with length {1}"
.format(index, N))
if index < 0:
index += N
def _is_mapping(obj):
"""Minimal checker for mapping (dict-like) interface for obj"""
attrs = ('__getitem__', '__len__', '__iter__', 'keys', 'values', 'items')
return all(hasattr(obj, attr) for attr in attrs)
if mask is not None and not self.masked:
# Possibly issue upgrade warning and update self.ColumnClass. This
# does not change the existing columns.
self._set_masked(True)
if _is_mapping(vals) or vals is None:
# From the vals and/or mask mappings create the corresponding lists
# that have entries for each table column.
if mask is not None and not _is_mapping(mask):
raise TypeError("Mismatch between type of vals and mask")
# Now check that the mask is specified for the same keys as the
# values, otherwise things get really confusing.
if mask is not None and set(vals.keys()) != set(mask.keys()):
raise ValueError('keys in mask should match keys in vals')
if vals and any(name not in colnames for name in vals):
raise ValueError('Keys in vals must all be valid column names')
vals_list = []
mask_list = []
for name in colnames:
if vals and name in vals:
vals_list.append(vals[name])
mask_list.append(False if mask is None else mask[name])
else:
col = self[name]
if hasattr(col, 'dtype'):
# Make a placeholder zero element of the right type which is masked.
# This assumes the appropriate insert() method will broadcast a
# numpy scalar to the right shape.
vals_list.append(np.zeros(shape=(), dtype=col.dtype))
# For masked table any unsupplied values are masked by default.
mask_list.append(self.masked and vals is not None)
else:
raise ValueError("Value must be supplied for column '{0}'".format(name))
vals = vals_list
mask = mask_list
if isiterable(vals):
if mask is not None and (not isiterable(mask) or _is_mapping(mask)):
raise TypeError("Mismatch between type of vals and mask")
if len(self.columns) != len(vals):
raise ValueError('Mismatch between number of vals and columns')
if mask is not None:
if len(self.columns) != len(mask):
raise ValueError('Mismatch between number of masks and columns')
else:
mask = [False] * len(self.columns)
else:
raise TypeError('Vals must be an iterable or mapping or None')
columns = self.TableColumns()
try:
# Insert val at index for each column
for name, col, val, mask_ in zip(colnames, self.columns.values(), vals, mask):
# If the new row caused a change in self.ColumnClass then
# Column-based classes need to be converted first. This is
# typical for adding a row with mask values to an unmasked table.
if isinstance(col, Column) and not isinstance(col, self.ColumnClass):
col = self.ColumnClass(col, copy=False)
newcol = col.insert(index, val, axis=0)
if not isinstance(newcol, BaseColumn):
newcol.info.name = name
if self.masked:
newcol.mask = FalseArray(newcol.shape)
if len(newcol) != N + 1:
                    raise ValueError('Incorrect length for column {0} after inserting {1}'
                                     ' (expected {2}, got {3})'
                                     .format(name, val, N + 1, len(newcol)))
newcol.info.parent_table = self
# Set mask if needed
if self.masked:
newcol.mask[index] = mask_
columns[name] = newcol
# insert row in indices
for table_index in self.indices:
table_index.insert_row(index, vals, self.columns.values())
except Exception as err:
raise ValueError("Unable to insert row because of exception in column '{0}':\n{1}"
.format(name, err))
else:
self._replace_cols(columns)
# Revert groups to default (ungrouped) state
if hasattr(self, '_groups'):
del self._groups
def _replace_cols(self, columns):
for col, new_col in zip(self.columns.values(), columns.values()):
new_col.info.indices = []
for index in col.info.indices:
index.columns[index.col_position(col.info.name)] = new_col
new_col.info.indices.append(index)
self.columns = columns
def argsort(self, keys=None, kind=None):
"""
Return the indices which would sort the table according to one or
more key columns. This simply calls the `numpy.argsort` function on
the table with the ``order`` parameter set to ``keys``.
Parameters
----------
keys : str or list of str
The column name(s) to order the table by
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
Returns
-------
index_array : ndarray, int
Array of indices that sorts the table by the specified key
column(s).
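        Examples
        --------
        A minimal illustrative example (hypothetical values)::
        >>> t = Table([[3, 1, 2]], names=['a'])
        >>> t.argsort('a')
        array([1, 2, 0])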
"""
if isinstance(keys, str):
keys = [keys]
# use index sorted order if possible
if keys is not None:
index = get_index(self, self[keys])
if index is not None:
return index.sorted_data()
kwargs = {}
if keys:
kwargs['order'] = keys
if kind:
kwargs['kind'] = kind
if keys:
data = self[keys].as_array()
else:
data = self.as_array()
return data.argsort(**kwargs)
def sort(self, keys=None):
'''
Sort the table according to one or more keys. This operates
on the existing table and does not return a new table.
Parameters
----------
keys : str or list of str
The key(s) to order the table by. If None, use the
primary index of the Table.
Examples
--------
Create a table with 3 columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Sorting according to standard sorting rules, first 'name' then 'firstname'::
>>> t.sort(['name','firstname'])
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
if keys is None:
if not self.indices:
raise ValueError("Table sort requires input keys or a table index")
keys = [x.info.name for x in self.indices[0].columns]
if isinstance(keys, str):
keys = [keys]
indexes = self.argsort(keys)
sort_index = get_index(self, self[keys])
if sort_index is not None:
# avoid inefficient relabelling of sorted index
prev_frozen = sort_index._frozen
sort_index._frozen = True
for col in self.columns.values():
col[:] = col.take(indexes, axis=0)
if sort_index is not None:
# undo index freeze
sort_index._frozen = prev_frozen
# now relabel the sort index appropriately
sort_index.sort()
def reverse(self):
'''
Reverse the row order of table rows. The table is reversed
in place and there are no function arguments.
Examples
--------
Create a table with three columns::
>>> t = Table([['Max', 'Jo', 'John'], ['Miller','Miller','Jackson'],
... [12,15,18]], names=('firstname','name','tel'))
>>> print(t)
firstname name tel
--------- ------- ---
Max Miller 12
Jo Miller 15
John Jackson 18
Reversing order::
>>> t.reverse()
>>> print(t)
firstname name tel
--------- ------- ---
John Jackson 18
Jo Miller 15
Max Miller 12
'''
for col in self.columns.values():
col[:] = col[::-1]
for index in self.indices:
index.reverse()
@classmethod
def read(cls, *args, **kwargs):
"""
Read and parse a data table and return as a Table.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily reading a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table.read('table.dat', format='ascii')
>>> events = Table.read('events.fits', format='fits')
See http://docs.astropy.org/en/stable/io/unified.html for details.
Parameters
----------
format : str
File format specifier.
*args : tuple, optional
Positional arguments passed through to data reader. If supplied the
first argument is the input filename.
**kwargs : dict, optional
Keyword arguments passed through to data reader.
Returns
-------
out : `Table`
Table corresponding to file contents
Notes
-----
"""
# The hanging Notes section just above is a section placeholder for
# import-time processing that collects available formats into an
# RST table and inserts at the end of the docstring. DO NOT REMOVE.
out = io_registry.read(cls, *args, **kwargs)
# For some readers (e.g., ascii.ecsv), the returned `out` class is not
# guaranteed to be the same as the desired output `cls`. If so,
# try coercing to desired class without copying (io.registry.read
# would normally do a copy). The normal case here is swapping
# Table <=> QTable.
if cls is not out.__class__:
try:
out = cls(out, copy=False)
except Exception:
raise TypeError('could not convert reader output to {0} '
'class.'.format(cls.__name__))
return out
def write(self, *args, **kwargs):
"""Write this Table object out in the specified format.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
>>> dat.write('table.dat', format='ascii')
See http://docs.astropy.org/en/stable/io/unified.html for details.
Parameters
----------
format : str
File format specifier.
serialize_method : str, dict, optional
Serialization method specifier for columns.
*args : tuple, optional
Positional arguments passed through to data writer. If supplied the
first argument is the output filename.
**kwargs : dict, optional
Keyword arguments passed through to data writer.
Notes
-----
"""
serialize_method = kwargs.pop('serialize_method', None)
with serialize_method_as(self, serialize_method):
io_registry.write(self, *args, **kwargs)
def copy(self, copy_data=True):
'''
Return a copy of the table.
Parameters
----------
copy_data : bool
If `True` (the default), copy the underlying data array.
Otherwise, use the same data array. The ``meta`` is always
deepcopied regardless of the value for ``copy_data``.
'''
out = self.__class__(self, copy=copy_data)
# If the current table is grouped then do the same in the copy
if hasattr(self, '_groups'):
out._groups = groups.TableGroups(out, indices=self._groups._indices,
keys=self._groups._keys)
return out
def __deepcopy__(self, memo=None):
return self.copy(True)
def __copy__(self):
return self.copy(False)
def __lt__(self, other):
return super().__lt__(other)
def __gt__(self, other):
return super().__gt__(other)
def __le__(self, other):
return super().__le__(other)
def __ge__(self, other):
return super().__ge__(other)
def __eq__(self, other):
if isinstance(other, Table):
other = other.as_array()
if self.masked:
if isinstance(other, np.ma.MaskedArray):
result = self.as_array() == other
else:
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in self.dtype.names])
result = (self.as_array().data == other) & (self.mask == false_mask)
else:
if isinstance(other, np.ma.MaskedArray):
# If mask is True, then by definition the row doesn't match
# because the other array is not masked.
false_mask = np.zeros(1, dtype=[(n, bool) for n in other.dtype.names])
result = (self.as_array() == other.data) & (other.mask == false_mask)
else:
result = self.as_array() == other
return result
def __ne__(self, other):
return ~self.__eq__(other)
@property
def groups(self):
if not hasattr(self, '_groups'):
self._groups = groups.TableGroups(self)
return self._groups
def group_by(self, keys):
"""
Group this table by the specified ``keys``
This effectively splits the table into groups which correspond to
unique values of the ``keys`` grouping object. The output is a new
`TableGroups` which contains a copy of this table but sorted by row
according to ``keys``.
The ``keys`` input to `group_by` can be specified in different ways:
- String or list of strings corresponding to table column name(s)
- Numpy array (homogeneous or structured) with same length as this table
- `Table` with same length as this table
Parameters
----------
keys : str, list of str, numpy array, or `Table`
Key grouping object
Returns
-------
out : `Table`
New table with groups set
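        Examples
        --------
        A minimal illustrative example (hypothetical values)::
        >>> t = Table([['b', 'a', 'b'], [2, 1, 3]], names=('key', 'val'))
        >>> tg = t.group_by('key')
        >>> print(tg)
        key val
        --- ---
          a   1
          b   2
          b   3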
"""
if self.has_mixin_columns:
raise NotImplementedError('group_by not available for tables with mixin columns')
return groups.table_group_by(self, keys)
def to_pandas(self):
"""
Return a :class:`pandas.DataFrame` instance
Returns
-------
dataframe : :class:`pandas.DataFrame`
A pandas :class:`pandas.DataFrame` instance
Raises
------
ImportError
If pandas is not installed
ValueError
If the Table contains mixin or multi-dimensional columns
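        Examples
        --------
        A minimal illustrative example (hypothetical values; requires pandas)::
        >>> t = Table([[1, 2, 3], ['x', 'y', 'z']], names=('a', 'b'))
        >>> df = t.to_pandas()
        >>> list(df.columns)
        ['a', 'b']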
"""
from pandas import DataFrame
if self.has_mixin_columns:
raise ValueError("Cannot convert a table with mixin columns to a pandas DataFrame")
if any(getattr(col, 'ndim', 1) > 1 for col in self.columns.values()):
raise ValueError("Cannot convert a table with multi-dimensional columns to a pandas DataFrame")
out = OrderedDict()
for name, column in self.columns.items():
if isinstance(column, MaskedColumn):
if column.dtype.kind in ['i', 'u']:
out[name] = column.astype(float).filled(np.nan)
elif column.dtype.kind in ['f', 'c']:
out[name] = column.filled(np.nan)
else:
out[name] = column.astype(object).filled(np.nan)
else:
out[name] = column
if out[name].dtype.byteorder not in ('=', '|'):
out[name] = out[name].byteswap().newbyteorder()
return DataFrame(out)
@classmethod
def from_pandas(cls, dataframe):
"""
Create a `Table` from a :class:`pandas.DataFrame` instance
Parameters
----------
dataframe : :class:`pandas.DataFrame`
The pandas :class:`pandas.DataFrame` instance
Returns
-------
table : `Table`
A `Table` (or subclass) instance
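        Examples
        --------
        A minimal illustrative example (hypothetical values; requires pandas)::
        >>> import pandas as pd
        >>> df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
        >>> t = Table.from_pandas(df)
        >>> t.colnames
        ['a', 'b']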
"""
out = OrderedDict()
for name in dataframe.columns:
column = dataframe[name]
mask = np.array(column.isnull())
data = np.array(column)
if data.dtype.kind == 'O':
# If all elements of an object array are string-like or np.nan
# then coerce back to a native numpy str/unicode array.
string_types = (str, bytes)
nan = np.nan
if all(isinstance(x, string_types) or x is nan for x in data):
# Force any missing (null) values to b''. Numpy will
# upcast to str/unicode as needed.
data[mask] = b''
# When the numpy object array is represented as a list then
# numpy initializes to the correct string or unicode type.
data = np.array([x for x in data])
if np.any(mask):
out[name] = MaskedColumn(data=data, name=name, mask=mask)
else:
out[name] = Column(data=data, name=name)
return cls(out)
info = TableInfo()
class QTable(Table):
"""A class to represent tables of heterogeneous data.
`QTable` provides a class for heterogeneous tabular data which can be
easily modified, for instance adding columns or new rows.
The `QTable` class is identical to `Table` except that columns with an
associated ``unit`` attribute are converted to `~astropy.units.Quantity`
objects.
Parameters
----------
data : numpy ndarray, dict, list, Table, or table-like object, optional
Data to initialize table.
masked : bool, optional
Specify whether the table is masked.
names : list, optional
Specify column names.
dtype : list, optional
Specify column data types.
meta : dict, optional
Metadata associated with the table.
copy : bool, optional
Copy the input data. Default is True.
rows : numpy ndarray, list of lists, optional
Row-oriented data for table instead of ``data`` argument.
copy_indices : bool, optional
Copy any indices in the input data. Default is True.
**kwargs : dict, optional
Additional keyword args when converting table-like object.
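    Examples
    --------
    A minimal illustrative example (hypothetical values)::
    >>> from astropy import units as u
    >>> t = QTable()
    >>> t['v'] = [1., 2.] * u.km / u.s
    >>> type(t['v']) is u.Quantity
    True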
"""
def _add_as_mixin_column(self, col):
"""
Determine if ``col`` should be added to the table directly as
a mixin column.
"""
return has_info_class(col, MixinInfo)
def _convert_col_for_table(self, col):
if (isinstance(col, Column) and getattr(col, 'unit', None) is not None):
# We need to turn the column into a quantity, or a subclass
# identified in the unit (such as u.mag()).
q_cls = getattr(col.unit, '_quantity_class', Quantity)
qcol = q_cls(col.data, col.unit, copy=False)
qcol.info = col.info
col = qcol
else:
col = super()._convert_col_for_table(col)
return col
class NdarrayMixin(np.ndarray):
"""
Mixin column class to allow storage of arbitrary numpy
ndarrays within a Table. This is a subclass of numpy.ndarray
and has the same initialization options as ndarray().
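    Examples
    --------
    A minimal illustrative sketch (hypothetical values): assigning a
    structured array to a table stores it as an `NdarrayMixin`::
    >>> arr = np.array([(1, 2.), (3, 4.)], dtype=[('x', 'i8'), ('y', 'f8')])
    >>> t = Table()
    >>> t['a'] = arr
    >>> type(t['a']).__name__
    'NdarrayMixin'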
"""
info = ParentDtypeInfo()
def __new__(cls, obj, *args, **kwargs):
self = np.array(obj, *args, **kwargs).view(cls)
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
return self
def __array_finalize__(self, obj):
if obj is None:
return
if callable(super().__array_finalize__):
super().__array_finalize__(obj)
# Self was created from template (e.g. obj[slice] or (obj * 2))
# or viewcast e.g. obj.view(Column). In either case we want to
# init Column attributes for self from obj if possible.
if 'info' in getattr(obj, '__dict__', ()):
self.info = obj.info
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle NdarrayMixin objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
|
3dfa9973f12831b2c55d1beed185594e5a2c03484079793035241442f4a20a2e | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from functools import partial
from .core import Kernel, Kernel1D, Kernel2D, MAX_NORMALIZATION
from ..utils.exceptions import AstropyUserWarning
from ..utils.console import human_file_size
from ..utils.decorators import deprecated_renamed_argument
from .. import units as u
from ..nddata import support_nddata
from ..modeling.core import _make_arithmetic_operator, BINARY_OPERATORS
from ..modeling.core import _CompoundModelMeta
# Disabling all doctests in this module until a better way of handling warnings
# in doctests can be determined
__doctest_skip__ = ['*']
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
@support_nddata(data='array')
def convolve(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True, mask=None,
preserve_nan=False, normalization_zero_tol=1e-8):
'''
Convolve an array with a kernel.
This routine differs from `scipy.ndimage.convolve` because
it includes a special treatment for ``NaN`` values. Rather than
including ``NaN`` values in the array in the convolution calculation, which
causes large ``NaN`` holes in the convolved array, ``NaN`` values are
replaced with interpolated values using the kernel as an interpolation
function.
Parameters
----------
array : `~astropy.nddata.NDData` or `numpy.ndarray` or array-like
The array to convolve. This should be a 1, 2, or 3-dimensional array
or a list or a set of nested lists representing a 1, 2, or
3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of
the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those for
the array, and the dimensions should be odd in all directions. If a
masked array, the masked values will be replaced by ``fill_value``.
boundary : str, optional
A flag indicating how to handle boundaries:
* `None`
Set the ``result`` values to zero where the kernel
extends beyond the edge of the array.
* 'fill'
Set values outside the array boundary to ``fill_value`` (default).
* 'wrap'
            Periodic boundary that wraps to the other side of ``array``.
* 'extend'
Set values outside the array to the nearest ``array``
value.
fill_value : float, optional
The value to use outside the array when using ``boundary='fill'``
normalize_kernel : bool, optional
Whether to normalize the kernel to have a sum of one prior to
convolving
nan_treatment : 'interpolate', 'fill'
        'interpolate' will result in renormalization of the kernel at each
        position, ignoring pixels that are NaN in the image, in both the image
        and the kernel.
        'fill' will replace the NaN pixels with a fixed numerical value (default
        zero, see ``fill_value``) prior to convolution.
Note that if the kernel has a sum equal to zero, NaN interpolation
is not possible and will raise an exception
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked if it is masked in either ``mask`` *or* ``array.mask``.
    normalization_zero_tol : float, optional
        The absolute tolerance on whether the kernel is different from zero.
        If the kernel sums to zero to within this precision, it cannot be
        normalized. Default is ``1e-8``.
Returns
-------
result : `numpy.ndarray`
        An array with the same dimensions as the input array,
convolved with kernel. The data type depends on the input
array type. If array is a floating point type, then the
return array keeps the same data type, otherwise the type
is ``numpy.float``.
Notes
-----
For masked arrays, masked values are treated as NaNs. The convolution
is always done at ``numpy.float`` precision.
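    Examples
    --------
    A minimal illustrative example (hypothetical values; the NaN is
    interpolated from its neighbors and edge values are extended)::
    >>> import numpy as np
    >>> convolve([1, np.nan, 3], [1, 1, 1], boundary='extend')
    array([ 1., 2., 3.])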
'''
from .boundary_none import (convolve1d_boundary_none,
convolve2d_boundary_none,
convolve3d_boundary_none)
from .boundary_extend import (convolve1d_boundary_extend,
convolve2d_boundary_extend,
convolve3d_boundary_extend)
from .boundary_fill import (convolve1d_boundary_fill,
convolve2d_boundary_fill,
convolve3d_boundary_fill)
from .boundary_wrap import (convolve1d_boundary_wrap,
convolve2d_boundary_wrap,
convolve3d_boundary_wrap)
if boundary not in BOUNDARY_OPTIONS:
raise ValueError("Invalid boundary option: must be one of {0}"
.format(BOUNDARY_OPTIONS))
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
# The cython routines all need float type inputs (so, a particular
# bit size, endianness, etc.). So we have to convert, which also
# has the effect of making copies so we don't modify the inputs.
# After this, the variables we work with will be array_internal, and
# kernel_internal. However -- we do want to keep track of what type
# the input array was so we can cast the result to that at the end
# if it's a floating point type. Don't bother with this for lists --
# just always push those as float.
# It is always necessary to make a copy of kernel (since it is modified),
# but, if we just so happen to be lucky enough to have the input array
# have exactly the desired type, we just alias to array_internal
    # If array and kernel are both Kernel instances, convolve here and return.
if isinstance(kernel, Kernel) and isinstance(array, Kernel):
if isinstance(array, Kernel1D) and isinstance(kernel, Kernel1D):
new_array = convolve1d_boundary_fill(array.array, kernel.array,
0, True)
new_kernel = Kernel1D(array=new_array)
elif isinstance(array, Kernel2D) and isinstance(kernel, Kernel2D):
new_array = convolve2d_boundary_fill(array.array, kernel.array,
0, True)
new_kernel = Kernel2D(array=new_array)
else:
raise Exception("Can't convolve 1D and 2D kernel.")
new_kernel._separable = kernel._separable and array._separable
new_kernel._is_bool = False
return new_kernel
# Copy or alias array to array_internal
try:
# Anything that's masked must be turned into NaNs for the interpolation.
# This requires copying. A copy is also needed for nan_treatment == 'fill'
# A copy prevents possible function side-effects of the input array.
if nan_treatment == 'fill' or np.ma.is_masked(array) or mask is not None:
if np.ma.is_masked(array):
# np.ma.maskedarray.filled() returns a copy.
                array_internal = array.filled(np.nan)
                # MaskedArray.astype() has neither copy nor order params like ndarray.astype has.
                # np.ma.maskedarray.filled() returns an ndarray not a maskedarray (implicit default subok=False).
# astype must be called after filling the masked data to avoid possible additional copying.
array_internal = array_internal.astype(float, copy=False, order='C', subok=True) # subok=True is redundant but leave for future.
else:
# Since we're making a copy, we might as well use `subok=False` to save,
# what is probably, a negligible amount of memory.
array_internal = np.array(array, dtype=float, copy=True, order='C', subok=False)
if mask is not None:
# mask != 0 yields a bool mask for all ints/floats/bool
array_internal[mask != 0] = np.nan
else:
            # The call below is synonymous with np.asanyarray(array, dtype=float, order='C')
# The advantage of `subok=True` is that it won't copy when array is an ndarray subclass. If it
# is and `subok=False` (default), then it will copy even if `copy=False`. This uses less memory
# when ndarray subclasses are passed in.
array_internal = np.array(array, dtype=float, copy=False, order='C', subok=True)
except (TypeError, ValueError) as e:
raise TypeError('array should be a Numpy array or something '
                        'convertible into a float array', e)
array_dtype = getattr(array, 'dtype', array_internal.dtype)
# Copy or alias kernel to kernel_internal
# Due to NaN interpolation and kernel normalization, a copy must always be made.
    # First alias, then copy afterwards depending on whether kernel is masked (so as not to copy twice).
if isinstance(kernel, Kernel):
kernel_internal = kernel.array
else:
kernel_internal = kernel
if np.ma.is_masked(kernel_internal):
# *kernel* doesn't support NaN interpolation, so instead we just fill it.
# np.ma.maskedarray.filled() returns a copy.
kernel_internal = kernel_internal.filled(fill_value)
# MaskedArray.astype() has neither copy nor order params like ndarray.astype has.
        # np.ma.maskedarray.filled() returns an ndarray not a maskedarray (implicit default subok=False).
# astype must be called after filling the masked data to avoid possible additional copying.
kernel_internal = kernel_internal.astype(float, copy=False, order='C', subok=True) # subok=True is redundant here but leave for future.
else:
try:
kernel_internal = np.array(kernel_internal, dtype=float, copy=True, order='C', subok=False)
except (TypeError, ValueError) as e:
raise TypeError('kernel should be a Numpy array or something '
                            'convertible into a float array', e)
# Check that the number of dimensions is compatible
if array_internal.ndim != kernel_internal.ndim:
raise Exception('array and kernel have differing number of '
'dimensions.')
# Mark the NaN values so we can replace them later if interpolate_nan is
# not set
if preserve_nan:
badvals = np.isnan(array_internal)
if nan_treatment == 'fill':
initially_nan = np.isnan(array_internal)
array_internal[initially_nan] = fill_value
# Because the Cython routines have to normalize the kernel on the fly, we
# explicitly normalize the kernel here, and then scale the image at the
# end if normalization was not requested.
kernel_sum = kernel_internal.sum()
kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)
if (kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero) and normalize_kernel:
raise Exception("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {0}"
.format(1. / MAX_NORMALIZATION))
if not kernel_sums_to_zero:
kernel_internal /= kernel_sum
renormalize_by_kernel = not kernel_sums_to_zero
if array_internal.ndim == 0:
raise Exception("cannot convolve 0-dimensional arrays")
elif array_internal.ndim == 1:
if boundary == 'extend':
result = convolve1d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary == 'fill':
result = convolve1d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel)
elif boundary == 'wrap':
result = convolve1d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary is None:
result = convolve1d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel)
elif array_internal.ndim == 2:
if boundary == 'extend':
result = convolve2d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif boundary == 'fill':
result = convolve2d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel,
)
elif boundary == 'wrap':
result = convolve2d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif boundary is None:
result = convolve2d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel,
)
elif array_internal.ndim == 3:
if boundary == 'extend':
result = convolve3d_boundary_extend(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary == 'fill':
result = convolve3d_boundary_fill(array_internal,
kernel_internal,
float(fill_value),
renormalize_by_kernel)
elif boundary == 'wrap':
result = convolve3d_boundary_wrap(array_internal,
kernel_internal,
renormalize_by_kernel)
elif boundary is None:
result = convolve3d_boundary_none(array_internal,
kernel_internal,
renormalize_by_kernel)
else:
raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional '
'arrays at this time')
# If normalization was not requested, we need to scale the array (since
# the kernel is effectively normalized within the cython functions)
if not normalize_kernel and not kernel_sums_to_zero:
result *= kernel_sum
if preserve_nan:
result[badvals] = np.nan
if nan_treatment == 'fill':
array_internal[initially_nan] = np.nan
# Try to preserve the input type if it's a floating point type
if array_dtype.kind == 'f':
# Avoid making another copy if possible
try:
return result.astype(array_dtype, copy=False)
except TypeError:
return result.astype(array_dtype)
else:
return result
@deprecated_renamed_argument('interpolate_nan', 'nan_treatment', 'v2.0.0')
@support_nddata(data='array')
def convolve_fft(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True,
normalization_zero_tol=1e-8,
preserve_nan=False, mask=None, crop=True, return_fft=False,
fft_pad=None, psf_pad=None, quiet=False,
min_wt=0.0, allow_huge=False,
fftn=np.fft.fftn, ifftn=np.fft.ifftn,
complex_dtype=complex):
"""
Convolve an ndarray with an nd-kernel. Returns a convolved image with
``shape = array.shape``. Assumes kernel is centered.
`convolve_fft` is very similar to `convolve` in that it replaces ``NaN``
values in the original image with interpolated values using the kernel as
an interpolation function. However, it also includes many additional
options specific to the implementation.
`convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:
* It can treat ``NaN`` values as zeros or interpolate over them.
* ``inf`` values are treated as ``NaN``
* (optionally) It pads to the nearest 2^n size to improve FFT speed.
* Its only valid ``mode`` is 'same' (i.e., the same shape array is returned)
* It lets you use your own fft, e.g.,
`pyFFTW <https://pypi.python.org/pypi/pyFFTW>`_ or
`pyFFTW3 <https://pypi.python.org/pypi/PyFFTW3/0.2.1>`_ , which can lead to
performance improvements, depending on your system configuration. pyFFTW3
is threaded, and therefore may yield significant performance benefits on
multi-core machines at the cost of greater memory requirements. Specify
the ``fftn`` and ``ifftn`` keywords to override the default, which is
      `numpy.fft.fftn` and `numpy.fft.ifftn`.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric)
boundary : {'fill', 'wrap'}, optional
A flag indicating how to handle boundaries:
* 'fill': set values outside the array boundary to fill_value
(default)
* 'wrap': periodic boundary
The `None` and 'extend' parameters are not supported for FFT-based
convolution
fill_value : float, optional
The value to use outside the array when using boundary='fill'
nan_treatment : 'interpolate', 'fill'
        ``interpolate`` will result in renormalization of the kernel at each
        position, ignoring pixels that are NaN in the image, in both the image
        and the kernel. ``fill`` will replace the NaN pixels with a fixed
numerical value (default zero, see ``fill_value``) prior to
convolution. Note that if the kernel has a sum equal to zero, NaN
interpolation is not possible and will raise an exception.
normalize_kernel : function or boolean, optional
If specified, this is the function to divide kernel by to normalize it.
e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be:
``kernel = kernel / np.sum(kernel)``. If True, defaults to
``normalize_kernel = np.sum``.
    normalization_zero_tol : float, optional
        The absolute tolerance on whether the kernel is different from zero.
        If the kernel sums to zero to within this precision, it cannot be
        normalized. Default is ``1e-8``.
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked if it is masked in either ``mask`` *or* ``array.mask``.
Other Parameters
----------------
min_wt : float, optional
If ignoring ``NaN`` / zeros, force all grid points with a weight less than
this value to ``NaN`` (the weight of a grid point with *no* ignored
neighbors is 1.0).
        If ``min_wt`` is zero, then all zero-weight points will be set to zero
        instead of ``NaN`` (which they would be otherwise, because 0/0 = nan).
See the examples below
fft_pad : bool, optional
Default on. Zero-pad image to the nearest 2^n. With
``boundary='wrap'``, this will be disabled.
psf_pad : bool, optional
Zero-pad image to be at least the sum of the image sizes to avoid
edge-wrapping when smoothing. This is enabled by default with
``boundary='fill'``, but it can be overridden with a boolean option.
``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
crop : bool, optional
Default on. Return an image of the size of the larger of the input
image and the kernel.
If the image and kernel are asymmetric in opposite directions, will
return the largest image in both directions.
For example, if an input image has shape [100,3] but a kernel with shape
[6,6] is used, the output will be [100,6].
return_fft : bool, optional
Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is
``ifft(fft(image)*fft(kernel))``). Useful for making PSDs.
fftn, ifftn : functions, optional
The fft and inverse fft functions. Can be overridden to use your own
ffts, e.g. an fftw3 wrapper or scipy's fftn,
``fft=scipy.fftpack.fftn``
complex_dtype : numpy.complex, optional
        Which complex dtype to use. `numpy` has a range of options, from
        ``complex64`` to ``complex256``.
quiet : bool, optional
Silence warning message about NaN interpolation
allow_huge : bool, optional
Allow huge arrays in the FFT? If False, will raise an exception if the
array or kernel size is >1 GB
Raises
------
ValueError:
If the array is bigger than 1 GB after padding, will raise this exception
unless ``allow_huge`` is True
See Also
--------
convolve:
Convolve is a non-fft version of this code. It is more memory
efficient and for small kernels can be faster.
Returns
-------
default : ndarray
``array`` convolved with ``kernel``. If ``return_fft`` is set, returns
``fft(array) * fft(kernel)``. If crop is not set, returns the
image, but with the fft-padded size instead of the input size
Notes
-----
With ``psf_pad=True`` and a large PSF, the resulting data can become
very large and consume a lot of memory. See Issue
https://github.com/astropy/astropy/pull/4366 for further detail.
Examples
--------
>>> convolve_fft([1, 0, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, 0, 3], [0, 1, 0])
array([ 1., 0., 3.])
>>> convolve_fft([1, 2, 3], [1])
array([ 1., 2., 3.])
    >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate')
    array([ 1., 0., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate',
... min_wt=1e-8)
array([ 1., nan, 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate')
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True)
array([ 1., 2., 3.])
>>> import scipy.fftpack # optional - requires scipy
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True,
... fftn=scipy.fftpack.fft, ifftn=scipy.fftpack.ifft)
array([ 1., 2., 3.])
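    A further sketch (not part of the original example set): ``preserve_nan``
    sets the originally-NaN pixels back to NaN after the interpolation:
    >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
    ...              normalize_kernel=True, preserve_nan=True)
    array([ 1., nan, 3.])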
"""
# Checking copied from convolve.py - however, since FFTs have real &
# complex components, we change the types. Only the real part will be
# returned! Note that this always makes a copy.
# Check kernel is kernel instance
if isinstance(kernel, Kernel):
kernel = kernel.array
if isinstance(array, Kernel):
raise TypeError("Can't convolve two kernels with convolve_fft. "
"Use convolve instead.")
    if nan_treatment not in ('interpolate', 'fill'):
        raise ValueError("nan_treatment must be one of "
                         "'interpolate' or 'fill'")
# Convert array dtype to complex
# and ensure that list inputs become arrays
array = np.asarray(array, dtype=complex)
kernel = np.asarray(kernel, dtype=complex)
# Check that the number of dimensions is compatible
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of "
"dimensions")
arrayshape = array.shape
kernshape = kernel.shape
    array_size_B = (np.prod(arrayshape, dtype=np.int64) *
                    np.dtype(complex_dtype).itemsize)*u.byte
if array_size_B > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_B.to_value(u.byte))))
# mask catching - masks must be turned into NaNs for use later in the image
if np.ma.is_masked(array):
mamask = array.mask
array = np.array(array)
array[mamask] = np.nan
elif mask is not None:
# copying here because we have to mask it below. But no need to copy
# if mask is None because we won't modify it.
array = np.array(array)
if mask is not None:
# mask != 0 yields a bool mask for all ints/floats/bool
array[mask != 0] = np.nan
# the *kernel* doesn't support NaN interpolation, so instead we just fill it
if np.ma.is_masked(kernel):
kernel = kernel.filled(0)
# NaN and inf catching
nanmaskarray = np.isnan(array) | np.isinf(array)
array[nanmaskarray] = 0
nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
kernel[nanmaskkernel] = 0
if normalize_kernel is True:
if kernel.sum() < 1. / MAX_NORMALIZATION:
raise Exception("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {0}"
.format(1. / MAX_NORMALIZATION))
kernel_scale = kernel.sum()
normalized_kernel = kernel / kernel_scale
kernel_scale = 1 # if we want to normalize it, leave it normed!
elif normalize_kernel:
# try this. If a function is not passed, the code will just crash... I
# think type checking would be better but PEPs say otherwise...
kernel_scale = normalize_kernel(kernel)
normalized_kernel = kernel / kernel_scale
else:
kernel_scale = kernel.sum()
if np.abs(kernel_scale) < normalization_zero_tol:
if nan_treatment == 'interpolate':
raise ValueError('Cannot interpolate NaNs with an unnormalizable kernel')
else:
# the kernel's sum is near-zero, so it can't be scaled
kernel_scale = 1
normalized_kernel = kernel
else:
# the kernel is normalizable; we'll temporarily normalize it
# now and undo the normalization later.
normalized_kernel = kernel / kernel_scale
if boundary is None:
warnings.warn("The convolve_fft version of boundary=None is "
"equivalent to the convolve boundary='fill'. There is "
"no FFT equivalent to convolve's "
"zero-if-kernel-leaves-boundary", AstropyUserWarning)
if psf_pad is None:
psf_pad = True
if fft_pad is None:
fft_pad = True
elif boundary == 'fill':
# create a boundary region at least as large as the kernel
if psf_pad is False:
warnings.warn("psf_pad was set to {0}, which overrides the "
"boundary='fill' setting.".format(psf_pad),
AstropyUserWarning)
else:
psf_pad = True
if fft_pad is None:
# default is 'True' according to the docstring
fft_pad = True
elif boundary == 'wrap':
if psf_pad:
raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
psf_pad = False
if fft_pad:
raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
fft_pad = False
fill_value = 0 # force zero; it should not be used
elif boundary == 'extend':
raise NotImplementedError("The 'extend' option is not implemented "
"for fft-based convolution")
# find ideal size (power of 2) for fft.
# Can add shapes because they are tuples
    if fft_pad:  # default=True
        if psf_pad:  # default=True for boundary=None or 'fill'
# add the dimensions and then take the max (bigger)
fsize = 2 ** np.ceil(np.log2(
np.max(np.array(arrayshape) + np.array(kernshape))))
else:
            # concatenate the shape tuples (tuple addition) and take the
            # overall max; this also makes the padded shape square (smaller)
            fsize = 2 ** np.ceil(np.log2(np.max(arrayshape + kernshape)))
newshape = np.array([fsize for ii in range(array.ndim)], dtype=int)
else:
if psf_pad:
# just add the biggest dimensions
newshape = np.array(arrayshape) + np.array(kernshape)
else:
newshape = np.array([np.max([imsh, kernsh])
for imsh, kernsh in zip(arrayshape, kernshape)])
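    # e.g. (an illustrative sketch, not from the original source): with
    # arrayshape=(100, 3) and kernshape=(6, 6), fft_pad=True and psf_pad=True
    # give fsize = 2**ceil(log2(106)) = 128, so newshape = [128, 128]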
# perform a second check after padding
    array_size_C = (np.prod(newshape, dtype=np.int64) *
                    np.dtype(complex_dtype).itemsize)*u.byte
if array_size_C > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_C)))
# For future reference, this can be used to predict "almost exactly"
# how much *additional* memory will be used.
# size * (array + kernel + kernelfft + arrayfft +
# (kernel*array)fft +
# optional(weight image + weight_fft + weight_ifft) +
# optional(returned_fft))
# total_memory_used_GB = (np.product(newshape)*np.dtype(complex_dtype).itemsize
# * (5 + 3*((interpolate_nan or ) and kernel_is_normalized))
# + (1 + (not return_fft)) *
# np.product(arrayshape)*np.dtype(complex_dtype).itemsize
# + np.product(arrayshape)*np.dtype(bool).itemsize
# + np.product(kernshape)*np.dtype(bool).itemsize)
# ) / 1024.**3
# separate each dimension by the padding size... this is to determine the
# appropriate slice size to get back to the input dimensions
arrayslices = []
kernslices = []
for ii, (newdimsize, arraydimsize, kerndimsize) in enumerate(zip(newshape, arrayshape, kernshape)):
center = newdimsize - (newdimsize + 1) // 2
arrayslices += [slice(center - arraydimsize // 2,
center + (arraydimsize + 1) // 2)]
kernslices += [slice(center - kerndimsize // 2,
center + (kerndimsize + 1) // 2)]
    # index below with tuples of slices: indexing an ndarray with a *list*
    # of slices is deprecated in newer versions of numpy
    arrayslices = tuple(arrayslices)
    kernslices = tuple(kernslices)
    if not np.all(newshape == arrayshape):
if np.isfinite(fill_value):
bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value
else:
bigarray = np.zeros(newshape, dtype=complex_dtype)
bigarray[arrayslices] = array
else:
bigarray = array
if not np.all(newshape == kernshape):
bigkernel = np.zeros(newshape, dtype=complex_dtype)
bigkernel[kernslices] = normalized_kernel
else:
bigkernel = normalized_kernel
arrayfft = fftn(bigarray)
# need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
kernfft = fftn(np.fft.ifftshift(bigkernel))
fftmult = arrayfft * kernfft
interpolate_nan = (nan_treatment == 'interpolate')
if interpolate_nan:
if not np.isfinite(fill_value):
bigimwt = np.zeros(newshape, dtype=complex_dtype)
else:
bigimwt = np.ones(newshape, dtype=complex_dtype)
bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan
wtfft = fftn(bigimwt)
# You can only get to this point if kernel_is_normalized
wtfftmult = wtfft * kernfft
wtsm = ifftn(wtfftmult)
# need to re-zero weights outside of the image (if it is padded, we
# still don't weight those regions)
bigimwt[arrayslices] = wtsm.real[arrayslices]
else:
bigimwt = 1
if np.isnan(fftmult).any():
# this check should be unnecessary; call it an insanity check
raise ValueError("Encountered NaNs in convolve. This is disallowed.")
# restore NaNs in original image (they were modified inplace earlier)
# We don't have to worry about masked arrays - if input was masked, it was
# copied
array[nanmaskarray] = np.nan
kernel[nanmaskkernel] = np.nan
fftmult *= kernel_scale
if return_fft:
return fftmult
if interpolate_nan:
rifft = (ifftn(fftmult)) / bigimwt
if not np.isscalar(bigimwt):
if min_wt > 0.:
rifft[bigimwt < min_wt] = np.nan
else:
# Set anything with no weight to zero (taking into account
# slight offsets due to floating-point errors).
rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0
else:
rifft = ifftn(fftmult)
if preserve_nan:
rifft[arrayslices][nanmaskarray] = np.nan
if crop:
result = rifft[arrayslices].real
return result
else:
return rifft.real
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs):
"""
Given a data set containing NaNs, replace the NaNs by interpolating from
neighboring data points with a given kernel.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
        for the array. If the non-fft `convolve` function is used (the
        default), the kernel dimensions must be odd in all directions. The
        kernel is always normalized before use. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric). The kernel
*must be normalizable* (i.e., its sum cannot be zero).
convolve : `convolve` or `convolve_fft`
One of the two convolution functions defined in this package.
Returns
-------
newarray : `numpy.ndarray`
A copy of the original array with NaN pixels replaced with their
        interpolated counterparts.
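    Examples
    --------
    A minimal sketch (values chosen here for illustration):
    >>> import numpy as np
    >>> from astropy.convolution import interpolate_replace_nans
    >>> data = np.array([1., np.nan, 3.])
    >>> interpolate_replace_nans(data, np.array([1., 1., 1.]))  # doctest: +FLOAT_CMP
    array([1., 2., 3.])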
"""
if not np.any(np.isnan(array)):
return array.copy()
newarray = array.copy()
convolved = convolve(array, kernel, nan_treatment='interpolate',
normalize_kernel=True, **kwargs)
isnan = np.isnan(array)
newarray[isnan] = convolved[isnan]
return newarray
def convolve_models(model, kernel, mode='convolve_fft', **kwargs):
"""
Convolve two models using `~astropy.convolution.convolve_fft`.
Parameters
----------
model : `~astropy.modeling.core.Model`
Functional model
kernel : `~astropy.modeling.core.Model`
Convolution kernel
mode : str
Keyword representing which function to use for convolution.
* 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
* 'convolve' : use `~astropy.convolution.convolve`.
kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
or `~astropy.convolution.convolve_fft` depending on ``mode``.
Returns
-------
default : CompoundModel
Convolved model
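    Examples
    --------
    An illustrative sketch (the models are chosen arbitrarily):
    >>> from astropy.modeling.models import Gaussian1D  # doctest: +SKIP
    >>> g1 = Gaussian1D(1, -1, 1)  # doctest: +SKIP
    >>> g2 = Gaussian1D(1, 1, 1)  # doctest: +SKIP
    >>> g1_conv_g2 = convolve_models(g1, g2, mode='convolve_fft')  # doctest: +SKIP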
"""
if mode == 'convolve_fft':
BINARY_OPERATORS['convolve_fft'] = _make_arithmetic_operator(partial(convolve_fft, **kwargs))
elif mode == 'convolve':
BINARY_OPERATORS['convolve'] = _make_arithmetic_operator(partial(convolve, **kwargs))
else:
raise ValueError('Mode {} is not supported.'.format(mode))
return _CompoundModelMeta._from_operator(mode, model, kernel)
|
9cae7efe58334edd1db3002b05edb69be746815fbcf65d5a8be5810cd877165f | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions related to Python runtime introspection."""
import inspect
import types
import importlib
from distutils.version import LooseVersion
__all__ = ['resolve_name', 'minversion', 'find_current_module',
'isinstancemethod']
__doctest_skip__ = ['find_current_module']
def resolve_name(name, *additional_parts):
"""Resolve a name like ``module.object`` to an object and return it.
This ends up working like ``from module import object`` but is easier
to deal with than the `__import__` builtin and supports digging into
submodules.
Parameters
----------
name : `str`
A dotted path to a Python object--that is, the name of a function,
class, or other object in a module with the full path to that module,
including parent modules, separated by dots. Also known as the fully
qualified name of the object.
    additional_parts : iterable, optional
        If more than one positional argument is given, the additional
        arguments are automatically dotted together with ``name``.
Examples
--------
>>> resolve_name('astropy.utils.introspection.resolve_name')
<function resolve_name at 0x...>
>>> resolve_name('astropy', 'utils', 'introspection', 'resolve_name')
<function resolve_name at 0x...>
Raises
------
`ImportError`
If the module or named object is not found.
"""
additional_parts = '.'.join(additional_parts)
if additional_parts:
name = name + '.' + additional_parts
parts = name.split('.')
if len(parts) == 1:
# No dots in the name--just a straight up module import
cursor = 1
fromlist = []
else:
cursor = len(parts) - 1
fromlist = [parts[-1]]
module_name = parts[:cursor]
while cursor > 0:
try:
ret = __import__(str('.'.join(module_name)), fromlist=fromlist)
break
except ImportError:
if cursor == 0:
raise
cursor -= 1
module_name = parts[:cursor]
fromlist = [parts[cursor]]
ret = ''
for part in parts[cursor:]:
try:
ret = getattr(ret, part)
except AttributeError:
raise ImportError(name)
return ret
def minversion(module, version, inclusive=True, version_path='__version__'):
"""
Returns `True` if the specified Python module satisfies a minimum version
requirement, and `False` if not.
Parameters
----------
module : module or `str`
An imported module of which to check the version, or the name of
that module (in which case an import of that module is attempted--
if this fails `False` is returned).
version : `str`
The version as a string that this module must have at a minimum (e.g.
``'0.12'``).
    inclusive : `bool`
        If `True` (the default), the version requirement is satisfied
        inclusively (i.e. ``>=``); otherwise the module version must be
        strictly greater than ``version``.
version_path : `str`
A dotted attribute path to follow in the module for the version.
Defaults to just ``'__version__'``, which should work for most Python
modules.
Examples
--------
>>> import astropy
>>> minversion(astropy, '0.4.4')
True
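    The module can also be given by name, in which case it is imported
    first (an additional sketch; any importable module works):
    >>> minversion('numpy', '1.0')
    True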
"""
if isinstance(module, types.ModuleType):
module_name = module.__name__
elif isinstance(module, str):
module_name = module
try:
module = resolve_name(module_name)
except ImportError:
return False
else:
raise ValueError('module argument must be an actual imported '
'module, or the import name of the module; '
'got {0!r}'.format(module))
if '.' not in version_path:
have_version = getattr(module, version_path)
else:
have_version = resolve_name(module.__name__, version_path)
if inclusive:
return LooseVersion(have_version) >= LooseVersion(version)
else:
return LooseVersion(have_version) > LooseVersion(version)
def find_current_module(depth=1, finddiff=False):
"""
Determines the module/package from which this function is called.
    This function has two modes, determined by the ``finddiff`` option. It
    will either simply go the requested number of frames up the call
stack (if ``finddiff`` is False), or it will go up the call stack until
it reaches a module that is *not* in a specified set.
Parameters
----------
depth : int
        Specifies how far back to go in the call stack (0-indexed, so that
        passing in 0 gives back `astropy.utils.introspection`).
finddiff : bool or list
If False, the returned ``mod`` will just be ``depth`` frames up from
the current frame. Otherwise, the function will start at a frame
``depth`` up from current, and continue up the call stack to the
first module that is *different* from those in the provided list.
In this case, ``finddiff`` can be a list of modules or modules
        names. Alternatively, it can be `True`, which will use the module
        ``depth`` call stack frames up as the module that the returned
        module must be different from.
Returns
-------
mod : module or None
The module object or None if the package cannot be found. The name of
the module is available as the ``__name__`` attribute of the returned
object (if it isn't None).
Raises
------
ValueError
If ``finddiff`` is a list with an invalid entry.
Examples
--------
The examples below assume that there are two modules in a package named
``pkg``. ``mod1.py``::
        def find1():
            from astropy.utils import find_current_module
            print(find_current_module(1).__name__)
        def find2():
            from astropy.utils import find_current_module
            cmod = find_current_module(2)
            if cmod is None:
                print('None')
            else:
                print(cmod.__name__)
        def find_diff():
            from astropy.utils import find_current_module
            print(find_current_module(0, True).__name__)
``mod2.py``::
def find():
from .mod1 import find2
find2()
With these modules in place, the following occurs::
>>> from pkg import mod1, mod2
>>> from astropy.utils import find_current_module
>>> mod1.find1()
pkg.mod1
>>> mod1.find2()
None
>>> mod2.find()
pkg.mod2
>>> find_current_module(0)
    <module 'astropy.utils.introspection' from 'astropy/utils/introspection.py'>
>>> mod1.find_diff()
pkg.mod1
"""
frm = inspect.currentframe()
for i in range(depth):
frm = frm.f_back
if frm is None:
return None
if finddiff:
currmod = inspect.getmodule(frm)
if finddiff is True:
diffmods = [currmod]
else:
diffmods = []
for fd in finddiff:
if inspect.ismodule(fd):
diffmods.append(fd)
elif isinstance(fd, str):
diffmods.append(importlib.import_module(fd))
elif fd is True:
diffmods.append(currmod)
else:
raise ValueError('invalid entry in finddiff')
while frm:
frmb = frm.f_back
modb = inspect.getmodule(frmb)
if modb not in diffmods:
return modb
frm = frmb
else:
return inspect.getmodule(frm)
def find_mod_objs(modname, onlylocals=False):
""" Returns all the public attributes of a module referenced by name.
.. note::
        The returned list does *not* include subpackages or modules of
``modname``, nor does it include private attributes (those that
begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool or list of str
If `True`, only attributes that are either members of ``modname`` OR
one of its modules or subpackages will be included. If it is a list
of strings, those specify the possible packages that will be
considered "local".
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
        module ``modname``.
fqnames : list of str
        A list of the fully qualified names of the attributes (e.g.,
``astropy.utils.introspection.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for functions or
classes it can be different if they are actually defined elsewhere and
just referenced in ``modname``.
objs : list of objects
        A list of the actual attributes themselves (in the same order as
        the other outputs).
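    Examples
    --------
    A short sketch:
    >>> lnames, fqns, objs = find_mod_objs('astropy.utils.introspection')
    >>> 'minversion' in lnames
    True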
"""
mod = resolve_name(modname)
if hasattr(mod, '__all__'):
pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
else:
pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']
# filter out modules and pull the names and objs out
ismodule = inspect.ismodule
localnames = [k for k, v in pkgitems if not ismodule(v)]
objs = [v for k, v in pkgitems if not ismodule(v)]
# fully qualified names can be determined from the object's module
fqnames = []
for obj, lnm in zip(objs, localnames):
if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
fqnames.append(obj.__module__ + '.' + obj.__name__)
else:
fqnames.append(modname + '.' + lnm)
if onlylocals:
if onlylocals is True:
onlylocals = [modname]
valids = [any(fqn.startswith(nm) for nm in onlylocals) for fqn in fqnames]
localnames = [e for i, e in enumerate(localnames) if valids[i]]
fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
objs = [e for i, e in enumerate(objs) if valids[i]]
return localnames, fqnames, objs
# Note: I would have preferred to call this is_instancemethod, but this naming is
# for consistency with other functions in the `inspect` module
def isinstancemethod(cls, obj):
"""
Returns `True` if the given object is an instance method of the class
it is defined on (as opposed to a `staticmethod` or a `classmethod`).
    This requires both the class that the object is a member of and the
    object itself in order to make this determination.
Parameters
----------
cls : `type`
The class on which this method was defined.
obj : `object`
A member of the provided class (the membership is not checked directly,
but this function will always return `False` if the given object is not
a member of the given class).
Examples
--------
>>> class MetaClass(type):
... def a_classmethod(cls): pass
...
>>> class MyClass(metaclass=MetaClass):
... def an_instancemethod(self): pass
...
... @classmethod
... def another_classmethod(cls): pass
...
... @staticmethod
... def a_staticmethod(): pass
...
>>> isinstancemethod(MyClass, MyClass.a_classmethod)
False
>>> isinstancemethod(MyClass, MyClass.another_classmethod)
False
>>> isinstancemethod(MyClass, MyClass.a_staticmethod)
False
>>> isinstancemethod(MyClass, MyClass.an_instancemethod)
True
"""
return _isinstancemethod(cls, obj)
def _isinstancemethod(cls, obj):
if not isinstance(obj, types.FunctionType):
return False
# Unfortunately it seems the easiest way to get to the original
# staticmethod object is to look in the class's __dict__, though we
# also need to look up the MRO in case the method is not in the given
# class's dict
name = obj.__name__
for basecls in cls.mro(): # This includes cls
if name in basecls.__dict__:
return not isinstance(basecls.__dict__[name], staticmethod)
# This shouldn't happen, though this is the most sensible response if
# it does.
raise AttributeError(name)
|
13129e7119cf6c0fdef981ea9247cac790f7e63f81e098da9c86fc69af2e0788 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module includes helper functions for array operations.
"""
from copy import deepcopy
import numpy as np
from .decorators import support_nddata
from .. import units as u
from ..coordinates import SkyCoord
from ..utils import lazyproperty
from ..wcs.utils import skycoord_to_pixel, proj_plane_pixel_scales
from ..wcs import Sip
__all__ = ['extract_array', 'add_array', 'subpixel_indices',
'overlap_slices', 'block_reduce', 'block_replicate',
'NoOverlapError', 'PartialOverlapError', 'Cutout2D']
class NoOverlapError(ValueError):
'''Raised when determining the overlap of non-overlapping arrays.'''
pass
class PartialOverlapError(ValueError):
'''Raised when arrays only partially overlap.'''
pass
def _round(a):
    '''Round to the nearest integer, with halves rounded up.
``np.round`` cannot be used here, because it rounds .5 to the nearest
even number.
'''
return int(np.floor(a + 0.5))
def _offset(a):
'''Offset by 0.5 for an even array.
    For an array with an odd number of elements, the center is symmetric,
    e.g. for 3 elements the extraction is the center element +/- 1 element,
    but for four elements it is the center -2 / +1.
    This function introduces that offset.
'''
if np.mod(a, 2) == 0:
return -0.5
else:
return 0.
def overlap_slices(large_array_shape, small_array_shape, position,
mode='partial'):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
    boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : tuple or int
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : tuple of numbers or number
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slices
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
    slices_small : tuple of slices
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
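    Examples
    --------
    A minimal 1D sketch (values chosen for illustration):
    >>> from astropy.nddata.utils import overlap_slices
    >>> overlap_slices(5, 3, 1)
    ((slice(0, 3, None),), (slice(0, 3, None),))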
"""
if mode not in ['partial', 'trim', 'strict']:
raise ValueError('Mode can be only "partial", "trim", or "strict".')
if np.isscalar(small_array_shape):
small_array_shape = (small_array_shape, )
if np.isscalar(large_array_shape):
large_array_shape = (large_array_shape, )
if np.isscalar(position):
position = (position, )
if len(small_array_shape) != len(large_array_shape):
raise ValueError('"large_array_shape" and "small_array_shape" must '
'have the same number of dimensions.')
if len(small_array_shape) != len(position):
raise ValueError('"position" must have the same number of dimensions '
'as "small_array_shape".')
# Get edge coordinates
edges_min = [_round(pos + 0.5 - small_shape / 2. + _offset(small_shape))
for (pos, small_shape) in zip(position, small_array_shape)]
edges_max = [_round(pos + 0.5 + small_shape / 2. + _offset(small_shape))
for (pos, small_shape) in zip(position, small_array_shape)]
for e_max in edges_max:
if e_max <= 0:
raise NoOverlapError('Arrays do not overlap.')
for e_min, large_shape in zip(edges_min, large_array_shape):
if e_min >= large_shape:
raise NoOverlapError('Arrays do not overlap.')
if mode == 'strict':
for e_min in edges_min:
if e_min < 0:
raise PartialOverlapError('Arrays overlap only partially.')
for e_max, large_shape in zip(edges_max, large_array_shape):
if e_max >= large_shape:
raise PartialOverlapError('Arrays overlap only partially.')
# Set up slices
slices_large = tuple(slice(max(0, edge_min), min(large_shape, edge_max))
for (edge_min, edge_max, large_shape) in
zip(edges_min, edges_max, large_array_shape))
if mode == 'trim':
slices_small = tuple(slice(0, slc.stop - slc.start)
for slc in slices_large)
else:
slices_small = tuple(slice(max(0, -edge_min),
min(large_shape - edge_min,
edge_max - edge_min))
for (edge_min, edge_max, large_shape) in
zip(edges_min, edges_max, large_array_shape))
return slices_large, slices_small
def extract_array(array_large, shape, position, mode='partial',
fill_value=np.nan, return_position=False):
"""
Extract a smaller array of the given shape and position from a
larger array.
Parameters
----------
array_large : `~numpy.ndarray`
The array from which to extract the small array.
shape : tuple or int
The shape of the extracted array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : tuple of numbers or number
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers
(for 1D arrays, this can be a number).
mode : {'partial', 'trim', 'strict'}, optional
The mode used for extracting the small array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
small array and the large array is sufficient. For the
``'strict'`` mode, the small array has to be fully contained
within the large array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In all
modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode,
positions in the small array that do not overlap with the large
array will be filled with ``fill_value``. In ``'trim'`` mode
only the overlapping elements are returned, thus the resulting
small array may be smaller than the requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the extracted
small array that do not overlap with the input ``array_large``.
``fill_value`` must have the same ``dtype`` as the
``array_large`` array.
return_position : boolean, optional
If `True`, return the coordinates of ``position`` in the
coordinate system of the returned array.
Returns
-------
array_small : `~numpy.ndarray`
The extracted array.
new_position : tuple
If ``return_position`` is true, this tuple will contain the
coordinates of the input ``position`` in the coordinate system
of ``array_small``. Note that for partially overlapping arrays,
``new_position`` might actually be outside of the
``array_small``; ``array_small[new_position]`` might give wrong
results if any element in ``new_position`` is negative.
Examples
--------
We consider a large array with the shape 11x10, from which we extract
a small array of shape 3x5:
>>> import numpy as np
>>> from astropy.nddata.utils import extract_array
>>> large_array = np.arange(110).reshape((11, 10))
>>> extract_array(large_array, (3, 5), (7, 7))
array([[65, 66, 67, 68, 69],
[75, 76, 77, 78, 79],
[85, 86, 87, 88, 89]])
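    If ``return_position`` is set, the input position expressed in the
    cutout's coordinate system is returned as well (an additional sketch,
    not part of the original examples):
    >>> small, pos = extract_array(large_array, (3, 5), (7, 7),
    ...                            return_position=True)
    >>> pos
    (1, 2)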
"""
if np.isscalar(shape):
shape = (shape, )
if np.isscalar(position):
position = (position, )
if mode not in ['partial', 'trim', 'strict']:
raise ValueError("Valid modes are 'partial', 'trim', and 'strict'.")
large_slices, small_slices = overlap_slices(array_large.shape,
shape, position, mode=mode)
extracted_array = array_large[large_slices]
if return_position:
new_position = [i - s.start for i, s in zip(position, large_slices)]
    # Extracting on the edges is presumably a rare case, so treat it
    # specially here
if (extracted_array.shape != shape) and (mode == 'partial'):
extracted_array = np.zeros(shape, dtype=array_large.dtype)
extracted_array[:] = fill_value
extracted_array[small_slices] = array_large[large_slices]
if return_position:
new_position = [i + s.start for i, s in zip(new_position,
small_slices)]
if return_position:
return extracted_array, tuple(new_position)
else:
return extracted_array
def add_array(array_large, array_small, position):
"""
Add a smaller array at a given position in a larger array.
Parameters
----------
array_large : `~numpy.ndarray`
Large array.
array_small : `~numpy.ndarray`
Small array to add.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
new_array : `~numpy.ndarray`
The new array formed from the sum of ``array_large`` and
``array_small``.
Notes
-----
The addition is done in-place.
Examples
--------
We consider a large array of zeros with the shape 5x5 and a small
array of ones with a shape of 3x3:
>>> import numpy as np
>>> from astropy.nddata.utils import add_array
>>> large_array = np.zeros((5, 5))
>>> small_array = np.ones((3, 3))
>>> add_array(large_array, small_array, (1, 2)) # doctest: +FLOAT_CMP
array([[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
"""
# Check if large array is really larger
if all(large_shape > small_shape for (large_shape, small_shape)
in zip(array_large.shape, array_small.shape)):
large_slices, small_slices = overlap_slices(array_large.shape,
array_small.shape, position)
array_large[large_slices] += array_small[small_slices]
return array_large
else:
raise ValueError("Can't add array. Small array too large.")
def subpixel_indices(position, subsampling):
"""
Convert decimal points to indices, given a subsampling factor.
This discards the integer part of the position and uses only the decimal
place, and converts this to a subpixel position depending on the
subsampling specified. The center of a pixel corresponds to an integer
position.
Parameters
----------
position : `~numpy.ndarray` or array-like
Positions in pixels.
subsampling : int
Subsampling factor per pixel.
Returns
-------
indices : `~numpy.ndarray`
The integer subpixel indices corresponding to the input positions.
Examples
--------
If no subsampling is used, then the subpixel indices returned are always 0:
>>> from astropy.nddata.utils import subpixel_indices
>>> subpixel_indices([1.2, 3.4, 5.6], 1) # doctest: +FLOAT_CMP
array([0., 0., 0.])
    If instead we use a subsampling of 2, we see that for the first two
    values (1.2 and 3.4) the subpixel index is 1, while for 5.5 it is 0.
    This is because 1.2 and 3.4 lie in the right half of the pixels centered
    on 1 and 3, while 5.5 lies exactly on the left edge of the pixel
    centered on 6.
>>> subpixel_indices([1.2, 3.4, 5.5], 2) # doctest: +FLOAT_CMP
array([1., 1., 0.])
"""
# Get decimal points
fractions = np.modf(np.asanyarray(position) + 0.5)[0]
return np.floor(fractions * subsampling)
@support_nddata
def block_reduce(data, block_size, func=np.sum):
"""
Downsample a data array by applying a function to local blocks.
If ``data`` is not perfectly divisible by ``block_size`` along a
given axis then the data will be trimmed (from the end) along that
axis.
Parameters
----------
data : array_like
The data to be resampled.
block_size : int or array_like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.
func : callable, optional
The method to use to downsample the data. Must be a callable
that takes in a `~numpy.ndarray` along with an ``axis`` keyword,
which defines the axis along which the function is applied. The
default is `~numpy.sum`, which provides block summation (and
conserves the data sum).
Returns
-------
output : array-like
The resampled data.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import block_reduce
>>> data = np.arange(16).reshape(4, 4)
>>> block_reduce(data, 2) # doctest: +SKIP
array([[10, 18],
[42, 50]])
>>> block_reduce(data, 2, func=np.mean) # doctest: +SKIP
array([[ 2.5, 4.5],
[ 10.5, 12.5]])
"""
from skimage.measure import block_reduce
data = np.asanyarray(data)
block_size = np.atleast_1d(block_size)
if data.ndim > 1 and len(block_size) == 1:
block_size = np.repeat(block_size, data.ndim)
if len(block_size) != data.ndim:
raise ValueError('`block_size` must be a scalar or have the same '
'length as `data.shape`')
block_size = np.array([int(i) for i in block_size])
size_resampled = np.array(data.shape) // block_size
size_init = size_resampled * block_size
# trim data if necessary
for i in range(data.ndim):
if data.shape[i] != size_init[i]:
data = data.swapaxes(0, i)
data = data[:size_init[i]]
data = data.swapaxes(0, i)
return block_reduce(data, tuple(block_size), func=func)
@support_nddata
def block_replicate(data, block_size, conserve_sum=True):
"""
Upsample a data array by block replication.
Parameters
----------
data : array_like
The data to be block replicated.
block_size : int or array_like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
        ``block_size`` will be used for every axis.
conserve_sum : bool, optional
If `True` (the default) then the sum of the output
block-replicated data will equal the sum of the input ``data``.
Returns
-------
output : array_like
The block-replicated data.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import block_replicate
>>> data = np.array([[0., 1.], [2., 3.]])
>>> block_replicate(data, 2) # doctest: +FLOAT_CMP
array([[0. , 0. , 0.25, 0.25],
[0. , 0. , 0.25, 0.25],
[0.5 , 0.5 , 0.75, 0.75],
[0.5 , 0.5 , 0.75, 0.75]])
>>> block_replicate(data, 2, conserve_sum=False) # doctest: +FLOAT_CMP
array([[0., 0., 1., 1.],
[0., 0., 1., 1.],
[2., 2., 3., 3.],
[2., 2., 3., 3.]])
"""
data = np.asanyarray(data)
block_size = np.atleast_1d(block_size)
if data.ndim > 1 and len(block_size) == 1:
block_size = np.repeat(block_size, data.ndim)
if len(block_size) != data.ndim:
raise ValueError('`block_size` must be a scalar or have the same '
'length as `data.shape`')
for i in range(data.ndim):
data = np.repeat(data, block_size[i], axis=i)
if conserve_sum:
data = data / float(np.prod(block_size))
return data
class Cutout2D:
"""
Create a cutout object from a 2D array.
The returned object will contain a 2D cutout array. If
``copy=False`` (default), the cutout array is a view into the
original ``data`` array, otherwise the cutout array will contain a
copy of the original data.
If a `~astropy.wcs.WCS` object is input, then the returned object
will also contain a copy of the original WCS, but updated for the
cutout array.
For example usage, see :ref:`cutout_images`.
.. warning::
The cutout WCS object does not currently handle cases where the
input WCS object contains distortion lookup tables described in
the `FITS WCS distortion paper
<http://www.atnf.csiro.au/people/mcalabre/WCS/dcs_20040422.pdf>`__.
Parameters
----------
data : `~numpy.ndarray`
The 2D data array from which to extract the cutout array.
position : tuple or `~astropy.coordinates.SkyCoord`
The position of the cutout array's center with respect to
the ``data`` array. The position can be specified either as
a ``(x, y)`` tuple of pixel coordinates or a
`~astropy.coordinates.SkyCoord`, in which case ``wcs`` is a
required input.
size : int, array-like, `~astropy.units.Quantity`
The size of the cutout array along each axis. If ``size``
is a scalar number or a scalar `~astropy.units.Quantity`,
then a square cutout of ``size`` will be created. If
``size`` has two elements, they should be in ``(ny, nx)``
order. Scalar numbers in ``size`` are assumed to be in
units of pixels. ``size`` can also be a
`~astropy.units.Quantity` object or contain
`~astropy.units.Quantity` objects. Such
`~astropy.units.Quantity` objects must be in pixel or
angular units. For all cases, ``size`` will be converted to
        an integer number of pixels, rounding to the nearest
integer. See the ``mode`` keyword for additional details on
the final cutout size.
.. note::
If ``size`` is in angular units, the cutout size is
converted to pixels using the pixel scales along each
axis of the image at the ``CRPIX`` location. Projection
and other non-linear distortions are not taken into
account.
wcs : `~astropy.wcs.WCS`, optional
A WCS object associated with the input ``data`` array. If
``wcs`` is not `None`, then the returned cutout object will
contain a copy of the updated WCS for the cutout data array.
mode : {'trim', 'partial', 'strict'}, optional
The mode used for creating the cutout data array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
cutout array and the input ``data`` array is sufficient.
For the ``'strict'`` mode, the cutout array has to be fully
contained within the ``data`` array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In
all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'``
mode, positions in the cutout array that do not overlap with
the ``data`` array will be filled with ``fill_value``. In
``'trim'`` mode only the overlapping elements are returned,
thus the resulting cutout array may be smaller than the
requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the
cutout array that do not overlap with the input ``data``.
``fill_value`` must have the same ``dtype`` as the input
``data`` array.
copy : bool, optional
If `False` (default), then the cutout data will be a view
into the original ``data`` array. If `True`, then the
cutout data will hold a copy of the original ``data`` array.
Attributes
----------
data : 2D `~numpy.ndarray`
The 2D cutout array.
shape : 2 tuple
The ``(ny, nx)`` shape of the cutout array.
shape_input : 2 tuple
The ``(ny, nx)`` shape of the input (original) array.
input_position_cutout : 2 tuple
The (unrounded) ``(x, y)`` position with respect to the cutout
array.
input_position_original : 2 tuple
The original (unrounded) ``(x, y)`` input position (with respect
to the original array).
slices_original : 2 tuple of slice objects
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the original array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
slices_cutout : 2 tuple of slice objects
A tuple of slice objects for the minimal bounding box of the
cutout with respect to the cutout array. For
``mode='partial'``, the slices are for the valid (non-filled)
cutout values.
xmin_original, ymin_original, xmax_original, ymax_original : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values. These
values are the same as those in `bbox_original`.
xmin_cutout, ymin_cutout, xmax_cutout, ymax_cutout : float
The minimum and maximum ``x`` and ``y`` indices of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values. These values are
the same as those in `bbox_cutout`.
wcs : `~astropy.wcs.WCS` or `None`
A WCS object associated with the cutout array if a ``wcs``
was input.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata.utils import Cutout2D
>>> from astropy import units as u
>>> data = np.arange(20.).reshape(5, 4)
>>> cutout1 = Cutout2D(data, (2, 2), (3, 3))
>>> print(cutout1.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> print(cutout1.center_original)
(2.0, 2.0)
>>> print(cutout1.center_cutout)
(1.0, 1.0)
>>> print(cutout1.origin_original)
(1, 1)
>>> cutout2 = Cutout2D(data, (2, 2), 3)
>>> print(cutout2.data) # doctest: +FLOAT_CMP
[[ 5. 6. 7.]
[ 9. 10. 11.]
[13. 14. 15.]]
>>> size = u.Quantity([3, 3], u.pixel)
>>> cutout3 = Cutout2D(data, (0, 0), size)
>>> print(cutout3.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout4 = Cutout2D(data, (0, 0), (3 * u.pixel, 3))
>>> print(cutout4.data) # doctest: +FLOAT_CMP
[[0. 1.]
[4. 5.]]
>>> cutout5 = Cutout2D(data, (0, 0), (3, 3), mode='partial')
>>> print(cutout5.data) # doctest: +FLOAT_CMP
[[nan nan nan]
[nan 0. 1.]
[nan 4. 5.]]
"""
def __init__(self, data, position, size, wcs=None, mode='trim',
fill_value=np.nan, copy=False):
if isinstance(position, SkyCoord):
if wcs is None:
raise ValueError('wcs must be input if position is a '
'SkyCoord')
position = skycoord_to_pixel(position, wcs, mode='all') # (x, y)
if np.isscalar(size):
size = np.repeat(size, 2)
# special handling for a scalar Quantity
if isinstance(size, u.Quantity):
size = np.atleast_1d(size)
if len(size) == 1:
size = np.repeat(size, 2)
if len(size) > 2:
raise ValueError('size must have at most two elements')
shape = np.zeros(2).astype(int)
pixel_scales = None
        # ``size`` can have a mixture of int and Quantity (and even
        # different units), so evaluate each axis separately
for axis, side in enumerate(size):
if not isinstance(side, u.Quantity):
shape[axis] = int(np.round(size[axis])) # pixels
else:
if side.unit == u.pixel:
shape[axis] = int(np.round(side.value))
elif side.unit.physical_type == 'angle':
if wcs is None:
raise ValueError('wcs must be input if any element '
'of size has angular units')
if pixel_scales is None:
pixel_scales = u.Quantity(
proj_plane_pixel_scales(wcs), wcs.wcs.cunit[axis])
shape[axis] = int(np.round(
(side / pixel_scales[axis]).decompose()))
else:
raise ValueError('shape can contain Quantities with only '
'pixel or angular units')
data = np.asanyarray(data)
# reverse position because extract_array and overlap_slices
# use (y, x), but keep the input position
pos_yx = position[::-1]
cutout_data, input_position_cutout = extract_array(
data, tuple(shape), pos_yx, mode=mode, fill_value=fill_value,
return_position=True)
if copy:
cutout_data = np.copy(cutout_data)
self.data = cutout_data
self.input_position_cutout = input_position_cutout[::-1] # (x, y)
slices_original, slices_cutout = overlap_slices(
data.shape, shape, pos_yx, mode=mode)
self.slices_original = slices_original
self.slices_cutout = slices_cutout
self.shape = self.data.shape
self.input_position_original = position
self.shape_input = shape
((self.ymin_original, self.ymax_original),
(self.xmin_original, self.xmax_original)) = self.bbox_original
((self.ymin_cutout, self.ymax_cutout),
(self.xmin_cutout, self.xmax_cutout)) = self.bbox_cutout
# the true origin pixel of the cutout array, including any
# filled cutout values
self._origin_original_true = (
self.origin_original[0] - self.slices_cutout[1].start,
self.origin_original[1] - self.slices_cutout[0].start)
if wcs is not None:
self.wcs = deepcopy(wcs)
self.wcs.wcs.crpix -= self._origin_original_true
self.wcs._naxis = [self.data.shape[1], self.data.shape[0]]
if wcs.sip is not None:
self.wcs.sip = Sip(wcs.sip.a, wcs.sip.b,
wcs.sip.ap, wcs.sip.bp,
wcs.sip.crpix - self._origin_original_true)
else:
self.wcs = None
def to_original_position(self, cutout_position):
"""
Convert an ``(x, y)`` position in the cutout array to the original
``(x, y)`` position in the original large array.
Parameters
----------
cutout_position : tuple
The ``(x, y)`` pixel position in the cutout array.
Returns
-------
original_position : tuple
The corresponding ``(x, y)`` pixel position in the original
large array.
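        Examples
        --------
        A small sketch, reusing the first cutout from the class docstring:
        >>> import numpy as np
        >>> from astropy.nddata.utils import Cutout2D
        >>> cutout = Cutout2D(np.arange(20.).reshape(5, 4), (2, 2), (3, 3))
        >>> cutout.to_original_position((0, 0))
        (1, 1)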
"""
return tuple(cutout_position[i] + self.origin_original[i]
for i in [0, 1])
def to_cutout_position(self, original_position):
"""
Convert an ``(x, y)`` position in the original large array to
the ``(x, y)`` position in the cutout array.
Parameters
----------
original_position : tuple
The ``(x, y)`` pixel position in the original large array.
Returns
-------
cutout_position : tuple
The corresponding ``(x, y)`` pixel position in the cutout
array.
"""
return tuple(original_position[i] - self.origin_original[i]
for i in [0, 1])
def plot_on_original(self, ax=None, fill=False, **kwargs):
"""
Plot the cutout region on a matplotlib Axes instance.
Parameters
----------
ax : `matplotlib.axes.Axes` instance, optional
If `None`, then the current `matplotlib.axes.Axes` instance
is used.
fill : bool, optional
Set whether to fill the cutout patch. The default is
`False`.
kwargs : optional
Any keyword arguments accepted by `matplotlib.patches.Patch`.
Returns
-------
ax : `matplotlib.axes.Axes` instance
The matplotlib Axes instance constructed in the method if
``ax=None``. Otherwise the output ``ax`` is the same as the
input ``ax``.
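        Examples
        --------
        A sketch, assuming a ``cutout`` built from a ``data`` array as in
        the class docstring (requires matplotlib):
        >>> import matplotlib.pyplot as plt  # doctest: +SKIP
        >>> plt.imshow(data, origin='lower')  # doctest: +SKIP
        >>> cutout.plot_on_original(color='white')  # doctest: +SKIP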
"""
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
kwargs['fill'] = fill
if ax is None:
ax = plt.gca()
height, width = self.shape
hw, hh = width / 2., height / 2.
pos_xy = self.position_original - np.array([hw, hh])
patch = mpatches.Rectangle(pos_xy, width, height, 0., **kwargs)
ax.add_patch(patch)
return ax
@staticmethod
def _calc_center(slices):
"""
Calculate the center position. The center position will be
fractional for even-sized arrays. For ``mode='partial'``, the
central position is calculated for the valid (non-filled) cutout
values.
"""
return tuple(0.5 * (slices[i].start + slices[i].stop - 1)
for i in [1, 0])
@staticmethod
def _calc_bbox(slices):
"""
Calculate a minimal bounding box in the form ``((ymin, ymax),
(xmin, xmax))``. Note these are pixel locations, not slice
indices. For ``mode='partial'``, the bounding box indices are
for the valid (non-filled) cutout values.
"""
# (stop - 1) to return the max pixel location, not the slice index
return ((slices[0].start, slices[0].stop - 1),
(slices[1].start, slices[1].stop - 1))
@lazyproperty
def origin_original(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the original array. For ``mode='partial'``, the
origin pixel is calculated for the valid (non-filled) cutout
values.
"""
return (self.slices_original[1].start, self.slices_original[0].start)
@lazyproperty
def origin_cutout(self):
"""
The ``(x, y)`` index of the origin pixel of the cutout with
respect to the cutout array. For ``mode='partial'``, the origin
pixel is calculated for the valid (non-filled) cutout values.
"""
return (self.slices_cutout[1].start, self.slices_cutout[0].start)
@lazyproperty
def position_original(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the original array.
"""
return (_round(self.input_position_original[0]),
_round(self.input_position_original[1]))
@lazyproperty
def position_cutout(self):
"""
The ``(x, y)`` position index (rounded to the nearest pixel) in
the cutout array.
"""
return (_round(self.input_position_cutout[0]),
_round(self.input_position_cutout[1]))
@lazyproperty
def center_original(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the original array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_original)
@lazyproperty
def center_cutout(self):
"""
The central ``(x, y)`` position of the cutout array with respect
to the cutout array. For ``mode='partial'``, the central
position is calculated for the valid (non-filled) cutout values.
"""
return self._calc_center(self.slices_cutout)
@lazyproperty
def bbox_original(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
original array. For ``mode='partial'``, the bounding box
indices are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_original)
@lazyproperty
def bbox_cutout(self):
"""
The bounding box ``((ymin, ymax), (xmin, xmax))`` of the minimal
rectangular region of the cutout array with respect to the
cutout array. For ``mode='partial'``, the bounding box indices
are for the valid (non-filled) cutout values.
"""
return self._calc_bbox(self.slices_cutout)
|
3c973852924095d6630d55813cbfb6a5bc0f56762aaf29a1441d6de758b53eb4 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The astropy.time package provides functionality for manipulating times and
dates. Specific emphasis is placed on supporting time scales (e.g. UTC, TAI,
UT1) and time representations (e.g. JD, MJD, ISO 8601) that are used in
astronomy.
"""
import copy
import operator
from datetime import datetime, timedelta
import numpy as np
from .. import units as u, constants as const
from .. import _erfa as erfa
from ..units import UnitConversionError
from ..utils import ShapedLikeNDArray
from ..utils.compat.misc import override__dir__
from ..utils.data_info import MixinInfo, data_info_factory
from .utils import day_frac
from .formats import (TIME_FORMATS, TIME_DELTA_FORMATS,
TimeJD, TimeUnique, TimeAstropyTime, TimeDatetime)
# Import TimeFromEpoch to avoid breaking code that followed the old example of
# making a custom timescale in the documentation.
from .formats import TimeFromEpoch # pylint: disable=W0611
__all__ = ['Time', 'TimeDelta', 'TIME_SCALES', 'STANDARD_TIME_SCALES', 'TIME_DELTA_SCALES',
'ScaleValueError', 'OperandTypeError', 'TimeInfo']
STANDARD_TIME_SCALES = ('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
LOCAL_SCALES = ('local',)
TIME_TYPES = dict((scale, scales) for scales in (STANDARD_TIME_SCALES, LOCAL_SCALES) for scale in scales)
TIME_SCALES = STANDARD_TIME_SCALES + LOCAL_SCALES
MULTI_HOPS = {('tai', 'tcb'): ('tt', 'tdb'),
('tai', 'tcg'): ('tt',),
('tai', 'ut1'): ('utc',),
('tai', 'tdb'): ('tt',),
('tcb', 'tcg'): ('tdb', 'tt'),
('tcb', 'tt'): ('tdb',),
('tcb', 'ut1'): ('tdb', 'tt', 'tai', 'utc'),
('tcb', 'utc'): ('tdb', 'tt', 'tai'),
('tcg', 'tdb'): ('tt',),
('tcg', 'ut1'): ('tt', 'tai', 'utc'),
('tcg', 'utc'): ('tt', 'tai'),
('tdb', 'ut1'): ('tt', 'tai', 'utc'),
('tdb', 'utc'): ('tt', 'tai'),
('tt', 'ut1'): ('tai', 'utc'),
('tt', 'utc'): ('tai',),
}
GEOCENTRIC_SCALES = ('tai', 'tt', 'tcg')
BARYCENTRIC_SCALES = ('tcb', 'tdb')
ROTATIONAL_SCALES = ('ut1',)
TIME_DELTA_TYPES = dict((scale, scales)
for scales in (GEOCENTRIC_SCALES, BARYCENTRIC_SCALES,
ROTATIONAL_SCALES, LOCAL_SCALES) for scale in scales)
TIME_DELTA_SCALES = GEOCENTRIC_SCALES + BARYCENTRIC_SCALES + ROTATIONAL_SCALES + LOCAL_SCALES
# For time scale changes, we need L_G and L_B, which are stored in erfam.h as
# /* L_G = 1 - d(TT)/d(TCG) */
# define ERFA_ELG (6.969290134e-10)
# /* L_B = 1 - d(TDB)/d(TCB), and TDB (s) at TAI 1977/1/1.0 */
# define ERFA_ELB (1.550519768e-8)
# These are exposed in erfa as erfa.ELG and erfa.ELB.
# Implied: d(TT)/d(TCG) = 1-L_G
# and d(TCG)/d(TT) = 1/(1-L_G) = 1 + (1-(1-L_G))/(1-L_G) = 1 + L_G/(1-L_G)
# scale offsets as second = first + first * scale_offset[(first,second)]
SCALE_OFFSETS = {('tt', 'tai'): None,
('tai', 'tt'): None,
('tcg', 'tt'): -erfa.ELG,
('tt', 'tcg'): erfa.ELG / (1. - erfa.ELG),
('tcg', 'tai'): -erfa.ELG,
('tai', 'tcg'): erfa.ELG / (1. - erfa.ELG),
('tcb', 'tdb'): -erfa.ELB,
('tdb', 'tcb'): erfa.ELB / (1. - erfa.ELB)}
# triple-level dictionary, yay!
SIDEREAL_TIME_MODELS = {
'mean': {
'IAU2006': {'function': erfa.gmst06, 'scales': ('ut1', 'tt')},
'IAU2000': {'function': erfa.gmst00, 'scales': ('ut1', 'tt')},
'IAU1982': {'function': erfa.gmst82, 'scales': ('ut1',)}},
'apparent': {
'IAU2006A': {'function': erfa.gst06a, 'scales': ('ut1', 'tt')},
'IAU2000A': {'function': erfa.gst00a, 'scales': ('ut1', 'tt')},
'IAU2000B': {'function': erfa.gst00b, 'scales': ('ut1',)},
'IAU1994': {'function': erfa.gst94, 'scales': ('ut1',)}}}
class TimeInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = set(['unit']) # unit is read-only and None
attr_names = MixinInfo.attr_names | {'serialize_method'}
_supports_indexing = True
# The usual tuple of attributes needed for serialization is replaced
    # by a property, since Time can be serialized in different ways.
_represent_as_dict_extra_attrs = ('format', 'scale', 'precision',
'in_subfmt', 'out_subfmt', 'location',
'_delta_ut1_utc', '_delta_tdb_tt')
# When serializing, write out the `value` attribute using the column name.
_represent_as_dict_primary_data = 'value'
mask_val = np.ma.masked
@property
def _represent_as_dict_attrs(self):
method = self.serialize_method[self._serialize_context]
if method == 'formatted_value':
out = ('value',)
elif method == 'jd1_jd2':
out = ('jd1', 'jd2')
else:
raise ValueError("serialize method must be 'formatted_value' or 'jd1_jd2'")
return out + self._represent_as_dict_extra_attrs
def __init__(self, bound=False):
super().__init__(bound)
# If bound to a data object instance then create the dict of attributes
# which stores the info attribute values.
if bound:
# Specify how to serialize this object depending on context.
# If ``True`` for a context, then use formatted ``value`` attribute
# (e.g. the ISO time string). If ``False`` then use float jd1 and jd2.
self.serialize_method = {'fits': 'jd1_jd2',
'ecsv': 'formatted_value',
'hdf5': 'jd1_jd2',
'yaml': 'jd1_jd2',
None: 'jd1_jd2'}
@property
def unit(self):
return None
info_summary_stats = staticmethod(
data_info_factory(names=MixinInfo._stats,
funcs=[getattr(np, stat) for stat in MixinInfo._stats]))
    # When Time has mean, std, min, max methods:
    # funcs = [lambda x: getattr(x, stat)() for stat in MixinInfo._stats]
def _construct_from_dict_base(self, map):
if 'jd1' in map and 'jd2' in map:
format = map.pop('format')
map['format'] = 'jd'
map['val'] = map.pop('jd1')
map['val2'] = map.pop('jd2')
else:
format = map['format']
map['val'] = map.pop('value')
out = self._parent_cls(**map)
out.format = format
return out
def _construct_from_dict(self, map):
delta_ut1_utc = map.pop('_delta_ut1_utc', None)
delta_tdb_tt = map.pop('_delta_tdb_tt', None)
out = self._construct_from_dict_base(map)
if delta_ut1_utc is not None:
out._delta_ut1_utc = delta_ut1_utc
if delta_tdb_tt is not None:
out._delta_tdb_tt = delta_tdb_tt
return out
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Time instance which is consistent with the input Time objects
``cols`` and has ``length`` rows.
This is intended for creating an empty Time instance whose elements can
be set in-place for table operations like join or vstack. It checks
that the input locations and attributes are consistent. This is used
when a Time object is used as a mixin column in an astropy Table.
Parameters
----------
cols : list
List of input columns (Time objects)
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Time (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'description'))
attrs.pop('dtype') # Not relevant for Time
col0 = cols[0]
# Check that location is consistent for all Time objects
for col in cols[1:]:
# This is the method used by __setitem__ to ensure that the right side
# has a consistent location (and coerce data if necessary, but that does
# not happen in this case since `col` is already a Time object). If this
# passes then any subsequent table operations via setitem will work.
try:
col0._make_value_equivalent(slice(None), col)
except ValueError:
raise ValueError('input columns have inconsistent locations')
# Make a new Time object with the desired shape and attributes
shape = (length,) + attrs.pop('shape')
        jd2000 = 2451544.5  # JD for 2000-01-01 00:00; an arbitrary value that works with ERFA
jd1 = np.full(shape, jd2000, dtype='f8')
jd2 = np.zeros(shape, dtype='f8')
tm_attrs = {attr: getattr(col0, attr)
for attr in ('scale', 'location',
'precision', 'in_subfmt', 'out_subfmt')}
out = self._parent_cls(jd1, jd2, format='jd', **tm_attrs)
out.format = col0.format
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
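    # Illustrative sketch (assuming two Time columns with matching locations):
    # ``new_like`` is what lets table operations stack Time columns, e.g.
    #
    #     >>> from astropy.table import Table, vstack
    #     >>> from astropy.time import Time
    #     >>> t1 = Table({'t': Time(['2001-01-02'])})
    #     >>> t2 = Table({'t': Time(['2005-05-05'])})
    #     >>> len(vstack([t1, t2])['t'])
    #     2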
class TimeDeltaInfo(TimeInfo):
_represent_as_dict_extra_attrs = ('format', 'scale')
def _construct_from_dict(self, map):
return self._construct_from_dict_base(map)
class Time(ShapedLikeNDArray):
"""
Represent and manipulate times and dates for astronomy.
A `Time` object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format`` and must correspond to the specified time ``scale``. The
optional ``val2`` time input should be supplied only for numeric input
formats (e.g. JD) where very high precision (better than 64-bit precision)
is required.
The allowed values for ``format`` can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date',
'datetime', 'iso', 'isot', 'yday', 'fits', 'byear', 'jyear', 'byear_str',
'jyear_str']
Parameters
----------
val : sequence, ndarray, number, str, bytes, or `~astropy.time.Time` object
Value(s) to initialize the time or times. Bytes are decoded as ascii.
val2 : sequence, ndarray, or number; optional
Value(s) to initialize the time or times. Only used for numerical
input, to help preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following:
('tai', 'tcb', 'tcg', 'tdb', 'tt', 'ut1', 'utc')
precision : int, optional
Digits of precision in string representation of time
in_subfmt : str, optional
Subformat for inputting string times
out_subfmt : str, optional
Subformat for outputting string times
location : `~astropy.coordinates.EarthLocation` or tuple, optional
        If given as a tuple, it should be able to initialize an
        EarthLocation instance, i.e., either contain 3 items with units of
length for geocentric coordinates, or contain a longitude, latitude,
and an optional height for geodetic coordinates.
Can be a single location, or one for each input time.
copy : bool, optional
Make a copy of the input values
"""
SCALES = TIME_SCALES
"""List of time scales"""
FORMATS = TIME_FORMATS
"""Dict of time formats"""
# Make sure that reverse arithmetic (e.g., TimeDelta.__rmul__)
# gets called over the __mul__ of Numpy arrays.
__array_priority__ = 20000
# Declare that Time can be used as a Table column by defining the
# attribute where column attributes will be stored.
_astropy_column_attrs = None
def __new__(cls, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if isinstance(val, cls):
self = val.replicate(format=format, copy=copy)
else:
self = super().__new__(cls)
return self
def __getnewargs__(self):
return (self._time,)
def __init__(self, val, val2=None, format=None, scale=None,
precision=None, in_subfmt=None, out_subfmt=None,
location=None, copy=False):
if location is not None:
from ..coordinates import EarthLocation
if isinstance(location, EarthLocation):
self.location = location
else:
self.location = EarthLocation(*location)
if self.location.size == 1:
self.location = self.location.squeeze()
else:
self.location = None
if isinstance(val, self.__class__):
# Update _time formatting parameters if explicitly specified
if precision is not None:
self._time.precision = precision
if in_subfmt is not None:
self._time.in_subfmt = in_subfmt
if out_subfmt is not None:
self._time.out_subfmt = out_subfmt
self.SCALES = TIME_TYPES[self.scale]
if scale is not None:
self._set_scale(scale)
else:
self._init_from_vals(val, val2, format, scale, copy,
precision, in_subfmt, out_subfmt)
self.SCALES = TIME_TYPES[self.scale]
if self.location is not None and (self.location.size > 1 and
self.location.shape != self.shape):
try:
# check the location can be broadcast to self's shape.
self.location = np.broadcast_to(self.location, self.shape,
subok=True)
except Exception:
raise ValueError('The location with shape {0} cannot be '
'broadcast against time with shape {1}. '
'Typically, either give a single location or '
'one for each time.'
.format(self.location.shape, self.shape))
def _init_from_vals(self, val, val2, format, scale, copy,
precision=None, in_subfmt=None, out_subfmt=None):
"""
Set the internal _format, scale, and _time attrs from user
inputs. This handles coercion into the correct shapes and
some basic input validation.
"""
if precision is None:
precision = 3
if in_subfmt is None:
in_subfmt = '*'
if out_subfmt is None:
out_subfmt = '*'
# Coerce val into an array
val = _make_array(val, copy)
# If val2 is not None, ensure consistency
if val2 is not None:
val2 = _make_array(val2, copy)
try:
np.broadcast(val, val2)
except ValueError:
raise ValueError('Input val and val2 have inconsistent shape; '
'they cannot be broadcast together.')
if scale is not None:
if not (isinstance(scale, str) and
scale.lower() in self.SCALES):
raise ScaleValueError("Scale {0!r} is not in the allowed scales "
"{1}".format(scale,
sorted(self.SCALES)))
# If either of the input val, val2 are masked arrays then
# find the masked elements and fill them.
mask, val, val2 = _check_for_masked_and_fill(val, val2)
# Parse / convert input values into internal jd1, jd2 based on format
self._time = self._get_time_fmt(val, val2, format, scale,
precision, in_subfmt, out_subfmt)
self._format = self._time.name
        # If any inputs were masked then mask jd2 accordingly.  From the above
        # routine ``mask`` must be either Python bool False or a bool ndarray
        # with shape broadcastable to jd2.
if mask is not False:
mask = np.broadcast_to(mask, self._time.jd2.shape)
self._time.jd2[mask] = np.nan
def _get_time_fmt(self, val, val2, format, scale,
precision, in_subfmt, out_subfmt):
"""
Given the supplied val, val2, format and scale try to instantiate
the corresponding TimeFormat class to convert the input values into
the internal jd1 and jd2.
If format is `None` and the input is a string-type or object array then
guess available formats and stop when one matches.
"""
if format is None and val.dtype.kind in ('S', 'U', 'O'):
formats = [(name, cls) for name, cls in self.FORMATS.items()
if issubclass(cls, TimeUnique)]
err_msg = ('any of the formats where the format keyword is '
'optional {0}'.format([name for name, cls in formats]))
# AstropyTime is a pseudo-format that isn't in the TIME_FORMATS registry,
# but try to guess it at the end.
formats.append(('astropy_time', TimeAstropyTime))
elif not (isinstance(format, str) and
format.lower() in self.FORMATS):
if format is None:
raise ValueError("No time format was given, and the input is "
"not unique")
else:
raise ValueError("Format {0!r} is not one of the allowed "
"formats {1}".format(format,
sorted(self.FORMATS)))
else:
formats = [(format, self.FORMATS[format])]
err_msg = 'the format class {0}'.format(format)
for format, FormatClass in formats:
try:
return FormatClass(val, val2, scale, precision, in_subfmt, out_subfmt)
except UnitConversionError:
raise
except (ValueError, TypeError):
pass
else:
raise ValueError('Input values did not match {0}'.format(err_msg))
@classmethod
def now(cls):
"""
Creates a new object corresponding to the instant in time this
method is called.
.. note::
"Now" is determined using the `~datetime.datetime.utcnow`
            function, so its accuracy and precision are determined by that
function. Generally that means it is set by the accuracy of
your system clock.
Returns
-------
        nowtime : Time
A new `Time` object (or a subclass of `Time` if this is called from
such a subclass) at the current time.
"""
# call `utcnow` immediately to be sure it's ASAP
dtnow = datetime.utcnow()
return cls(val=dtnow, format='datetime', scale='utc')
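    # Illustrative usage (the value depends on when it is run):
    #
    #     >>> t = Time.now()
    #     >>> t.scale, t.format
    #     ('utc', 'datetime')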
info = TimeInfo()
@property
def writeable(self):
        return self._time.jd1.flags.writeable and self._time.jd2.flags.writeable
@writeable.setter
def writeable(self, value):
self._time.jd1.flags.writeable = value
self._time.jd2.flags.writeable = value
@property
def format(self):
"""
Get or set time format.
The format defines the way times are represented when accessed via the
``.value`` attribute. By default it is the same as the format used for
initializing the `Time` instance, but it can be set to any other value
that could be used for initialization. These can be listed with::
>>> list(Time.FORMATS)
['jd', 'mjd', 'decimalyear', 'unix', 'cxcsec', 'gps', 'plot_date',
'datetime', 'iso', 'isot', 'yday', 'fits', 'byear', 'jyear', 'byear_str',
'jyear_str']
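
        An illustrative round trip (the value shown is the MJD of the ISO
        time)::

            >>> t = Time('2010-01-01 00:00:00')
            >>> t.format = 'mjd'
            >>> t.value
            55197.0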
"""
return self._format
@format.setter
def format(self, format):
"""Set time format"""
if format not in self.FORMATS:
raise ValueError('format must be one of {0}'
.format(list(self.FORMATS)))
format_cls = self.FORMATS[format]
# If current output subformat is not in the new format then replace
# with default '*'
if hasattr(format_cls, 'subfmts'):
subfmt_names = [subfmt[0] for subfmt in format_cls.subfmts]
if self.out_subfmt not in subfmt_names:
self.out_subfmt = '*'
self._time = format_cls(self._time.jd1, self._time.jd2,
self._time._scale, self.precision,
in_subfmt=self.in_subfmt,
out_subfmt=self.out_subfmt,
from_jd=True)
self._format = format
def __repr__(self):
return ("<{0} object: scale='{1}' format='{2}' value={3}>"
.format(self.__class__.__name__, self.scale, self.format,
getattr(self, self.format)))
def __str__(self):
return str(getattr(self, self.format))
@property
def scale(self):
"""Time scale"""
return self._time.scale
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(self.SCALES)))
# Determine the chain of scale transformations to get from the current
# scale to the new scale. MULTI_HOPS contains a dict of all
# transformations (xforms) that require intermediate xforms.
# The MULTI_HOPS dict is keyed by (sys1, sys2) in alphabetical order.
xform = (self.scale, scale)
xform_sort = tuple(sorted(xform))
multi = MULTI_HOPS.get(xform_sort, ())
xforms = xform_sort[:1] + multi + xform_sort[-1:]
# If we made the reverse xform then reverse it now.
if xform_sort != xform:
xforms = tuple(reversed(xforms))
# Transform the jd1,2 pairs through the chain of scale xforms.
jd1, jd2 = self._time.jd1, self._time.jd2_filled
for sys1, sys2 in zip(xforms[:-1], xforms[1:]):
# Some xforms require an additional delta_ argument that is
# provided through Time methods. These values may be supplied by
# the user or computed based on available approximations. The
# get_delta_ methods are available for only one combination of
# sys1, sys2 though the property applies for both xform directions.
args = [jd1, jd2]
for sys12 in ((sys1, sys2), (sys2, sys1)):
dt_method = '_get_delta_{0}_{1}'.format(*sys12)
try:
get_dt = getattr(self, dt_method)
except AttributeError:
pass
else:
args.append(get_dt(jd1, jd2))
break
conv_func = getattr(erfa, sys1 + sys2)
jd1, jd2 = conv_func(*args)
if self.masked:
jd2[self.mask] = np.nan
self._time = self.FORMATS[self.format](jd1, jd2, scale, self.precision,
self.in_subfmt, self.out_subfmt,
from_jd=True)
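        # Illustrative walk-through (based on the MULTI_HOPS chain): for a
        # 'utc' -> 'tdb' conversion the chain is utc -> tai -> tt -> tdb,
        # i.e. erfa.utctai, erfa.taitt and erfa.tttdb are called in turn,
        # the last with the extra delta from _get_delta_tdb_tt.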
@property
def precision(self):
"""
Decimal precision when outputting seconds as floating point (int
value between 0 and 9 inclusive).
"""
return self._time.precision
@precision.setter
def precision(self, val):
del self.cache
if not isinstance(val, int) or val < 0 or val > 9:
raise ValueError('precision attribute must be an int between '
'0 and 9')
self._time.precision = val
@property
def in_subfmt(self):
"""
Unix wildcard pattern to select subformats for parsing string input
times.
"""
return self._time.in_subfmt
@in_subfmt.setter
def in_subfmt(self, val):
del self.cache
if not isinstance(val, str):
raise ValueError('in_subfmt attribute must be a string')
self._time.in_subfmt = val
@property
def out_subfmt(self):
"""
Unix wildcard pattern to select subformats for outputting times.
"""
return self._time.out_subfmt
@out_subfmt.setter
def out_subfmt(self, val):
del self.cache
if not isinstance(val, str):
raise ValueError('out_subfmt attribute must be a string')
self._time.out_subfmt = val
@property
def shape(self):
"""The shape of the time instances.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
AttributeError
If the shape of the ``jd1``, ``jd2``, ``location``,
``delta_ut1_utc``, or ``delta_tdb_tt`` attributes cannot be changed
without the arrays being copied. For these cases, use the
`Time.reshape` method (which copies any arrays that cannot be
reshaped in-place).
"""
return self._time.jd1.shape
@shape.setter
def shape(self, shape):
del self.cache
# We have to keep track of arrays that were already reshaped,
# since we may have to return those to their original shape if a later
# shape-setting fails.
reshaped = []
oldshape = self.shape
# In-place reshape of data/attributes. Need to access _time.jd1/2 not
# self.jd1/2 because the latter are not guaranteed to be the actual
# data, and in fact should not be directly changeable from the public
# API.
for obj, attr in ((self._time, 'jd1'),
(self._time, 'jd2'),
(self, '_delta_ut1_utc'),
(self, '_delta_tdb_tt'),
(self, 'location')):
val = getattr(obj, attr, None)
if val is not None and val.size > 1:
try:
val.shape = shape
except AttributeError:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
def _shaped_like_input(self, value):
out = value
if not self._time.jd1.shape and not np.ma.is_masked(value):
out = value.item()
return out
@property
def jd1(self):
"""
First of the two doubles that internally store time value(s) in JD.
"""
jd1 = self._time.mask_if_needed(self._time.jd1)
return self._shaped_like_input(jd1)
@property
def jd2(self):
"""
Second of the two doubles that internally store time value(s) in JD.
"""
jd2 = self._time.mask_if_needed(self._time.jd2)
return self._shaped_like_input(jd2)
@property
def value(self):
"""Time value(s) in current format"""
# The underlying way to get the time values for the current format is:
# self._shaped_like_input(self._time.to_value(parent=self))
# This is done in __getattr__. By calling getattr(self, self.format)
# the ``value`` attribute is cached.
return getattr(self, self.format)
@property
def masked(self):
return self._time.masked
@property
def mask(self):
return self._time.mask
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent Time object"""
# If there is a vector location then broadcast to the Time shape
# and then select with ``item``
if self.location is not None and self.location.shape:
self_location = np.broadcast_to(self.location, self.shape, subok=True)[item]
else:
self_location = self.location
if isinstance(value, Time):
# Make sure locations are compatible. Location can be either None or
# a Location object.
if self_location is None and value.location is None:
match = True
elif ((self_location is None and value.location is not None) or
(self_location is not None and value.location is None)):
match = False
else:
match = np.all(self_location == value.location)
if not match:
raise ValueError('cannot set to Time with different location: '
'expected location={} and '
'got location={}'
.format(self_location, value.location))
else:
try:
value = self.__class__(value, scale=self.scale, location=self_location)
except Exception:
try:
value = self.__class__(value, scale=self.scale, format=self.format,
location=self_location)
except Exception as err:
raise ValueError('cannot convert value to a compatible Time object: {}'
.format(err))
return value
def __setitem__(self, item, value):
if not self.writeable:
if self.shape:
raise ValueError('{} object is read-only. Make a '
'copy() or set "writeable" attribute to True.'
.format(self.__class__.__name__))
else:
raise ValueError('scalar {} object is read-only.'
.format(self.__class__.__name__))
# Any use of setitem results in immediate cache invalidation
del self.cache
# Setting invalidates transform deltas
for attr in ('_delta_tdb_tt', '_delta_ut1_utc'):
if hasattr(self, attr):
delattr(self, attr)
if value is np.ma.masked or value is np.nan:
self._time.jd2[item] = np.nan
return
value = self._make_value_equivalent(item, value)
# Finally directly set the jd1/2 values. Locations are known to match.
if self.scale is not None:
value = getattr(value, self.scale)
self._time.jd1[item] = value._time.jd1
self._time.jd2[item] = value._time.jd2
def light_travel_time(self, skycoord, kind='barycentric', location=None, ephemeris=None):
"""Light travel time correction to the barycentre or heliocentre.
The frame transformations used to calculate the location of the solar
system barycentre and the heliocentre rely on the erfa routine epv00,
which is consistent with the JPL DE405 ephemeris to an accuracy of
11.2 km, corresponding to a light travel time of 4 microseconds.
The routine assumes the source(s) are at large distance, i.e., neglects
finite-distance effects.
Parameters
----------
skycoord : `~astropy.coordinates.SkyCoord`
The sky location to calculate the correction for.
kind : str, optional
``'barycentric'`` (default) or ``'heliocentric'``
location : `~astropy.coordinates.EarthLocation`, optional
The location of the observatory to calculate the correction for.
If no location is given, the ``location`` attribute of the Time
object is used
ephemeris : str, optional
Solar system ephemeris to use (e.g., 'builtin', 'jpl'). By default,
use the one set with ``astropy.coordinates.solar_system_ephemeris.set``.
For more information, see `~astropy.coordinates.solar_system_ephemeris`.
Returns
-------
time_offset : `~astropy.time.TimeDelta`
            The time offset between the barycentre or heliocentre and Earth,
            in TDB seconds.  Should be added to the original time to get the
            time in the solar system barycentre or the heliocentre.  The
            conversion to BJD will then also include the relativistic
            correction.
"""
if kind.lower() not in ('barycentric', 'heliocentric'):
raise ValueError("'kind' parameter must be one of 'heliocentric' "
"or 'barycentric'")
if location is None:
if self.location is None:
raise ValueError('An EarthLocation needs to be set or passed '
'in to calculate bary- or heliocentric '
'corrections')
location = self.location
from ..coordinates import (UnitSphericalRepresentation, CartesianRepresentation,
HCRS, ICRS, GCRS, solar_system_ephemeris)
# ensure sky location is ICRS compatible
if not skycoord.is_transformable_to(ICRS()):
raise ValueError("Given skycoord is not transformable to the ICRS")
# get location of observatory in ITRS coordinates at this Time
try:
itrs = location.get_itrs(obstime=self)
except Exception:
raise ValueError("Supplied location does not have a valid `get_itrs` method")
with solar_system_ephemeris.set(ephemeris):
if kind.lower() == 'heliocentric':
# convert to heliocentric coordinates, aligned with ICRS
cpos = itrs.transform_to(HCRS(obstime=self)).cartesian.xyz
else:
# first we need to convert to GCRS coordinates with the correct
# obstime, since ICRS coordinates have no frame time
gcrs_coo = itrs.transform_to(GCRS(obstime=self))
# convert to barycentric (BCRS) coordinates, aligned with ICRS
cpos = gcrs_coo.transform_to(ICRS()).cartesian.xyz
# get unit ICRS vector to star
spos = (skycoord.icrs.represent_as(UnitSphericalRepresentation).
represent_as(CartesianRepresentation).xyz)
# Move X,Y,Z to last dimension, to enable possible broadcasting below.
cpos = np.rollaxis(cpos, 0, cpos.ndim)
spos = np.rollaxis(spos, 0, spos.ndim)
# calculate light travel time correction
tcor_val = (spos * cpos).sum(axis=-1) / const.c
return TimeDelta(tcor_val, scale='tdb')
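    # Illustrative usage sketch (the target coordinates and site name are
    # placeholder assumptions, not tested values):
    #
    #     >>> from astropy.coordinates import SkyCoord, EarthLocation
    #     >>> target = SkyCoord(279.23 * u.deg, 38.78 * u.deg, frame='icrs')
    #     >>> site = EarthLocation.of_site('greenwich')
    #     >>> t = Time('2016-06-04', scale='utc', location=site)
    #     >>> t_bary = t.tdb + t.light_travel_time(target)  # BJD_TDB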
def sidereal_time(self, kind, longitude=None, model=None):
"""Calculate sidereal time.
Parameters
        ----------
kind : str
``'mean'`` or ``'apparent'``, i.e., accounting for precession
only, or also for nutation.
longitude : `~astropy.units.Quantity`, `str`, or `None`; optional
The longitude on the Earth at which to compute the sidereal time.
Can be given as a `~astropy.units.Quantity` with angular units
(or an `~astropy.coordinates.Angle` or
`~astropy.coordinates.Longitude`), or as a name of an
observatory (currently, only ``'greenwich'`` is supported,
equivalent to 0 deg). If `None` (default), the ``lon`` attribute of
the Time object is used.
model : str or `None`; optional
Precession (and nutation) model to use. The available ones are:
- {0}: {1}
- {2}: {3}
If `None` (default), the last (most recent) one from the appropriate
list above is used.
Returns
-------
sidereal time : `~astropy.coordinates.Longitude`
Sidereal time as a quantity with units of hourangle
""" # docstring is formatted below
from ..coordinates import Longitude
if kind.lower() not in SIDEREAL_TIME_MODELS.keys():
raise ValueError('The kind of sidereal time has to be {0}'.format(
' or '.join(sorted(SIDEREAL_TIME_MODELS.keys()))))
available_models = SIDEREAL_TIME_MODELS[kind.lower()]
if model is None:
model = sorted(available_models.keys())[-1]
else:
if model.upper() not in available_models:
raise ValueError(
'Model {0} not implemented for {1} sidereal time; '
'available models are {2}'
.format(model, kind, sorted(available_models.keys())))
if longitude is None:
if self.location is None:
raise ValueError('No longitude is given but the location for '
'the Time object is not set.')
longitude = self.location.lon
elif longitude == 'greenwich':
longitude = Longitude(0., u.degree,
wrap_angle=180.*u.degree)
else:
# sanity check on input
longitude = Longitude(longitude, u.degree,
wrap_angle=180.*u.degree)
gst = self._erfa_sidereal_time(available_models[model.upper()])
return Longitude(gst + longitude, u.hourangle)
if isinstance(sidereal_time.__doc__, str):
sidereal_time.__doc__ = sidereal_time.__doc__.format(
'apparent', sorted(SIDEREAL_TIME_MODELS['apparent'].keys()),
'mean', sorted(SIDEREAL_TIME_MODELS['mean'].keys()))
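    # Illustrative usage (the geodetic location tuple is an assumed example;
    # the result is a Longitude in hourangle):
    #
    #     >>> t = Time('2012-06-18 02:00:00', location=('120d', '40d'))
    #     >>> gst = t.sidereal_time('mean')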
def _erfa_sidereal_time(self, model):
"""Calculate a sidereal time using a IAU precession/nutation model."""
from ..coordinates import Longitude
erfa_function = model['function']
erfa_parameters = [getattr(getattr(self, scale)._time, jd_part)
for scale in model['scales']
for jd_part in ('jd1', 'jd2_filled')]
sidereal_time = erfa_function(*erfa_parameters)
if self.masked:
sidereal_time[self.mask] = np.nan
return Longitude(sidereal_time, u.radian).to(u.hourangle)
def copy(self, format=None):
"""
        Return a fully independent copy of the Time object, optionally changing
the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
In this method a full copy of the internal time arrays will be made.
The internal time arrays are normally not changeable by the user so in
most cases the ``replicate()`` method should be used.
Parameters
----------
format : str, optional
Time format of the copy.
Returns
-------
tm : Time object
Copy of this object
"""
return self._apply('copy', format=format)
def replicate(self, format=None, copy=False):
"""
Return a replica of the Time object, optionally changing the format.
If ``format`` is supplied then the time format of the returned Time
object will be set accordingly, otherwise it will be unchanged from the
original.
If ``copy`` is set to `True` then a full copy of the internal time arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory. The internal time arrays
are normally not changeable by the user so in most cases it should not
be necessary to set ``copy`` to `True`.
The convenience method copy() is available in which ``copy`` is `True`
by default.
Parameters
----------
format : str, optional
Time format of the replica.
copy : bool, optional
Return a true copy instead of using references where possible.
Returns
-------
tm : Time object
Replica of this object
"""
return self._apply('copy' if copy else 'replicate', format=format)
def _apply(self, method, *args, format=None, **kwargs):
"""Create a new time object, possibly applying a method to the arrays.
Parameters
----------
method : str or callable
If string, can be 'replicate' or the name of a relevant
`~numpy.ndarray` method. In the former case, a new time instance
with unchanged internal data is created, while in the latter the
method is applied to the internal ``jd1`` and ``jd2`` arrays, as
well as to possible ``location``, ``_delta_ut1_utc``, and
``_delta_tdb_tt`` arrays.
If a callable, it is directly applied to the above arrays.
Examples: 'copy', '__getitem__', 'reshape', `~numpy.broadcast_to`.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``. If the ``format`` keyword
argument is present, this will be used as the Time format of the
replica.
Examples
--------
Some ways this is used internally::
copy : ``_apply('copy')``
replicate : ``_apply('replicate')``
reshape : ``_apply('reshape', new_shape)``
index or slice : ``_apply('__getitem__', item)``
broadcast : ``_apply(np.broadcast, shape=new_shape)``
"""
new_format = self.format if format is None else format
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
if method == 'replicate':
apply_method = None
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
jd1, jd2 = self._time.jd1, self._time.jd2
if apply_method:
jd1 = apply_method(jd1)
jd2 = apply_method(jd2)
# Get a new instance of our class and set its attributes directly.
tm = super().__new__(self.__class__)
tm._time = TimeJD(jd1, jd2, self.scale, self.precision,
self.in_subfmt, self.out_subfmt, from_jd=True)
# Optional ndarray attributes.
for attr in ('_delta_ut1_utc', '_delta_tdb_tt', 'location',
'precision', 'in_subfmt', 'out_subfmt'):
try:
val = getattr(self, attr)
except AttributeError:
continue
if apply_method:
# Apply the method to any value arrays (though skip if there is
# only a single element and the method would return a view,
# since in that case nothing would change).
if getattr(val, 'size', 1) > 1:
val = apply_method(val)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
val = copy.copy(val)
setattr(tm, attr, val)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
tm.info = self.info
# Make the new internal _time object corresponding to the format
# in the copy. If the format is unchanged this process is lightweight
# and does not create any new arrays.
if new_format not in tm.FORMATS:
raise ValueError('format must be one of {0}'
.format(list(tm.FORMATS)))
NewFormat = tm.FORMATS[new_format]
tm._time = NewFormat(tm._time.jd1, tm._time.jd2,
tm._time._scale, tm.precision,
tm.in_subfmt, tm.out_subfmt,
from_jd=True)
tm._format = new_format
tm.SCALES = self.SCALES
return tm
def __copy__(self):
"""
Overrides the default behavior of the `copy.copy` function in
the python stdlib to behave like `Time.copy`. Does *not* make a
copy of the JD arrays - only copies by reference.
"""
return self.replicate()
def __deepcopy__(self, memo):
"""
Overrides the default behavior of the `copy.deepcopy` function
in the python stdlib to behave like `Time.copy`. Does make a
copy of the JD arrays.
"""
return self.copy()
def _advanced_index(self, indices, axis=None, keepdims=False):
"""Turn argmin, argmax output into an advanced index.
Argmin, argmax output contains indices along a given axis in an array
shaped like the other dimensions. To use this to get values at the
correct location, a list is constructed in which the other axes are
        indexed sequentially.  When ``keepdims`` is ``True``, the net result is
        the same as constructing an index grid with ``np.ogrid`` and then
        replacing the ``axis`` item with ``indices`` with its shape expanded
        at ``axis``.  When ``keepdims`` is ``False``, the result is the same but
        with the ``axis`` dimension removed from all list entries.
        When ``axis`` is ``None``, this calls :func:`~numpy.unravel_index`.
Parameters
----------
indices : array
Output of argmin or argmax.
axis : int or None
axis along which argmin or argmax was used.
keepdims : bool
Whether to construct indices that keep or remove the axis along
which argmin or argmax was used. Default: ``False``.
Returns
-------
advanced_index : list of arrays
Suitable for use as an advanced index.
"""
if axis is None:
return np.unravel_index(indices, self.shape)
ndim = self.ndim
if axis < 0:
axis = axis + ndim
if keepdims and indices.ndim < self.ndim:
indices = np.expand_dims(indices, axis)
return [(indices if i == axis else np.arange(s).reshape(
(1,)*(i if keepdims or i < axis else i-1) + (s,) +
(1,)*(ndim-i-(1 if keepdims or i > axis else 2))))
for i, s in enumerate(self.shape)]
def argmin(self, axis=None, out=None):
"""Return indices of the minimum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmin`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmin` for detailed documentation.
"""
# first get the minimum at normal precision.
jd = self.jd1 + self.jd2
approx = np.min(jd, axis, keepdims=True)
# Approx is very close to the true minimum, and by subtracting it at
# full precision, all numbers near 0 can be represented correctly,
# so we can be sure we get the true minimum.
# The below is effectively what would be done for
# dt = (self - self.__class__(approx, format='jd')).jd
# which translates to:
# approx_jd1, approx_jd2 = day_frac(approx, 0.)
# dt = (self.jd1 - approx_jd1) + (self.jd2 - approx_jd2)
dt = (self.jd1 - approx) + self.jd2
return dt.argmin(axis, out)
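    # Why the two-step approach matters (illustrative numbers): near
    # JD ~ 2.45e6 the spacing of representable float64 values is ~5e-10 day,
    # i.e. tens of microseconds, so two times a few microseconds apart can
    # collapse to the same ``jd1 + jd2`` while ``(jd1 - approx) + jd2``
    # still separates them.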
def argmax(self, axis=None, out=None):
"""Return indices of the maximum values along the given axis.
This is similar to :meth:`~numpy.ndarray.argmax`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used. See :func:`~numpy.argmax` for detailed documentation.
"""
# For procedure, see comment on argmin.
jd = self.jd1 + self.jd2
approx = np.max(jd, axis, keepdims=True)
dt = (self.jd1 - approx) + self.jd2
return dt.argmax(axis, out)
def argsort(self, axis=-1):
"""Returns the indices that would sort the time array.
This is similar to :meth:`~numpy.ndarray.argsort`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied. Internally,
it uses :func:`~numpy.lexsort`, and hence no sort method can be chosen.
"""
jd_approx = self.jd
jd_remainder = (self - self.__class__(jd_approx, format='jd')).jd
if axis is None:
return np.lexsort((jd_remainder.ravel(), jd_approx.ravel()))
else:
return np.lexsort(keys=(jd_remainder, jd_approx), axis=axis)
def min(self, axis=None, out=None, keepdims=False):
"""Minimum along a given axis.
This is similar to :meth:`~numpy.ndarray.min`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.min``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmin(axis), axis, keepdims)]
def max(self, axis=None, out=None, keepdims=False):
"""Maximum along a given axis.
This is similar to :meth:`~numpy.ndarray.max`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used, and that corresponding attributes are copied.
Note that the ``out`` argument is present only for compatibility with
``np.max``; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return self[self._advanced_index(self.argmax(axis), axis, keepdims)]
def ptp(self, axis=None, out=None, keepdims=False):
"""Peak to peak (maximum - minimum) along a given axis.
This is similar to :meth:`~numpy.ndarray.ptp`, but adapted to ensure
that the full precision given by the two doubles ``jd1`` and ``jd2``
is used.
Note that the ``out`` argument is present only for compatibility with
`~numpy.ptp`; since `Time` instances are immutable, it is not possible
to have an actual ``out`` to store the result in.
"""
if out is not None:
raise ValueError("Since `Time` instances are immutable, ``out`` "
"cannot be set to anything but ``None``.")
return (self.max(axis, keepdims=keepdims) -
self.min(axis, keepdims=keepdims))
def sort(self, axis=-1):
"""Return a copy sorted along the specified axis.
This is similar to :meth:`~numpy.ndarray.sort`, but internally uses
indexing with :func:`~numpy.lexsort` to ensure that the full precision
given by the two doubles ``jd1`` and ``jd2`` is kept, and that
corresponding attributes are properly sorted and copied as well.
Parameters
----------
axis : int or None
Axis to be sorted. If ``None``, the flattened array is sorted.
By default, sort over the last axis.
"""
return self[self._advanced_index(self.argsort(axis), axis,
keepdims=True)]
@property
def cache(self):
"""
Return the cache associated with this instance.
"""
return self._time.cache
@cache.deleter
def cache(self):
del self._time.cache
def __getattr__(self, attr):
"""
Get dynamic attributes to output format or do timescale conversion.
"""
if attr in self.SCALES and self.scale is not None:
cache = self.cache['scale']
if attr not in cache:
if attr == self.scale:
tm = self
else:
tm = self.replicate()
tm._set_scale(attr)
if tm.shape:
# Prevent future modification of cached array-like object
tm.writeable = False
cache[attr] = tm
return cache[attr]
elif attr in self.FORMATS:
cache = self.cache['format']
if attr not in cache:
if attr == self.format:
tm = self
else:
tm = self.replicate(format=attr)
value = tm._shaped_like_input(tm._time.to_value(parent=tm))
cache[attr] = value
return cache[attr]
elif attr in TIME_SCALES: # allowed ones done above (self.SCALES)
if self.scale is None:
raise ScaleValueError("Cannot convert TimeDelta with "
"undefined scale to any defined scale.")
else:
raise ScaleValueError("Cannot convert {0} with scale "
"'{1}' to scale '{2}'"
.format(self.__class__.__name__,
self.scale, attr))
else:
# Should raise AttributeError
return self.__getattribute__(attr)
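    # Illustrative dynamic attributes (cached after first access):
    #
    #     >>> t = Time('2010-01-01', scale='utc')
    #     >>> t.tt       # same instant, TT scale (a new Time instance)
    #     <Time object: scale='tt' format='iso' value=2010-01-01 00:01:06.184>
    #     >>> t.mjd      # value in another format (a plain float here)
    #     55197.0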
@override__dir__
def __dir__(self):
result = set(self.SCALES)
result.update(self.FORMATS)
return result
def _match_shape(self, val):
"""
Ensure that `val` is matched to length of self. If val has length 1
then broadcast, otherwise cast to double and make sure shape matches.
"""
val = _make_array(val, copy=True) # be conservative and copy
if val.size > 1 and val.shape != self.shape:
try:
# check the value can be broadcast to the shape of self.
val = np.broadcast_to(val, self.shape, subok=True)
except Exception:
raise ValueError('Attribute shape must match or be '
'broadcastable to that of Time object. '
'Typically, give either a single value or '
'one for each time.')
return val
def get_delta_ut1_utc(self, iers_table=None, return_status=False):
"""Find UT1 - UTC differences by interpolating in IERS Table.
Parameters
----------
iers_table : ``astropy.utils.iers.IERS`` table, optional
Table containing UT1-UTC differences from IERS Bulletins A
and/or B. If `None`, use default version (see
``astropy.utils.iers``)
return_status : bool
Whether to return status values. If `False` (default), iers
raises `IndexError` if any time is out of the range
covered by the IERS table.
Returns
-------
ut1_utc : float or float array
UT1-UTC, interpolated in IERS Table
status : int or int array
            Status values (if ``return_status`` is `True`)::
``astropy.utils.iers.FROM_IERS_B``
``astropy.utils.iers.FROM_IERS_A``
``astropy.utils.iers.FROM_IERS_A_PREDICTION``
``astropy.utils.iers.TIME_BEFORE_IERS_RANGE``
``astropy.utils.iers.TIME_BEYOND_IERS_RANGE``
Notes
-----
In normal usage, UT1-UTC differences are calculated automatically
on the first instance ut1 is needed.
Examples
--------
To check in code whether any times are before the IERS table range::
>>> from astropy.utils.iers import TIME_BEFORE_IERS_RANGE
>>> t = Time(['1961-01-01', '2000-01-01'], scale='utc')
>>> delta, status = t.get_delta_ut1_utc(return_status=True)
>>> status == TIME_BEFORE_IERS_RANGE
array([ True, False]...)
"""
if iers_table is None:
from ..utils.iers import IERS
iers_table = IERS.open()
return iers_table.ut1_utc(self.utc, return_status=return_status)
# Property for ERFA DUT arg = UT1 - UTC
def _get_delta_ut1_utc(self, jd1=None, jd2=None):
"""
Get ERFA DUT arg = UT1 - UTC. This getter takes optional jd1 and
jd2 args because it gets called that way when converting time scales.
        If delta_ut1_utc is not yet set, this will interpolate it from
        the IERS table.
"""
# Sec. 4.3.1: the arg DUT is the quantity delta_UT1 = UT1 - UTC in
# seconds. It is obtained from tables published by the IERS.
if not hasattr(self, '_delta_ut1_utc'):
from ..utils.iers import IERS_Auto
iers_table = IERS_Auto.open()
# jd1, jd2 are normally set (see above), except if delta_ut1_utc
            # is accessed directly; ensure we behave as expected in that case
if jd1 is None:
self_utc = self.utc
jd1, jd2 = self_utc._time.jd1, self_utc._time.jd2_filled
scale = 'utc'
else:
scale = self.scale
# interpolate UT1-UTC in IERS table
delta = iers_table.ut1_utc(jd1, jd2)
# if we interpolated using UT1 jds, we may be off by one
# second near leap seconds (and very slightly off elsewhere)
if scale == 'ut1':
# calculate UTC using the offset we got; the ERFA routine
# is tolerant of leap seconds, so will do this right
jd1_utc, jd2_utc = erfa.ut1utc(jd1, jd2, delta.to_value(u.s))
# calculate a better estimate using the nearly correct UTC
delta = iers_table.ut1_utc(jd1_utc, jd2_utc)
self._set_delta_ut1_utc(delta)
return self._delta_ut1_utc
def _set_delta_ut1_utc(self, val):
del self.cache
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_ut1_utc = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_ut1_utc = property(_get_delta_ut1_utc, _set_delta_ut1_utc)
"""UT1 - UTC time scale offset"""
# Property for ERFA DTR arg = TDB - TT
def _get_delta_tdb_tt(self, jd1=None, jd2=None):
if not hasattr(self, '_delta_tdb_tt'):
# If jd1 and jd2 are not provided (which is the case for property
# attribute access) then require that the time scale is TT or TDB.
# Otherwise the computations here are not correct.
if jd1 is None or jd2 is None:
if self.scale not in ('tt', 'tdb'):
raise ValueError('Accessing the delta_tdb_tt attribute '
'is only possible for TT or TDB time '
'scales')
else:
jd1 = self._time.jd1
jd2 = self._time.jd2_filled
# First go from the current input time (which is either
# TDB or TT) to an approximate UT1. Since TT and TDB are
# pretty close (few msec?), assume TT. Similarly, since the
# UT1 terms are very small, use UTC instead of UT1.
njd1, njd2 = erfa.tttai(jd1, jd2)
njd1, njd2 = erfa.taiutc(njd1, njd2)
# subtract 0.5, so UT is fraction of the day from midnight
ut = day_frac(njd1 - 0.5, njd2)[1]
if self.location is None:
from ..coordinates import EarthLocation
location = EarthLocation.from_geodetic(0., 0., 0.)
else:
location = self.location
# Geodetic params needed for d_tdb_tt()
lon = location.lon
rxy = np.hypot(location.x, location.y)
z = location.z
self._delta_tdb_tt = erfa.dtdb(
jd1, jd2, ut, lon.to_value(u.radian),
rxy.to_value(u.km), z.to_value(u.km))
return self._delta_tdb_tt
def _set_delta_tdb_tt(self, val):
del self.cache
if hasattr(val, 'to'): # Matches Quantity but also TimeDelta.
val = val.to(u.second).value
val = self._match_shape(val)
self._delta_tdb_tt = val
# Note can't use @property because _get_delta_tdb_tt is explicitly
# called with the optional jd1 and jd2 args.
delta_tdb_tt = property(_get_delta_tdb_tt, _set_delta_tdb_tt)
"""TDB - TT time scale offset"""
def __sub__(self, other):
if not isinstance(other, Time):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# Tdelta - something is dealt with in TimeDelta, so we have
# T - Tdelta = T
# T - T = Tdelta
other_is_delta = isinstance(other, TimeDelta)
# we need a constant scale to calculate, which is guaranteed for
# TimeDelta, but not for Time (which can be UTC)
if other_is_delta: # T - Tdelta
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale('tai')
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError("Cannot subtract Time and TimeDelta instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
if hasattr(out, attr):
delattr(out, attr)
else: # T - T
# the scales should be compatible (e.g., cannot convert TDB to LOCAL)
if other.scale not in self.SCALES:
raise TypeError("Cannot subtract Time instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
self_time = (self._time if self.scale in TIME_DELTA_SCALES
else self.tai._time)
# set up TimeDelta, subtraction to be done shortly
out = TimeDelta(self_time.jd1, self_time.jd2, format='jd',
scale=self_time.scale)
if other.scale != out.scale:
other = getattr(other, out.scale)
jd1 = out._time.jd1 - other._time.jd1
jd2 = out._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
if other_is_delta:
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
def __add__(self, other):
if not isinstance(other, Time):
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# Tdelta + something is dealt with in TimeDelta, so we have
# T + Tdelta = T
# T + T = error
if not isinstance(other, TimeDelta):
raise OperandTypeError(self, other, '+')
# ideally, we calculate in the scale of the Time item, since that is
# what we want the output in, but this may not be possible, since
# TimeDelta cannot be converted arbitrarily
out = self.replicate()
if self.scale in other.SCALES:
if other.scale not in (out.scale, None):
other = getattr(other, out.scale)
else:
if other.scale is None:
out._set_scale('tai')
else:
if self.scale not in TIME_TYPES[other.scale]:
raise TypeError("Cannot add Time and TimeDelta instances "
"with scales '{0}' and '{1}'"
.format(self.scale, other.scale))
out._set_scale(other.scale)
# remove attributes that are invalidated by changing time
for attr in ('_delta_ut1_utc', '_delta_tdb_tt'):
if hasattr(out, attr):
delattr(out, attr)
jd1 = out._time.jd1 + other._time.jd1
jd2 = out._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
# Go back to left-side scale if needed
out._set_scale(self.scale)
return out
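    # Illustrative arithmetic (Time - Time gives a TimeDelta; Time plus or
    # minus a TimeDelta gives a Time):
    #
    #     >>> dt = Time('2010-01-02') - Time('2010-01-01')
    #     >>> dt.jd
    #     1.0
    #     >>> (Time('2010-01-01') + dt).iso
    #     '2010-01-02 00:00:00.000'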
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
out = self.__sub__(other)
return -out
def _time_comparison(self, other, op):
"""If other is of same class as self, compare difference in self.scale.
Otherwise, return NotImplemented
"""
if other.__class__ is not self.__class__:
try:
other = self.__class__(other, scale=self.scale)
except Exception:
# Let other have a go.
return NotImplemented
        if (self.scale is not None and self.scale not in other.SCALES or
                other.scale is not None and other.scale not in self.SCALES):
# Other will also not be able to do it, so raise a TypeError
# immediately, allowing us to explain why it doesn't work.
raise TypeError("Cannot compare {0} instances with scales "
"'{1}' and '{2}'".format(self.__class__.__name__,
self.scale, other.scale))
if self.scale is not None and other.scale is not None:
other = getattr(other, self.scale)
return op((self.jd1 - other.jd1) + (self.jd2 - other.jd2), 0.)
def __lt__(self, other):
return self._time_comparison(other, operator.lt)
def __le__(self, other):
return self._time_comparison(other, operator.le)
def __eq__(self, other):
"""
If other is an incompatible object for comparison, return `False`.
Otherwise, return `True` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.eq)
def __ne__(self, other):
"""
If other is an incompatible object for comparison, return `True`.
Otherwise, return `False` if the time difference between self and
other is zero.
"""
return self._time_comparison(other, operator.ne)
def __gt__(self, other):
return self._time_comparison(other, operator.gt)
def __ge__(self, other):
return self._time_comparison(other, operator.ge)
def to_datetime(self, timezone=None):
tm = self.replicate(format='datetime')
return tm._shaped_like_input(tm._time.to_value(timezone))
to_datetime.__doc__ = TimeDatetime.to_value.__doc__
class TimeDelta(Time):
"""
Represent the time difference between two times.
A TimeDelta object is initialized with one or more times in the ``val``
argument. The input times in ``val`` must conform to the specified
``format``. The optional ``val2`` time input should be supplied only for
numeric input formats (e.g. JD) where very high precision (better than
64-bit precision) is required.
The allowed values for ``format`` can be listed with::
>>> list(TimeDelta.FORMATS)
['sec', 'jd', 'datetime']
Note that for time differences, the scale can be among three groups:
geocentric ('tai', 'tt', 'tcg'), barycentric ('tcb', 'tdb'), and rotational
('ut1'). Within each of these, the scales for time differences are the
same. Conversion between geocentric and barycentric is possible, as there
is only a scale factor change, but one cannot convert to or from 'ut1', as
this requires knowledge of the actual times, not just their difference. For
a similar reason, 'utc' is not a valid scale for a time difference: a UTC
day is not always 86400 seconds.
Parameters
----------
val : sequence, ndarray, number, `~astropy.units.Quantity` or `~astropy.time.TimeDelta` object
Value(s) to initialize the time difference(s). Any quantities will
be converted appropriately (with care taken to avoid rounding
errors for regular time units).
val2 : sequence, ndarray, number, or `~astropy.units.Quantity`; optional
Additional values, as needed to preserve precision.
format : str, optional
Format of input value(s)
scale : str, optional
Time scale of input value(s), must be one of the following values:
('tdb', 'tt', 'ut1', 'tcg', 'tcb', 'tai'). If not given (or
``None``), the scale is arbitrary; when added or subtracted from a
``Time`` instance, it will be used without conversion.
copy : bool, optional
Make a copy of the input values
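
    Examples
    --------
    An illustrative construction from a `~astropy.units.Quantity`
    (3 hours is 0.125 day)::

        >>> import astropy.units as u
        >>> TimeDelta(3. * u.hr).jd
        0.125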
"""
SCALES = TIME_DELTA_SCALES
"""List of time delta scales."""
FORMATS = TIME_DELTA_FORMATS
"""Dict of time delta formats."""
info = TimeDeltaInfo()
def __init__(self, val, val2=None, format=None, scale=None, copy=False):
if isinstance(val, timedelta) and not format:
format = 'datetime'
if isinstance(val, TimeDelta):
if scale is not None:
self._set_scale(scale)
else:
if format is None:
format = 'jd'
self._init_from_vals(val, val2, format, scale, copy)
if scale is not None:
self.SCALES = TIME_DELTA_TYPES[scale]
def replicate(self, *args, **kwargs):
out = super().replicate(*args, **kwargs)
out.SCALES = self.SCALES
return out
def to_datetime(self):
"""
Convert to ``datetime.timedelta`` object.
"""
tm = self.replicate(format='datetime')
return tm._shaped_like_input(tm._time.value)
def _set_scale(self, scale):
"""
This is the key routine that actually does time scale conversions.
This is not public and not connected to the read-only scale property.
"""
if scale == self.scale:
return
if scale not in self.SCALES:
raise ValueError("Scale {0!r} is not in the allowed scales {1}"
.format(scale, sorted(self.SCALES)))
# For TimeDelta, there can only be a change in scale factor,
# which is written as time2 - time1 = scale_offset * time1
scale_offset = SCALE_OFFSETS[(self.scale, scale)]
if scale_offset is None:
self._time.scale = scale
else:
jd1, jd2 = self._time.jd1, self._time.jd2
offset1, offset2 = day_frac(jd1, jd2, factor=scale_offset)
self._time = self.FORMATS[self.format](
jd1 + offset1, jd2 + offset2, scale,
self.precision, self.in_subfmt,
self.out_subfmt, from_jd=True)
def __add__(self, other):
# only deal with TimeDelta + TimeDelta
if isinstance(other, Time):
if not isinstance(other, TimeDelta):
return other.__add__(self)
else:
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
        if (self.scale is not None and self.scale not in other.SCALES or
                other.scale is not None and other.scale not in self.SCALES):
raise TypeError("Cannot add TimeDelta instances with scales "
"'{0}' and '{1}'".format(self.scale, other.scale))
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = self._time.jd1 + other._time.jd1
jd2 = self._time.jd2 + other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __sub__(self, other):
# only deal with TimeDelta - TimeDelta
if isinstance(other, Time):
if not isinstance(other, TimeDelta):
raise OperandTypeError(self, other, '-')
else:
try:
other = TimeDelta(other)
except Exception:
return NotImplemented
# the scales should be compatible (e.g., cannot convert TDB to TAI)
        if (self.scale is not None and self.scale not in other.SCALES or
                other.scale is not None and other.scale not in self.SCALES):
raise TypeError("Cannot subtract TimeDelta instances with scales "
"'{0}' and '{1}'".format(self.scale, other.scale))
# adjust the scale of other if the scale of self is set (or no scales)
if self.scale is not None or other.scale is None:
out = self.replicate()
if other.scale is not None:
other = getattr(other, self.scale)
else:
out = other.replicate()
jd1 = self._time.jd1 - other._time.jd1
jd2 = self._time.jd2 - other._time.jd2
out._time.jd1, out._time.jd2 = day_frac(jd1, jd2)
return out
def __neg__(self):
"""Negation of a `TimeDelta` object."""
new = self.copy()
new._time.jd1 = -self._time.jd1
new._time.jd2 = -self._time.jd2
return new
def __abs__(self):
"""Absolute value of a `TimeDelta` object."""
jd1, jd2 = self._time.jd1, self._time.jd2
negative = jd1 + jd2 < 0
new = self.copy()
new._time.jd1 = np.where(negative, -jd1, jd1)
new._time.jd2 = np.where(negative, -jd2, jd2)
return new
def __mul__(self, other):
"""Multiplication of `TimeDelta` objects by numbers/arrays."""
# check needed since otherwise the self.jd1 * other multiplication
# would enter here again (via __rmul__)
if isinstance(other, Time):
raise OperandTypeError(self, other, '*')
try: # convert to straight float if dimensionless quantity
other = other.to(1)
except Exception:
pass
try:
jd1, jd2 = day_frac(self.jd1, self.jd2, factor=other)
out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
except Exception as err: # try downgrading self to a quantity
try:
return self.to(u.day) * other
except Exception:
raise err
if self.format != 'jd':
out = out.replicate(format=self.format)
return out
def __rmul__(self, other):
"""Multiplication of numbers/arrays with `TimeDelta` objects."""
return self.__mul__(other)
def __div__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
return self.__truediv__(other)
def __rdiv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
return self.__rtruediv__(other)
def __truediv__(self, other):
"""Division of `TimeDelta` objects by numbers/arrays."""
        # cannot do __mul__(1./other) as that loses precision
        try:  # convert to straight float if dimensionless quantity
            other = other.to(1)
        except Exception:
            pass
        try:
            jd1, jd2 = day_frac(self.jd1, self.jd2, divisor=other)
out = TimeDelta(jd1, jd2, format='jd', scale=self.scale)
except Exception as err: # try downgrading self to a quantity
try:
return self.to(u.day) / other
except Exception:
raise err
if self.format != 'jd':
out = out.replicate(format=self.format)
return out
def __rtruediv__(self, other):
"""Division by `TimeDelta` objects of numbers/arrays."""
return other / self.to(u.day)
def to(self, *args, **kwargs):
return u.Quantity(self._time.jd1 + self._time.jd2,
u.day).to(*args, **kwargs)
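    # Illustrative conversion to a Quantity (one day is 86400 SI seconds):
    #
    #     >>> TimeDelta(1., format='jd').to(u.s)
    #     <Quantity 86400. s>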
def _make_value_equivalent(self, item, value):
"""Coerce setitem value into an equivalent TimeDelta object"""
if not isinstance(value, TimeDelta):
try:
value = self.__class__(value, scale=self.scale)
except Exception:
try:
value = self.__class__(value, scale=self.scale, format=self.format)
except Exception as err:
raise ValueError('cannot convert value to a compatible TimeDelta '
'object: {}'.format(err))
return value
class ScaleValueError(Exception):
pass
def _make_array(val, copy=False):
"""
Take ``val`` and convert/reshape to an array. If ``copy`` is `True`
then copy input values.
Returns
-------
val : ndarray
Array version of ``val``.
"""
val = np.array(val, copy=copy, subok=True)
# Allow only float64, string or object arrays as input
# (object is for datetime, maybe add more specific test later?)
# This also ensures the right byteorder for float64 (closes #2942).
if not (val.dtype == np.float64 or val.dtype.kind in 'OSUa'):
val = np.asanyarray(val, dtype=np.float64)
return val
def _check_for_masked_and_fill(val, val2):
"""
If ``val`` or ``val2`` are masked arrays then fill them and cast
to ndarray.
Returns a mask corresponding to the logical-or of masked elements
    in ``val`` and ``val2``.  If neither is masked then the returned
    ``mask`` is ``False``.
If either ``val`` or ``val2`` are masked then they are replaced
with filled versions of themselves.
Parameters
----------
val : ndarray or MaskedArray
Input val
val2 : ndarray or MaskedArray
Input val2
Returns
-------
    mask : bool or bool ndarray
        ``False`` if neither input was masked, else the combined mask.
    val, val2 : ndarray
        Filled (unmasked) versions of the inputs.
"""
def get_as_filled_ndarray(mask, val):
"""
Fill the given MaskedArray ``val`` from the first non-masked
element in the array. This ensures that upstream Time initialization
will succeed.
Note that nothing happens if there are no masked elements.
"""
fill_value = None
if np.any(val.mask):
# Final mask is the logical-or of inputs
mask = mask | val.mask
# First unmasked element. If all elements are masked then
# use fill_value=None from above which will use val.fill_value.
# As long as the user has set this appropriately then all will
# be fine.
val_unmasked = val.compressed() # 1-d ndarray of unmasked values
if len(val_unmasked) > 0:
fill_value = val_unmasked[0]
# Fill the input ``val``. If fill_value is None then this just returns
# an ndarray view of val (no copy).
val = val.filled(fill_value)
return mask, val
mask = False
if isinstance(val, np.ma.MaskedArray):
mask, val = get_as_filled_ndarray(mask, val)
if isinstance(val2, np.ma.MaskedArray):
mask, val2 = get_as_filled_ndarray(mask, val2)
return mask, val, val2
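# Illustrative sketch of the fill behaviour (assumes numpy imported as np):
#
#     >>> v = np.ma.MaskedArray([1., 2., 3.], mask=[False, True, False])
#     >>> mask, v_filled, _ = _check_for_masked_and_fill(v, None)
#     >>> v_filled            # masked element replaced by first unmasked one
#     array([1., 1., 3.])
#     >>> mask
#     array([False,  True, False])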
class OperandTypeError(TypeError):
def __init__(self, left, right, op=None):
op_string = '' if op is None else ' for {0}'.format(op)
super().__init__(
"Unsupported operand type(s){0}: "
"'{1}' and '{2}'".format(op_string,
left.__class__.__name__,
right.__class__.__name__))
|
d7085731c27893e89dca5c8b60f08804c682930b8200b7641925c7aa076135a7 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from .core import (UnitsError, UnitConversionError, UnitTypeError,
dimensionless_unscaled, get_current_unit_registry)
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity."""
try:
scale = from_unit._to(to_unit)
except UnitsError:
return from_unit._apply_equivalencies(
from_unit, to_unit, get_current_unit_registry().equivalencies)
except AttributeError:
raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'"
.format(from_unit, to_unit))
if scale == 1.:
return None
else:
return lambda val: scale * val
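# Illustrative sketch (comments only, not executed here), assuming
# ``from astropy import units as u``:
#   >>> get_converter(u.m, u.m) is None   # no scaling needed
#   True
#   >>> get_converter(u.km, u.m)(1.)      # scale by 1000
#   1000.0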
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
# By default, we try adjusting unit2 to unit1, so that the result will
# be unit1 as well. But if there is no second unit, we have to try
# adjusting unit1 (to dimensionless, see below).
if unit2 is None:
if unit1 is None:
# No units for any input -- e.g., np.add(a1, a2, out=q)
return converters, dimensionless_unscaled
changeable = 0
# swap units.
unit2 = unit1
unit1 = None
elif unit2 is unit1:
        # ensure the identical-units case is fast ("==" is slow, so avoid it).
return converters, unit1
else:
changeable = 1
# Try to get a converter from unit2 to unit1.
if unit1 is None:
try:
converters[changeable] = get_converter(unit2,
dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[1-changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
"Can only apply '{0}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
return converters, unit1
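# Illustrative sketch (comments only, not executed here): for np.add,
# the second input gets converted to the unit of the first.
#   >>> converters, unit = get_converters_and_unit(np.add, u.km, u.m)
#   >>> unit
#   Unit("km")
#   >>> converters[1](1000.)   # m -> km
#   1.0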
def can_have_arbitrary_unit(value):
"""Test whether the items in value can have arbitrary units
Numbers whose value does not change upon a unit change, i.e.,
zero, infinity, or not-a-number
Parameters
----------
value : number or array
Returns
-------
`True` if each member is either zero or not finite, `False` otherwise
"""
return np.all(np.logical_or(np.equal(value, 0.), ~np.isfinite(value)))
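# Illustrative sketch (comments only, not executed here):
#   >>> can_have_arbitrary_unit(0.)
#   True
#   >>> can_have_arbitrary_unit([0., np.inf, np.nan])
#   True
#   >>> can_have_arbitrary_unit([0., 1.])
#   False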
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
def helper_square(f, unit):
return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit ** -1 if unit is not None else dimensionless_unscaled)
one_half = 0.5 # faster than Fraction(1, 2)
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
return ([None], unit ** one_half if unit is not None
else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit ** one_third if unit is not None
else dimensionless_unscaled))
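# Illustrative sketch (comments only, not executed here): roots take the
# corresponding power of the unit.
#   >>> helper_sqrt(np.sqrt, u.m ** 2)
#   ([None], Unit("m"))
#   >>> helper_cbrt(np.cbrt, u.m ** 3)
#   ([None], Unit("m"))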
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return ([get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled))
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)],
dimensionless_unscaled)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_dimensionless_to_radian(f, unit):
from .si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_degree_to_radian(f, unit):
from .si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
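# Illustrative sketch (comments only, not executed here): degree input
# needs no scaling, and the result is in radians.
#   >>> helper_degree_to_radian(np.radians, u.deg)
#   ([None], Unit("rad"))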
def helper_radian_to_degree(f, unit):
from .si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_dimensionless(f, unit):
from .si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError("Can only apply '{0}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
return [None], (None, None)
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a "
"dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply 'heaviside' function with a "
"dimensionless second argument.")
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (get_converter(unit1, dimensionless_unscaled)
if unit1 is not None else None)
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
return ([converter1, converter2], dimensionless_unscaled)
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few microseconds; we keep the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from .si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
def helper_degree_to_dimensionless(f, unit):
from .si import degree
try:
return [get_converter(unit, degree)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_degree_minute_second_to_radian(f, unit1, unit2, unit3):
from .si import degree, arcmin, arcsec, radian
try:
return [get_converter(unit1, degree),
get_converter(unit2, arcmin),
get_converter(unit3, arcsec)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
# list of ufuncs:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs
UFUNC_HELPERS = {}
UNSUPPORTED_UFUNCS = {
np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
np.logical_not}
for name in 'isnat', 'gcd', 'lcm':
# isnat was introduced in numpy 1.14, gcd+lcm in 1.15
ufunc = getattr(np, name, None)
if isinstance(ufunc, np.ufunc):
UNSUPPORTED_UFUNCS |= {ufunc}
# SINGLE ARGUMENT UFUNCS
# ufuncs that return a boolean and do not care about the unit
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
np.spacing, np.rint, np.floor, np.ceil, np.trunc,
np.positive)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
np.log10, np.log2, np.log1p)
# As found out in gh-7058, some numpy 1.13 conda installations also provide
# np.erf, even though upstream doesn't have it. We include it if present.
if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc):
dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh,
np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
np.minimum, np.fmin, np.fmax, np.nextafter,
np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
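# Illustrative sketch (comments only, not executed here): the registry
# maps each supported ufunc to its helper.
#   >>> UFUNC_HELPERS[np.multiply](np.multiply, u.km, u.s ** -1)
#   ([None, None], Unit("km / s"))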
# UFUNCS FROM SCIPY.SPECIAL
# available ufuncs in this module are at
# https://docs.scipy.org/doc/scipy/reference/special.html
try:
import scipy
import scipy.special as sps
except ImportError:
pass
else:
from ..utils import minversion
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_sps_ufuncs = [
sps.erf, sps.gamma, sps.gammasgn,
sps.psi, sps.rgamma, sps.erfc, sps.erfcx, sps.erfi, sps.wofz,
sps.dawsn, sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2,
sps.exp10, sps.j0, sps.j1, sps.y0, sps.y1, sps.i0, sps.i0e, sps.i1,
sps.i1e, sps.k0, sps.k0e, sps.k1, sps.k1e, sps.itj0y0,
sps.it2j0y0, sps.iti0k0, sps.it2i0k0]
# TODO: Revert https://github.com/astropy/astropy/pull/7219 when astropy
# requires scipy>=0.18.
# See https://github.com/astropy/astropy/issues/7159
if minversion(scipy, "0.18"):
dimensionless_to_dimensionless_sps_ufuncs.append(sps.loggamma)
for ufunc in dimensionless_to_dimensionless_sps_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require input in degrees and give dimensionless output
degree_to_dimensionless_sps_ufuncs = (
sps.cosdg, sps.sindg, sps.tandg, sps.cotdg)
for ufunc in degree_to_dimensionless_sps_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_dimensionless
# ufuncs that require 2 dimensionless inputs and give dimensionless output.
# note: sps.jv and sps.jn are aliases in some scipy versions, which will
# cause the same key to be written twice, but since both are handled by the
# same helper there is no harm done.
two_arg_dimensionless_sps_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e)
for ufunc in two_arg_dimensionless_sps_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[sps.cbrt] = helper_cbrt
UFUNC_HELPERS[sps.radian] = helper_degree_minute_second_to_radian
def converters_and_unit(function, method, *args):
"""Determine the required converters and the unit of the ufunc result.
Converters are functions required to convert to a ufunc's expected unit,
e.g., radian for np.sin; or to ensure units of two inputs are consistent,
e.g., for np.add. In these examples, the unit of the result would be
dimensionless_unscaled for np.sin, and the same consistent unit for np.add.
Parameters
----------
function : `~numpy.ufunc`
Numpy universal function
method : str
Method with which the function is evaluated, e.g.,
'__call__', 'reduce', etc.
*args : Quantity or other ndarray subclass
Input arguments to the function
Raises
------
TypeError : when the specified function cannot be used with Quantities
(e.g., np.logical_or), or when the routine does not know how to handle
the specified function (in which case an issue should be raised on
https://github.com/astropy/astropy).
UnitTypeError : when the conversion to the required (or consistent) units
is not possible.
"""
# Check whether we support this ufunc, by getting the helper function
# (defined above) which returns a list of function(s) that convert the
# input(s) to the unit required for the ufunc, as well as the unit the
# result will have (a tuple of units if there are multiple outputs).
try:
ufunc_helper = UFUNC_HELPERS[function]
except KeyError:
if function in UNSUPPORTED_UFUNCS:
raise TypeError("Cannot use function '{0}' with quantities"
.format(function.__name__))
else:
raise TypeError("Unknown ufunc {0}. Please raise issue on "
"https://github.com/astropy/astropy"
.format(function.__name__))
if method == '__call__' or (method == 'outer' and function.nin == 2):
# Find out the units of the arguments passed to the ufunc; usually,
# at least one is a quantity, but for two-argument ufuncs, the second
# could also be a Numpy array, etc. These are given unit=None.
units = [getattr(arg, 'unit', None) for arg in args]
# Determine possible conversion functions, and the result unit.
converters, result_unit = ufunc_helper(function, *units)
if any(converter is False for converter in converters):
# for two-argument ufuncs with a quantity and a non-quantity,
# the quantity normally needs to be dimensionless, *except*
# if the non-quantity can have arbitrary unit, i.e., when it
# is all zero, infinity or NaN. In that case, the non-quantity
# can just have the unit of the quantity
# (this allows, e.g., `q > 0.` independent of unit)
maybe_arbitrary_arg = args[converters.index(False)]
try:
if can_have_arbitrary_unit(maybe_arbitrary_arg):
converters = [None, None]
else:
raise UnitsError("Can only apply '{0}' function to "
"dimensionless quantities when other "
"argument is not a quantity (unless the "
"latter is all zero/infinity/nan)"
.format(function.__name__))
except TypeError:
                # can_have_arbitrary_unit failed: arg could not be compared
# with zero or checked to be finite. Then, ufunc will fail too.
raise TypeError("Unsupported operand type(s) for ufunc {0}: "
"'{1}' and '{2}'"
.format(function.__name__,
args[0].__class__.__name__,
args[1].__class__.__name__))
# In the case of np.power and np.float_power, the unit itself needs to
# be modified by an amount that depends on one of the input values,
# so we need to treat this as a special case.
# TODO: find a better way to deal with this.
if result_unit is False:
if units[0] is None or units[0] == dimensionless_unscaled:
result_unit = dimensionless_unscaled
else:
if units[1] is None:
p = args[1]
else:
p = args[1].to(dimensionless_unscaled).value
try:
result_unit = units[0] ** p
except ValueError as exc:
# Changing the unit does not work for, e.g., array-shaped
# power, but this is OK if we're (scaled) dimensionless.
try:
converters[0] = units[0]._get_converter(
dimensionless_unscaled)
except UnitConversionError:
raise exc
else:
result_unit = dimensionless_unscaled
else: # methods for which the unit should stay the same
nin = function.nin
unit = getattr(args[0], 'unit', None)
if method == 'at' and nin <= 2:
if nin == 1:
units = [unit]
else:
units = [unit, getattr(args[2], 'unit', None)]
converters, result_unit = ufunc_helper(function, *units)
# ensure there is no 'converter' for indices (2nd argument)
converters.insert(1, None)
elif method in {'reduce', 'accumulate', 'reduceat'} and nin == 2:
converters, result_unit = ufunc_helper(function, unit, unit)
converters = converters[:1]
if method == 'reduceat':
# add 'scale' for indices (2nd argument)
converters += [None]
else:
if method in {'reduce', 'accumulate',
'reduceat', 'outer'} and nin != 2:
raise ValueError("{0} only supported for binary functions"
.format(method))
raise TypeError("Unexpected ufunc method {0}. If this should "
"work, please raise an issue on"
"https://github.com/astropy/astropy"
.format(method))
# for all but __call__ method, scaling is not allowed
if unit is not None and result_unit is None:
raise TypeError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as the result is not a "
"Quantity.".format(function.__name__, method))
if (converters[0] is not None or
(unit is not None and unit is not result_unit and
(not result_unit.is_equivalent(unit) or
result_unit.to(unit) != 1.))):
raise UnitsError("Cannot use '{1}' method on ufunc {0} with a "
"Quantity instance as it would change the unit."
.format(function.__name__, method))
return converters, result_unit
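# Illustrative sketch (comments only, not executed here):
#   >>> converters, unit = converters_and_unit(np.add, '__call__',
#   ...                                        1. * u.km, 500. * u.m)
#   >>> unit
#   Unit("km")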
def check_output(output, unit, inputs, function=None):
"""Check that function output can be stored in the output array given.
Parameters
----------
output : array or `~astropy.units.Quantity` or tuple
Array that should hold the function output (or tuple of such arrays).
unit : `~astropy.units.Unit` or None, or tuple
Unit that the output will have, or `None` for pure numbers (should be
tuple of same if output is a tuple of outputs).
inputs : tuple
Any input arguments. These should be castable to the output.
function : callable
The function that will be producing the output. If given, used to
give a more informative error message.
Returns
-------
arrays : `~numpy.ndarray` view of ``output`` (or tuple of such views).
Raises
------
UnitTypeError : If ``unit`` is inconsistent with the class of ``output``
TypeError : If the ``inputs`` cannot be cast safely to ``output``.
"""
if isinstance(output, tuple):
return tuple(check_output(output_, unit_, inputs, function)
for output_, unit_ in zip(output, unit))
# ``None`` indicates no actual array is needed. This can happen, e.g.,
# with np.modf(a, out=(None, b)).
if output is None:
return None
if hasattr(output, '__quantity_subclass__'):
# Check that we're not trying to store a plain Numpy array or a
# Quantity with an inconsistent unit (e.g., not angular for Angle).
if unit is None:
raise TypeError("Cannot store non-quantity output{0} in {1} "
"instance".format(
(" from {0} function".format(function.__name__)
if function is not None else ""),
type(output)))
if output.__quantity_subclass__(unit)[0] is not type(output):
raise UnitTypeError(
"Cannot store output with unit '{0}'{1} "
"in {2} instance. Use {3} instance instead."
.format(unit, (" from {0} function".format(function.__name__)
if function is not None else ""), type(output),
output.__quantity_subclass__(unit)[0]))
# Turn into ndarray, so we do not loop into array_wrap/array_ufunc
# if the output is used to store results of a function.
output = output.view(np.ndarray)
else:
# output is not a Quantity, so cannot obtain a unit.
if not (unit is None or unit is dimensionless_unscaled):
raise UnitTypeError("Cannot store quantity with dimension "
"{0}in a non-Quantity instance."
.format("" if function is None else
"resulting from {0} function "
.format(function.__name__)))
# check we can handle the dtype (e.g., that we are not int
# when float is required).
if not np.can_cast(np.result_type(*inputs), output.dtype,
casting='same_kind'):
raise TypeError("Arguments cannot be cast safely to inplace "
"output with dtype={0}".format(output.dtype))
return output
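# Illustrative sketch (comments only, not executed here): a suitable
# Quantity output comes back as a bare ndarray view, so later use of it
# cannot loop back into __array_ufunc__.
#   >>> out = np.empty(3) * u.m
#   >>> type(check_output(out, u.m, (np.arange(3.),)))
#   <class 'numpy.ndarray'>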
|
4e54158302f498d168dc179040b825846ee3d360875146fab41a55133fcef20a | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines the `Quantity` object, which represents a number with some
associated units. `Quantity` objects support operations like ordinary numbers,
but will deal with unit conversions internally.
"""
# Standard library
import re
import numbers
from fractions import Fraction
import warnings
import numpy as np
# AstroPy
from .core import (Unit, dimensionless_unscaled, get_current_unit_registry,
UnitBase, UnitsError, UnitTypeError)
from .utils import is_effectively_unity
from .format.latex import Latex
from ..utils.compat import NUMPY_LT_1_14
from ..utils.compat.misc import override__dir__
from ..utils.exceptions import AstropyDeprecationWarning
from ..utils.misc import isiterable, InheritDocstrings
from ..utils.data_info import ParentDtypeInfo
from .. import config as _config
from .quantity_helper import (converters_and_unit, can_have_arbitrary_unit,
check_output)
__all__ = ["Quantity", "SpecificTypeQuantity",
"QuantityInfoBase", "QuantityInfo", "allclose", "isclose"]
# We don't want to run doctests in the docstrings we inherit from Numpy
__doctest_skip__ = ['Quantity.*']
_UNIT_NOT_INITIALISED = "(Unit not initialised)"
_UFUNCS_FILTER_WARNINGS = {np.arcsin, np.arccos, np.arccosh, np.arctanh}
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for Quantity
"""
latex_array_threshold = _config.ConfigItem(100,
'The maximum size an array Quantity can be before its LaTeX '
'representation for IPython gets "summarized" (meaning only the first '
'and last few elements are shown with "..." between). Setting this to a '
'negative number means that the value will instead be whatever numpy '
'gets from get_printoptions.')
conf = Conf()
class QuantityIterator:
"""
Flat iterator object to iterate over Quantities
A `QuantityIterator` iterator is returned by ``q.flat`` for any Quantity
``q``. It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
Quantity.flatten : Returns a flattened copy of an array.
Notes
-----
`QuantityIterator` is inspired by `~numpy.ma.core.MaskedIterator`. It
is not exported by the `~astropy.units` module. Instead of
instantiating a `QuantityIterator` directly, use `Quantity.flat`.
"""
def __init__(self, q):
self._quantity = q
self._dataiter = q.view(np.ndarray).flat
def __iter__(self):
return self
def __getitem__(self, indx):
out = self._dataiter.__getitem__(indx)
# For single elements, ndarray.flat.__getitem__ returns scalars; these
# need a new view as a Quantity.
if isinstance(out, type(self._quantity)):
return out
else:
return self._quantity._new_view(out)
def __setitem__(self, index, value):
self._dataiter[index] = self._quantity._to_own_unit(value)
def __next__(self):
"""
Return the next value, or raise StopIteration.
"""
out = next(self._dataiter)
# ndarray.flat._dataiter returns scalars, so need a view as a Quantity.
return self._quantity._new_view(out)
next = __next__
class QuantityInfoBase(ParentDtypeInfo):
# This is on a base class rather than QuantityInfo directly, so that
# it can be used for EarthLocationInfo yet make clear that that class
# should not be considered a typical Quantity subclass by Table.
attrs_from_parent = {'dtype', 'unit'} # dtype and unit taken from parent
_supports_indexing = True
@staticmethod
def default_format(val):
return '{0.value:}'.format(val)
@staticmethod
def possible_string_format_functions(format_):
"""Iterate through possible string-derived format functions.
A string can either be a format specifier for the format built-in,
a new-style format string, or an old-style format string.
This method is overridden in order to suppress printing the unit
in each row since it is already at the top in the column header.
"""
yield lambda format_, val: format(val.value, format_)
yield lambda format_, val: format_.format(val.value)
yield lambda format_, val: format_ % val.value
class QuantityInfo(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ('value', 'unit')
_construct_from_dict_args = ['value']
_represent_as_dict_primary_data = 'value'
def new_like(self, cols, length, metadata_conflicts='warn', name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str
Output column name
Returns
-------
col : Quantity (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(cols, metadata_conflicts, name,
('meta', 'format', 'description'))
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop('shape')
dtype = attrs.pop('dtype')
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {key: (data if key == 'value' else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs}
map['copy'] = False
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
class Quantity(np.ndarray, metaclass=InheritDocstrings):
"""A `~astropy.units.Quantity` represents a number with some associated unit.
Parameters
----------
value : number, `~numpy.ndarray`, `Quantity` object (sequence), str
The numerical value of this quantity in the units given by unit. If a
`Quantity` or sequence of them (or any other valid object with a
``unit`` attribute), creates a new `Quantity` object, converting to
`unit` units as needed. If a string, it is converted to a number or
`Quantity`, depending on whether a unit is present.
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit associated with the input value.
Must be an `~astropy.units.UnitBase` object or a string parseable by
the :mod:`~astropy.units` package.
dtype : ~numpy.dtype, optional
The dtype of the resulting Numpy array or scalar that will
hold the value. If not provided, it is determined from the input,
except that any input that cannot represent float (integer and bool)
is converted to float.
copy : bool, optional
If `True` (default), then the value is copied. Otherwise, a copy will
only be made if ``__array__`` returns a copy, if value is a nested
sequence, or if a copy is needed to satisfy an explicitly given
``dtype``. (The `False` option is intended mostly for internal use,
to speed up initialization where a copy is known to have been made.
Use with care.)
order : {'C', 'F', 'A'}, optional
Specify the order of the array. As in `~numpy.array`. This parameter
is ignored if the input is a `Quantity` and ``copy=False``.
subok : bool, optional
If `False` (default), the returned array will be forced to be a
`Quantity`. Otherwise, `Quantity` subclasses will be passed through,
or a subclass appropriate for the unit will be used (such as
`~astropy.units.Dex` for ``u.dex(u.AA)``).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting array
should have. Ones will be pre-pended to the shape as needed to meet
this requirement. This parameter is ignored if the input is a
`Quantity` and ``copy=False``.
Raises
------
TypeError
If the value provided is not a Python numeric type.
TypeError
If the unit provided is not either a :class:`~astropy.units.Unit`
object or a parseable string unit.
Notes
-----
Quantities can also be created by multiplying a number or array with a
:class:`~astropy.units.Unit`. See http://docs.astropy.org/en/latest/units/
"""
# Need to set a class-level default for _equivalencies, or
# Constants can not initialize properly
_equivalencies = []
# Default unit for initialization; can be overridden by subclasses,
# possibly to `None` to indicate there is no default unit.
_default_unit = dimensionless_unscaled
# Ensures views have an undefined unit.
_unit = None
__array_priority__ = 10000
def __new__(cls, value, unit=None, dtype=None, copy=True, order=None,
subok=False, ndmin=0):
if unit is not None:
# convert unit first, to avoid multiple string->unit conversions
unit = Unit(unit)
# if we allow subclasses, allow a class from the unit.
if subok:
qcls = getattr(unit, '_quantity_class', cls)
if issubclass(qcls, cls):
cls = qcls
# optimize speed for Quantity with no dtype given, copy=False
if isinstance(value, Quantity):
if unit is not None and unit is not value.unit:
value = value.to(unit)
# the above already makes a copy (with float dtype)
copy = False
if type(value) is not cls and not (subok and
isinstance(value, cls)):
value = value.view(cls)
if dtype is None:
if not copy:
return value
if not np.can_cast(np.float32, value.dtype):
dtype = float
return np.array(value, dtype=dtype, copy=copy, order=order,
subok=True, ndmin=ndmin)
# Maybe str, or list/tuple of Quantity? If so, this may set value_unit.
# To ensure array remains fast, we short-circuit it.
value_unit = None
if not isinstance(value, np.ndarray):
if isinstance(value, str):
# The first part of the regex string matches any integer/float;
                # the second part adds possible trailing .+-, which will break
# the float function below and ensure things like 1.2.3deg
# will not work.
pattern = (r'\s*[+-]?'
r'((\d+\.?\d*)|(\.\d+)|([nN][aA][nN])|'
r'([iI][nN][fF]([iI][nN][iI][tT][yY]){0,1}))'
r'([eE][+-]?\d+)?'
r'[.+-]?')
v = re.match(pattern, value)
unit_string = None
try:
value = float(v.group())
except Exception:
raise TypeError('Cannot parse "{0}" as a {1}. It does not '
'start with a number.'
.format(value, cls.__name__))
unit_string = v.string[v.end():].strip()
if unit_string:
value_unit = Unit(unit_string)
if unit is None:
unit = value_unit # signal no conversion needed below.
elif (isiterable(value) and len(value) > 0 and
all(isinstance(v, Quantity) for v in value)):
# Convert all quantities to the same unit.
if unit is None:
unit = value[0].unit
value = [q.to_value(unit) for q in value]
value_unit = unit # signal below that conversion has been done
if value_unit is None:
# If the value has a `unit` attribute and if not None
# (for Columns with uninitialized unit), treat it like a quantity.
value_unit = getattr(value, 'unit', None)
if value_unit is None:
# Default to dimensionless for no (initialized) unit attribute.
if unit is None:
unit = cls._default_unit
value_unit = unit # signal below that no conversion is needed
else:
try:
value_unit = Unit(value_unit)
except Exception as exc:
raise TypeError("The unit attribute {0!r} of the input could "
"not be parsed as an astropy Unit, raising "
"the following exception:\n{1}"
.format(value.unit, exc))
if unit is None:
unit = value_unit
elif unit is not value_unit:
copy = False # copy will be made in conversion at end
value = np.array(value, dtype=dtype, copy=copy, order=order,
subok=False, ndmin=ndmin)
# check that array contains numbers or long int objects
if (value.dtype.kind in 'OSU' and
not (value.dtype.kind == 'O' and
isinstance(value.item(() if value.ndim == 0 else 0),
numbers.Number))):
raise TypeError("The value must be a valid Python or "
"Numpy numeric type.")
# by default, cast any integer, boolean, etc., to float
if dtype is None and (not np.can_cast(np.float32, value.dtype)
or value.dtype.kind == 'O'):
value = value.astype(float)
value = value.view(cls)
value._set_unit(value_unit)
if unit is value_unit:
return value
else:
# here we had non-Quantity input that had a "unit" attribute
# with a unit different from the desired one. So, convert.
return value.to(unit)
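    # Illustrative sketch of the constructor (comments only, not executed
    # here), assuming ``from astropy import units as u``:
    #   >>> Quantity('1.5 km')
    #   <Quantity 1.5 km>
    #   >>> Quantity([1., 2.], u.m)
    #   <Quantity [1., 2.] m>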
def __array_finalize__(self, obj):
# If we're a new object or viewing an ndarray, nothing has to be done.
if obj is None or obj.__class__ is np.ndarray:
return
# If our unit is not set and obj has a valid one, use it.
if self._unit is None:
unit = getattr(obj, '_unit', None)
if unit is not None:
self._set_unit(unit)
# Copy info if the original had `info` defined. Because of the way the
# DataInfo works, `'info' in obj.__dict__` is False until the
# `info` attribute is accessed or set.
if 'info' in obj.__dict__:
self.info = obj.info
def __array_wrap__(self, obj, context=None):
if context is None:
            # Methods like .squeeze() create a new `ndarray` and then call
# __array_wrap__ to turn the array into self's subclass.
return self._new_view(obj)
raise NotImplementedError('__array_wrap__ should not be used '
'with a context any more, since we require '
'numpy >=1.13. Please raise an issue on '
'https://github.com/astropy/astropy')
def __array_ufunc__(self, function, method, *inputs, **kwargs):
"""Wrap numpy ufuncs, taking care of units.
Parameters
----------
function : callable
ufunc to wrap.
method : str
Ufunc method: ``__call__``, ``at``, ``reduce``, etc.
inputs : tuple
Input arrays.
kwargs : keyword arguments
As passed on, with ``out`` containing possible quantity output.
Returns
-------
result : `~astropy.units.Quantity`
Results of the ufunc, with the unit set properly.
"""
# Determine required conversion functions -- to bring the unit of the
# input to that expected (e.g., radian for np.sin), or to get
# consistent units between two inputs (e.g., in np.add) --
# and the unit of the result (or tuple of units for nout > 1).
converters, unit = converters_and_unit(function, method, *inputs)
out = kwargs.get('out', None)
# Avoid loop back by turning any Quantity output into array views.
if out is not None:
# If pre-allocated output is used, check it is suitable.
# This also returns array view, to ensure we don't loop back.
if function.nout == 1:
out = out[0]
out_array = check_output(out, unit, inputs, function=function)
# Ensure output argument remains a tuple.
kwargs['out'] = (out_array,) if function.nout == 1 else out_array
# Same for inputs, but here also convert if necessary.
arrays = [(converter(input_.value) if converter else
getattr(input_, 'value', input_))
for input_, converter in zip(inputs, converters)]
# Call our superclass's __array_ufunc__
result = super().__array_ufunc__(function, method, *arrays, **kwargs)
# If unit is None, a plain array is expected (e.g., comparisons), which
# means we're done.
# We're also done if the result was None (for method 'at') or
# NotImplemented, which can happen if other inputs/outputs override
# __array_ufunc__; hopefully, they can then deal with us.
if unit is None or result is None or result is NotImplemented:
return result
return self._result_as_quantity(result, unit, out)
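    # Illustrative sketch (comments only, not executed here): units are
    # reconciled before the underlying ufunc runs.
    #   >>> np.add(1. * u.km, 500. * u.m)
    #   <Quantity 1.5 km>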
def _result_as_quantity(self, result, unit, out):
"""Turn result into a quantity with the given unit.
If no output is given, it will take a view of the array as a quantity,
and set the unit. If output is given, those should be quantity views
of the result arrays, and the function will just set the unit.
Parameters
----------
result : `~numpy.ndarray` or tuple of `~numpy.ndarray`
Array(s) which need to be turned into quantity.
unit : `~astropy.units.Unit` or None
Unit for the quantities to be returned (or `None` if the result
should not be a quantity). Should be tuple if result is a tuple.
out : `~astropy.units.Quantity` or None
Possible output quantity. Should be `None` or a tuple if result
is a tuple.
Returns
-------
out : `~astropy.units.Quantity`
With units set.
"""
if isinstance(result, tuple):
if out is None:
out = (None,) * len(result)
return tuple(self._result_as_quantity(result_, unit_, out_)
for (result_, unit_, out_) in
zip(result, unit, out))
if out is None:
# View the result array as a Quantity with the proper unit.
return result if unit is None else self._new_view(result, unit)
# For given output, just set the unit. We know the unit is not None and
# the output is of the correct Quantity subclass, as it was passed
# through check_output.
out._set_unit(unit)
return out
def __quantity_subclass__(self, unit):
"""
Overridden by subclasses to change what kind of view is
created based on the output unit of an operation.
Parameters
----------
unit : UnitBase
The unit for which the appropriate class should be returned
Returns
-------
tuple :
- `Quantity` subclass
- bool: True if subclasses of the given class are ok
"""
return Quantity, True
def _new_view(self, obj=None, unit=None):
"""
Create a Quantity view of some array-like input, and set the unit
By default, return a view of ``obj`` of the same class as ``self`` and
with the same unit. Subclasses can override the type of class for a
given unit using ``__quantity_subclass__``, and can ensure properties
other than the unit are copied using ``__array_finalize__``.
If the given unit defines a ``_quantity_class`` of which ``self``
is not an instance, a view using this class is taken.
Parameters
----------
obj : ndarray or scalar, optional
The array to create a view of. If obj is a numpy or python scalar,
it will be converted to an array scalar. By default, ``self``
is converted.
unit : `UnitBase`, or anything convertible to a :class:`~astropy.units.Unit`, optional
The unit of the resulting object. It is used to select a
subclass, and explicitly assigned to the view if given.
If not given, the subclass and unit will be that of ``self``.
Returns
-------
view : Quantity subclass
"""
# Determine the unit and quantity subclass that we need for the view.
if unit is None:
unit = self.unit
quantity_subclass = self.__class__
else:
# In principle, could gain time by testing unit is self.unit
# as well, and then quantity_subclass = self.__class__, but
# Constant relies on going through `__quantity_subclass__`.
unit = Unit(unit)
quantity_subclass = getattr(unit, '_quantity_class', Quantity)
if isinstance(self, quantity_subclass):
quantity_subclass, subok = self.__quantity_subclass__(unit)
if subok:
quantity_subclass = self.__class__
# We only want to propagate information from ``self`` to our new view,
# so obj should be a regular array. By using ``np.array``, we also
# convert python and numpy scalars, which cannot be viewed as arrays
# and thus not as Quantity either, to zero-dimensional arrays.
# (These are turned back into scalar in `.value`)
        # Note that for an ndarray input, the np.array call takes only about
        # double the time of ``obj.__class__ is np.ndarray``, so it is not
        # worth special-casing.
if obj is None:
obj = self.view(np.ndarray)
else:
obj = np.array(obj, copy=False)
# Take the view, set the unit, and update possible other properties
# such as ``info``, ``wrap_angle`` in `Longitude`, etc.
view = obj.view(quantity_subclass)
view._set_unit(unit)
view.__array_finalize__(self)
return view
def _set_unit(self, unit):
"""Set the unit.
This is used anywhere the unit is set or modified, i.e., in the
        initializer, in ``__imul__`` and ``__itruediv__`` for in-place
multiplication and division by another unit, as well as in
``__array_finalize__`` for wrapping up views. For Quantity, it just
sets the unit, but subclasses can override it to check that, e.g.,
a unit is consistent.
"""
if not isinstance(unit, UnitBase):
# Trying to go through a string ensures that, e.g., Magnitudes with
# dimensionless physical unit become Quantity with units of mag.
unit = Unit(str(unit), parse_strict='silent')
if not isinstance(unit, UnitBase):
raise UnitTypeError(
"{0} instances require {1} units, not {2} instances."
.format(type(self).__name__, UnitBase, type(unit)))
self._unit = unit
def __deepcopy__(self, memo):
# If we don't define this, ``copy.deepcopy(quantity)`` will
# return a bare Numpy array.
return self.copy()
def __reduce__(self):
# patch to pickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
object_state = list(super().__reduce__())
object_state[2] = (object_state[2], self.__dict__)
return tuple(object_state)
def __setstate__(self, state):
# patch to unpickle Quantity objects (ndarray subclasses), see
# http://www.mail-archive.com/[email protected]/msg02446.html
nd_state, own_state = state
super().__setstate__(nd_state)
self.__dict__.update(own_state)
info = QuantityInfo()
def _to_value(self, unit, equivalencies=[]):
"""Helper method for to and to_value."""
if equivalencies == []:
equivalencies = self._equivalencies
return self.unit.to(unit, self.view(np.ndarray),
equivalencies=equivalencies)
def to(self, unit, equivalencies=[]):
"""
Return a new `~astropy.units.Quantity` object with the specified unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance, str
An object that represents the unit to convert to. Must be
an `~astropy.units.UnitBase` object or a string parseable
by the `~astropy.units` package.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not
directly convertible. See :ref:`unit_equivalencies`.
If not provided or ``[]``, class default equivalencies will be used
(none for `~astropy.units.Quantity`, but may be set for subclasses)
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
See also
--------
to_value : get the numerical value in a given unit.
"""
# We don't use `to_value` below since we always want to make a copy
# and don't want to slow down this method (esp. the scalar case).
unit = Unit(unit)
return self._new_view(self._to_value(unit, equivalencies), unit)
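    # Illustrative sketch (comments only, not executed here):
    #   >>> (1. * u.km).to(u.m)
    #   <Quantity 1000. m>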
def to_value(self, unit=None, equivalencies=[]):
"""
The numerical value, possibly in a different unit.
Parameters
----------
unit : `~astropy.units.UnitBase` instance or str, optional
The unit in which the value should be given. If not given or `None`,
use the current unit.
equivalencies : list of equivalence pairs, optional
A list of equivalence pairs to try if the units are not directly
convertible (see :ref:`unit_equivalencies`). If not provided or
``[]``, class default equivalencies will be used (none for
`~astropy.units.Quantity`, but may be set for subclasses).
If `None`, no equivalencies will be applied at all, not even any
set globally or within a context.
Returns
-------
value : `~numpy.ndarray` or scalar
The value in the units specified. For arrays, this will be a view
of the data if no unit conversion was necessary.
See also
--------
to : Get a new instance in a different unit.
"""
if unit is None or unit is self.unit:
value = self.view(np.ndarray)
else:
unit = Unit(unit)
# We want a view if the unit does not change. One could check
# with "==", but that calculates the scale that we need anyway.
# TODO: would be better for `unit.to` to have an in-place flag.
try:
scale = self.unit._to(unit)
except Exception:
# Short-cut failed; try default (maybe equivalencies help).
value = self._to_value(unit, equivalencies)
else:
value = self.view(np.ndarray)
if not is_effectively_unity(scale):
# not in-place!
value = value * scale
return value if self.shape else value.item()
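    # Illustrative sketch (comments only, not executed here): a plain
    # number comes back, as a view of the data if no scaling was needed.
    #   >>> (1. * u.km).to_value(u.m)
    #   1000.0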
value = property(to_value,
doc="""The numerical value of this instance.
See also
--------
to_value : Get the numerical value in a given unit.
""")
@property
def unit(self):
"""
A `~astropy.units.UnitBase` object representing the unit of this
quantity.
"""
return self._unit
@property
def equivalencies(self):
"""
A list of equivalencies that will be applied by default during
unit conversions.
"""
return self._equivalencies
@property
def si(self):
"""
Returns a copy of the current `Quantity` instance with SI units. The
value of the resulting object will be scaled.
"""
si_unit = self.unit.si
return self._new_view(self.value * si_unit.scale,
si_unit / si_unit.scale)
@property
def cgs(self):
"""
Returns a copy of the current `Quantity` instance with CGS units. The
value of the resulting object will be scaled.
"""
cgs_unit = self.unit.cgs
return self._new_view(self.value * cgs_unit.scale,
cgs_unit / cgs_unit.scale)
@property
def isscalar(self):
"""
True if the `value` of this quantity is a scalar, or False if it
is an array-like object.
.. note::
This is subtly different from `numpy.isscalar` in that
`numpy.isscalar` returns False for a zero-dimensional array
(e.g. ``np.array(1)``), while this is True for quantities,
since quantities cannot represent true numpy scalars.
"""
return not self.shape
# This flag controls whether convenience conversion members, such
# as `q.m` equivalent to `q.to_value(u.m)` are available. This is
# not turned on on Quantity itself, but is on some subclasses of
# Quantity, such as `astropy.coordinates.Angle`.
_include_easy_conversion_members = False
@override__dir__
def __dir__(self):
"""
Quantities are able to directly convert to other units that
have the same physical type. This function is implemented in
order to make autocompletion still work correctly in IPython.
"""
if not self._include_easy_conversion_members:
return []
extra_members = set()
equivalencies = Unit._normalize_equivalencies(self.equivalencies)
for equivalent in self.unit._get_units_with_same_physical_type(
equivalencies):
extra_members.update(equivalent.names)
return extra_members
def __getattr__(self, attr):
"""
Quantities are able to directly convert to other units that
have the same physical type.
"""
if not self._include_easy_conversion_members:
raise AttributeError(
"'{0}' object has no '{1}' member".format(
self.__class__.__name__,
attr))
def get_virtual_unit_attribute():
registry = get_current_unit_registry().registry
to_unit = registry.get(attr, None)
if to_unit is None:
return None
try:
return self.unit.to(
to_unit, self.value, equivalencies=self.equivalencies)
except UnitsError:
return None
value = get_virtual_unit_attribute()
if value is None:
raise AttributeError(
"{0} instance has no attribute '{1}'".format(
self.__class__.__name__, attr))
else:
return value
# Equality (return False if units do not match) needs to be handled
# explicitly for numpy >=1.9, since it no longer traps errors.
def __eq__(self, other):
try:
try:
return super().__eq__(other)
except DeprecationWarning:
# We treat the DeprecationWarning separately, since it may
# mask another Exception. But we do not want to just use
# np.equal, since super's __eq__ treats recarrays correctly.
return np.equal(self, other)
except UnitsError:
return False
except TypeError:
return NotImplemented
def __ne__(self, other):
try:
try:
return super().__ne__(other)
except DeprecationWarning:
return np.not_equal(self, other)
except UnitsError:
return True
except TypeError:
return NotImplemented
# Arithmetic operations
def __mul__(self, other):
""" Multiplication between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), other * self.unit)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__mul__(other)
def __imul__(self, other):
"""In-place multiplication between `Quantity` objects and others."""
if isinstance(other, (UnitBase, str)):
self._set_unit(other * self.unit)
return self
return super().__imul__(other)
def __rmul__(self, other):
""" Right Multiplication between `Quantity` objects and other
objects.
"""
return self.__mul__(other)
def __truediv__(self, other):
""" Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
try:
return self._new_view(self.copy(), self.unit / other)
except UnitsError: # let other try to deal with it
return NotImplemented
return super().__truediv__(other)
def __itruediv__(self, other):
"""Inplace division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
self._set_unit(self.unit / other)
return self
return super().__itruediv__(other)
def __rtruediv__(self, other):
""" Right Division between `Quantity` objects and other objects."""
if isinstance(other, (UnitBase, str)):
return self._new_view(1. / self.value, other / self.unit)
return super().__rtruediv__(other)
def __div__(self, other):
""" Division between `Quantity` objects. """
return self.__truediv__(other)
def __idiv__(self, other):
""" Division between `Quantity` objects. """
return self.__itruediv__(other)
def __rdiv__(self, other):
""" Division between `Quantity` objects. """
return self.__rtruediv__(other)
def __pow__(self, other):
if isinstance(other, Fraction):
# Avoid getting object arrays by raising the value to a Fraction.
return self._new_view(self.value ** float(other),
self.unit ** other)
return super().__pow__(other)
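    # Illustrative sketch (comments only, not executed here): Fraction
    # powers keep the unit exact while the value is raised to the
    # equivalent float power.
    #   >>> from fractions import Fraction
    #   >>> (4. * u.m ** 2) ** Fraction(1, 2)
    #   <Quantity 2. m>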
# For Py>=3.5
def __matmul__(self, other, reverse=False):
result_unit = self.unit * getattr(other, 'unit', dimensionless_unscaled)
result_array = np.matmul(self.value, getattr(other, 'value', other))
return self._new_view(result_array, result_unit)
def __rmatmul__(self, other):
result_unit = self.unit * getattr(other, 'unit', dimensionless_unscaled)
result_array = np.matmul(getattr(other, 'value', other), self.value)
return self._new_view(result_array, result_unit)
# In numpy 1.13, 1.14, a np.positive ufunc exists, but ndarray.__pos__
# does not go through it, so we define it, to allow subclasses to override
# it inside __array_ufunc__. This can be removed if a solution to
# https://github.com/numpy/numpy/issues/9081 is merged.
def __pos__(self):
"""Plus the quantity."""
return np.positive(self)
# other overrides of special functions
def __hash__(self):
return hash(self.value) ^ hash(self.unit)
def __iter__(self):
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value is not iterable"
.format(cls=self.__class__.__name__))
# Otherwise return a generator
def quantity_iter():
for val in self.value:
yield self._new_view(val)
return quantity_iter()
def __getitem__(self, key):
try:
out = super().__getitem__(key)
except IndexError:
# We want zero-dimensional Quantity objects to behave like scalars,
# so they should raise a TypeError rather than an IndexError.
if self.isscalar:
raise TypeError(
"'{cls}' object with a scalar value does not support "
"indexing".format(cls=self.__class__.__name__))
else:
raise
# For single elements, ndarray.__getitem__ returns scalars; these
# need a new view as a Quantity.
if type(out) is not type(self):
out = self._new_view(out)
return out
def __setitem__(self, i, value):
# update indices in info if the info property has been accessed
# (in which case 'info' in self.__dict__ is True; this is guaranteed
# to be the case if we're part of a table).
if not self.isscalar and 'info' in self.__dict__:
self.info.adjust_indices(i, value, len(self))
self.view(np.ndarray).__setitem__(i, self._to_own_unit(value))
# __contains__ is OK
def __bool__(self):
"""Quantities should always be treated as non-False; there is too much
potential for ambiguity otherwise.
"""
warnings.warn('The truth value of a Quantity is ambiguous. '
'In the future this will raise a ValueError.',
AstropyDeprecationWarning)
return True
def __len__(self):
if self.isscalar:
raise TypeError("'{cls}' object with a scalar value has no "
"len()".format(cls=self.__class__.__name__))
else:
return len(self.value)
# Numerical types
def __float__(self):
try:
return float(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __int__(self):
try:
return int(self.to_value(dimensionless_unscaled))
except (UnitsError, TypeError):
raise TypeError('only dimensionless scalar quantities can be '
'converted to Python scalars')
def __index__(self):
# for indices, we do not want to mess around with scaling at all,
# so unlike for float, int, we insist here on unscaled dimensionless
try:
assert self.unit.is_unity()
return self.value.__index__()
except Exception:
raise TypeError('only integer dimensionless scalar quantities '
'can be converted to a Python index')
@property
def _unitstr(self):
if self.unit is None:
unitstr = _UNIT_NOT_INITIALISED
else:
unitstr = str(self.unit)
if unitstr:
unitstr = ' ' + unitstr
return unitstr
# Display
# TODO: we may want to add a hook for dimensionless quantities?
def __str__(self):
return '{0}{1:s}'.format(self.value, self._unitstr)
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
sep = ',' if NUMPY_LT_1_14 else ', '
arrstr = np.array2string(self.view(np.ndarray), separator=sep,
prefix=prefixstr)
return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr)
def _repr_latex_(self):
"""
Generate a latex representation of the quantity and its unit.
The behavior of this function can be altered via the
`numpy.set_printoptions` function and its various keywords. The
exception to this is the ``threshold`` keyword, which is controlled via
the ``[units.quantity]`` configuration item ``latex_array_threshold``.
This is treated separately because the numpy default of 1000 is too big
for most browsers to handle.
Returns
-------
lstr
A LaTeX string with the contents of this Quantity
"""
# need to do try/finally because "threshold" cannot be overridden
# with array2string
pops = np.get_printoptions()
format_spec = '.{}g'.format(pops['precision'])
def float_formatter(value):
return Latex.format_exponential_notation(value,
format_spec=format_spec)
try:
formatter = {'float_kind': float_formatter}
if conf.latex_array_threshold > -1:
np.set_printoptions(threshold=conf.latex_array_threshold,
formatter=formatter)
# the view is needed for the scalar case - value might be float
if NUMPY_LT_1_14: # style deprecated in 1.14
latex_value = np.array2string(
self.view(np.ndarray),
style=(float_formatter if self.dtype.kind == 'f'
else repr),
max_line_width=np.inf, separator=',~')
else:
latex_value = np.array2string(
self.view(np.ndarray),
max_line_width=np.inf, separator=',~')
latex_value = latex_value.replace('...', r'\dots')
finally:
np.set_printoptions(**pops)
# Format unit
# [1:-1] strips the '$' on either side needed for math mode
latex_unit = (self.unit._repr_latex_()[1:-1] # note this is unicode
if self.unit is not None
else _UNIT_NOT_INITIALISED)
return r'${0} \; {1}$'.format(latex_value, latex_unit)
def __format__(self, format_spec):
"""
Format quantities using the new-style python formatting codes
as specifiers for the number.
If the format specifier correctly applies itself to the value,
then it is used to format only the value. If it cannot be
applied to the value, then it is applied to the whole string.
"""
try:
value = format(self.value, format_spec)
full_format_spec = "s"
except ValueError:
value = self.value
full_format_spec = format_spec
return format("{0}{1:s}".format(value, self._unitstr),
full_format_spec)
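    # Editor's sketch of the two formatting paths described above:
    #     >>> q = 10.5 * u.m
    #     >>> '{0:.2f}'.format(q)   # spec fits the value -> '10.50 m'
    #     >>> '{0:>12s}'.format(q)  # spec fails on a float, so the whole
    #                               # '10.5 m' string is right-aligned in 12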
def decompose(self, bases=[]):
"""
Generates a new `Quantity` with the units
decomposed. Decomposed units have only irreducible units in
them (see `astropy.units.UnitBase.decompose`).
Parameters
----------
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
            to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
return self._decompose(False, bases=bases)
def _decompose(self, allowscaledunits=False, bases=[]):
"""
Generates a new `Quantity` with the units decomposed. Decomposed
units have only irreducible units in them (see
`astropy.units.UnitBase.decompose`).
Parameters
----------
allowscaledunits : bool
If True, the resulting `Quantity` may have a scale factor
associated with it. If False, any scaling in the unit will
be subsumed into the value of the resulting `Quantity`
bases : sequence of UnitBase, optional
The bases to decompose into. When not provided,
decomposes down to any irreducible units. When provided,
the decomposed result will only contain the given units.
            This will raise a `~astropy.units.UnitsError` if it's not possible
            to do so.
Returns
-------
newq : `~astropy.units.Quantity`
A new object equal to this quantity with units decomposed.
"""
new_unit = self.unit.decompose(bases=bases)
# Be careful here because self.value usually is a view of self;
# be sure that the original value is not being modified.
if not allowscaledunits and hasattr(new_unit, 'scale'):
new_value = self.value * new_unit.scale
new_unit = new_unit / new_unit.scale
return self._new_view(new_value, new_unit)
else:
return self._new_view(self.copy(), new_unit)
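    # Editor's sketch: the public ``decompose`` folds any scale of the
    # decomposed unit into the value, so the returned unit is scale-free:
    #     >>> (1. * u.km).decompose()   # -> <Quantity 1000. m>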
# These functions need to be overridden to take into account the units
# Array conversion
# http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#array-conversion
def item(self, *args):
return self._new_view(super().item(*args))
def tolist(self):
raise NotImplementedError("cannot make a list of Quantities. Get "
"list of values with q.value.list()")
def _to_own_unit(self, value, check_precision=True):
try:
_value = value.to_value(self.unit)
except AttributeError:
# We're not a Quantity, so let's try a more general conversion.
# Plain arrays will be converted to dimensionless in the process,
# but anything with a unit attribute will use that.
try:
_value = Quantity(value).to_value(self.unit)
except UnitsError as exc:
# last chance: if this was not something with a unit
# and is all 0, inf, or nan, we treat it as arbitrary unit.
if (not hasattr(value, 'unit') and
can_have_arbitrary_unit(value)):
_value = value
else:
raise exc
if check_precision:
value_dtype = getattr(value, 'dtype', None)
if self.dtype != value_dtype:
self_dtype_array = np.array(_value, self.dtype)
value_dtype_array = np.array(_value, dtype=value_dtype,
copy=False)
if not np.all(np.logical_or(self_dtype_array ==
value_dtype_array,
np.isnan(value_dtype_array))):
raise TypeError("cannot convert value type to array type "
"without precision loss")
return _value
def itemset(self, *args):
if len(args) == 0:
raise ValueError("itemset must have at least one argument")
self.view(np.ndarray).itemset(*(args[:-1] +
(self._to_own_unit(args[-1]),)))
def tostring(self, order='C'):
raise NotImplementedError("cannot write Quantities to string. Write "
"array with q.value.tostring(...).")
def tofile(self, fid, sep="", format="%s"):
raise NotImplementedError("cannot write Quantities to file. Write "
"array with q.value.tofile(...)")
def dump(self, file):
raise NotImplementedError("cannot dump Quantities to file. Write "
"array with q.value.dump()")
def dumps(self):
raise NotImplementedError("cannot dump Quantities to string. Write "
"array with q.value.dumps()")
# astype, byteswap, copy, view, getfield, setflags OK as is
def fill(self, value):
self.view(np.ndarray).fill(self._to_own_unit(value))
# Shape manipulation: resize cannot be done (does not own data), but
# shape, transpose, swapaxes, flatten, ravel, squeeze all OK. Only
# the flat iterator needs to be overwritten, otherwise single items are
# returned as numbers.
@property
def flat(self):
"""A 1-D iterator over the Quantity array.
This returns a ``QuantityIterator`` instance, which behaves the same
as the `~numpy.flatiter` instance returned by `~numpy.ndarray.flat`,
and is similar to, but not a subclass of, Python's built-in iterator
object.
"""
return QuantityIterator(self)
@flat.setter
def flat(self, value):
y = self.ravel()
y[:] = value
# Item selection and manipulation
# take, repeat, sort, compress, diagonal OK
def put(self, indices, values, mode='raise'):
self.view(np.ndarray).put(indices, self._to_own_unit(values), mode)
def choose(self, choices, out=None, mode='raise'):
raise NotImplementedError("cannot choose based on quantity. Choose "
"using array with q.value.choose(...)")
# ensure we do not return indices as quantities
def argsort(self, axis=-1, kind='quicksort', order=None):
return self.view(np.ndarray).argsort(axis=axis, kind=kind, order=order)
def searchsorted(self, v, *args, **kwargs):
return np.searchsorted(np.array(self),
self._to_own_unit(v, check_precision=False),
*args, **kwargs) # avoid numpy 1.6 problem
def argmax(self, axis=None, out=None):
return self.view(np.ndarray).argmax(axis, out=out)
def argmin(self, axis=None, out=None):
return self.view(np.ndarray).argmin(axis, out=out)
# Calculation -- override ndarray methods to take into account units.
# We use the corresponding numpy functions to evaluate the results, since
# the methods do not always allow calling with keyword arguments.
# For instance, np.array([0.,2.]).clip(a_min=0., a_max=1.) gives
# TypeError: 'a_max' is an invalid keyword argument for this function.
def _wrap_function(self, function, *args, unit=None, out=None, **kwargs):
"""Wrap a numpy function that processes self, returning a Quantity.
Parameters
----------
function : callable
Numpy function to wrap.
args : positional arguments
Any positional arguments to the function beyond the first argument
(which will be set to ``self``).
kwargs : keyword arguments
Keyword arguments to the function.
If present, the following arguments are treated specially:
unit : `~astropy.units.Unit`
Unit of the output result. If not given, the unit of ``self``.
out : `~astropy.units.Quantity`
A Quantity instance in which to store the output.
Notes
-----
Output should always be assigned via a keyword argument, otherwise
no proper account of the unit is taken.
Returns
-------
out : `~astropy.units.Quantity`
Result of the function call, with the unit set properly.
"""
if unit is None:
unit = self.unit
# Ensure we don't loop back by turning any Quantity into array views.
args = (self.value,) + tuple((arg.value if isinstance(arg, Quantity)
else arg) for arg in args)
if out is not None:
# If pre-allocated output is used, check it is suitable.
            # This also returns an array view, to ensure we don't loop back.
arrays = tuple(arg for arg in args if isinstance(arg, np.ndarray))
kwargs['out'] = check_output(out, unit, arrays, function=function)
# Apply the function and turn it back into a Quantity.
result = function(*args, **kwargs)
return self._result_as_quantity(result, unit, out)
def clip(self, a_min, a_max, out=None):
return self._wrap_function(np.clip, self._to_own_unit(a_min),
self._to_own_unit(a_max), out=out)
def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return self._wrap_function(np.trace, offset, axis1, axis2, dtype,
out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.var, axis, dtype,
out=out, ddof=ddof, unit=self.unit**2)
def std(self, axis=None, dtype=None, out=None, ddof=0):
return self._wrap_function(np.std, axis, dtype, out=out, ddof=ddof)
def mean(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.mean, axis, dtype, out=out)
def ptp(self, axis=None, out=None):
return self._wrap_function(np.ptp, axis, out=out)
def round(self, decimals=0, out=None):
return self._wrap_function(np.round, decimals, out=out)
def max(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.max, axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.min, axis, out=out, keepdims=keepdims)
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
return self._wrap_function(np.sum, axis, dtype, out=out,
keepdims=keepdims)
def prod(self, axis=None, dtype=None, out=None, keepdims=False):
if not self.unit.is_unity():
raise ValueError("cannot use prod on scaled or "
"non-dimensionless Quantity arrays")
return self._wrap_function(np.prod, axis, dtype, out=out,
keepdims=keepdims)
def dot(self, b, out=None):
result_unit = self.unit * getattr(b, 'unit', dimensionless_unscaled)
return self._wrap_function(np.dot, b, out=out, unit=result_unit)
def cumsum(self, axis=None, dtype=None, out=None):
return self._wrap_function(np.cumsum, axis, dtype, out=out)
def cumprod(self, axis=None, dtype=None, out=None):
if not self.unit.is_unity():
raise ValueError("cannot use cumprod on scaled or "
"non-dimensionless Quantity arrays")
return self._wrap_function(np.cumprod, axis, dtype, out=out)
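    # Editor's sketch: most wrapped methods keep the unit of ``self``, but
    # ``var`` squares it and ``dot`` multiplies units:
    #     >>> q = [1., 2., 3.] * u.m
    #     >>> q.var().unit, q.std().unit, q.dot(q).unit
    #     (Unit("m2"), Unit("m"), Unit("m2"))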
# Calculation: override methods that do not make sense.
def all(self, axis=None, out=None):
raise NotImplementedError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.all(...)")
def any(self, axis=None, out=None):
raise NotImplementedError("cannot evaluate truth value of quantities. "
"Evaluate array with q.value.any(...)")
# Calculation: numpy functions that can be overridden with methods.
def diff(self, n=1, axis=-1):
return self._wrap_function(np.diff, n, axis)
def ediff1d(self, to_end=None, to_begin=None):
return self._wrap_function(np.ediff1d, to_end, to_begin)
def nansum(self, axis=None, out=None, keepdims=False):
return self._wrap_function(np.nansum, axis,
out=out, keepdims=keepdims)
def insert(self, obj, values, axis=None):
"""
Insert values along the given axis before the given indices and return
a new `~astropy.units.Quantity` object.
This is a thin wrapper around the `numpy.insert` function.
Parameters
----------
obj : int, slice or sequence of ints
Object that defines the index or indices before which ``values`` is
inserted.
values : array-like
            Values to insert. If the type of ``values`` is different
            from that of the quantity, ``values`` is converted to the matching
            type. ``values`` should be shaped so that it can be broadcast
            appropriately.
The unit of ``values`` must be consistent with this quantity.
axis : int, optional
Axis along which to insert ``values``. If ``axis`` is None then
the quantity array is flattened before insertion.
Returns
-------
out : `~astropy.units.Quantity`
A copy of quantity with ``values`` inserted. Note that the
insertion does not occur in-place: a new quantity array is returned.
Examples
--------
>>> import astropy.units as u
>>> q = [1, 2] * u.m
>>> q.insert(0, 50 * u.cm)
<Quantity [ 0.5, 1., 2.] m>
>>> q = [[1, 2], [3, 4]] * u.m
>>> q.insert(1, [10, 20] * u.m, axis=0)
<Quantity [[ 1., 2.],
[ 10., 20.],
[ 3., 4.]] m>
>>> q.insert(1, 10 * u.m, axis=1)
<Quantity [[ 1., 10., 2.],
[ 3., 10., 4.]] m>
"""
out_array = np.insert(self.value, obj, self._to_own_unit(values), axis)
return self._new_view(out_array)
class SpecificTypeQuantity(Quantity):
"""Superclass for Quantities of specific physical type.
Subclasses of these work just like :class:`~astropy.units.Quantity`, except
that they are for specific physical types (and may have methods that are
only appropriate for that type). Astropy examples are
:class:`~astropy.coordinates.Angle` and
    :class:`~astropy.coordinates.Distance`.
At a minimum, subclasses should set ``_equivalent_unit`` to the unit
associated with the physical type.
"""
# The unit for the specific physical type. Instances can only be created
# with units that are equivalent to this.
_equivalent_unit = None
# The default unit used for views. Even with `None`, views of arrays
    # without units are possible, but will have an uninitialized unit.
_unit = None
# Default unit for initialization through the constructor.
_default_unit = None
# ensure that we get precedence over our superclass.
__array_priority__ = Quantity.__array_priority__ + 10
def __quantity_subclass__(self, unit):
if unit.is_equivalent(self._equivalent_unit):
return type(self), True
else:
return super().__quantity_subclass__(unit)[0], False
def _set_unit(self, unit):
if unit is None or not unit.is_equivalent(self._equivalent_unit):
raise UnitTypeError(
"{0} instances require units equivalent to '{1}'"
.format(type(self).__name__, self._equivalent_unit) +
(", but no unit was given." if unit is None else
", so cannot set it to '{0}'.".format(unit)))
super()._set_unit(unit)
def isclose(a, b, rtol=1.e-5, atol=None, **kwargs):
"""
Notes
-----
Returns True if two arrays are element-wise equal within a tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.isclose`.
"""
return np.isclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
**kwargs)
def allclose(a, b, rtol=1.e-5, atol=None, **kwargs):
"""
Notes
-----
Returns True if two arrays are element-wise equal within a tolerance.
This is a :class:`~astropy.units.Quantity`-aware version of
:func:`numpy.allclose`.
"""
return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol),
**kwargs)
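# Editor's usage sketch (not part of the module): units are converted
# before comparison, and incompatible units raise UnitsError:
#     >>> import astropy.units as u
#     >>> u.allclose([1000., 2000.] * u.m, [1., 2.] * u.km)
#     True
#     >>> u.allclose(1. * u.m, 1. * u.s)   # -> UnitsError (not convertible)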
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
actual = Quantity(actual, subok=True, copy=False)
desired = Quantity(desired, subok=True, copy=False)
try:
desired = desired.to(actual.unit)
except UnitsError:
raise UnitsError("Units for 'desired' ({0}) and 'actual' ({1}) "
"are not convertible"
.format(desired.unit, actual.unit))
if atol is None:
# by default, we assume an absolute tolerance of 0
atol = Quantity(0)
else:
atol = Quantity(atol, subok=True, copy=False)
try:
atol = atol.to(actual.unit)
except UnitsError:
raise UnitsError("Units for 'atol' ({0}) and 'actual' ({1}) "
"are not convertible"
.format(atol.unit, actual.unit))
rtol = Quantity(rtol, subok=True, copy=False)
try:
rtol = rtol.to(dimensionless_unscaled)
except Exception:
raise UnitsError("`rtol` should be dimensionless")
return actual.value, desired.value, rtol.value, atol.value
|
baf168833e49a6c17c624f67ed1dd05d9679cfe7d8c71cb1ff5ec820f4ccc1f8 | """
In this module, we define the coordinate representation classes, which are
used to represent low-level cartesian, spherical, cylindrical, and other
coordinates.
"""
import abc
import functools
import operator
from collections import OrderedDict
import inspect
import warnings
import numpy as np
import astropy.units as u
from .angles import Angle, Longitude, Latitude
from .distances import Distance
from ..utils import ShapedLikeNDArray, classproperty
from ..utils import deprecated_attribute
from ..utils.exceptions import AstropyDeprecationWarning
from ..utils.misc import InheritDocstrings
from ..utils.compat import NUMPY_LT_1_14
__all__ = ["BaseRepresentationOrDifferential", "BaseRepresentation",
"CartesianRepresentation", "SphericalRepresentation",
"UnitSphericalRepresentation", "RadialRepresentation",
"PhysicsSphericalRepresentation", "CylindricalRepresentation",
"BaseDifferential", "CartesianDifferential",
"BaseSphericalDifferential", "BaseSphericalCosLatDifferential",
"SphericalDifferential", "SphericalCosLatDifferential",
"UnitSphericalDifferential", "UnitSphericalCosLatDifferential",
"RadialDifferential", "CylindricalDifferential",
"PhysicsSphericalDifferential"]
# Module-level dict mapping representation string alias names to classes.
# This is populated by the metaclass init so all representation and differential
# classes get registered automatically.
REPRESENTATION_CLASSES = {}
DIFFERENTIAL_CLASSES = {}
# recommended_units deprecation message; if the attribute is removed later,
# also remove its use in BaseFrame._get_representation_info.
_recommended_units_deprecation = """
The 'recommended_units' attribute is deprecated since 3.0 and may be removed
in a future version. Its main use, of representing angles in degrees in frames,
is now done automatically in frames. Further overrides are discouraged but can
be done using a frame's ``frame_specific_representation_info``.
"""
def _array2string(values, prefix=''):
# Work around version differences for array2string.
kwargs = {'separator': ', ', 'prefix': prefix}
kwargs['formatter'] = {}
if NUMPY_LT_1_14: # in 1.14, style is no longer used (and deprecated)
kwargs['style'] = repr
return np.array2string(values, **kwargs)
def _combine_xyz(x, y, z, xyz_axis=0):
"""
Combine components ``x``, ``y``, ``z`` into a single Quantity array.
Parameters
----------
x, y, z : `~astropy.units.Quantity`
The individual x, y, and z components.
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``, i.e., using the default of ``0``,
the shape will be ``(3,) + x.shape``.
"""
# Get x, y, z to the same units (this is very fast for identical units)
# since np.stack cannot deal with quantity.
cls = x.__class__
unit = x.unit
x = x.value
y = y.to_value(unit)
z = z.to_value(unit)
xyz = np.stack([x, y, z], axis=xyz_axis)
return cls(xyz, unit=unit, copy=False)
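# Editor's sketch: for 1-D components of length N, the default xyz_axis=0
# gives a (3, N) Quantity:
#     >>> x = y = z = [1., 2.] * u.kpc
#     >>> _combine_xyz(x, y, z).shape
#     (3, 2)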
class BaseRepresentationOrDifferential(ShapedLikeNDArray):
"""3D coordinate representations and differentials.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D point or differential. The names are the
keys and the subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
# Ensure multiplication/division with ndarray or Quantity doesn't lead to
# object arrays.
__array_priority__ = 50000
def __init__(self, *args, **kwargs):
# make argument a list, so we can pop them off.
args = list(args)
components = self.components
attrs = []
for component in components:
try:
attrs.append(args.pop(0) if args else kwargs.pop(component))
except KeyError:
raise TypeError('__init__() missing 1 required positional '
'argument: {0!r}'.format(component))
copy = args.pop(0) if args else kwargs.pop('copy', True)
if args:
raise TypeError('unexpected arguments: {0}'.format(args))
if kwargs:
for component in components:
if component in kwargs:
raise TypeError("__init__() got multiple values for "
"argument {0!r}".format(component))
raise TypeError('unexpected keyword arguments: {0}'.format(kwargs))
# Pass attributes through the required initializing classes.
attrs = [self.attr_classes[component](attr, copy=copy)
for component, attr in zip(components, attrs)]
try:
attrs = np.broadcast_arrays(*attrs, subok=True)
except ValueError:
if len(components) <= 2:
c_str = ' and '.join(components)
else:
c_str = ', '.join(components[:2]) + ', and ' + components[2]
raise ValueError("Input parameters {0} cannot be broadcast"
.format(c_str))
# Set private attributes for the attributes. (If not defined explicitly
# on the class, the metaclass will define properties to access these.)
for component, attr in zip(components, attrs):
setattr(self, '_' + component, attr)
@classmethod
def get_name(cls):
"""Name of the representation or differential.
In lower case, with any trailing 'representation' or 'differential'
removed. (E.g., 'spherical' for
`~astropy.coordinates.SphericalRepresentation` or
`~astropy.coordinates.SphericalDifferential`.)
"""
name = cls.__name__.lower()
if name.endswith('representation'):
name = name[:-14]
elif name.endswith('differential'):
name = name[:-12]
return name
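    # Editor's sketch of the naming rule (these classes are defined below);
    # the resulting name is also the key used in the registries above:
    #     >>> CartesianRepresentation.get_name()
    #     'cartesian'
    #     >>> CartesianDifferential.get_name()   # 'differential' stripped too
    #     'cartesian'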
# The two methods that any subclass has to define.
@classmethod
@abc.abstractmethod
def from_cartesian(cls, other):
"""Create a representation of this class from a supplied Cartesian one.
Parameters
----------
other : `CartesianRepresentation`
The representation to turn into this class
Returns
-------
representation : object of this class
A new representation of this class's type.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@abc.abstractmethod
def to_cartesian(self):
"""Convert the representation to its Cartesian form.
Note that any differentials get dropped.
Returns
-------
cartrepr : `CartesianRepresentation`
The representation in Cartesian form.
"""
# Note: the above docstring gets overridden for differentials.
raise NotImplementedError()
@property
def components(self):
"""A tuple with the in-order names of the coordinate components."""
return tuple(self.attr_classes)
def _apply(self, method, *args, **kwargs):
"""Create a new representation or differential with ``method`` applied
to the component data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be
applied to the underlying arrays (e.g., ``x``, ``y``, and ``z`` for
`~astropy.coordinates.CartesianRepresentation`), with the results used
to create a new instance.
Internally, it is also used to apply functions to the components
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
if callable(method):
apply_method = lambda array: method(array, *args, **kwargs)
else:
apply_method = operator.methodcaller(method, *args, **kwargs)
new = super().__new__(self.__class__)
for component in self.components:
setattr(new, '_' + component,
apply_method(getattr(self, component)))
return new
@property
def shape(self):
"""The shape of the instance and underlying arrays.
Like `~numpy.ndarray.shape`, can be set to a new shape by assigning a
tuple. Note that if different instances share some but not all
underlying data, setting the shape of one instance can make the other
instance unusable. Hence, it is strongly recommended to get new,
reshaped instances with the ``reshape`` method.
Raises
------
AttributeError
If the shape of any of the components cannot be changed without the
arrays being copied. For these cases, use the ``reshape`` method
(which copies any arrays that cannot be reshaped in-place).
"""
return getattr(self, self.components[0]).shape
@shape.setter
def shape(self, shape):
# We keep track of arrays that were already reshaped since we may have
# to return those to their original shape if a later shape-setting
# fails. (This can happen since coordinates are broadcast together.)
reshaped = []
oldshape = self.shape
for component in self.components:
val = getattr(self, component)
if val.size > 1:
try:
val.shape = shape
except AttributeError:
for val2 in reshaped:
val2.shape = oldshape
raise
else:
reshaped.append(val)
# Required to support multiplication and division, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _scale_operation(self, op, *args):
raise NotImplementedError()
def __mul__(self, other):
return self._scale_operation(operator.mul, other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self._scale_operation(operator.truediv, other)
def __div__(self, other): # pragma: py2
return self._scale_operation(operator.truediv, other)
def __neg__(self):
return self._scale_operation(operator.neg)
# Follow numpy convention and make an independent copy.
def __pos__(self):
return self.copy()
# Required to support addition and subtraction, and defined by the base
# representation and differential classes.
@abc.abstractmethod
def _combine_operation(self, op, other, reverse=False):
raise NotImplementedError()
def __add__(self, other):
return self._combine_operation(operator.add, other)
def __radd__(self, other):
return self._combine_operation(operator.add, other, reverse=True)
def __sub__(self, other):
return self._combine_operation(operator.sub, other)
def __rsub__(self, other):
return self._combine_operation(operator.sub, other, reverse=True)
# The following are used for repr and str
@property
def _values(self):
"""Turn the coordinates into a record array with the coordinate values.
The record array fields will have the component names.
"""
coo_items = [(c, getattr(self, c)) for c in self.components]
result = np.empty(self.shape, [(c, coo.dtype) for c, coo in coo_items])
for c, coo in coo_items:
result[c] = coo.value
return result
@property
def _units(self):
"""Return a dictionary with the units of the coordinate components."""
return dict([(component, getattr(self, component).unit)
for component in self.components])
@property
def _unitstr(self):
units_set = set(self._units.values())
if len(units_set) == 1:
unitstr = units_set.pop().to_string()
else:
unitstr = '({0})'.format(
', '.join([self._units[component].to_string()
for component in self.components]))
return unitstr
def __str__(self):
return '{0} {1:s}'.format(_array2string(self._values), self._unitstr)
def __repr__(self):
prefixstr = ' '
arrstr = _array2string(self._values, prefix=prefixstr)
diffstr = ''
if getattr(self, 'differentials', None):
diffstr = '\n (has differentials w.r.t.: {0})'.format(
', '.join([repr(key) for key in self.differentials.keys()]))
unitstr = ('in ' + self._unitstr) if self._unitstr else '[dimensionless]'
return '<{0} ({1}) {2:s}\n{3}{4}{5}>'.format(
self.__class__.__name__, ', '.join(self.components),
unitstr, prefixstr, arrstr, diffstr)
def _make_getter(component):
"""Make an attribute getter for use in a property.
Parameters
----------
component : str
The name of the component that should be accessed. This assumes the
actual value is stored in an attribute of that name prefixed by '_'.
"""
# This has to be done in a function to ensure the reference to component
# is not lost/redirected.
component = '_' + component
def get_component(self):
return getattr(self, component)
return get_component
# Need to also subclass ABCMeta rather than type, so that this meta class can
# be combined with a ShapedLikeNDArray subclass (which is an ABC). Without it:
# "TypeError: metaclass conflict: the metaclass of a derived class must be a
# (non-strict) subclass of the metaclasses of all its bases"
class MetaBaseRepresentation(InheritDocstrings, abc.ABCMeta):
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
# Register representation name (except for BaseRepresentation)
if cls.__name__ == 'BaseRepresentation':
return
if 'attr_classes' not in dct:
raise NotImplementedError('Representations must have an '
'"attr_classes" class attribute.')
if 'recommended_units' in dct:
warnings.warn(_recommended_units_deprecation,
AstropyDeprecationWarning)
# Ensure we don't override the property that warns about the
# deprecation, but that the value remains the same.
dct.setdefault('_recommended_units', dct.pop('recommended_units'))
repr_name = cls.get_name()
if repr_name in REPRESENTATION_CLASSES:
raise ValueError("Representation class {0} already defined"
.format(repr_name))
REPRESENTATION_CLASSES[repr_name] = cls
# define getters for any component that does not yet have one.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(cls, component,
property(_make_getter(component),
doc=("The '{0}' component of the points(s)."
.format(component))))
class BaseRepresentation(BaseRepresentationOrDifferential,
metaclass=MetaBaseRepresentation):
"""Base for representing a point in a 3D coordinate system.
Parameters
----------
comp1, comp2, comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D points. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
subclass instance, or a dictionary with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
Notes
-----
All representation classes should subclass this base representation class,
and define an ``attr_classes`` attribute, an `~collections.OrderedDict`
which maps component names to the class that creates them. They must also
define a ``to_cartesian`` method and a ``from_cartesian`` class method. By
default, transformations are done via the cartesian system, but classes
that want to define a smarter transformation path can overload the
``represent_as`` method. If one wants to use an associated differential
class, one should also define ``unit_vectors`` and ``scale_factors``
methods (see those methods for details).
"""
recommended_units = deprecated_attribute('recommended_units', since='3.0')
_recommended_units = {}
def __init__(self, *args, differentials=None, **kwargs):
# Handle any differentials passed in.
super().__init__(*args, **kwargs)
self._differentials = self._validate_differentials(differentials)
def _validate_differentials(self, differentials):
"""
Validate that the provided differentials are appropriate for this
representation and recast/reshape as necessary and then return.
Note that this does *not* set the differentials on
``self._differentials``, but rather leaves that for the caller.
"""
# Now handle the actual validation of any specified differential classes
if differentials is None:
differentials = dict()
elif isinstance(differentials, BaseDifferential):
# We can't handle auto-determining the key for this combo
if (isinstance(differentials, RadialDifferential) and
isinstance(self, UnitSphericalRepresentation)):
raise ValueError("To attach a RadialDifferential to a "
"UnitSphericalRepresentation, you must supply "
"a dictionary with an appropriate key.")
key = differentials._get_deriv_key(self)
differentials = {key: differentials}
for key in differentials:
try:
diff = differentials[key]
except TypeError:
raise TypeError("'differentials' argument must be a "
"dictionary-like object")
diff._check_base(self)
if (isinstance(diff, RadialDifferential) and
isinstance(self, UnitSphericalRepresentation)):
# We trust the passing of a key for a RadialDifferential
# attached to a UnitSphericalRepresentation because it will not
# have a paired component name (UnitSphericalRepresentation has
# no .distance) to automatically determine the expected key
pass
else:
expected_key = diff._get_deriv_key(self)
if key != expected_key:
raise ValueError("For differential object '{0}', expected "
"unit key = '{1}' but received key = '{2}'"
.format(repr(diff), expected_key, key))
# For now, we are very rigid: differentials must have the same shape
# as the representation. This makes it easier to handle __getitem__
# and any other shape-changing operations on representations that
# have associated differentials
if diff.shape != self.shape:
# TODO: message of IncompatibleShapeError is not customizable,
# so use a valueerror instead?
raise ValueError("Shape of differentials must be the same "
"as the shape of the representation ({0} vs "
"{1})".format(diff.shape, self.shape))
return differentials
def _raise_if_has_differentials(self, op_name):
"""
Used to raise a consistent exception for any operation that is not
supported when a representation has differentials attached.
"""
if self.differentials:
raise TypeError("Operation '{0}' is not supported when "
"differentials are attached to a {1}."
.format(op_name, self.__class__.__name__))
@property
def _compatible_differentials(self):
return [DIFFERENTIAL_CLASSES[self.get_name()]]
@property
def differentials(self):
"""A dictionary of differential class instances.
The keys of this dictionary must be a string representation of the SI
unit with which the differential (derivative) is taken. For example, for
a velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
"""
return self._differentials
# We do not make unit_vectors and scale_factors abstract methods, since
# they are only necessary if one also defines an associated Differential.
# Also, doing so would break pre-differential representation subclasses.
def unit_vectors(self):
r"""Cartesian unit vectors in the direction of each component.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
The keys are the component names.
"""
raise NotImplementedError("{} has not implemented unit vectors"
.format(type(self)))
def scale_factors(self):
r"""Scale factors for each component's direction.
Given unit vectors :math:`\hat{e}_c` and scale factors :math:`f_c`,
a change in one component of :math:`\delta c` corresponds to a change
in representation of :math:`\delta c \times f_c \times \hat{e}_c`.
Returns
-------
scale_factors : dict of `~astropy.units.Quantity`
The keys are the component names.
"""
raise NotImplementedError("{} has not implemented scale factors."
.format(type(self)))
def _re_represent_differentials(self, new_rep, differential_class):
"""Re-represent the differentials to the specified classes.
This returns a new dictionary with the same keys but with the
attached differentials converted to the new differential classes.
"""
if differential_class is None:
return dict()
if not self.differentials and differential_class:
raise ValueError("No differentials associated with this "
"representation!")
elif (len(self.differentials) == 1 and
inspect.isclass(differential_class) and
issubclass(differential_class, BaseDifferential)):
# TODO: is there a better way to do this?
differential_class = {
list(self.differentials.keys())[0]: differential_class
}
        elif set(differential_class.keys()) != set(self.differentials.keys()):
            raise ValueError("Desired differential classes must be passed in "
                             "as a dictionary with keys equal to a string "
                             "representation of the unit of the derivative "
                             "for each differential stored with this "
                             "representation object ({0})"
                             .format(self.differentials))
new_diffs = dict()
for k in self.differentials:
diff = self.differentials[k]
try:
new_diffs[k] = diff.represent_as(differential_class[k],
base=self)
except Exception:
if (differential_class[k] not in
new_rep._compatible_differentials):
raise TypeError("Desired differential class {0} is not "
"compatible with the desired "
"representation class {1}"
.format(differential_class[k],
new_rep.__class__))
else:
raise
return new_diffs
def represent_as(self, other_class, differential_class=None):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
differential_class : dict of `~astropy.coordinates.BaseDifferential`, optional
Classes in which the differentials should be represented.
Can be a single class if only a single differential is attached,
otherwise it should be a `dict` keyed by the same keys as the
differentials.
"""
if other_class is self.__class__ and not differential_class:
return self.without_differentials()
else:
if isinstance(other_class, str):
raise ValueError("Input to a representation's represent_as "
"must be a class, not a string. For "
"strings, use frame objects")
# The default is to convert via cartesian coordinates
new_rep = other_class.from_cartesian(self.to_cartesian())
new_rep._differentials = self._re_represent_differentials(
new_rep, differential_class)
return new_rep
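    # Editor's usage sketch (classes defined later in this module; output
    # abridged and dependent on numpy's print formatting):
    #     >>> cart = CartesianRepresentation(1., 0., 0., unit=u.kpc)
    #     >>> cart.represent_as(SphericalRepresentation)
    #     <SphericalRepresentation (lon, lat, distance) in (rad, rad, kpc) ...>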
def with_differentials(self, differentials):
"""
Create a new representation with the same positions as this
representation, but with these new differentials.
Differential keys that already exist in this object's differential dict
are overwritten.
Parameters
----------
differentials : Sequence of `~astropy.coordinates.BaseDifferential`
The differentials for the new representation to have.
Returns
-------
newrepr
A copy of this representation, but with the ``differentials`` as
its differentials.
"""
if not differentials:
return self
args = [getattr(self, component) for component in self.components]
# We shallow copy the differentials dictionary so we don't update the
# current object's dictionary when adding new keys
new_rep = self.__class__(*args, differentials=self.differentials.copy(),
copy=False)
new_rep._differentials.update(
new_rep._validate_differentials(differentials))
return new_rep
def without_differentials(self):
"""Return a copy of the representation without attached differentials.
Returns
-------
newrepr
A shallow copy of this representation, without any differentials.
If no differentials were present, no copy is made.
"""
if not self._differentials:
return self
args = [getattr(self, component) for component in self.components]
return self.__class__(*args, copy=False)
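    # Editor's sketch: attach a time ('s') differential, then strip it again:
    #     >>> pos = CartesianRepresentation([1., 2., 3.] * u.kpc)
    #     >>> vel = CartesianDifferential([4., 5., 6.] * u.km / u.s)
    #     >>> list(pos.with_differentials(vel).differentials)
    #     ['s']
    #     >>> pos.with_differentials(vel).without_differentials().differentials
    #     {}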
@classmethod
def from_representation(cls, representation):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
            The representation that should be converted to this class.
"""
return representation.represent_as(cls)
def _apply(self, method, *args, **kwargs):
"""Create a new representation with ``method`` applied to the component
data.
This is not a simple inherit from ``BaseRepresentationOrDifferential``
because we need to call ``._apply()`` on any associated differential
classes.
See docstring for `BaseRepresentationOrDifferential._apply`.
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
rep = super()._apply(method, *args, **kwargs)
rep._differentials = dict(
[(k, diff._apply(method, *args, **kwargs))
for k, diff in self._differentials.items()])
return rep
def _scale_operation(self, op, *args):
"""Scale all non-angular components, leaving angular ones unchanged.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
self._raise_if_has_differentials(op.__name__)
results = []
for component, cls in self.attr_classes.items():
value = getattr(self, component)
if issubclass(cls, Angle):
results.append(value)
else:
results.append(op(value, *args))
# try/except catches anything that cannot initialize the class, such
# as operations that returned NotImplemented or a representation
# instead of a quantity (as would happen for, e.g., rep * rep).
try:
return self.__class__(*results)
except Exception:
return NotImplemented
def _combine_operation(self, op, other, reverse=False):
"""Combine two representation.
By default, operate on the cartesian representations of both.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self.from_cartesian(result)
# We need to override this setter to support differentials
@BaseRepresentationOrDifferential.shape.setter
def shape(self, shape):
orig_shape = self.shape
# See: https://stackoverflow.com/questions/3336767/ for an example
BaseRepresentationOrDifferential.shape.fset(self, shape)
# also try to perform shape-setting on any associated differentials
try:
for k in self.differentials:
self.differentials[k].shape = shape
except Exception:
BaseRepresentationOrDifferential.shape.fset(self, orig_shape)
for k in self.differentials:
self.differentials[k].shape = orig_shape
raise
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Note that any associated differentials will be dropped during this
operation.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.sqrt(functools.reduce(
operator.add, (getattr(self, component)**2
for component, cls in self.attr_classes.items()
if not issubclass(cls, Angle))))
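    # Editor's sketch: angular components are excluded, so for a Cartesian
    # point this is the ordinary Euclidean length:
    #     >>> CartesianRepresentation(3., 4., 0., unit=u.m).norm()   # -> 5 m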
def mean(self, *args, **kwargs):
"""Vector mean.
Averaging is done by converting the representation to cartesian, and
taking the mean of the x, y, and z components. The result is converted
back to the same representation as the input.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
mean : representation
Vector mean, in the same representation as that of the input.
"""
self._raise_if_has_differentials('mean')
return self.from_cartesian(self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
Adding is done by converting the representation to cartesian, and
summing the x, y, and z components. The result is converted back to the
same representation as the input.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
Returns
-------
sum : representation
Vector sum, in the same representation as that of the input.
"""
self._raise_if_has_differentials('sum')
return self.from_cartesian(self.to_cartesian().sum(*args, **kwargs))
def dot(self, other):
"""Dot product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : `~astropy.coordinates.BaseRepresentation`
The representation to take the dot product with.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of the
cartesian representations of ``self`` and ``other``.
"""
return self.to_cartesian().dot(other)
def cross(self, other):
"""Vector cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to the type of representation of ``self``.
Parameters
----------
other : representation
The representation to take the cross product with.
Returns
-------
cross_product : representation
With vectors perpendicular to both ``self`` and ``other``, in the
same type of representation as ``self``.
"""
self._raise_if_has_differentials('cross')
return self.from_cartesian(self.to_cartesian().cross(other))
class CartesianRepresentation(BaseRepresentation):
"""
Representation of points in 3D cartesian coordinates.
Parameters
----------
x, y, z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the point(s). If ``x``, ``y``, and ``z``
have different shapes, they should be broadcastable. If not quantity,
``unit`` should be set. If only ``x`` is given, it is assumed that it
contains an array with the 3 coordinates stored along ``xyz_axis``.
unit : `~astropy.units.Unit` or str
        If given, the coordinates will be converted to this unit (or taken to
        be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided rather than distinct ``x``, ``y``, and ``z`` (default: 0).
differentials : dict, `CartesianDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
`CartesianDifferential` instance, or a dictionary of
`CartesianDifferential` s with keys set to a string representation of
the SI unit with which the differential (derivative) is taken. For
example, for a velocity differential on a positional representation, the
key would be ``'s'`` for seconds, indicating that the derivative is a
time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('x', u.Quantity),
('y', u.Quantity),
('z', u.Quantity)])
_xyz = None
def __init__(self, x, y=None, z=None, unit=None, xyz_axis=None,
differentials=None, copy=True):
if y is None and z is None:
if isinstance(x, np.ndarray) and x.dtype.kind not in 'OV':
# Short-cut for 3-D array input.
x = u.Quantity(x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._xyz = x
if xyz_axis:
x = np.moveaxis(x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._x, self._y, self._z = x
self._differentials = self._validate_differentials(differentials)
return
else:
x, y, z = x
if xyz_axis is not None:
raise ValueError("xyz_axis should only be set if x, y, and z are "
"in a single array passed in through x, "
"i.e., y and z should not be not given.")
if y is None or z is None:
raise ValueError("x, y, and z are required to instantiate {0}"
.format(self.__class__.__name__))
if unit is not None:
x = u.Quantity(x, unit, copy=copy, subok=True)
y = u.Quantity(y, unit, copy=copy, subok=True)
z = u.Quantity(z, unit, copy=copy, subok=True)
copy = False
super().__init__(x, y, z, copy=copy, differentials=differentials)
if not (self._x.unit.is_equivalent(self._y.unit) and
self._x.unit.is_equivalent(self._z.unit)):
raise u.UnitsError("x, y, and z should have matching physical types")
def unit_vectors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
o = np.broadcast_to(0.*u.one, self.shape, subok=True)
return OrderedDict(
(('x', CartesianRepresentation(l, o, o, copy=False)),
('y', CartesianRepresentation(o, l, o, copy=False)),
('z', CartesianRepresentation(o, o, l, copy=False))))
def scale_factors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('x', l), ('y', l), ('z', l)))
def get_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._xyz is not None:
if self._xyz_axis == xyz_axis:
return self._xyz
else:
return np.moveaxis(self._xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return _combine_xyz(self._x, self._y, self._z, xyz_axis=xyz_axis)
xyz = property(get_xyz)
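    # Editor's sketch: the components are combined on demand, with the
    # requested axis placement:
    #     >>> rep = CartesianRepresentation([1., 2.] * u.pc, [3., 4.] * u.pc,
    #     ...                               [5., 6.] * u.pc)
    #     >>> rep.xyz.shape                    # -> (3, 2)
    #     >>> rep.get_xyz(xyz_axis=1).shape    # -> (2, 3)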
@classmethod
def from_cartesian(cls, other):
return other
def to_cartesian(self):
return self
def transform(self, matrix):
"""
Transform the cartesian coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : `~numpy.ndarray`
A 3x3 transformation matrix, such as a rotation matrix.
Examples
--------
We can start off by creating a cartesian representation object:
>>> from astropy import units as u
>>> from astropy.coordinates import CartesianRepresentation
>>> rep = CartesianRepresentation([1, 2] * u.pc,
... [2, 3] * u.pc,
... [3, 4] * u.pc)
We now create a rotation matrix around the z axis:
>>> from astropy.coordinates.matrix_utilities import rotation_matrix
>>> rotation = rotation_matrix(30 * u.deg, axis='z')
Finally, we can apply this transformation:
>>> rep_new = rep.transform(rotation)
>>> rep_new.xyz # doctest: +FLOAT_CMP
<Quantity [[ 1.8660254 , 3.23205081],
[ 1.23205081, 1.59807621],
[ 3. , 4. ]] pc>
"""
# Avoid doing gratuitous np.array for things that look like arrays.
try:
matrix_shape = matrix.shape
except AttributeError:
matrix = np.array(matrix)
matrix_shape = matrix.shape
if matrix_shape[-2:] != (3, 3):
raise ValueError("tried to do matrix multiplication with an array "
"that doesn't end in 3x3")
# TODO: since this is likely to be a widely used function in coordinate
# transforms, it should be optimized (for example in Cython).
# Get xyz once since it's an expensive operation
oldxyz = self.xyz
# Note that neither dot nor einsum handles Quantity properly, so we use
# the arrays and put the unit back in the end.
if self.isscalar and not matrix_shape[:-2]:
# a fast path for scalar coordinates.
newxyz = matrix.dot(oldxyz.value)
else:
# Matrix multiply all pmat items and coordinates, broadcasting the
# remaining dimensions.
if np.__version__ == '1.14.0':
# there is a bug in numpy v1.14.0 (fixed in 1.14.1) that causes
# this einsum call to break with the default of optimize=True
# see https://github.com/astropy/astropy/issues/7051
newxyz = np.einsum('...ij,j...->i...', matrix, oldxyz.value,
optimize=False)
else:
newxyz = np.einsum('...ij,j...->i...', matrix, oldxyz.value)
newxyz = u.Quantity(newxyz, oldxyz.unit, copy=False)
# Handle differentials attached to this representation
if self.differentials:
# TODO: speed this up going via d.d_xyz.
new_diffs = dict(
(k, d.from_cartesian(d.to_cartesian().transform(matrix)))
for k, d in self.differentials.items())
else:
new_diffs = None
return self.__class__(*newxyz, copy=False, differentials=new_diffs)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
try:
other_c = other.to_cartesian()
except Exception:
return NotImplemented
first, second = ((self, other_c) if not reverse else
(other_c, self))
return self.__class__(*(op(getattr(first, component),
getattr(second, component))
for component in first.components))
def mean(self, *args, **kwargs):
"""Vector mean.
Returns a new CartesianRepresentation instance with the means of the
x, y, and z components.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('mean')
return self._apply('mean', *args, **kwargs)
def sum(self, *args, **kwargs):
"""Vector sum.
Returns a new CartesianRepresentation instance with the sums of the
x, y, and z components.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('sum')
return self._apply('sum', *args, **kwargs)
def dot(self, other):
"""Dot product of two representations.
Note that any associated differentials will be dropped during this
operation.
Parameters
----------
other : representation
If not already cartesian, it is converted.
Returns
-------
dot_product : `~astropy.units.Quantity`
The sum of the product of the x, y, and z components of ``self``
and ``other``.
"""
try:
other_c = other.to_cartesian()
except Exception:
raise TypeError("cannot only take dot product with another "
"representation, not a {0} instance."
.format(type(other)))
return functools.reduce(operator.add,
(getattr(self, component) *
getattr(other_c, component)
for component in self.components))
def cross(self, other):
"""Cross product of two representations.
Parameters
----------
other : representation
If not already cartesian, it is converted.
Returns
-------
cross_product : `~astropy.coordinates.CartesianRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials('cross')
try:
other_c = other.to_cartesian()
except Exception:
raise TypeError("cannot only take cross product with another "
"representation, not a {0} instance."
.format(type(other)))
return self.__class__(self.y * other_c.z - self.z * other_c.y,
self.z * other_c.x - self.x * other_c.z,
self.x * other_c.y - self.y * other_c.x)
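    # Editor's sketch of the formulas above, for orthogonal unit vectors:
    #     >>> a = CartesianRepresentation(1., 0., 0., unit=u.one)
    #     >>> b = CartesianRepresentation(0., 1., 0., unit=u.one)
    #     >>> a.dot(b)          # -> 0 (dimensionless)
    #     >>> a.cross(b).xyz    # -> [0, 0, 1], i.e. x-hat cross y-hat = z-hat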
class UnitSphericalRepresentation(BaseRepresentation):
"""
Representation of points on a unit sphere.
Parameters
----------
lon, lat : `~astropy.units.Quantity` or str
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('lon', Longitude),
('lat', Latitude)])
@classproperty
def _dimensional_representation(cls):
return SphericalRepresentation
def __init__(self, lon, lat, differentials=None, copy=True):
super().__init__(lon, lat, differentials=differentials, copy=copy)
@property
def _compatible_differentials(self):
return [UnitSphericalDifferential, UnitSphericalCosLatDifferential,
SphericalDifferential, SphericalCosLatDifferential,
RadialDifferential]
# Could let the metaclass define these automatically, but good to have
# a bit clearer docstrings.
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return OrderedDict(
(('lon', CartesianRepresentation(-sinlon, coslon, 0., copy=False)),
('lat', CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon,
coslat, copy=False))))
def scale_factors(self, omit_coslat=False):
sf_lat = np.broadcast_to(1./u.radian, self.shape, subok=True)
sf_lon = sf_lat if omit_coslat else np.cos(self.lat) / u.radian
return OrderedDict((('lon', sf_lon),
('lat', sf_lat)))
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
x = np.cos(self.lat) * np.cos(self.lon)
y = np.cos(self.lat) * np.sin(self.lon)
z = np.sin(self.lat)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
lon = np.arctan2(cart.y, cart.x)
lat = np.arctan2(cart.z, s)
return cls(lon=lon, lat=lat, copy=False)
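    # Editor's round-trip sketch: ``from_cartesian`` projects onto the unit
    # sphere, so any radial scale in the input is discarded:
    #     >>> usph = UnitSphericalRepresentation(30. * u.deg, 0. * u.deg)
    #     >>> cart2 = usph.to_cartesian() * 2.   # twice the unit radius
    #     >>> UnitSphericalRepresentation.from_cartesian(cart2).lon.deg
    #     # -> ~30.0; the factor of 2 is lost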
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO: this could be optimized to shortcut even if a differential_class
# is passed in, using the ._re_represent_differentials() method
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(phi=self.lon, theta=90 * u.deg - self.lat, r=1.0,
copy=False)
elif issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, distance=1.0,
copy=False)
return super().represent_as(other_class, differential_class)
def __mul__(self, other):
self._raise_if_has_differentials('multiplication')
return self._dimensional_representation(lon=self.lon, lat=self.lat,
distance=1. * other)
def __truediv__(self, other):
self._raise_if_has_differentials('division')
return self._dimensional_representation(lon=self.lon, lat=self.lat,
distance=1. / other)
def __neg__(self):
self._raise_if_has_differentials('negation')
return self.__class__(self.lon + 180. * u.deg, -self.lat, copy=False)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units, which is
always unity for vectors on the unit sphere.
Returns
-------
norm : `~astropy.units.Quantity`
Dimensionless ones, with the same shape as the representation.
"""
return u.Quantity(np.ones(self.shape), u.dimensionless_unscaled,
copy=False)
def _combine_operation(self, op, other, reverse=False):
self._raise_if_has_differentials(op.__name__)
result = self.to_cartesian()._combine_operation(op, other, reverse)
if result is NotImplemented:
return NotImplemented
else:
return self._dimensional_representation.from_cartesian(result)
def mean(self, *args, **kwargs):
"""Vector mean.
The representation is converted to cartesian, the means of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.mean` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('mean')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().mean(*args, **kwargs))
def sum(self, *args, **kwargs):
"""Vector sum.
The representation is converted to cartesian, the sums of the x, y,
and z components are calculated, and the result is converted to a
`~astropy.coordinates.SphericalRepresentation`.
Refer to `~numpy.sum` for full documentation of the arguments, noting
that ``axis`` is the entry in the ``shape`` of the representation, and
that the ``out`` argument cannot be used.
"""
self._raise_if_has_differentials('sum')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().sum(*args, **kwargs))
def cross(self, other):
"""Cross product of two representations.
The calculation is done by converting both ``self`` and ``other``
to `~astropy.coordinates.CartesianRepresentation`, and converting the
result back to `~astropy.coordinates.SphericalRepresentation`.
Parameters
----------
other : representation
The representation to take the cross product with.
Returns
-------
cross_product : `~astropy.coordinates.SphericalRepresentation`
With vectors perpendicular to both ``self`` and ``other``.
"""
self._raise_if_has_differentials('cross')
return self._dimensional_representation.from_cartesian(
self.to_cartesian().cross(other))
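# Illustrative sketch (comments only, not executed): points on the unit
# sphere always have unit norm, and scaling by a length promotes them to the
# dimensional representation (a SphericalRepresentation).
#
#     >>> usph = UnitSphericalRepresentation(30. * u.deg, 10. * u.deg)
#     >>> usph.norm()             # -> dimensionless 1.
#     >>> usph * (5. * u.kpc)     # -> SphericalRepresentation, distance 5 kpc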
class RadialRepresentation(BaseRepresentation):
"""
Representation of the distance of points from the origin.
Note that this is mostly intended as an internal helper representation.
    It can do little else but be used as a scale in multiplication.
Parameters
----------
distance : `~astropy.units.Quantity`
The distance of the point(s) from the origin.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('distance', u.Quantity)])
def __init__(self, distance, differentials=None, copy=True):
super().__init__(distance, copy=copy, differentials=differentials)
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
"""Cartesian unit vectors are undefined for radial representation."""
raise NotImplementedError('Cartesian unit vectors are undefined for '
'{0} instances'.format(self.__class__))
def scale_factors(self):
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('distance', l),))
def to_cartesian(self):
"""Cannot convert radial representation to cartesian."""
raise NotImplementedError('cannot convert {0} instance to cartesian.'
.format(self.__class__))
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to radial coordinate.
"""
return cls(distance=cart.norm(), copy=False)
def _scale_operation(self, op, *args):
self._raise_if_has_differentials(op.__name__)
return op(self.distance, *args)
def norm(self):
"""Vector norm.
Just the distance itself.
Returns
-------
norm : `~astropy.units.Quantity`
            The distance of the point(s) from the origin, with the same shape
            as the representation.
"""
return self.distance
def _combine_operation(self, op, other, reverse=False):
return NotImplemented
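# Illustrative sketch (comments only, not executed): a RadialRepresentation
# just carries distances, e.g. the norm of a cartesian point.
#
#     >>> cart = CartesianRepresentation(3., 4., 0., unit=u.pc)
#     >>> RadialRepresentation.from_cartesian(cart).distance   # -> 5 pc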
class SphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates.
Parameters
----------
lon, lat : `~astropy.units.Quantity`
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
distance : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
        dictionary of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('lon', Longitude),
('lat', Latitude),
('distance', u.Quantity)])
_unit_representation = UnitSphericalRepresentation
def __init__(self, lon, lat, distance, differentials=None, copy=True):
super().__init__(lon, lat, distance, copy=copy,
differentials=differentials)
if self._distance.unit.physical_type == 'length':
self._distance = self._distance.view(Distance)
@property
def _compatible_differentials(self):
return [UnitSphericalDifferential, UnitSphericalCosLatDifferential,
SphericalDifferential, SphericalCosLatDifferential,
RadialDifferential]
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return OrderedDict(
(('lon', CartesianRepresentation(-sinlon, coslon, 0., copy=False)),
('lat', CartesianRepresentation(-sinlat*coslon, -sinlat*sinlon,
coslat, copy=False)),
('distance', CartesianRepresentation(coslat*coslon, coslat*sinlon,
sinlat, copy=False))))
def scale_factors(self, omit_coslat=False):
sf_lat = self.distance / u.radian
sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat)
sf_distance = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('lon', sf_lon),
('lat', sf_lat),
('distance', sf_distance)))
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO: this could be optimized to shortcut even if a differential_class
# is passed in, using the ._re_represent_differentials() method
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, PhysicsSphericalRepresentation):
return other_class(phi=self.lon, theta=90 * u.deg - self.lat,
r=self.distance, copy=False)
elif issubclass(other_class, UnitSphericalRepresentation):
return other_class(lon=self.lon, lat=self.lat, copy=False)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.distance, Distance):
d = self.distance.view(u.Quantity)
else:
d = self.distance
x = d * np.cos(self.lat) * np.cos(self.lon)
y = d * np.cos(self.lat) * np.sin(self.lon)
z = d * np.sin(self.lat)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
lon = np.arctan2(cart.y, cart.x)
lat = np.arctan2(cart.z, s)
return cls(lon=lon, lat=lat, distance=r, copy=False)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the distance.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.distance)
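# Illustrative sketch (comments only, not executed): conversion to cartesian
# and back recovers the original spherical components.
#
#     >>> sph = SphericalRepresentation(lon=45. * u.deg, lat=0. * u.deg,
#     ...                               distance=2. * u.kpc)
#     >>> SphericalRepresentation.from_cartesian(sph.to_cartesian()).distance
#     # -> Distance of 2 kpc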
class PhysicsSphericalRepresentation(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates (using the physics
convention of using ``phi`` and ``theta`` for azimuth and inclination
from the pole).
Parameters
----------
phi, theta : `~astropy.units.Quantity` or str
The azimuth and inclination of the point(s), in angular units. The
inclination should be between 0 and 180 degrees, and the azimuth will
be wrapped to an angle between 0 and 360 degrees. These can also be
        instances of `~astropy.coordinates.Angle`. If ``copy`` is False,
        ``phi`` will be changed in place if it is not between 0 and 360
        degrees.
r : `~astropy.units.Quantity`
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `PhysicsSphericalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
        `PhysicsSphericalDifferential` instance, or a dictionary of
differential instances with keys set to a string representation of the
SI unit with which the differential (derivative) is taken. For example,
for a velocity differential on a positional representation, the key
would be ``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('phi', Angle),
('theta', Angle),
('r', u.Quantity)])
def __init__(self, phi, theta, r, differentials=None, copy=True):
super().__init__(phi, theta, r, copy=copy, differentials=differentials)
# Wrap/validate phi/theta
if copy:
self._phi = self._phi.wrap_at(360 * u.deg)
else:
# necessary because the above version of `wrap_at` has to be a copy
self._phi.wrap_at(360 * u.deg, inplace=True)
if np.any(self._theta < 0.*u.deg) or np.any(self._theta > 180.*u.deg):
raise ValueError('Inclination angle(s) must be within '
'0 deg <= angle <= 180 deg, '
'got {0}'.format(theta.to(u.degree)))
if self._r.unit.physical_type == 'length':
self._r = self._r.view(Distance)
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def theta(self):
"""
        The inclination of the point(s), measured from the positive z axis.
"""
return self._theta
@property
def r(self):
"""
The distance from the origin to the point(s).
"""
return self._r
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
sintheta, costheta = np.sin(self.theta), np.cos(self.theta)
return OrderedDict(
(('phi', CartesianRepresentation(-sinphi, cosphi, 0., copy=False)),
('theta', CartesianRepresentation(costheta*cosphi,
costheta*sinphi,
-sintheta, copy=False)),
('r', CartesianRepresentation(sintheta*cosphi, sintheta*sinphi,
costheta, copy=False))))
def scale_factors(self):
r = self.r / u.radian
sintheta = np.sin(self.theta)
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('phi', r * sintheta),
('theta', r),
('r', l)))
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
# TODO: this could be optimized to shortcut even if a differential_class
# is passed in, using the ._re_represent_differentials() method
if inspect.isclass(other_class) and not differential_class:
if issubclass(other_class, SphericalRepresentation):
return other_class(lon=self.phi, lat=90 * u.deg - self.theta,
distance=self.r)
elif issubclass(other_class, UnitSphericalRepresentation):
return other_class(lon=self.phi, lat=90 * u.deg - self.theta)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.r, Distance):
d = self.r.view(u.Quantity)
else:
d = self.r
x = d * np.sin(self.theta) * np.cos(self.phi)
y = d * np.sin(self.theta) * np.sin(self.phi)
z = d * np.cos(self.theta)
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
s = np.hypot(cart.x, cart.y)
r = np.hypot(s, cart.z)
phi = np.arctan2(cart.y, cart.x)
theta = np.arctan2(s, cart.z)
return cls(phi=phi, theta=theta, r=r, copy=False)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the radius.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.r)
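# Illustrative sketch (comments only, not executed): the physics and
# astronomy conventions are related by theta = 90 deg - lat, as used in
# represent_as above.
#
#     >>> psph = PhysicsSphericalRepresentation(phi=30. * u.deg,
#     ...                                       theta=60. * u.deg,
#     ...                                       r=1. * u.kpc)
#     >>> psph.represent_as(SphericalRepresentation).lat   # -> 30 deg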
class CylindricalRepresentation(BaseRepresentation):
"""
Representation of points in 3D cylindrical coordinates.
Parameters
----------
rho : `~astropy.units.Quantity`
The distance from the z axis to the point(s).
phi : `~astropy.units.Quantity` or str
The azimuth of the point(s), in angular units, which will be wrapped
        to an angle between 0 and 360 degrees. This can also be an instance
        of `~astropy.coordinates.Angle`.
    z : `~astropy.units.Quantity`
        The z coordinate(s) of the point(s).
differentials : dict, `CylindricalDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single
        `CylindricalDifferential` instance, or a dictionary of differential
instances with keys set to a string representation of the SI unit with
which the differential (derivative) is taken. For example, for a
velocity differential on a positional representation, the key would be
``'s'`` for seconds, indicating that the derivative is a time
derivative.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
attr_classes = OrderedDict([('rho', u.Quantity),
('phi', Angle),
('z', u.Quantity)])
def __init__(self, rho, phi, z, differentials=None, copy=True):
super().__init__(rho, phi, z, copy=copy, differentials=differentials)
if not self._rho.unit.is_equivalent(self._z.unit):
raise u.UnitsError("rho and z should have matching physical types")
@property
def rho(self):
"""
The distance of the point(s) from the z-axis.
"""
return self._rho
@property
def phi(self):
"""
The azimuth of the point(s).
"""
return self._phi
@property
def z(self):
"""
The height of the point(s).
"""
return self._z
def unit_vectors(self):
sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
l = np.broadcast_to(1., self.shape)
return OrderedDict(
(('rho', CartesianRepresentation(cosphi, sinphi, 0, copy=False)),
('phi', CartesianRepresentation(-sinphi, cosphi, 0, copy=False)),
('z', CartesianRepresentation(0, 0, l, unit=u.one, copy=False))))
def scale_factors(self):
rho = self.rho / u.radian
l = np.broadcast_to(1.*u.one, self.shape, subok=True)
return OrderedDict((('rho', l),
('phi', rho),
('z', l)))
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to cylindrical polar
coordinates.
"""
rho = np.hypot(cart.x, cart.y)
phi = np.arctan2(cart.y, cart.x)
z = cart.z
return cls(rho=rho, phi=phi, z=z, copy=False)
def to_cartesian(self):
"""
Converts cylindrical polar coordinates to 3D rectangular cartesian
coordinates.
"""
x = self.rho * np.cos(self.phi)
y = self.rho * np.sin(self.phi)
z = self.z
return CartesianRepresentation(x=x, y=y, z=z, copy=False)
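# Illustrative sketch (comments only, not executed): cylindrical coordinates
# map to cartesian via x = rho cos(phi), y = rho sin(phi).
#
#     >>> cyl = CylindricalRepresentation(rho=1. * u.m, phi=90. * u.deg,
#     ...                                 z=2. * u.m)
#     >>> cyl.to_cartesian()   # -> x ~ 0 m, y = 1 m, z = 2 m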
class MetaBaseDifferential(InheritDocstrings, abc.ABCMeta):
"""Set default ``attr_classes`` and component getters on a Differential.
For these, the components are those of the base representation prefixed
by 'd_', and the class is `~astropy.units.Quantity`.
"""
def __init__(cls, name, bases, dct):
super().__init__(name, bases, dct)
# Don't do anything for base helper classes.
if cls.__name__ in ('BaseDifferential', 'BaseSphericalDifferential',
'BaseSphericalCosLatDifferential'):
return
if 'base_representation' not in dct:
            raise NotImplementedError('Differential representations must have a'
                                      ' "base_representation" class attribute.')
# If not defined explicitly, create attr_classes.
if not hasattr(cls, 'attr_classes'):
base_attr_classes = cls.base_representation.attr_classes
cls.attr_classes = OrderedDict([('d_' + c, u.Quantity)
for c in base_attr_classes])
if 'recommended_units' in dct:
warnings.warn(_recommended_units_deprecation,
AstropyDeprecationWarning)
# Ensure we don't override the property that warns about the
# deprecation, but that the value remains the same.
dct.setdefault('_recommended_units', dct.pop('recommended_units'))
repr_name = cls.get_name()
if repr_name in DIFFERENTIAL_CLASSES:
raise ValueError("Differential class {0} already defined"
.format(repr_name))
DIFFERENTIAL_CLASSES[repr_name] = cls
# If not defined explicitly, create properties for the components.
for component in cls.attr_classes:
if not hasattr(cls, component):
setattr(cls, component,
property(_make_getter(component),
doc=("Component '{0}' of the Differential."
.format(component))))
class BaseDifferential(BaseRepresentationOrDifferential,
metaclass=MetaBaseDifferential):
r"""A base class representing differentials of representations.
These represent differences or derivatives along each component.
E.g., for physics spherical coordinates, these would be
:math:`\delta r, \delta \theta, \delta \phi`.
Parameters
----------
d_comp1, d_comp2, d_comp3 : `~astropy.units.Quantity` or subclass
The components of the 3D differentials. The names are the keys and the
subclasses the values of the ``attr_classes`` attribute.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
Notes
-----
All differential representation classes should subclass this base class,
    and define a ``base_representation`` attribute with the class of the
regular `~astropy.coordinates.BaseRepresentation` for which differential
coordinates are provided. This will set up a default ``attr_classes``
instance with names equal to the base component names prefixed by ``d_``,
and all classes set to `~astropy.units.Quantity`, plus properties to access
those, and a default ``__init__`` for initialization.
"""
recommended_units = deprecated_attribute('recommended_units', since='3.0')
_recommended_units = {}
@classmethod
def _check_base(cls, base):
if cls not in base._compatible_differentials:
raise TypeError("Differential class {0} is not compatible with the "
"base (representation) class {1}"
.format(cls, base.__class__))
def _get_deriv_key(self, base):
"""Given a base (representation instance), determine the unit of the
derivative by removing the representation unit from the component units
of this differential.
"""
# This check is just a last resort so we don't return a strange unit key
# from accidentally passing in the wrong base.
self._check_base(base)
for name in base.components:
comp = getattr(base, name)
d_comp = getattr(self, 'd_{0}'.format(name), None)
if d_comp is not None:
d_unit = comp.unit / d_comp.unit
# Get the si unit without a scale by going via Quantity;
# `.si` causes the scale to be included in the value.
return str(u.Quantity(1., d_unit).si.unit)
else:
raise RuntimeError("Invalid representation-differential match! Not "
"sure how we got into this state.")
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors()
def to_cartesian(self, base):
"""Convert the differential to 3D rectangular cartesian coordinates.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
This object as a `CartesianDifferential`
"""
base_e, base_sf = self._get_base_vectors(base)
return functools.reduce(
operator.add, (getattr(self, d_c) * base_sf[c] * base_e[c]
for d_c, c in zip(self.components, base.components)))
@classmethod
def from_cartesian(cls, other, base):
"""Convert the differential from 3D rectangular cartesian coordinates to
the desired class.
Parameters
----------
other :
The object to convert into this differential.
base : instance of ``self.base_representation``
The points for which the differentials are to be converted: each of
the components is multiplied by its unit vectors and scale factors.
Returns
-------
A new differential object that is this class' type.
"""
base_e, base_sf = cls._get_base_vectors(base)
return cls(*(other.dot(e / base_sf[component])
for component, e in base_e.items()), copy=False)
def represent_as(self, other_class, base):
"""Convert coordinates to another representation.
If the instance is of the requested class, it is returned unmodified.
By default, conversion is done via cartesian coordinates.
Parameters
----------
other_class : `~astropy.coordinates.BaseRepresentation` subclass
The type of representation to turn the coordinates into.
base : instance of ``self.base_representation``, optional
Base relative to which the differentials are defined. If the other
class is a differential representation, the base will be converted
to its ``base_representation``.
"""
if other_class is self.__class__:
return self
# The default is to convert via cartesian coordinates.
self_cartesian = self.to_cartesian(base)
if issubclass(other_class, BaseDifferential):
base = base.represent_as(other_class.base_representation)
return other_class.from_cartesian(self_cartesian, base)
else:
return other_class.from_cartesian(self_cartesian)
@classmethod
def from_representation(cls, representation, base):
"""Create a new instance of this representation from another one.
Parameters
----------
representation : `~astropy.coordinates.BaseRepresentation` instance
            The representation that should be converted to this class.
base : instance of ``cls.base_representation``
The base relative to which the differentials will be defined. If
the representation is a differential itself, the base will be
converted to its ``base_representation`` to help convert it.
"""
if isinstance(representation, BaseDifferential):
cartesian = representation.to_cartesian(
base.represent_as(representation.base_representation))
else:
cartesian = representation.to_cartesian()
return cls.from_cartesian(cartesian, base)
def _scale_operation(self, op, *args):
"""Scale all components.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.mul`, `~operator.neg`, etc.).
*args
Any arguments required for the operator (typically, what is to
be multiplied with, divided by).
"""
scaled_attrs = [op(getattr(self, c), *args) for c in self.components]
return self.__class__(*scaled_attrs, copy=False)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If ``other`` is a representation,
it will be used as a base for which to evaluate the differential,
and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if isinstance(self, type(other)):
first, second = (self, other) if not reverse else (other, self)
return self.__class__(*[op(getattr(first, c), getattr(second, c))
for c in self.components])
else:
try:
self_cartesian = self.to_cartesian(other)
except TypeError:
return NotImplemented
return other._combine_operation(op, self_cartesian, not reverse)
def __sub__(self, other):
# avoid "differential - representation".
if isinstance(other, BaseRepresentation):
return NotImplemented
return super().__sub__(other)
def norm(self, base=None):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units.
Parameters
----------
base : instance of ``self.base_representation``
Base relative to which the differentials are defined. This is
required to calculate the physical size of the differential for
all but cartesian differentials.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return self.to_cartesian(base).norm()
class CartesianDifferential(BaseDifferential):
"""Differentials in of points in 3D cartesian coordinates.
Parameters
----------
d_x, d_y, d_z : `~astropy.units.Quantity` or array
The x, y, and z coordinates of the differentials. If ``d_x``, ``d_y``,
and ``d_z`` have different shapes, they should be broadcastable. If not
quantities, ``unit`` should be set. If only ``d_x`` is given, it is
assumed that it contains an array with the 3 coordinates stored along
``xyz_axis``.
unit : `~astropy.units.Unit` or str
        If given, the differentials will be converted to this unit (or taken
        to be in this unit if not given).
xyz_axis : int, optional
The axis along which the coordinates are stored when a single array is
provided instead of distinct ``d_x``, ``d_y``, and ``d_z`` (default: 0).
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = CartesianRepresentation
_d_xyz = None
def __init__(self, d_x, d_y=None, d_z=None, unit=None, xyz_axis=None,
copy=True):
if d_y is None and d_z is None:
if isinstance(d_x, np.ndarray) and d_x.dtype.kind not in 'OV':
# Short-cut for 3-D array input.
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
# Keep a link to the array with all three coordinates
# so that we can return it quickly if needed in get_xyz.
self._d_xyz = d_x
if xyz_axis:
d_x = np.moveaxis(d_x, xyz_axis, 0)
self._xyz_axis = xyz_axis
else:
self._xyz_axis = 0
self._d_x, self._d_y, self._d_z = d_x
return
else:
d_x, d_y, d_z = d_x
if xyz_axis is not None:
raise ValueError("xyz_axis should only be set if d_x, d_y, and d_z "
"are in a single array passed in through d_x, "
"i.e., d_y and d_z should not be not given.")
if d_y is None or d_z is None:
raise ValueError("d_x, d_y, and d_z are required to instantiate {0}"
.format(self.__class__.__name__))
if unit is not None:
d_x = u.Quantity(d_x, unit, copy=copy, subok=True)
d_y = u.Quantity(d_y, unit, copy=copy, subok=True)
d_z = u.Quantity(d_z, unit, copy=copy, subok=True)
copy = False
super().__init__(d_x, d_y, d_z, copy=copy)
if not (self._d_x.unit.is_equivalent(self._d_y.unit) and
self._d_x.unit.is_equivalent(self._d_z.unit)):
raise u.UnitsError('d_x, d_y and d_z should have equivalent units.')
def to_cartesian(self, base=None):
return CartesianRepresentation(*[getattr(self, c) for c
in self.components])
@classmethod
def from_cartesian(cls, other, base=None):
return cls(*[getattr(other, c) for c in other.components])
def get_d_xyz(self, xyz_axis=0):
"""Return a vector array of the x, y, and z coordinates.
Parameters
----------
xyz_axis : int, optional
The axis in the final array along which the x, y, z components
should be stored (default: 0).
Returns
-------
d_xyz : `~astropy.units.Quantity`
With dimension 3 along ``xyz_axis``. Note that, if possible,
this will be a view.
"""
if self._d_xyz is not None:
if self._xyz_axis == xyz_axis:
return self._d_xyz
else:
return np.moveaxis(self._d_xyz, self._xyz_axis, xyz_axis)
# Create combined array. TO DO: keep it in _d_xyz for repeated use?
# But then in-place changes have to cancel it. Likely best to
# also update components.
return _combine_xyz(self._d_x, self._d_y, self._d_z, xyz_axis=xyz_axis)
d_xyz = property(get_d_xyz)
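# Illustrative sketch (comments only, not executed): a cartesian differential
# built from three components (or, via the short-cut above, from a single
# array holding all three along ``xyz_axis``).
#
#     >>> d = CartesianDifferential(1., 2., 3., unit=u.km / u.s)
#     >>> d.get_d_xyz()   # -> Quantity [1., 2., 3.] km / s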
class BaseSphericalDifferential(BaseDifferential):
def _d_lon_coslat(self, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon * np.cos(base.lat)
@classmethod
def _get_d_lon(cls, d_lon_coslat, base):
"""Convert longitude differential d_lon_coslat to d_lon.
Parameters
----------
d_lon_coslat : `~astropy.units.Quantity`
Longitude differential that includes ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon_coslat / np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (isinstance(other, BaseSphericalDifferential) and
not isinstance(self, type(other)) or
isinstance(other, RadialDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The longitude and latitude of the differentials.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = UnitSphericalRepresentation
@classproperty
def _dimensional_differential(cls):
return SphericalDifferential
def __init__(self, d_lon, d_lat, copy=True):
super().__init__(d_lon, d_lat, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon and d_lat should have equivalent units.')
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, representation.d_lat)
elif isinstance(representation, (SphericalCosLatDifferential,
UnitSphericalCosLatDifferential)):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta)
return super().from_representation(representation, base)
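# Illustrative sketch (comments only, not executed): converting to the CosLat
# variant multiplies d_lon by cos(lat) of the base, here cos(60 deg) = 0.5.
#
#     >>> base = UnitSphericalRepresentation(0. * u.deg, 60. * u.deg)
#     >>> dusph = UnitSphericalDifferential(2. * u.mas / u.yr,
#     ...                                   1. * u.mas / u.yr)
#     >>> dusph.represent_as(UnitSphericalCosLatDifferential, base).d_lon_coslat
#     # -> 1 mas / yr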
class SphericalDifferential(BaseSphericalDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon, d_lat : `~astropy.units.Quantity`
The differential longitude and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalDifferential
def __init__(self, d_lon, d_lat, d_distance, copy=True):
super().__init__(d_lon, d_lat, d_distance, copy=copy)
if not self._d_lon.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon and d_lat should have equivalent units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_lon, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat,
self.d_distance)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self._d_lon_coslat(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self.d_lon, -self.d_lat, self.d_distance)
else:
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude.
if isinstance(representation, SphericalCosLatDifferential):
d_lon = cls._get_d_lon(representation.d_lon_coslat, base)
return cls(d_lon, representation.d_lat, representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_phi, -representation.d_theta,
representation.d_r)
return super().from_representation(representation, base)
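# Illustrative sketch (comments only, not executed): latitude increases
# towards the pole while theta increases away from it, hence the sign flip
# on d_lat in represent_as above.
#
#     >>> dsph = SphericalDifferential(1. * u.mas / u.yr, 2. * u.mas / u.yr,
#     ...                              3. * u.km / u.s)
#     >>> dsph.represent_as(PhysicsSphericalDifferential).d_theta
#     # -> -2 mas / yr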
class BaseSphericalCosLatDifferential(BaseDifferential):
"""Differtials from points on a spherical base representation.
With cos(lat) assumed to be included in the longitude differential.
"""
@classmethod
def _get_base_vectors(cls, base):
"""Get unit vectors and scale factors from (unit)spherical base.
Parameters
----------
base : instance of ``self.base_representation``
The points for which the unit vectors and scale factors should be
retrieved.
Returns
-------
unit_vectors : dict of `CartesianRepresentation`
In the directions of the coordinates of base.
scale_factors : dict of `~astropy.units.Quantity`
Scale factors for each of the coordinates. The scale factor for
longitude does not include the cos(lat) factor.
Raises
------
TypeError : if the base is not of the correct type
"""
cls._check_base(base)
return base.unit_vectors(), base.scale_factors(omit_coslat=True)
def _d_lon(self, base):
"""Convert longitude differential with cos(lat) to one without.
Parameters
----------
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
self._check_base(base)
return self.d_lon_coslat / np.cos(base.lat)
@classmethod
def _get_d_lon_coslat(cls, d_lon, base):
"""Convert longitude differential d_lon to d_lon_coslat.
Parameters
----------
d_lon : `~astropy.units.Quantity`
Value of the longitude differential without ``cos(lat)``.
base : instance of ``cls.base_representation``
The base from which the latitude will be taken.
"""
cls._check_base(base)
return d_lon * np.cos(base.lat)
def _combine_operation(self, op, other, reverse=False):
"""Combine two differentials, or a differential with a representation.
If ``other`` is of the same differential type as ``self``, the
components will simply be combined. If both are different parts of
a `~astropy.coordinates.SphericalDifferential` (e.g., a
`~astropy.coordinates.UnitSphericalDifferential` and a
        `~astropy.coordinates.RadialDifferential`), they will be combined
appropriately.
If ``other`` is a representation, it will be used as a base for which
to evaluate the differential, and the result is a new representation.
Parameters
----------
op : `~operator` callable
            Operator to apply (e.g., `~operator.add`, `~operator.sub`, etc.).
other : `~astropy.coordinates.BaseRepresentation` instance
The other differential or representation.
reverse : bool
Whether the operands should be reversed (e.g., as we got here via
``self.__rsub__`` because ``self`` is a subclass of ``other``).
"""
if (isinstance(other, BaseSphericalCosLatDifferential) and
not isinstance(self, type(other)) or
isinstance(other, RadialDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalCosLatDifferential(**result_args)
return super()._combine_operation(op, other, reverse)
class UnitSphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points on a unit sphere.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The longitude and latitude of the differentials.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = UnitSphericalRepresentation
attr_classes = OrderedDict([('d_lon_coslat', u.Quantity),
('d_lat', u.Quantity)])
@classproperty
def _dimensional_differential(cls):
return SphericalCosLatDifferential
def __init__(self, d_lon_coslat, d_lat, copy=True):
super().__init__(d_lon_coslat, d_lat, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon_coslat and d_lat should have equivalent '
'units.')
def to_cartesian(self, base):
if isinstance(base, SphericalRepresentation):
scale = base.distance
elif isinstance(base, PhysicsSphericalRepresentation):
scale = base.r
else:
return super().to_cartesian(base)
base = base.represent_as(UnitSphericalRepresentation)
return scale * super().to_cartesian(base)
def represent_as(self, other_class, base=None):
# Only have enough information to represent other unit-spherical.
if issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# All spherical differentials can be done without going to Cartesian,
        # though conversion to the non-CosLat classes needs base for the
        # latitude.
if isinstance(representation, SphericalCosLatDifferential):
return cls(representation.d_lon_coslat, representation.d_lat)
elif isinstance(representation, (SphericalDifferential,
UnitSphericalDifferential)):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta)
return super().from_representation(representation, base)
class SphericalCosLatDifferential(BaseSphericalCosLatDifferential):
"""Differential(s) of points in 3D spherical coordinates.
Parameters
----------
d_lon_coslat, d_lat : `~astropy.units.Quantity`
The differential longitude (with cos(lat) included) and latitude.
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = SphericalRepresentation
_unit_differential = UnitSphericalCosLatDifferential
attr_classes = OrderedDict([('d_lon_coslat', u.Quantity),
('d_lat', u.Quantity),
('d_distance', u.Quantity)])
def __init__(self, d_lon_coslat, d_lat, d_distance, copy=True):
super().__init__(d_lon_coslat, d_lat, d_distance, copy=copy)
if not self._d_lon_coslat.unit.is_equivalent(self._d_lat.unit):
raise u.UnitsError('d_lon_coslat and d_lat should have equivalent '
'units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though some need base for the latitude to remove cos(lat).
if issubclass(other_class, UnitSphericalCosLatDifferential):
return other_class(self.d_lon_coslat, self.d_lat)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_distance)
elif issubclass(other_class, SphericalDifferential):
return other_class(self._d_lon(base), self.d_lat, self.d_distance)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self._d_lon(base), self.d_lat)
elif issubclass(other_class, PhysicsSphericalDifferential):
return other_class(self._d_lon(base), -self.d_lat, self.d_distance)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat.
if isinstance(representation, SphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_lon, base)
return cls(d_lon_coslat, representation.d_lat,
representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
d_lon_coslat = cls._get_d_lon_coslat(representation.d_phi, base)
return cls(d_lon_coslat, -representation.d_theta,
representation.d_r)
return super().from_representation(representation, base)
class RadialDifferential(BaseDifferential):
"""Differential(s) of radial distances.
Parameters
----------
d_distance : `~astropy.units.Quantity`
The differential distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = RadialRepresentation
def to_cartesian(self, base):
return self.d_distance * base.represent_as(
UnitSphericalRepresentation).to_cartesian()
@classmethod
def from_cartesian(cls, other, base):
return cls(other.dot(base.represent_as(UnitSphericalRepresentation)),
copy=False)
@classmethod
def from_representation(cls, representation, base=None):
if isinstance(representation, (SphericalDifferential,
SphericalCosLatDifferential)):
return cls(representation.d_distance)
elif isinstance(representation, PhysicsSphericalDifferential):
return cls(representation.d_r)
else:
return super().from_representation(representation, base)
def _combine_operation(self, op, other, reverse=False):
if isinstance(other, self.base_representation):
if reverse:
first, second = other.distance, self.d_distance
else:
first, second = self.d_distance, other.distance
return other.__class__(op(first, second), copy=False)
elif isinstance(other, (BaseSphericalDifferential,
BaseSphericalCosLatDifferential)):
all_components = set(self.components) | set(other.components)
first, second = (self, other) if not reverse else (other, self)
result_args = {c: op(getattr(first, c, 0.), getattr(second, c, 0.))
for c in all_components}
return SphericalDifferential(**result_args)
else:
return super()._combine_operation(op, other, reverse)
class PhysicsSphericalDifferential(BaseDifferential):
"""Differential(s) of 3D spherical coordinates using physics convention.
Parameters
----------
d_phi, d_theta : `~astropy.units.Quantity`
The differential azimuth and inclination.
d_r : `~astropy.units.Quantity`
The differential radial distance.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = PhysicsSphericalRepresentation
def __init__(self, d_phi, d_theta, d_r, copy=True):
super().__init__(d_phi, d_theta, d_r, copy=copy)
if not self._d_phi.unit.is_equivalent(self._d_theta.unit):
raise u.UnitsError('d_phi and d_theta should have equivalent '
'units.')
def represent_as(self, other_class, base=None):
# All spherical differentials can be done without going to Cartesian,
# though CosLat needs base for the latitude. For those, explicitly
# do the equivalent of self._d_lon_coslat in SphericalDifferential.
if issubclass(other_class, SphericalDifferential):
return other_class(self.d_phi, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalDifferential):
return other_class(self.d_phi, -self.d_theta)
elif issubclass(other_class, SphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta, self.d_r)
elif issubclass(other_class, UnitSphericalCosLatDifferential):
self._check_base(base)
d_lon_coslat = self.d_phi * np.sin(base.theta)
return other_class(d_lon_coslat, -self.d_theta)
elif issubclass(other_class, RadialDifferential):
return other_class(self.d_r)
return super().represent_as(other_class, base)
@classmethod
def from_representation(cls, representation, base=None):
# Other spherical differentials can be done without going to Cartesian,
# though we need base for the latitude to remove coslat. For that case,
# do the equivalent of cls._d_lon in SphericalDifferential.
if isinstance(representation, SphericalDifferential):
return cls(representation.d_lon, -representation.d_lat,
representation.d_distance)
elif isinstance(representation, SphericalCosLatDifferential):
cls._check_base(base)
d_phi = representation.d_lon_coslat / np.sin(base.theta)
return cls(d_phi, -representation.d_lat, representation.d_distance)
return super().from_representation(representation, base)
class CylindricalDifferential(BaseDifferential):
"""Differential(s) of points in cylindrical coordinates.
Parameters
----------
d_rho : `~astropy.units.Quantity`
The differential cylindrical radius.
d_phi : `~astropy.units.Quantity`
The differential azimuth.
d_z : `~astropy.units.Quantity`
The differential height.
copy : bool, optional
If `True` (default), arrays will be copied rather than referenced.
"""
base_representation = CylindricalRepresentation
    def __init__(self, d_rho, d_phi, d_z, copy=True):
super().__init__(d_rho, d_phi, d_z, copy=copy)
if not self._d_rho.unit.is_equivalent(self._d_z.unit):
raise u.UnitsError("d_rho and d_z should have equivalent units.")
|
a4a64ee3ffe012a896b3f591e969c7221f6b44ba44b7444c21fa31f4bd09e557 | """
Implements the wrapper for the Astropy test runner in the form of the
``./setup.py test`` distutils command.
"""
import os
import stat
import shutil
import subprocess
import sys
import tempfile
from distutils import log
from contextlib import contextmanager
from setuptools import Command
@contextmanager
def _suppress_stdout():
'''
A context manager to temporarily disable stdout.
Used later when installing a temporary copy of astropy to avoid a
very verbose output.
'''
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
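# Illustrative sketch (comments only, not executed): anything printed inside
# the block is sent to os.devnull instead of the terminal.
#
#     >>> with _suppress_stdout():
#     ...     print("this is discarded")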
class FixRemoteDataOption(type):
"""
This metaclass is used to catch cases where the user is running the tests
with --remote-data. We've now changed the --remote-data option so that it
takes arguments, but we still want --remote-data to work as before and to
enable all remote tests. With this metaclass, we can modify sys.argv
before distutils/setuptools try to parse the command-line options.
"""
def __init__(cls, name, bases, dct):
try:
idx = sys.argv.index('--remote-data')
except ValueError:
pass
else:
sys.argv[idx] = '--remote-data=any'
try:
idx = sys.argv.index('-R')
except ValueError:
pass
else:
sys.argv[idx] = '-R=any'
        super().__init__(name, bases, dct)
class AstropyTest(Command, metaclass=FixRemoteDataOption):
description = 'Run the tests for this package'
user_options = [
('package=', 'P',
"The name of a specific package to test, e.g. 'io.fits' or 'utils'. "
"Accepts comma separated string to specify multiple packages. "
"If nothing is specified, all default tests are run."),
('test-path=', 't',
'Specify a test location by path. If a relative path to a .py file, '
'it is relative to the built package, so e.g., a leading "astropy/" '
'is necessary. If a relative path to a .rst file, it is relative to '
'the directory *below* the --docs-path directory, so a leading '
'"docs/" is usually necessary. May also be an absolute path.'),
('verbose-results', 'V',
'Turn on verbose output from pytest.'),
('plugins=', 'p',
'Plugins to enable when running pytest.'),
('pastebin=', 'b',
"Enable pytest pastebin output. Either 'all' or 'failed'."),
('args=', 'a',
'Additional arguments to be passed to pytest.'),
('remote-data=', 'R', 'Run tests that download remote data. Should be '
'one of none/astropy/any (defaults to none).'),
('pep8', '8',
'Enable PEP8 checking and disable regular tests. '
'Requires the pytest-pep8 plugin.'),
('pdb', 'd',
'Start the interactive Python debugger on errors.'),
('coverage', 'c',
'Create a coverage report. Requires the coverage package.'),
('open-files', 'o', 'Fail if any tests leave files open. Requires the '
'psutil package.'),
('parallel=', 'j',
'Run the tests in parallel on the specified number of '
'CPUs. If "auto", all the cores on the machine will be '
'used. Requires the pytest-xdist plugin.'),
('docs-path=', None,
'The path to the documentation .rst files. If not provided, and '
'the current directory contains a directory called "docs", that '
'will be used.'),
('skip-docs', None,
"Don't test the documentation .rst files."),
('repeat=', None,
'How many times to repeat each test (can be used to check for '
'sporadic failures).'),
('temp-root=', None,
'The root directory in which to create the temporary testing files. '
'If unspecified the system default is used (e.g. /tmp) as explained '
'in the documentation for tempfile.mkstemp.'),
('verbose-install', None,
'Turn on terminal output from the installation of astropy in a '
'temporary folder.'),
('readonly', None,
'Make the temporary installation being tested read-only.')
]
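    # Illustrative invocations (comments only; long option names as defined
    # above, with their short forms listed alongside):
    #
    #     python setup.py test
    #     python setup.py test --package=io.fits --verbose-results
    #     python setup.py test --coverage         # needs the coverage package
    #     python setup.py test --parallel=auto    # needs pytest-xdist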
package_name = ''
def initialize_options(self):
self.package = None
self.test_path = None
self.verbose_results = False
self.plugins = None
self.pastebin = None
self.args = None
self.remote_data = 'none'
self.pep8 = False
self.pdb = False
self.coverage = False
self.open_files = False
self.parallel = 0
self.docs_path = None
self.skip_docs = False
self.repeat = None
self.temp_root = None
self.verbose_install = False
self.readonly = False
def finalize_options(self):
# Normally we would validate the options here, but that's handled in
# run_tests
pass
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = '' # Commands to run before the test function
cmd_post = '' # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
set_flag = "import builtins; builtins._ASTROPY_TEST_ = True"
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.test('
'package={1.package!r}, '
'test_path={1.test_path!r}, '
'args={1.args!r}, '
'plugins={1.plugins!r}, '
'verbose={1.verbose_results!r}, '
'pastebin={1.pastebin!r}, '
'remote_data={1.remote_data!r}, '
'pep8={1.pep8!r}, '
'pdb={1.pdb!r}, '
'open_files={1.open_files!r}, '
'parallel={1.parallel!r}, '
'docs_path={1.docs_path!r}, '
'skip_docs={1.skip_docs!r}, '
'add_local_eggs_to_path=True, ' # see _build_temp_install below
'repeat={1.repeat!r})); '
'{cmd_post}'
'sys.exit(result)')
return cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post)
def run(self):
"""
Run the tests!
"""
# Install the runtime dependencies.
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
# Ensure there is a doc path
if self.docs_path is None:
cfg_docs_dir = self.distribution.get_option_dict('build_docs').get('source_dir', None)
# Some affiliated packages use this.
# See astropy/package-template#157
if cfg_docs_dir is not None and os.path.exists(cfg_docs_dir[1]):
self.docs_path = os.path.abspath(cfg_docs_dir[1])
# fall back on a default path of "docs"
elif os.path.exists('docs'): # pragma: no cover
self.docs_path = os.path.abspath('docs')
# Build a testing install of the package
self._build_temp_install()
# Install the test dependencies
# NOTE: we do this here after _build_temp_install because there is
        # a weird bug which occurs if psutil is installed in this way before
        # astropy is built: Cython can have a segmentation fault. Strange, eh?
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
# Copy any additional dependencies that may have been installed via
# tests_requires or install_requires. We then pass the
# add_local_eggs_to_path=True option to package.test() to make sure the
# eggs get included in the path.
if os.path.exists('.eggs'):
shutil.copytree('.eggs', os.path.join(self.testing_path, '.eggs'))
# This option exists so that we can make sure that the tests don't
# write to an installed location.
if self.readonly:
log.info('changing permissions of temporary installation to read-only')
self._change_permissions_testing_path(writable=False)
# Run everything in a try: finally: so that the tmp dir gets deleted.
try:
# Construct this modules testing command
cmd = self.generate_testing_command()
# Run the tests in a subprocess--this is necessary since
# new extension modules may have appeared, and this is the
# easiest way to set up a new environment
testproc = subprocess.Popen(
[sys.executable, '-c', cmd],
cwd=self.testing_path, close_fds=False)
retcode = testproc.wait()
except KeyboardInterrupt:
import signal
# If a keyboard interrupt is handled, pass it to the test
# subprocess to prompt pytest to initiate its teardown
testproc.send_signal(signal.SIGINT)
retcode = testproc.wait()
finally:
# Remove temporary directory
if self.readonly:
self._change_permissions_testing_path(writable=True)
shutil.rmtree(self.tmp_dir)
raise SystemExit(retcode)
def _build_temp_install(self):
"""
        Install the package to a temporary directory for the purposes of
        testing. This allows us to test the install command, including the
        entry points, and also avoids creating pyc and __pycache__
        directories inside the build directory.
"""
# On OSX the default path for temp files is under /var, but in most
# cases on OSX /var is actually a symlink to /private/var; ensure we
# dereference that link, because py.test is very sensitive to relative
# paths...
tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-',
dir=self.temp_root)
self.tmp_dir = os.path.realpath(tmp_dir)
log.info('installing to temporary directory: {0}'.format(self.tmp_dir))
# We now install the package to the temporary directory. We do this
# rather than build and copy because this will ensure that e.g. entry
# points work.
self.reinitialize_command('install')
install_cmd = self.distribution.get_command_obj('install')
install_cmd.prefix = self.tmp_dir
if self.verbose_install:
self.run_command('install')
else:
with _suppress_stdout():
self.run_command('install')
# We now get the path to the site-packages directory that was created
# inside self.tmp_dir
install_cmd = self.get_finalized_command('install')
self.testing_path = install_cmd.install_lib
# Ideally, docs_path is set properly in run(), but if it is still
# not set here, do not pretend it is, otherwise bad things happen.
# See astropy/package-template#157
if self.docs_path is not None:
new_docs_path = os.path.join(self.testing_path,
os.path.basename(self.docs_path))
shutil.copytree(self.docs_path, new_docs_path)
self.docs_path = new_docs_path
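# setup.cfg is copied alongside the installed package so that the test
# runner (which runs with cwd=self.testing_path) can still pick up the
# project configuration.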
shutil.copy('setup.cfg', self.testing_path)
def _change_permissions_testing_path(self, writable=False):
if writable:
basic_flags = stat.S_IRUSR | stat.S_IWUSR
else:
basic_flags = stat.S_IRUSR
for root, dirs, files in os.walk(self.testing_path):
for dirname in dirs:
os.chmod(os.path.join(root, dirname), basic_flags | stat.S_IXUSR)
for filename in files:
os.chmod(os.path.join(root, filename), basic_flags)
def _generate_coverage_commands(self):
"""
Create the pre- and post-commands that are spliced into the testing
command (see ``generate_testing_command``) when coverage is requested.
"""
if self.parallel != 0:
raise ValueError(
"--coverage can not be used with --parallel")
try:
import coverage # pylint: disable=W0611
except ImportError:
raise ImportError(
"--coverage requires that the coverage package is "
"installed.")
# Don't use get_pkg_data_filename here, because it
# requires importing astropy.config and thus screwing
# up coverage results for those packages.
coveragerc = os.path.join(
self.testing_path, self.package_name.replace('.', '/'),
'tests', 'coveragerc')
with open(coveragerc, 'r') as fd:
coveragerc_content = fd.read()
coveragerc_content = coveragerc_content.replace(
"{packagename}", self.package_name.replace('.', '/'))
tmp_coveragerc = os.path.join(self.tmp_dir, 'coveragerc')
with open(tmp_coveragerc, 'wb') as tmp:
tmp.write(coveragerc_content.encode('utf-8'))
cmd_pre = (
'import coverage; '
'cov = coverage.coverage(data_file="{0}", config_file="{1}"); '
'cov.start();'.format(
os.path.abspath(".coverage"), tmp_coveragerc))
cmd_post = (
'cov.stop(); '
'from astropy.tests.helper import _save_coverage; '
'_save_coverage(cov, result, "{0}", "{1}");'.format(
os.path.abspath('.'), self.testing_path))
return cmd_pre, cmd_post
|
ae502a9d1a649a63cce0d35da0dc426f90beb272cfeedf887d7b5703d844cb68 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
try:
import h5py # pylint: disable=W0611
except ImportError:
HAS_H5PY = False
else:
HAS_H5PY = True
try:
import yaml # pylint: disable=W0611
HAS_YAML = True
except ImportError:
HAS_YAML = False
import copy
import pickle
from io import StringIO
import pytest
import numpy as np
from ...coordinates import EarthLocation
from ...table import Table, QTable, join, hstack, vstack, Column, NdarrayMixin
from ...table import serialize
from ... import time
from ... import coordinates
from ... import units as u
from ..column import BaseColumn
from .. import table_helpers
from .conftest import MIXIN_COLS
def test_attributes(mixin_cols):
"""
Required attributes for a column can be set.
"""
m = mixin_cols['m']
m.info.name = 'a'
assert m.info.name == 'a'
m.info.description = 'a'
assert m.info.description == 'a'
# Cannot set info.unit for these classes since the unit is intrinsic to the object
if isinstance(m, (u.Quantity, coordinates.SkyCoord, time.Time)):
with pytest.raises(AttributeError):
m.info.unit = u.m
else:
m.info.unit = u.m
assert m.info.unit is u.m
m.info.format = 'a'
assert m.info.format == 'a'
m.info.meta = {'a': 1}
assert m.info.meta == {'a': 1}
with pytest.raises(AttributeError):
m.info.bad_attr = 1
with pytest.raises(AttributeError):
m.info.bad_attr
def check_mixin_type(table, table_col, in_col):
# We check for QuantityInfo rather than just isinstance(col, u.Quantity)
# since we want to treat EarthLocation as a mixin, even though it is
# a Quantity subclass.
if ((isinstance(in_col.info, u.QuantityInfo) and type(table) is not QTable)
or isinstance(in_col, Column)):
assert type(table_col) is table.ColumnClass
else:
assert type(table_col) is type(in_col)
# Make sure in_col got copied and creating table did not touch it
assert in_col.info.name is None
def test_make_table(table_types, mixin_cols):
"""
Make a table with the columns in mixin_cols, which is an ordered dict of
four cols: 'i', 'a', and 'b' are table_types.Column type, and 'm' is a mixin.
"""
t = table_types.Table(mixin_cols)
check_mixin_type(t, t['m'], mixin_cols['m'])
cols = list(mixin_cols.values())
t = table_types.Table(cols, names=('i', 'a', 'b', 'm'))
check_mixin_type(t, t['m'], mixin_cols['m'])
t = table_types.Table(cols)
check_mixin_type(t, t['col3'], mixin_cols['m'])
def test_io_ascii_write():
"""
Test that table with mixin column can be written by io.ascii for
every pure Python writer. No validation of the output is done,
this just confirms no exceptions.
"""
from ...io.ascii.connect import _get_connectors_table
t = QTable(MIXIN_COLS)
for fmt in _get_connectors_table():
if fmt['Format'] == 'ascii.ecsv' and not HAS_YAML:
continue
if fmt['Write'] and '.fast_' not in fmt['Format']:
out = StringIO()
t.write(out, format=fmt['Format'])
def test_votable_quantity_write(tmpdir):
"""
Test that table with Quantity mixin column can be round-tripped by
io.votable. Note that FITS and HDF5 mixin support are tested (much more
thoroughly) in their respective subpackage tests
(io/fits/tests/test_connect.py and io/misc/tests/test_hdf5.py).
"""
t = QTable()
t['a'] = u.Quantity([1, 2, 4], unit='Angstrom')
filename = str(tmpdir.join('table-tmp'))
t.write(filename, format='votable', overwrite=True)
qt = QTable.read(filename, format='votable')
assert isinstance(qt['a'], u.Quantity)
assert qt['a'].unit == 'Angstrom'
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_write_fits_standard(tmpdir, table_types):
"""
Test that table with Time mixin columns can be written by io.fits.
Validation of the output is done. Test that io.fits writes a table
containing Time mixin columns that can be partially round-tripped
(metadata scale, location).
Note that we postpone checking the "local" scale, since that cannot
be done with format 'cxcsec', as it requires an epoch.
"""
t = table_types([[1, 2], ['string', 'column']])
for scale in time.STANDARD_TIME_SCALES:
t['a'+scale] = time.Time([[1, 2], [3, 4]], format='cxcsec',
scale=scale, location=EarthLocation(
-2446354, 4237210, 4077985, unit='m'))
t['b'+scale] = time.Time(['1999-01-01T00:00:00.123456789',
'2010-01-01T00:00:00'], scale=scale)
t['c'] = [3., 4.]
filename = str(tmpdir.join('table-tmp'))
# Show that FITS format succeeds
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits', astropy_native=True)
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == 'jd'
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ('col0', 'col1', 'c'):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
t[name].info.serialize_method['fits'] = 'formatted_value'
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits')
for scale in time.STANDARD_TIME_SCALES:
for ab in ('a', 'b'):
name = ab + scale
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
@pytest.mark.parametrize('table_types', (Table, QTable))
def test_io_time_write_fits_local(tmpdir, table_types):
"""
Test that table with a Time mixin with scale local can also be written
by io.fits. Like ``test_io_time_write_fits_standard`` above, but avoiding
``cxcsec`` format, which requires an epoch and thus cannot be used for a
local time scale.
"""
t = table_types([[1, 2], ['string', 'column']])
t['a_local'] = time.Time([[50001, 50002], [50003, 50004]],
format='mjd', scale='local',
location=EarthLocation(-2446354, 4237210, 4077985,
unit='m'))
t['b_local'] = time.Time(['1999-01-01T00:00:00.123456789',
'2010-01-01T00:00:00'], scale='local')
t['c'] = [3., 4.]
filename = str(tmpdir.join('table-tmp'))
# Show that FITS format succeeds
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits', astropy_native=True)
for ab in ('a', 'b'):
name = ab + '_local'
# Assert that the time columns are read as Time
assert isinstance(tm[name], time.Time)
# Assert that the scales round-trip
assert tm[name].scale == t[name].scale
# Assert that the format is jd
assert tm[name].format == 'jd'
# Assert that the location round-trips
assert tm[name].location == t[name].location
# Finally assert that the column data round-trips
assert (tm[name] == t[name]).all()
for name in ('col0', 'col1', 'c'):
# Assert that the non-time columns are read as Column
assert isinstance(tm[name], Column)
# Assert that the non-time columns' data round-trips
assert (tm[name] == t[name]).all()
# Test for conversion of time data to its value, as defined by its format.
for ab in ('a', 'b'):
name = ab + '_local'
t[name].info.serialize_method['fits'] = 'formatted_value'
t.write(filename, format='fits', overwrite=True)
tm = table_types.read(filename, format='fits')
for ab in ('a', 'b'):
name = ab + '_local'
assert not isinstance(tm[name], time.Time)
assert (tm[name] == t[name].value).all()
def test_votable_mixin_write_fail(mixin_cols):
"""
Test that table with mixin columns (excluding Quantity) cannot be written by
io.votable.
"""
t = QTable(mixin_cols)
# Only do this test if there are unsupported column types (i.e. anything besides
# BaseColumn and Quantity class instances).
unsupported_cols = t.columns.not_isinstance((BaseColumn, u.Quantity))
if not unsupported_cols:
pytest.skip("no unsupported column types")
out = StringIO()
with pytest.raises(ValueError) as err:
t.write(out, format='votable')
assert 'cannot write table with mixin column(s)' in str(err.value)
def test_join(table_types):
"""
Join tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1['a'] = table_types.Column(['a', 'b', 'b', 'c'])
t1['i'] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t2 = table_types.Table(t1)
t2['a'] = ['b', 'c', 'a', 'd']
for name, col in MIXIN_COLS.items():
t1[name].info.description = name
t2[name].info.description = name + '2'
for join_type in ('inner', 'left'):
t12 = join(t1, t2, keys='a', join_type=join_type)
idx1 = t12['i_1']
idx2 = t12['i_2']
for name, col in MIXIN_COLS.items():
name1 = name + '_1'
name2 = name + '_2'
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
assert t12[name1].info.description == name
assert t12[name2].info.description == name + '2'
for join_type in ('outer', 'right'):
with pytest.raises(NotImplementedError) as exc:
t12 = join(t1, t2, keys='a', join_type=join_type)
assert 'join requires masking column' in str(exc.value)
with pytest.raises(ValueError) as exc:
t12 = join(t1, t2, keys=['a', 'skycoord'])
assert 'not allowed as a key column' in str(exc.value)
# Join does work for a mixin which is a subclass of np.ndarray
t12 = join(t1, t2, keys=['quantity'])
assert np.all(t12['a_1'] == t1['a'])
def test_hstack(table_types):
"""
Hstack tables with mixin cols. Use column "i" as proxy for what the
result should be for each mixin.
"""
t1 = table_types.Table()
t1['i'] = table_types.Column([0, 1, 2, 3])
for name, col in MIXIN_COLS.items():
t1[name] = col
t1[name].info.description = name
t1[name].info.meta = {'a': 1}
for join_type in ('inner', 'outer'):
for chop in (True, False):
t2 = table_types.Table(t1)
if chop:
t2 = t2[:-1]
if join_type == 'outer':
with pytest.raises(NotImplementedError) as exc:
t12 = hstack([t1, t2], join_type=join_type)
assert 'hstack requires masking column' in str(exc.value)
continue
t12 = hstack([t1, t2], join_type=join_type)
idx1 = t12['i_1']
idx2 = t12['i_2']
for name, col in MIXIN_COLS.items():
name1 = name + '_1'
name2 = name + '_2'
assert_table_name_col_equal(t12, name1, col[idx1])
assert_table_name_col_equal(t12, name2, col[idx2])
for attr in ('description', 'meta'):
assert getattr(t1[name].info, attr) == getattr(t12[name1].info, attr)
assert getattr(t2[name].info, attr) == getattr(t12[name2].info, attr)
def assert_table_name_col_equal(t, name, col):
"""
Assert all(t[name] == col), with special handling for known mixin cols.
"""
if isinstance(col, coordinates.SkyCoord):
assert np.all(t[name].ra == col.ra)
assert np.all(t[name].dec == col.dec)
elif isinstance(col, u.Quantity):
if type(t) is QTable:
assert np.all(t[name] == col)
elif isinstance(col, table_helpers.ArrayWrapper):
assert np.all(t[name].data == col.data)
else:
assert np.all(t[name] == col)
def test_get_items(mixin_cols):
"""
Test that slicing / indexing a table gives the right values and that column attributes are inherited
"""
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
m = mixin_cols['m']
m.info.name = 'm'
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
t = QTable([m])
for item in ([1, 3], np.array([0, 2]), slice(1, 3)):
t2 = t[item]
m2 = m[item]
assert_table_name_col_equal(t2, 'm', m[item])
for attr in attrs:
assert getattr(t2['m'].info, attr) == getattr(m.info, attr)
assert getattr(m2.info, attr) == getattr(m.info, attr)
def test_info_preserved_pickle_copy_init(mixin_cols):
"""
Test that copy, pickle, and init-from-class round trips preserve info. This
tests not only the mixin classes but a regular column as well.
"""
def pickle_roundtrip(c):
return pickle.loads(pickle.dumps(c))
def init_from_class(c):
return c.__class__(c)
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
for colname in ('i', 'm'):
m = mixin_cols[colname]
m.info.name = colname
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
for func in (copy.copy, copy.deepcopy, pickle_roundtrip, init_from_class):
m2 = func(m)
for attr in attrs:
assert getattr(m2.info, attr) == getattr(m.info, attr)
def test_add_column(mixin_cols):
"""
Test that adding a column preserves values and attributes
"""
attrs = ('name', 'unit', 'dtype', 'format', 'description', 'meta')
m = mixin_cols['m']
assert m.info.name is None
# Make sure adding the column in various ways doesn't touch its info
t = QTable([m], names=['a'])
assert m.info.name is None
t['new'] = m
assert m.info.name is None
m.info.name = 'm'
m.info.format = '{0}'
m.info.description = 'd'
m.info.meta = {'a': 1}
t = QTable([m])
# Add columns m2, m3, m4 by two different methods and test expected equality
t['m2'] = m
m.info.name = 'm3'
t.add_columns([m], copy=True)
m.info.name = 'm4'
t.add_columns([m], copy=False)
for name in ('m2', 'm3', 'm4'):
assert_table_name_col_equal(t, name, m)
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t[name].info, attr)
# Also check that one can set using a scalar.
s = m[0]
if type(s) is type(m):
# We're not going to worry about testing classes for which scalars
# are a different class than the real array (and thus lose info, etc.)
t['s'] = m[0]
assert_table_name_col_equal(t, 's', m[0])
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)
# While we're at it, also check a length-1 table.
t = QTable([m[1:2]], names=['m'])
if type(s) is type(m):
t['s'] = m[0]
assert_table_name_col_equal(t, 's', m[0])
for attr in attrs:
if attr != 'name':
assert getattr(t['m'].info, attr) == getattr(t['s'].info, attr)
def test_vstack():
"""
Vstack tables with mixin cols.
"""
t1 = QTable(MIXIN_COLS)
t2 = QTable(MIXIN_COLS)
with pytest.raises(NotImplementedError):
vstack([t1, t2])
def test_insert_row(mixin_cols):
"""
Test inserting a row, which only works for BaseColumn and Quantity
"""
t = QTable(mixin_cols)
t['m'].info.description = 'd'
if isinstance(t['m'], (u.Quantity, Column)):
t.insert_row(1, t[-1])
assert t[1] == t[-1]
assert t['m'].info.description == 'd'
else:
with pytest.raises(ValueError) as exc:
t.insert_row(1, t[-1])
assert "Unable to insert row" in str(exc.value)
def test_insert_row_bad_unit():
"""
Insert a row into a QTable with the wrong unit
"""
t = QTable([[1] * u.m])
with pytest.raises(ValueError) as exc:
t.insert_row(0, (2 * u.m / u.s,))
assert "'m / s' (speed) and 'm' (length) are not convertible" in str(exc.value)
def test_convert_np_array(mixin_cols):
"""
Test that converting a table to a numpy array gives object dtype for
mixin columns without a dtype, and otherwise preserves the dtype kind.
"""
t = QTable(mixin_cols)
ta = t.as_array()
m = mixin_cols['m']
dtype_kind = m.dtype.kind if hasattr(m, 'dtype') else 'O'
assert ta['m'].dtype.kind == dtype_kind
def test_assignment_and_copy():
"""
Test that assignment of an int, slice, and fancy index works.
Along the way test that copying table works.
"""
for name in ('quantity', 'arraywrap'):
m = MIXIN_COLS[name]
t0 = QTable([m], names=['m'])
for i0, i1 in ((1, 2),
(slice(0, 2), slice(1, 3)),
(np.array([1, 2]), np.array([2, 3]))):
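# Assign into a copy from shifted indices and verify below that the
# original table t0 is left unchanged.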
t = t0.copy()
t['m'][i0] = m[i1]
if name == 'arraywrap':
assert np.all(t['m'].data[i0] == m.data[i1])
assert np.all(t0['m'].data[i0] == m.data[i0])
assert np.all(t0['m'].data[i0] != t['m'].data[i0])
else:
assert np.all(t['m'][i0] == m[i1])
assert np.all(t0['m'][i0] == m[i0])
assert np.all(t0['m'][i0] != t['m'][i0])
def test_grouping():
"""
Test grouping with mixin columns. Raises not yet implemented error.
"""
t = QTable(MIXIN_COLS)
t['index'] = ['a', 'b', 'b', 'c']
with pytest.raises(NotImplementedError):
t.group_by('index')
def test_conversion_qtable_table():
"""
Test that a table round trips from QTable => Table => QTable
"""
qt = QTable(MIXIN_COLS)
names = qt.colnames
for name in names:
qt[name].info.description = name
t = Table(qt)
for name in names:
assert t[name].info.description == name
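# In a plain Table the Quantity column is converted to an ordinary
# Column carrying the unit, so compare values and unit separately.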
if name == 'quantity':
assert np.all(t['quantity'] == qt['quantity'].value)
assert np.all(t['quantity'].unit is qt['quantity'].unit)
assert isinstance(t['quantity'], t.ColumnClass)
else:
assert_table_name_col_equal(t, name, qt[name])
qt2 = QTable(qt)
for name in names:
assert qt2[name].info.description == name
assert_table_name_col_equal(qt2, name, qt[name])
def test_setitem_as_column_name():
"""
Test for mixin-related regression described in #3321.
"""
t = Table()
t['a'] = ['x', 'y']
t['b'] = 'b' # Previously was failing with KeyError
assert np.all(t['a'] == ['x', 'y'])
assert np.all(t['b'] == ['b', 'b'])
def test_quantity_representation():
"""
Test that the table representation of quantities shows the unit only in the header row, not attached to each value
"""
t = QTable([[1, 2] * u.m])
assert t.pformat() == ['col0',
' m ',
'----',
' 1.0',
' 2.0']
def test_skycoord_representation():
"""
Test that skycoord representation works, both in the way that the
values are output and in changing the frame representation.
"""
# With no unit we get "None" in the unit row
c = coordinates.SkyCoord([0], [1], [0], representation='cartesian')
t = Table([c])
assert t.pformat() == [' col0 ',
'None,None,None',
'--------------',
' 0.0,1.0,0.0']
# Test that info works with a dynamically changed representation
c = coordinates.SkyCoord([0], [1], [0], unit='m', representation='cartesian')
t = Table([c])
assert t.pformat() == [' col0 ',
' m,m,m ',
'-----------',
'0.0,1.0,0.0']
t['col0'].representation = 'unitspherical'
assert t.pformat() == [' col0 ',
'deg,deg ',
'--------',
'90.0,0.0']
t['col0'].representation = 'cylindrical'
assert t.pformat() == [' col0 ',
' m,deg,m ',
'------------',
'1.0,90.0,0.0']
def test_ndarray_mixin():
"""
Test directly adding plain structured arrays into a table instead of
explicitly viewing them as NdarrayMixin. Once added they are stored as
NdarrayMixin, so all the previous tests apply.
"""
a = np.array([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')],
dtype='<i4,|U1')
b = np.array([(10, 'aa'), (20, 'bb'), (30, 'cc'), (40, 'dd')],
dtype=[('x', 'i4'), ('y', 'U2')])
c = np.rec.fromrecords([(100, 'raa'), (200, 'rbb'), (300, 'rcc'), (400, 'rdd')],
names=['rx', 'ry'])
d = np.arange(8).reshape(4, 2).view(NdarrayMixin)
# Add one during initialization and the next as a new column.
t = Table([a], names=['a'])
t['b'] = b
t['c'] = c
t['d'] = d
assert isinstance(t['a'], NdarrayMixin)
assert t['a'][1][1] == a[1][1]
assert t['a'][2][0] == a[2][0]
assert t[1]['a'][1] == a[1][1]
assert t[2]['a'][0] == a[2][0]
assert isinstance(t['b'], NdarrayMixin)
assert t['b'][1]['x'] == b[1]['x']
assert t['b'][1]['y'] == b[1]['y']
assert t[1]['b']['x'] == b[1]['x']
assert t[1]['b']['y'] == b[1]['y']
assert isinstance(t['c'], NdarrayMixin)
assert t['c'][1]['rx'] == c[1]['rx']
assert t['c'][1]['ry'] == c[1]['ry']
assert t[1]['c']['rx'] == c[1]['rx']
assert t[1]['c']['ry'] == c[1]['ry']
assert isinstance(t['d'], NdarrayMixin)
assert t['d'][1][0] == d[1][0]
assert t['d'][1][1] == d[1][1]
assert t[1]['d'][0] == d[1][0]
assert t[1]['d'][1] == d[1][1]
assert t.pformat() == [' a b c d [2] ',
'-------- ---------- ------------ ------',
"(1, 'a') (10, 'aa') (100, 'raa') 0 .. 1",
"(2, 'b') (20, 'bb') (200, 'rbb') 2 .. 3",
"(3, 'c') (30, 'cc') (300, 'rcc') 4 .. 5",
"(4, 'd') (40, 'dd') (400, 'rdd') 6 .. 7"]
def test_possible_string_format_functions():
"""
The QuantityInfo info class for Quantity implements a
possible_string_format_functions() method that overrides the
standard pprint._possible_string_format_functions() function.
Test this.
"""
t = QTable([[1, 2] * u.m])
t['col0'].info.format = '%.3f'
assert t.pformat() == [' col0',
' m ',
'-----',
'1.000',
'2.000']
t['col0'].info.format = 'hi {:.3f}'
assert t.pformat() == [' col0 ',
' m ',
'--------',
'hi 1.000',
'hi 2.000']
t['col0'].info.format = '.4f'
assert t.pformat() == [' col0 ',
' m ',
'------',
'1.0000',
'2.0000']
def test_rename_mixin_columns(mixin_cols):
"""
Rename a mixin column.
"""
t = QTable(mixin_cols)
tc = t.copy()
t.rename_column('m', 'mm')
assert t.colnames == ['i', 'a', 'b', 'mm']
if isinstance(t['mm'], table_helpers.ArrayWrapper):
assert np.all(t['mm'].data == tc['m'].data)
elif isinstance(t['mm'], coordinates.SkyCoord):
assert np.all(t['mm'].ra == tc['m'].ra)
assert np.all(t['mm'].dec == tc['m'].dec)
else:
assert np.all(t['mm'] == tc['m'])
def test_represent_mixins_as_columns_unit_fix():
"""
If the unit is invalid for a column that gets serialized this would
cause an exception. Fixed in #7481.
"""
t = Table({'a': [1, 2]}, masked=True)
t['a'].unit = 'not a valid unit'
t['a'].mask[1] = True
serialize._represent_mixins_as_columns(t)
|
804d46dd8535ff2d09d67a08def6b8d05c8931b62df52f5eefd9789d5caaadd6 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
import numpy.ma as ma
from ..convolve import convolve, convolve_fft
from numpy.testing import assert_array_almost_equal_nulp, assert_array_almost_equal
import itertools
VALID_DTYPES = ('>f4', '<f4', '>f8', '<f8')
VALID_DTYPE_MATRIX = list(itertools.product(VALID_DTYPES, VALID_DTYPES))
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
NANHANDLING_OPTIONS = ['interpolate', 'fill']
NORMALIZE_OPTIONS = [True, False]
PRESERVE_NAN_OPTIONS = [True, False]
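# Pair the direct convolve function with every boundary option
# (itertools.cycle repeats convolve for each entry of BOUNDARY_OPTIONS),
# then add two convolve_fft cases ('wrap' and 'fill').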
BOUNDARIES_AND_CONVOLUTIONS = (list(zip(itertools.cycle((convolve,)),
BOUNDARY_OPTIONS)) + [(convolve_fft,
'wrap'),
(convolve_fft,
'fill')])
HAS_SCIPY = True
try:
import scipy
except ImportError:
HAS_SCIPY = False
HAS_PANDAS = True
try:
import pandas
except ImportError:
HAS_PANDAS = False
class TestConvolve1D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [1, 4, 5, 6, 5, 7, 8]
y = [0.2, 0.6, 0.2]
z = convolve(x, y, boundary=None)
assert_array_almost_equal_nulp(z,
np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)
def test_tuple(self):
"""
Test that convolve works correctly when inputs are tuples
"""
x = (1, 4, 5, 6, 5, 7, 8)
y = (0.2, 0.6, 0.2)
z = convolve(x, y, boundary=None)
assert_array_almost_equal_nulp(z,
np.array([0., 3.6, 5., 5.6, 5.6, 6.8, 0.]), 10)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified(self, boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve does not modify the input arrays
"""
array = [1., 4., 5., 6., 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
assert np.all(np.array(array, dtype=dtype) == x)
assert np.all(np.array(kernel, dtype=dtype) == y)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan', 'dtype'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS,
VALID_DTYPES))
def test_input_unmodified_with_nan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan, dtype):
"""
Test that convolve doesn't modify the input data
"""
array = [1., 4., 5., np.nan, 5., 7., 8.]
kernel = [0.2, 0.6, 0.2]
x = np.array(array, dtype=dtype)
y = np.array(kernel, dtype=dtype)
# Make pseudoimmutable
x.flags.writeable = False
y.flags.writeable = False
# make copies for post call comparison
x_copy = x.copy()
y_copy = y.copy()
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel, preserve_nan=preserve_nan)
# (NaN == NaN) is False, so only compare non-NaN values for canonical
# equivalence, and then check the NaNs explicitly with np.isnan()
array_is_nan = np.isnan(array)
kernel_is_nan = np.isnan(kernel)
array_not_nan = ~array_is_nan
kernel_not_nan = ~kernel_is_nan
assert np.all(x_copy[array_not_nan] == x[array_not_nan])
assert np.all(y_copy[kernel_not_nan] == y[kernel_not_nan])
assert np.all(np.isnan(x[array_is_nan]))
assert np.all(np.isnan(y[kernel_is_nan]))
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([1., 2., 3.], dtype=dtype_array)
y = np.array([0., 1., 0.], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('convfunc', 'boundary',), BOUNDARIES_AND_CONVOLUTIONS)
def test_unity_1_none(self, boundary, convfunc):
'''
Test that a unit kernel with a single element returns the same array
'''
x = np.array([1., 2., 3.], dtype='>f8')
y = np.array([1.], dtype='>f8')
z = convfunc(x, y, boundary=boundary)
np.testing.assert_allclose(z, x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3(self, boundary):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None).
'''
x = np.array([1., 2., 3.], dtype='>f8')
y = np.array([0., 1., 0.], dtype='>f8')
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([0., 2., 0.], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3(self, boundary):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements
'''
x = np.array([1., 0., 3.], dtype='>f8')
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert np.all(z == np.array([0., 4., 0.], dtype='>f8'))
elif boundary == 'fill':
assert np.all(z == np.array([1., 4., 3.], dtype='>f8'))
elif boundary == 'wrap':
assert np.all(z == np.array([4., 4., 4.], dtype='>f8'))
else:
assert np.all(z == np.array([2., 4., 6.], dtype='>f8'))
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS))
def test_unity_3_withnan(self, boundary, nan_treatment,
normalize_kernel, preserve_nan):
'''
Test that a unit kernel with three elements returns the same array
(except when boundary is None). This version includes a NaN value in
the original array.
'''
x = np.array([1., np.nan, 3.], dtype='>f8')
y = np.array([0., 1., 0.], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
x = np.nan_to_num(z)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([0., 0., 0.], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary', 'nan_treatment',
'normalize_kernel', 'preserve_nan'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS,
NORMALIZE_OPTIONS,
PRESERVE_NAN_OPTIONS))
def test_uniform_3_withnan(self, boundary, nan_treatment, normalize_kernel,
preserve_nan):
'''
Test that the different modes are producing the correct results using
a uniform kernel with three elements. This version includes a NaN
value in the original array.
'''
x = np.array([1., np.nan, 3.], dtype='>f8')
y = np.array([1., 1., 1.], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
normalize_kernel=normalize_kernel,
preserve_nan=preserve_nan)
if preserve_nan:
assert np.isnan(z[1])
z = np.nan_to_num(z)
# boundary, nan_treatment, normalize_kernel
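# For example, with boundary='fill', nan_treatment='interpolate' and
# normalize_kernel=True, the first output element is
# (0*1 + 1*1) / (1 + 1) = 0.5: 'interpolate' renormalizes by the kernel
# weight over the non-NaN samples (the zero fill value counts as valid).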
rslt = {
(None, 'interpolate', True): [0, 2, 0],
(None, 'interpolate', False): [0, 6, 0],
(None, 'fill', True): [0, 4/3., 0],
(None, 'fill', False): [0, 4, 0],
('fill', 'interpolate', True): [1/2., 2, 3/2.],
('fill', 'interpolate', False): [3/2., 6, 9/2.],
('fill', 'fill', True): [1/3., 4/3., 3/3.],
('fill', 'fill', False): [1, 4, 3],
('wrap', 'interpolate', True): [2, 2, 2],
('wrap', 'interpolate', False): [6, 6, 6],
('wrap', 'fill', True): [4/3., 4/3., 4/3.],
('wrap', 'fill', False): [4, 4, 4],
('extend', 'interpolate', True): [1, 2, 3],
('extend', 'interpolate', False): [3, 6, 9],
('extend', 'fill', True): [2/3., 4/3., 6/3.],
('extend', 'fill', False): [2, 4, 6],
}[boundary, nan_treatment, normalize_kernel]
if preserve_nan:
rslt[1] = 0
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
itertools.product(BOUNDARY_OPTIONS,
NORMALIZE_OPTIONS))
def test_zero_sum_kernel(self, boundary, normalize_kernel):
"""
Test that convolve works correctly with zero sum kernels.
"""
if normalize_kernel:
pytest.xfail("You can't normalize by a zero sum kernel")
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = [-1, -1, -1, -1, 8, -1, -1, -1, -1]
assert np.isclose(sum(y), 0, atol=1e-8)
z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)
# boundary, normalize_kernel == False
rslt = {
(None): [0., 0., 0., 0., 0., 0., 0., 0., 0.],
('fill'): [-6., -3., -1., 0., 0., 10., 21., 33., 46.],
('wrap'): [-36., -27., -18., -9., 0., 9., 18., 27., 36.],
('extend'): [-10., -6., -3., -1., 0., 1., 3., 6., 10.]
}[boundary]
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'normalize_kernel'),
itertools.product(BOUNDARY_OPTIONS,
NORMALIZE_OPTIONS))
def test_int_masked_kernel(self, boundary, normalize_kernel):
"""
Test that convolve works correctly with integer masked kernels.
"""
if normalize_kernel:
pytest.xfail("You can't normalize by a zero sum kernel")
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = ma.array([-1, -1, -1, -1, 8, -1, -1, -1, -1], mask=[1, 0, 0, 0, 0, 0, 0, 0, 0], fill_value=0.)
z = convolve(x, y, boundary=boundary, normalize_kernel=normalize_kernel)
# boundary, normalize_kernel == False
rslt = {
(None): [0., 0., 0., 0., 9., 0., 0., 0., 0.],
('fill'): [-1., 3., 6., 8., 9., 10., 21., 33., 46.],
('wrap'): [-31., -21., -11., -1., 9., 10., 20., 30., 40.],
('extend'): [-5., 0., 4., 7., 9., 10., 12., 15., 19.]
}[boundary]
assert_array_almost_equal_nulp(z, np.array(rslt, dtype='>f8'), 10)
class TestConvolve2D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=True)
assert_array_almost_equal_nulp(z, x, 10)
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)
assert_array_almost_equal_nulp(z, np.array(x, float)*9, 10)
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=dtype_array)
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_1x1_none(self, boundary):
'''
Test that a 1x1 unit kernel returns the same array
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3(self, boundary):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([[0., 0., 0.],
[0., 5., 0.],
[0., 0., 0.]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel.
'''
x = np.array([[0., 0., 3.],
[1., 0., 0.],
[0., 2., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 6., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., 4., 3.],
[3., 6., 5.],
[3., 3., 2.]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[6., 6., 6.],
[6., 6., 6.],
[6., 6., 6.]], dtype='>f8'), 10)
else:
assert_array_almost_equal_nulp(z, np.array([[2., 7., 12.],
[4., 6., 8.],
[6., 5., 4.]], dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3_withnan(self, boundary):
'''
Test that a 3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[1., 2., 3.],
[4., np.nan, 6.],
[7., 8., 9.]], dtype='>f8')
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
preserve_nan=True)
assert np.isnan(z[1, 1])
x = np.nan_to_num(z)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3_withnanfilled(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 4.],
[1., np.nan, 0.],
[0., 3., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 8., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., 5., 4.],
[4., 8., 7.],
[4., 4., 3.]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[8., 8., 8.],
[8., 8., 8.],
[8., 8., 8.]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2., 9., 16.],
[5., 8., 11.],
[8., 7., 6.]], dtype='>f8'), 10)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3_withnaninterped(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[0., 0., 4.],
[1., np.nan, 0.],
[0., 3., 0.]], dtype='>f8')
y = np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=True)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1./8, 5./8, 4./8],
[4./8, 8./8, 7./8],
[4./8, 4./8, 3./8]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2./8, 9./8, 16./8],
[5./8, 8./8, 11./8],
[8./8, 7./8, 6./8]], dtype='>f8'), 10)
else:
raise ValueError("Invalid boundary specification")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_non_normalized_kernel_2D(self, boundary):
x = np.array([[0., 0., 4.],
[1., 2., 0.],
[0., 3., 0.]], dtype='float')
y = np.array([[1., -1., 1.],
[-1., 0., -1.],
[1., -1., 1.]], dtype='float')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]], dtype='float'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[1., -5., 2.],
[1., 0., -3.],
[-2., -1., -1.]], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[0., -8., 6.],
[5., 0., -4.],
[2., 3., -4.]], dtype='float'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[2., -1., -2.],
[0., 0., 1.],
[2., -4., 2.]], dtype='float'), 10)
else:
raise ValueError("Invalid boundary specification")
class TestConvolve3D:
def test_list(self):
"""
Test that convolve works correctly when inputs are lists
"""
x = [[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]]
z = convolve(x, x, boundary='fill', fill_value=1, normalize_kernel=False)
assert_array_almost_equal_nulp(z / 27, x, 10)
@pytest.mark.parametrize(('dtype_array', 'dtype_kernel'), VALID_DTYPE_MATRIX)
def test_dtype(self, dtype_array, dtype_kernel):
'''
Test that 32- and 64-bit floats are correctly handled
'''
x = np.array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=dtype_array)
y = np.array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]], dtype=dtype_kernel)
z = convolve(x, y)
assert x.dtype == z.dtype
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_1x1x1_none(self, boundary):
'''
Test that a 1x1x1 unit kernel returns the same array
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 0., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.array([[[1.]]], dtype='>f8')
z = convolve(x, y, boundary=boundary)
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_unity_3x3x3(self, boundary):
'''
Test that a 3x3x3 unit kernel returns the same array (except when
boundary is None).
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.zeros((3, 3, 3), dtype='>f8')
y[1, 1, 1] = 1.
z = convolve(x, y, boundary=boundary)
if boundary is None:
assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 3., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3x3 uniform kernel.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., 3., 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 81., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[23., 28., 16.], [35., 46., 25.], [25., 34., 18.]],
[[40., 50., 23.], [63., 81., 36.], [46., 60., 27.]],
[[32., 40., 16.], [50., 61., 22.], [36., 44., 16.]]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]],
[[81., 81., 81.], [81., 81., 81.], [81., 81., 81.]]], dtype='>f8'), 10)
else:
assert_array_almost_equal_nulp(z, np.array([[[65., 54., 43.], [75., 66., 57.], [85., 78., 71.]],
[[96., 71., 46.], [108., 81., 54.], [120., 91., 62.]],
[[127., 88., 49.], [141., 96., 51.], [155., 104., 53.]]], dtype='>f8'), 10)
@pytest.mark.parametrize(('boundary', 'nan_treatment'),
itertools.product(BOUNDARY_OPTIONS,
NANHANDLING_OPTIONS))
def test_unity_3x3x3_withnan(self, boundary, nan_treatment):
'''
Test that a 3x3x3 unit kernel returns the same array (except when
boundary is None). This version includes a NaN value in the original
array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.zeros((3, 3, 3), dtype='>f8')
y[1, 1, 1] = 1.
z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
preserve_nan=True)
assert np.isnan(z[1, 1, 1])
x = np.nan_to_num(z)
z = np.nan_to_num(z)
if boundary is None:
assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
else:
assert np.all(z == x)
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3_withnan_filled(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='fill',
normalize_kernel=False)
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'), 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],
[32., 43., 22.],
[22., 31., 15.]],
[[37., 47., 20.],
[60., 78., 33.],
[43., 57., 24.]],
[[29., 37., 13.],
[47., 58., 19.],
[33., 41., 13.]]], dtype='>f8'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]],
[[78., 78., 78.], [78., 78., 78.], [78., 78., 78.]]], dtype='>f8'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],
[72., 63., 54.],
[82., 75., 68.]],
[[93., 68., 43.],
[105., 78., 51.],
[117., 88., 59.]],
[[124., 85., 46.],
[138., 93., 48.],
[152., 101., 50.]]],
dtype='>f8'), 10)
else:
raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_uniform_3x3x3_withnan_interped(self, boundary):
'''
Test that the different modes are producing the correct results using
a 3x3x3 uniform kernel. This version includes a NaN value in the
original array.
'''
x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
[[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
[[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
y = np.ones((3, 3, 3), dtype='>f8')
z = convolve(x, y, boundary=boundary, nan_treatment='interpolate',
normalize_kernel=True)
kernsum = y.sum() - 1 # one nan is missing
mid = x[np.isfinite(x)].sum() / kernsum
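# With boundary='wrap' the 3x3x3 kernel covers the whole (wrapped) array,
# so every output pixel equals `mid`, the renormalized sum of the finite
# values.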
if boundary is None:
assert_array_almost_equal_nulp(z, np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 78., 0.], [0., 0., 0.]],
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]],
dtype='>f8')/kernsum, 10)
elif boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([[[20., 25., 13.],
[32., 43., 22.],
[22., 31., 15.]],
[[37., 47., 20.],
[60., 78., 33.],
[43., 57., 24.]],
[[29., 37., 13.],
[47., 58., 19.],
[33., 41., 13.]]],
dtype='>f8')/kernsum, 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.tile(mid.astype('>f8'), [3, 3, 3]), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([[[62., 51., 40.],
[72., 63., 54.],
[82., 75., 68.]],
[[93., 68., 43.],
[105., 78., 51.],
[117., 88., 59.]],
[[124., 85., 46.],
[138., 93., 48.],
[152., 101., 50.]]],
dtype='>f8')/kernsum, 10)
else:
raise ValueError("Invalid Boundary Option")
@pytest.mark.parametrize(('boundary'), BOUNDARY_OPTIONS)
def test_asymmetric_kernel(boundary):
'''
Regression test for #6264: make sure that asymmetric convolution
functions go the right direction
'''
x = np.array([3., 0., 1.], dtype='>f8')
y = np.array([1, 2, 3], dtype='>f8')
z = convolve(x, y, boundary=boundary, normalize_kernel=False)
if boundary == 'fill':
assert_array_almost_equal_nulp(z, np.array([6., 10., 2.], dtype='float'), 10)
elif boundary is None:
assert_array_almost_equal_nulp(z, np.array([0., 10., 0.], dtype='float'), 10)
elif boundary == 'extend':
assert_array_almost_equal_nulp(z, np.array([15., 10., 3.], dtype='float'), 10)
elif boundary == 'wrap':
assert_array_almost_equal_nulp(z, np.array([9., 10., 5.], dtype='float'), 10)
@pytest.mark.parametrize('ndims', (1, 2, 3))
def test_convolution_consistency(ndims):
np.random.seed(0)
array = np.random.randn(*([3]*ndims))
np.random.seed(0)
kernel = np.random.rand(*([3]*ndims))
conv_f = convolve_fft(array, kernel, boundary='fill')
conv_d = convolve(array, kernel, boundary='fill')
assert_array_almost_equal_nulp(conv_f, conv_d, 30)
def test_astropy_convolution_against_numpy():
x = np.array([1, 2, 3])
y = np.array([5, 4, 3, 2, 1])
assert_array_almost_equal(np.convolve(y, x, 'same'),
convolve(y, x, normalize_kernel=False))
assert_array_almost_equal(np.convolve(y, x, 'same'),
convolve_fft(y, x, normalize_kernel=False))
@pytest.mark.skipif('not HAS_SCIPY')
def test_astropy_convolution_against_scipy():
from scipy.signal import fftconvolve
x = np.array([1, 2, 3])
y = np.array([5, 4, 3, 2, 1])
assert_array_almost_equal(fftconvolve(y, x, 'same'),
convolve(y, x, normalize_kernel=False))
assert_array_almost_equal(fftconvolve(y, x, 'same'),
convolve_fft(y, x, normalize_kernel=False))
@pytest.mark.skipif('not HAS_PANDAS')
def test_regression_6099():
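# Regression test for astropy issue #6099: convolve should accept
# array-like input such as a pandas Series and give the same result as
# for a plain ndarray.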
wave = np.array((np.linspace(5000, 5100, 10)))
boxcar = 3
nonseries_result = convolve(wave, np.ones((boxcar,))/boxcar)
wave_series = pandas.Series(wave)
series_result = convolve(wave_series, np.ones((boxcar,))/boxcar)
assert_array_almost_equal(nonseries_result, series_result)
def test_invalid_array_convolve():
kernel = np.ones(3)/3.
with pytest.raises(TypeError):
convolve('glork', kernel)
|
3f30ac5c58e3afafa44a40bc61491b8ef7f5cb939fe2c9df7731c0c185a2846a | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# namedtuple is needed for find_mod_objs so it can have a non-local module
from collections import namedtuple
import pytest
from .. import introspection
from ..introspection import (find_current_module, find_mod_objs,
isinstancemethod, minversion)
def test_pkg_finder():
"""
Tests that the `find_current_module` function works. Note that
this also implicitly tests compat.misc._patched_getmodule
"""
mod1 = 'astropy.utils.introspection'
mod2 = 'astropy.utils.tests.test_introspection'
mod3 = 'astropy.utils.tests.test_introspection'
assert find_current_module(0).__name__ == mod1
assert find_current_module(1).__name__ == mod2
assert find_current_module(0, True).__name__ == mod3
def test_find_current_mod():
from sys import getrecursionlimit
thismodnm = __name__
assert find_current_module(0) is introspection
assert find_current_module(1).__name__ == thismodnm
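# A depth beyond the actual call stack yields None rather than raising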
assert find_current_module(getrecursionlimit() + 1) is None
assert find_current_module(0, True).__name__ == thismodnm
assert find_current_module(0, [introspection]).__name__ == thismodnm
assert find_current_module(0, ['astropy.utils.introspection']).__name__ == thismodnm
with pytest.raises(ImportError):
find_current_module(0, ['faddfdsasewrweriopunjlfiurrhujnkflgwhu'])
def test_find_mod_objs():
lnms, fqns, objs = find_mod_objs('astropy')
# this import is after the above call intentionally to make sure
# find_mod_objs properly imports astropy on its own
import astropy
# just check for astropy.test ... other things might be added, so we
# shouldn't check that it's the only thing
assert 'test' in lnms
assert astropy.test in objs
lnms, fqns, objs = find_mod_objs(__name__, onlylocals=False)
assert 'namedtuple' in lnms
assert 'collections.namedtuple' in fqns
assert namedtuple in objs
lnms, fqns, objs = find_mod_objs(__name__, onlylocals=True)
assert 'namedtuple' not in lnms
assert 'collections.namedtuple' not in fqns
assert namedtuple not in objs
def test_minversion():
from types import ModuleType
test_module = ModuleType("test_module")
test_module.__version__ = '0.12.2'
good_versions = ['0.12', '0.12.1', '0.12.0.dev']
bad_versions = ['1', '1.2rc1']
for version in good_versions:
assert minversion(test_module, version)
for version in bad_versions:
assert not minversion(test_module, version)
|
783d7e485e201db228e2a35c036ec4c613d4045596220959c0c5ea563f583f7f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from ...utils.data import get_pkg_data_contents, get_pkg_data_filename
from ...time import Time
from ... import units as u
from ..wcs import WCS, Sip
from ..utils import (proj_plane_pixel_scales, proj_plane_pixel_area,
is_proj_plane_distorted,
non_celestial_pixel_scales, wcs_to_celestial_frame,
celestial_frame_to_wcs, skycoord_to_pixel,
pixel_to_skycoord, custom_wcs_to_frame_mappings,
custom_frame_to_wcs_mappings, add_stokes_axis_to_wcs)
def test_wcs_dropping():
wcs = WCS(naxis=4)
wcs.wcs.pc = np.zeros([4, 4])
np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
pc = wcs.wcs.pc # for later use below
dropped = wcs.dropaxis(0)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
dropped = wcs.dropaxis(1)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
dropped = wcs.dropaxis(2)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
dropped = wcs.dropaxis(3)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
wcs = WCS(naxis=4)
wcs.wcs.cd = pc
dropped = wcs.dropaxis(0)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([2, 3, 4]))
dropped = wcs.dropaxis(1)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 3, 4]))
dropped = wcs.dropaxis(2)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 4]))
dropped = wcs.dropaxis(3)
assert np.all(dropped.wcs.get_pc().diagonal() == np.array([1, 2, 3]))
def test_wcs_swapping():
wcs = WCS(naxis=4)
wcs.wcs.pc = np.zeros([4, 4])
np.fill_diagonal(wcs.wcs.pc, np.arange(1, 5))
pc = wcs.wcs.pc # for later use below
swapped = wcs.swapaxes(0, 1)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))
swapped = wcs.swapaxes(0, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))
swapped = wcs.swapaxes(2, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))
wcs = WCS(naxis=4)
wcs.wcs.cd = pc
swapped = wcs.swapaxes(0, 1)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([2, 1, 3, 4]))
swapped = wcs.swapaxes(0, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([4, 2, 3, 1]))
swapped = wcs.swapaxes(2, 3)
assert np.all(swapped.wcs.get_pc().diagonal() == np.array([1, 2, 4, 3]))
@pytest.mark.parametrize('ndim', (2, 3))
def test_add_stokes(ndim):
wcs = WCS(naxis=ndim)
for ii in range(ndim + 1):
outwcs = add_stokes_axis_to_wcs(wcs, ii)
assert outwcs.wcs.naxis == ndim + 1
assert outwcs.wcs.ctype[ii] == 'STOKES'
assert outwcs.wcs.cname[ii] == 'STOKES'
def test_slice():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
mywcs._naxis = [1000, 500]
pscale = 0.1 # from cdelt
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
assert np.all(slice_wcs.wcs.crpix == np.array([1, 0]))
assert slice_wcs._naxis == [1000, 499]
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.wcs_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
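# With a start and step, CRPIX transforms (in the FITS 1-based,
# pixel-center convention) as crpix' = (crpix - start - 0.5) / step + 0.5,
# which yields the expected values asserted below.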
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
assert slice_wcs._naxis == [250, 250]
slice_wcs = mywcs.slice([slice(None, None, 2), slice(0, None, 2)])
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.2]))
assert slice_wcs._naxis == [500, 250]
# Non-integral values do not alter the naxis attribute
slice_wcs = mywcs.slice([slice(50.), slice(20.)])
assert slice_wcs._naxis == [1000, 500]
slice_wcs = mywcs.slice([slice(50.), slice(20)])
assert slice_wcs._naxis == [20, 500]
slice_wcs = mywcs.slice([slice(50), slice(20.5)])
assert slice_wcs._naxis == [1000, 50]
def test_slice_with_sip():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
mywcs._naxis = [1000, 500]
mywcs.wcs.ctype = ['RA---TAN-SIP', 'DEC--TAN-SIP']
a = np.array(
[[0, 0, 5.33092692e-08, 3.73753773e-11, -2.02111473e-13],
[0, 2.44084308e-05, 2.81394789e-11, 5.17856895e-13, 0.0],
[-2.41334657e-07, 1.29289255e-10, 2.35753629e-14, 0.0, 0.0],
[-2.37162007e-10, 5.43714947e-13, 0.0, 0.0, 0.0],
[-2.81029767e-13, 0.0, 0.0, 0.0, 0.0]]
)
b = np.array(
[[0, 0, 2.99270374e-05, -2.38136074e-10, 7.23205168e-13],
[0, -1.71073858e-07, 6.31243431e-11, -5.16744347e-14, 0.0],
[6.95458963e-06, -3.08278961e-10, -1.75800917e-13, 0.0, 0.0],
[3.51974159e-11, 5.60993016e-14, 0.0, 0.0, 0.0],
[-5.92438525e-13, 0.0, 0.0, 0.0, 0.0]]
)
mywcs.sip = Sip(a, b, None, None, mywcs.wcs.crpix)
mywcs.wcs.set()
pscale = 0.1 # from cdelt
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)])
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)])
# test that CRPIX maps to CRVAL:
assert_allclose(
slice_wcs.all_pix2world(*slice_wcs.wcs.crpix, 1),
slice_wcs.wcs.crval, rtol=0.0, atol=1e-6 * pscale
)
def test_slice_getitem():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
slice_wcs = mywcs[1::2, 0::4]
assert np.all(slice_wcs.wcs.crpix == np.array([0.625, 0.25]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
mywcs.wcs.crpix = [2, 2]
slice_wcs = mywcs[1::2, 0::4]
assert np.all(slice_wcs.wcs.crpix == np.array([0.875, 0.75]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.4, 0.2]))
# Default: numpy order
slice_wcs = mywcs[1::2]
assert np.all(slice_wcs.wcs.crpix == np.array([2, 0.75]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.1, 0.2]))
def test_slice_fitsorder():
mywcs = WCS(naxis=2)
mywcs.wcs.crval = [1, 1]
mywcs.wcs.cdelt = [0.1, 0.1]
mywcs.wcs.crpix = [1, 1]
slice_wcs = mywcs.slice([slice(1, None), slice(0, None)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0, 1]))
slice_wcs = mywcs.slice([slice(1, None, 2), slice(0, None, 4)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 0.625]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.4]))
slice_wcs = mywcs.slice([slice(1, None, 2)], numpy_order=False)
assert np.all(slice_wcs.wcs.crpix == np.array([0.25, 1]))
assert np.all(slice_wcs.wcs.cdelt == np.array([0.2, 0.1]))
def test_invalid_slice():
mywcs = WCS(naxis=2)
with pytest.raises(ValueError) as exc:
mywcs[0]
assert exc.value.args[0] == ("Cannot downsample a WCS with indexing. Use "
"wcs.sub or wcs.dropaxis if you want to remove "
"axes.")
with pytest.raises(ValueError) as exc:
mywcs[0, ::2]
assert exc.value.args[0] == ("Cannot downsample a WCS with indexing. Use "
"wcs.sub or wcs.dropaxis if you want to remove "
"axes.")
def test_axis_names():
mywcs = WCS(naxis=4)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT-LSR', 'STOKES']
assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']
mywcs.wcs.cname = ['RA', 'DEC', 'VOPT', 'STOKES']
assert mywcs.axis_type_names == ['RA', 'DEC', 'VOPT', 'STOKES']
def test_celestial():
mywcs = WCS(naxis=4)
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT', 'STOKES']
cel = mywcs.celestial
assert tuple(cel.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert cel.axis_type_names == ['RA', 'DEC']
def test_wcs_to_celestial_frame():
# Import astropy.coordinates here to avoid circular imports
from ...coordinates.builtin_frames import ICRS, ITRS, FK5, FK4, Galactic
mywcs = WCS(naxis=2)
with pytest.raises(ValueError) as exc:
assert wcs_to_celestial_frame(mywcs) is None
assert exc.value.args[0] == "Could not determine celestial frame corresponding to the specified WCS object"
mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
with pytest.raises(ValueError):
assert wcs_to_celestial_frame(mywcs) is None
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
mywcs.wcs.equinox = 1987.
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, FK5)
assert frame.equinox == Time(1987., format='jyear')
mywcs.wcs.equinox = 1982
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, FK4)
assert frame.equinox == Time(1982., format='byear')
mywcs.wcs.equinox = np.nan
mywcs.wcs.ctype = ['GLON-SIN', 'GLAT-SIN']
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, Galactic)
mywcs.wcs.ctype = ['TLON-CAR', 'TLAT-CAR']
mywcs.wcs.dateobs = '2017-08-17T12:41:04.430'
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ITRS)
assert frame.obstime == Time('2017-08-17T12:41:04.430')
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
mywcs.wcs.radesys = 'ICRS'
for equinox in [np.nan, 1987, 1982]:
mywcs.wcs.equinox = equinox
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
# Flipped order
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['DEC--TAN', 'RA---TAN']
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
# More than two dimensions
mywcs = WCS(naxis=3)
mywcs.wcs.ctype = ['DEC--TAN', 'VELOCITY', 'RA---TAN']
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, ICRS)
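# Taken together, the asserts above exercise the identification rules: plain
# RA/DEC with no equinox or RADESYS maps to ICRS; an equinox of 1987 selects
# FK5 (Julian) while 1982 selects FK4 (Besselian), consistent with the
# conventional 1984 cutover; GLON/GLAT maps to Galactic; TLON/TLAT with a
# DATE-OBS maps to ITRS at that obstime; an explicit RADESYS='ICRS' overrides
# any equinox; and axis order or extra non-celestial axes do not matter.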
def test_wcs_to_celestial_frame_extend():
mywcs = WCS(naxis=2)
mywcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
with pytest.raises(ValueError):
wcs_to_celestial_frame(mywcs)
class OffsetFrame:
pass
def identify_offset(wcs):
if wcs.wcs.ctype[0].endswith('OFFSET') and wcs.wcs.ctype[1].endswith('OFFSET'):
return OffsetFrame()
with custom_wcs_to_frame_mappings(identify_offset):
frame = wcs_to_celestial_frame(mywcs)
assert isinstance(frame, OffsetFrame)
# Check that things are back to normal after the context manager
with pytest.raises(ValueError):
wcs_to_celestial_frame(mywcs)
def test_celestial_frame_to_wcs():
# Import astropy.coordinates here to avoid circular imports
from ...coordinates import ICRS, ITRS, FK5, FK4, FK4NoETerms, Galactic, BaseCoordinateFrame
class FakeFrame(BaseCoordinateFrame):
pass
frame = FakeFrame()
with pytest.raises(ValueError) as exc:
celestial_frame_to_wcs(frame)
assert exc.value.args[0] == ("Could not determine WCS corresponding to "
"the specified coordinate frame.")
frame = ICRS()
mywcs = celestial_frame_to_wcs(frame)
mywcs.wcs.set()
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'ICRS'
assert np.isnan(mywcs.wcs.equinox)
assert mywcs.wcs.lonpole == 180
assert mywcs.wcs.latpole == 0
frame = FK5(equinox='J1987')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK5'
assert mywcs.wcs.equinox == 1987.
frame = FK4(equinox='B1982')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK4'
assert mywcs.wcs.equinox == 1982.
frame = FK4NoETerms(equinox='B1982')
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('RA---TAN', 'DEC--TAN')
assert mywcs.wcs.radesys == 'FK4-NO-E'
assert mywcs.wcs.equinox == 1982.
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('GLON-TAN', 'GLAT-TAN')
assert mywcs.wcs.radesys == ''
assert np.isnan(mywcs.wcs.equinox)
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('GLON-CAR', 'GLAT-CAR')
assert mywcs.wcs.radesys == ''
assert np.isnan(mywcs.wcs.equinox)
frame = Galactic()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
mywcs.wcs.crval = [100, -30]
mywcs.wcs.set()
assert_allclose((mywcs.wcs.lonpole, mywcs.wcs.latpole), (180, 60))
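    # A reading of the assert above (not of the wcslib internals): with the
    # reference point at declination -30 the native pole of the cylindrical
    # projection lies 90 degrees away along the meridian, at declination
    # -30 + 90 = 60, while LONPOLE keeps its 180 default for a reference
    # point below the projection's native latitude of origin.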
frame = ITRS(obstime=Time('2017-08-17T12:41:04.43'))
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')
assert mywcs.wcs.radesys == 'ITRS'
assert mywcs.wcs.dateobs == '2017-08-17T12:41:04.430'
frame = ITRS()
mywcs = celestial_frame_to_wcs(frame, projection='CAR')
assert tuple(mywcs.wcs.ctype) == ('TLON-CAR', 'TLAT-CAR')
assert mywcs.wcs.radesys == 'ITRS'
assert mywcs.wcs.dateobs == Time('J2000').utc.isot
def test_celestial_frame_to_wcs_extend():
class OffsetFrame:
pass
frame = OffsetFrame()
with pytest.raises(ValueError):
celestial_frame_to_wcs(frame)
def identify_offset(frame, projection=None):
if isinstance(frame, OffsetFrame):
wcs = WCS(naxis=2)
wcs.wcs.ctype = ['XOFFSET', 'YOFFSET']
return wcs
with custom_frame_to_wcs_mappings(identify_offset):
mywcs = celestial_frame_to_wcs(frame)
assert tuple(mywcs.wcs.ctype) == ('XOFFSET', 'YOFFSET')
# Check that things are back to normal after the context manager
with pytest.raises(ValueError):
celestial_frame_to_wcs(frame)
def test_pixscale_nodrop():
mywcs = WCS(naxis=2)
mywcs.wcs.cdelt = [0.1, 0.2]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
mywcs.wcs.cdelt = [-0.1, 0.2]
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
def test_pixscale_withdrop():
mywcs = WCS(naxis=3)
mywcs.wcs.cdelt = [0.1, 0.2, 1]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'VOPT']
assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
mywcs.wcs.cdelt = [-0.1, 0.2, 1]
assert_almost_equal(proj_plane_pixel_scales(mywcs.celestial), (0.1, 0.2))
def test_pixscale_cd():
mywcs = WCS(naxis=2)
mywcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.2))
@pytest.mark.parametrize('angle',
(30, 45, 60, 75))
def test_pixscale_cd_rotated(angle):
mywcs = WCS(naxis=2)
rho = np.radians(angle)
scale = 0.1
mywcs.wcs.cd = [[scale * np.cos(rho), -scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
@pytest.mark.parametrize('angle',
(30, 45, 60, 75))
def test_pixscale_pc_rotated(angle):
mywcs = WCS(naxis=2)
rho = np.radians(angle)
scale = 0.1
mywcs.wcs.cdelt = [-scale, scale]
mywcs.wcs.pc = [[np.cos(rho), -np.sin(rho)],
[np.sin(rho), np.cos(rho)]]
mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
assert_almost_equal(proj_plane_pixel_scales(mywcs), (0.1, 0.1))
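# A minimal sketch of the quantity the two rotated tests above pin down
# (``_plane_scales`` is a hypothetical helper inferred from the asserts, not
# the astropy implementation): the per-axis scales are the Euclidean norms of
# the columns of the pixel scale matrix, which are invariant under rotation,
# hence (0.1, 0.1) for every angle.
def _plane_scales(pixel_scale_matrix):
    return np.sqrt((pixel_scale_matrix ** 2).sum(axis=0))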
@pytest.mark.parametrize(('cdelt', 'pc', 'pccd'),
(([0.1, 0.2], np.eye(2), np.diag([0.1, 0.2])),
([0.1, 0.2, 0.3], np.eye(3), np.diag([0.1, 0.2, 0.3])),
([1, 1, 1], np.diag([0.1, 0.2, 0.3]), np.diag([0.1, 0.2, 0.3]))))
def test_pixel_scale_matrix(cdelt, pc, pccd):
mywcs = WCS(naxis=(len(cdelt)))
mywcs.wcs.cdelt = cdelt
mywcs.wcs.pc = pc
assert_almost_equal(mywcs.pixel_scale_matrix, pccd)
@pytest.mark.parametrize(('ctype', 'cel'),
((['RA---TAN', 'DEC--TAN'], True),
(['RA---TAN', 'DEC--TAN', 'FREQ'], False),
(['RA---TAN', 'FREQ'], False),))
def test_is_celestial(ctype, cel):
mywcs = WCS(naxis=len(ctype))
mywcs.wcs.ctype = ctype
assert mywcs.is_celestial == cel
@pytest.mark.parametrize(('ctype', 'cel'),
((['RA---TAN', 'DEC--TAN'], True),
(['RA---TAN', 'DEC--TAN', 'FREQ'], True),
(['RA---TAN', 'FREQ'], False),))
def test_has_celestial(ctype, cel):
mywcs = WCS(naxis=len(ctype))
mywcs.wcs.ctype = ctype
assert mywcs.has_celestial == cel
@pytest.mark.parametrize(('cdelt', 'pc', 'cd'),
((np.array([0.1, 0.2]), np.eye(2), np.eye(2)),
(np.array([1, 1]), np.diag([0.1, 0.2]), np.eye(2)),
(np.array([0.1, 0.2]), np.eye(2), None),
(np.array([0.1, 0.2]), None, np.eye(2)),
))
def test_noncelestial_scale(cdelt, pc, cd):
mywcs = WCS(naxis=2)
if cd is not None:
mywcs.wcs.cd = cd
if pc is not None:
mywcs.wcs.pc = pc
mywcs.wcs.cdelt = cdelt
mywcs.wcs.ctype = ['RA---TAN', 'FREQ']
ps = non_celestial_pixel_scales(mywcs)
assert_almost_equal(ps.to_value(u.deg), np.array([0.1, 0.2]))
@pytest.mark.parametrize('mode', ['all', 'wcs'])
def test_skycoord_to_pixel(mode):
# Import astropy.coordinates here to avoid circular imports
from ...coordinates import SkyCoord
header = get_pkg_data_contents('maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
    # Make sure you can specify a different class using the ``cls`` keyword
class SkyCoord2(SkyCoord):
pass
new2 = pixel_to_skycoord(xp, yp, wcs, mode=mode,
cls=SkyCoord2).transform_to('icrs')
assert new2.__class__ is SkyCoord2
assert_allclose(new2.ra.degree, ref.ra.degree)
assert_allclose(new2.dec.degree, ref.dec.degree)
def test_is_proj_plane_distorted():
# non-orthogonal CD:
wcs = WCS(naxis=2)
wcs.wcs.cd = [[-0.1, 0], [0, 0.2]]
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    assert is_proj_plane_distorted(wcs)
    # almost orthogonal CD:
    wcs.wcs.cd = [[0.1 + 2.0e-7, 1.7e-7], [1.2e-7, 0.1 - 1.3e-7]]
    assert not is_proj_plane_distorted(wcs)
    # real case:
    header = get_pkg_data_filename('data/sip.fits')
    wcs = WCS(header)
    assert is_proj_plane_distorted(wcs)
@pytest.mark.parametrize('mode', ['all', 'wcs'])
def test_skycoord_to_pixel_distortions(mode):
# Import astropy.coordinates here to avoid circular imports
from ...coordinates import SkyCoord
header = get_pkg_data_filename('data/sip.fits')
wcs = WCS(header)
ref = SkyCoord(202.50 * u.deg, 47.19 * u.deg, frame='icrs')
xp, yp = skycoord_to_pixel(ref, wcs, mode=mode)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
|
8578ecf18a4bb45e4881013661f7434b850acf29d1877055533bf13c357c9206 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import functools
import datetime
from copy import deepcopy
import numpy as np
from numpy.testing import assert_allclose
from ...tests.helper import catch_warnings, pytest
from ...utils import isiterable
from .. import Time, ScaleValueError, STANDARD_TIME_SCALES, TimeString, TimezoneInfo
from ...coordinates import EarthLocation
from ... import units as u
from ... import _erfa as erfa
from ...table import Column
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
allclose_jd = functools.partial(np.allclose, rtol=2. ** -52, atol=0)
allclose_jd2 = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52) # 20 ps atol
allclose_sec = functools.partial(np.allclose, rtol=2. ** -52,
atol=2. ** -52 * 24 * 3600) # 20 ps atol
allclose_year = functools.partial(np.allclose, rtol=2. ** -52,
atol=0.) # 14 microsec at current epoch
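# 2.**-52 is the double-precision machine epsilon, so these comparisons are as
# tight as float64 allows: the epsilon-sized atol on jd2 corresponds to about
# 2**-52 day ~ 20 ps, and for years the pure rtol amounts to roughly
# 2.2e-16 * 2000 yr ~ 4.5e-13 yr ~ 14 microseconds at the current epoch,
# matching the inline notes above.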
def setup_function(func):
func.FORMATS_ORIG = deepcopy(Time.FORMATS)
def teardown_function(func):
Time.FORMATS.clear()
Time.FORMATS.update(func.FORMATS_ORIG)
class TestBasic():
"""Basic tests stemming from initial example and API reference"""
def test_simple(self):
times = ['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00']
t = Time(times, format='iso', scale='utc')
assert (repr(t) == "<Time object: scale='utc' format='iso' "
"value=['1999-01-01 00:00:00.123' '2010-01-01 00:00:00.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5+1.4288980208333335e-06,
-0.50000000e+00]))
# Set scale to TAI
t = t.tai
assert (repr(t) == "<Time object: scale='tai' format='iso' "
"value=['1999-01-01 00:00:32.123' '2010-01-01 00:00:34.000']>")
assert allclose_jd(t.jd1, np.array([2451180., 2455198.]))
assert allclose_jd2(t.jd2, np.array([-0.5+0.00037179926839122024,
-0.5+0.00039351851851851852]))
# Get a new ``Time`` object which is referenced to the TT scale
        # (internal jd1 and jd2 are now with respect to the TT scale)
assert (repr(t.tt) == "<Time object: scale='tt' format='iso' "
"value=['1999-01-01 00:01:04.307' '2010-01-01 00:01:06.184']>")
# Get the representation of the ``Time`` object in a particular format
# (in this case seconds since 1998.0). This returns either a scalar or
        # array, depending on whether the input was a scalar or array
assert allclose_sec(t.cxcsec, np.array([31536064.307456788, 378691266.18400002]))
def test_different_dimensions(self):
"""Test scalars, vector, and higher-dimensions"""
# scalar
val, val1 = 2450000.0, 0.125
t1 = Time(val, val1, format='jd')
assert t1.isscalar is True and t1.shape == ()
# vector
val = np.arange(2450000., 2450010.)
t2 = Time(val, format='jd')
assert t2.isscalar is False and t2.shape == val.shape
# explicitly check broadcasting for mixed vector, scalar.
val2 = 0.
t3 = Time(val, val2, format='jd')
assert t3.isscalar is False and t3.shape == val.shape
val2 = (np.arange(5.)/10.).reshape(5, 1)
# now see if broadcasting to two-dimensional works
t4 = Time(val, val2, format='jd')
assert t4.isscalar is False
assert t4.shape == np.broadcast(val, val2).shape
@pytest.mark.parametrize('value', [2455197.5, [2455197.5]])
def test_copy_time(self, value):
"""Test copying the values of a Time object by passing it into the
Time initializer.
"""
t = Time(value, format='jd', scale='utc')
t2 = Time(t, copy=False)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is t2._time.jd1
assert t._time.jd2 is t2._time.jd2
t2 = Time(t, copy=True)
assert np.all(t.jd - t2.jd == 0)
assert np.all((t - t2).jd == 0)
assert t._time.jd1 is not t2._time.jd1
assert t._time.jd2 is not t2._time.jd2
# Include initializers
t2 = Time(t, format='iso', scale='tai', precision=1)
assert t2.value == '2010-01-01 00:00:34.0'
t2 = Time(t, format='iso', scale='tai', out_subfmt='date')
assert t2.value == '2010-01-01'
def test_getitem(self):
"""Test that Time objects holding arrays are properly subscriptable,
set isscalar as appropriate, and also subscript delta_ut1_utc, etc."""
mjd = np.arange(50000, 50010)
t = Time(mjd, format='mjd', scale='utc', location=('45d', '50d'))
t1 = t[3]
assert t1.isscalar is True
assert t1._time.jd1 == t._time.jd1[3]
assert t1.location is t.location
t1a = Time(mjd[3], format='mjd', scale='utc')
assert t1a.isscalar is True
assert np.all(t1._time.jd1 == t1a._time.jd1)
t1b = Time(t[3])
assert t1b.isscalar is True
assert np.all(t1._time.jd1 == t1b._time.jd1)
t2 = t[4:6]
assert t2.isscalar is False
assert np.all(t2._time.jd1 == t._time.jd1[4:6])
assert t2.location is t.location
t2a = Time(t[4:6])
assert t2a.isscalar is False
assert np.all(t2a._time.jd1 == t._time.jd1[4:6])
t2b = Time([t[4], t[5]])
assert t2b.isscalar is False
assert np.all(t2b._time.jd1 == t._time.jd1[4:6])
t2c = Time((t[4], t[5]))
assert t2c.isscalar is False
assert np.all(t2c._time.jd1 == t._time.jd1[4:6])
t.delta_tdb_tt = np.arange(len(t)) # Explicitly set (not testing .tdb)
t3 = t[4:6]
assert np.all(t3._delta_tdb_tt == t._delta_tdb_tt[4:6])
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.arange(len(mjd)), np.arange(len(mjd))))
t5 = t4[3]
assert t5.location == t4.location[3]
t6 = t4[4:6]
assert np.all(t6.location == t4.location[4:6])
# check it is a view
        # (via ndarray, since the quantity setter is problematic for structured arrays)
allzeros = np.array((0., 0., 0.), dtype=t4.location.dtype)
assert t6.location.view(np.ndarray)[-1] != allzeros
assert t4.location.view(np.ndarray)[5] != allzeros
t6.location.view(np.ndarray)[-1] = allzeros
assert t4.location.view(np.ndarray)[5] == allzeros
# Test subscription also works for two-dimensional arrays.
frac = np.arange(0., 0.999, 0.2)
t7 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=('45d', '50d'))
assert t7[0, 0]._time.jd1 == t7._time.jd1[0, 0]
assert t7[0, 0].isscalar is True
assert np.all(t7[5]._time.jd1 == t7._time.jd1[5])
assert np.all(t7[5]._time.jd2 == t7._time.jd2[5])
assert np.all(t7[:, 2]._time.jd1 == t7._time.jd1[:, 2])
assert np.all(t7[:, 2]._time.jd2 == t7._time.jd2[:, 2])
assert np.all(t7[:, 0]._time.jd1 == t._time.jd1)
assert np.all(t7[:, 0]._time.jd2 == t._time.jd2)
# Get tdb to check that delta_tdb_tt attribute is sliced properly.
t7_tdb = t7.tdb
assert t7_tdb[0, 0].delta_tdb_tt == t7_tdb.delta_tdb_tt[0, 0]
assert np.all(t7_tdb[5].delta_tdb_tt == t7_tdb.delta_tdb_tt[5])
assert np.all(t7_tdb[:, 2].delta_tdb_tt == t7_tdb.delta_tdb_tt[:, 2])
# Explicitly set delta_tdb_tt attribute. Now it should not be sliced.
t7.delta_tdb_tt = 0.1
t7_tdb2 = t7.tdb
assert t7_tdb2[0, 0].delta_tdb_tt == 0.1
assert t7_tdb2[5].delta_tdb_tt == 0.1
assert t7_tdb2[:, 2].delta_tdb_tt == 0.1
# Check broadcasting of location.
t8 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc',
location=(np.arange(len(frac)), np.arange(len(frac))))
assert t8[0, 0].location == t8.location[0, 0]
assert np.all(t8[5].location == t8.location[5])
assert np.all(t8[:, 2].location == t8.location[:, 2])
# Finally check empty array.
t9 = t[:0]
assert t9.isscalar is False
assert t9.shape == (0,)
assert t9.size == 0
def test_properties(self):
"""Use properties to convert scales and formats. Note that the UT1 to
UTC transformation requires a supplementary value (``delta_ut1_utc``)
that can be obtained by interpolating from a table supplied by IERS.
This is tested separately."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert allclose_jd(t.jd, 2455197.5)
assert t.iso == '2010-01-01 00:00:00.000'
assert t.tt.iso == '2010-01-01 00:01:06.184'
assert t.tai.fits == '2010-01-01T00:00:34.000(TAI)'
assert allclose_jd(t.utc.jd, 2455197.5)
assert allclose_jd(t.ut1.jd, 2455197.500003867)
assert t.tcg.isot == '2010-01-01T00:01:06.910'
assert allclose_sec(t.unix, 1262304000.0)
assert allclose_sec(t.cxcsec, 378691266.184)
assert allclose_sec(t.gps, 946339215.0)
assert t.datetime == datetime.datetime(2010, 1, 1)
def test_precision(self):
"""Set the output precision which is used for some formats. This is
also a test of the code that provides a dict for global and instance
options."""
t = Time('2010-01-01 00:00:00', format='iso', scale='utc')
# Uses initial class-defined precision=3
assert t.iso == '2010-01-01 00:00:00.000'
# Set instance precision to 9
t.precision = 9
assert t.iso == '2010-01-01 00:00:00.000000000'
assert t.tai.utc.iso == '2010-01-01 00:00:00.000000000'
def test_transforms(self):
"""Transform from UTC to all supported time scales (TAI, TCB, TCG,
TDB, TT, UT1, UTC). This requires auxiliary information (latitude and
longitude)."""
lat = 19.48125
lon = -155.933222
t = Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(lon, lat))
t.delta_ut1_utc = 0.3341 # Explicitly set one part of the xform
assert t.utc.iso == '2006-01-15 21:24:37.500000'
assert t.ut1.iso == '2006-01-15 21:24:37.834100'
assert t.tai.iso == '2006-01-15 21:25:10.500000'
assert t.tt.iso == '2006-01-15 21:25:42.684000'
assert t.tcg.iso == '2006-01-15 21:25:43.322690'
assert t.tdb.iso == '2006-01-15 21:25:42.684373'
assert t.tcb.iso == '2006-01-15 21:25:56.893952'
def test_location(self):
"""Check that location creates an EarthLocation object, and that
such objects can be used as arguments.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(lon, lat))
assert isinstance(t.location, EarthLocation)
location = EarthLocation(lon, lat)
t2 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=location)
assert isinstance(t2.location, EarthLocation)
assert t2.location == t.location
t3 = Time(['2006-01-15 21:24:37.5'], format='iso', scale='utc',
precision=6, location=(location.x, location.y, location.z))
assert isinstance(t3.location, EarthLocation)
assert t3.location == t.location
def test_location_array(self):
"""Check that location arrays are checked for size and used
for the corresponding times. Also checks that erfa
can handle array-valued locations, and can broadcast these if needed.
"""
lat = 19.48125
lon = -155.933222
t = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc',
precision=6, location=(lon, lat))
assert np.all(t.utc.iso == '2006-01-15 21:24:37.500000')
assert np.all(t.tdb.iso[0] == '2006-01-15 21:25:42.684373')
t2 = Time(['2006-01-15 21:24:37.5']*2, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
assert np.all(t2.utc.iso == '2006-01-15 21:24:37.500000')
assert t2.tdb.iso[0] == '2006-01-15 21:25:42.684373'
assert t2.tdb.iso[1] != '2006-01-15 21:25:42.684373'
with pytest.raises(ValueError): # 1 time, but two locations
Time('2006-01-15 21:24:37.5', format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
with pytest.raises(ValueError): # 3 times, but two locations
Time(['2006-01-15 21:24:37.5']*3, format='iso', scale='utc',
precision=6, location=(np.array([lon, 0]),
np.array([lat, 0])))
# multidimensional
mjd = np.arange(50000., 50008.).reshape(4, 2)
t3 = Time(mjd, format='mjd', scale='utc', location=(lon, lat))
assert t3.shape == (4, 2)
assert t3.location.shape == ()
assert t3.tdb.shape == t3.shape
t4 = Time(mjd, format='mjd', scale='utc',
location=(np.array([lon, 0]), np.array([lat, 0])))
assert t4.shape == (4, 2)
assert t4.location.shape == t4.shape
assert t4.tdb.shape == t4.shape
t5 = Time(mjd, format='mjd', scale='utc',
location=(np.array([[lon], [0], [0], [0]]),
np.array([[lat], [0], [0], [0]])))
assert t5.shape == (4, 2)
assert t5.location.shape == t5.shape
assert t5.tdb.shape == t5.shape
def test_all_scale_transforms(self):
"""Test that standard scale transforms work. Does not test correctness,
except reversibility [#2074]. Also tests that standard scales can't be
converted to local scales"""
lat = 19.48125
lon = -155.933222
for scale1 in STANDARD_TIME_SCALES:
t1 = Time('2006-01-15 21:24:37.5', format='iso', scale=scale1,
location=(lon, lat))
for scale2 in STANDARD_TIME_SCALES:
t2 = getattr(t1, scale2)
t21 = getattr(t2, scale1)
assert allclose_jd(t21.jd, t1.jd)
# test for conversion to local scale
scale3 = 'local'
with pytest.raises(ScaleValueError):
t2 = getattr(t1, scale3)
def test_creating_all_formats(self):
"""Create a time object using each defined format"""
Time(2000.5, format='decimalyear')
Time(100.0, format='cxcsec')
Time(100.0, format='unix')
Time(100.0, format='gps')
Time(1950.0, format='byear', scale='tai')
Time(2000.0, format='jyear', scale='tai')
Time('B1950.0', format='byear_str', scale='tai')
Time('J2000.0', format='jyear_str', scale='tai')
Time('2000-01-01 12:23:34.0', format='iso', scale='tai')
Time('2000-01-01 12:23:34.0Z', format='iso', scale='utc')
Time('2000-01-01T12:23:34.0', format='isot', scale='tai')
Time('2000-01-01T12:23:34.0Z', format='isot', scale='utc')
Time('2000-01-01T12:23:34.0', format='fits')
Time('2000-01-01T12:23:34.0', format='fits', scale='tdb')
Time('2000-01-01T12:23:34.0(TDB)', format='fits')
Time(2400000.5, 51544.0333981, format='jd', scale='tai')
Time(0.0, 51544.0333981, format='mjd', scale='tai')
Time('2000:001:12:23:34.0', format='yday', scale='tai')
Time('2000:001:12:23:34.0Z', format='yday', scale='utc')
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
Time(dt, format='datetime', scale='tai')
Time([dt, dt], format='datetime', scale='tai')
def test_local_format_transforms(self):
"""
        Test transformation of local time to different formats.
        Transformation to formats with a reference time should raise
        ScaleValueError.
"""
t = Time('2006-01-15 21:24:37.5', scale='local')
assert_allclose(t.jd, 2453751.3921006946, atol=0.001/3600./24., rtol=0.)
assert_allclose(t.mjd, 53750.892100694444, atol=0.001/3600./24., rtol=0.)
assert_allclose(t.decimalyear, 2006.0408002758752, atol=0.001/3600./24./365., rtol=0.)
assert t.datetime == datetime.datetime(2006, 1, 15, 21, 24, 37, 500000)
assert t.isot == '2006-01-15T21:24:37.500'
assert t.yday == '2006:015:21:24:37.500'
assert t.fits == '2006-01-15T21:24:37.500(LOCAL)'
assert_allclose(t.byear, 2006.04217888831, atol=0.001/3600./24./365., rtol=0.)
assert_allclose(t.jyear, 2006.0407723496082, atol=0.001/3600./24./365., rtol=0.)
assert t.byear_str == 'B2006.042'
assert t.jyear_str == 'J2006.041'
# epochTimeFormats
with pytest.raises(ScaleValueError):
t2 = t.gps
with pytest.raises(ScaleValueError):
t2 = t.unix
with pytest.raises(ScaleValueError):
t2 = t.cxcsec
with pytest.raises(ScaleValueError):
t2 = t.plot_date
def test_datetime(self):
"""
Test datetime format, including guessing the format from the input type
by not providing the format keyword to Time.
"""
dt = datetime.datetime(2000, 1, 2, 3, 4, 5, 123456)
dt2 = datetime.datetime(2001, 1, 1)
t = Time(dt, scale='utc', precision=9)
assert t.iso == '2000-01-02 03:04:05.123456000'
assert t.datetime == dt
assert t.value == dt
t2 = Time(t.iso, scale='utc')
assert t2.datetime == dt
t = Time([dt, dt2], scale='utc')
assert np.all(t.value == [dt, dt2])
t = Time('2000-01-01 01:01:01.123456789', scale='tai')
assert t.datetime == datetime.datetime(2000, 1, 1, 1, 1, 1, 123457)
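        # datetime only resolves microseconds, so the nanosecond-level string
        # above is rounded from .123456789 s to 123457 us on conversion.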
# broadcasting
dt3 = (dt + (dt2-dt)*np.arange(12)).reshape(4, 3)
t3 = Time(dt3, scale='utc')
assert t3.shape == (4, 3)
assert t3[2, 1].value == dt3[2, 1]
assert t3[2, 1] == Time(dt3[2, 1])
assert np.all(t3.value == dt3)
assert np.all(t3[1].value == dt3[1])
assert np.all(t3[:, 2] == Time(dt3[:, 2]))
assert Time(t3[2, 0]) == t3[2, 0]
def test_epoch_transform(self):
"""Besselian and julian epoch transforms"""
jd = 2457073.05631
t = Time(jd, format='jd', scale='tai', precision=6)
assert allclose_year(t.byear, 2015.1365941020817)
assert allclose_year(t.jyear, 2015.1349933196439)
assert t.byear_str == 'B2015.136594'
assert t.jyear_str == 'J2015.134993'
t2 = Time(t.byear, format='byear', scale='tai')
assert allclose_jd(t2.jd, jd)
t2 = Time(t.jyear, format='jyear', scale='tai')
assert allclose_jd(t2.jd, jd)
t = Time('J2015.134993', scale='tai', precision=6)
assert np.allclose(t.jd, jd, rtol=1e-10, atol=0) # J2015.134993 has 10 digit precision
assert t.byear_str == 'B2015.136594'
def test_input_validation(self):
"""Wrong input type raises error"""
times = [10, 20]
with pytest.raises(ValueError):
Time(times, format='iso', scale='utc')
with pytest.raises(ValueError):
Time('2000:001', format='jd', scale='utc')
with pytest.raises(ValueError):
Time([50000.0], ['bad'], format='mjd', scale='tai')
with pytest.raises(ValueError):
Time(50000.0, 'bad', format='mjd', scale='tai')
with pytest.raises(ValueError):
Time('2005-08-04T00:01:02.000Z', scale='tai')
# regression test against #3396
with pytest.raises(ValueError):
Time(np.nan, format='jd', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI)', scale='utc')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(TAI')
with pytest.raises(ValueError):
Time('2000-01-02T03:04:05(UT(NIST)')
def test_utc_leap_sec(self):
"""Time behaves properly near or in UTC leap second. This
uses the 2012-06-30 leap second for testing."""
for year, month, day in ((2012, 6, 30), (2016, 12, 31)):
# Start with a day without a leap second and note rollover
yyyy_mm = '{:04d}-{:02d}'.format(year, month)
yyyy_mm_dd = '{:04d}-{:02d}-{:02d}'.format(year, month, day)
t1 = Time(yyyy_mm + '-01 23:59:60.0', scale='utc')
assert t1.iso == yyyy_mm + '-02 00:00:00.000'
# Leap second is different
t1 = Time(yyyy_mm_dd + ' 23:59:59.900', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:59.900'
t1 = Time(yyyy_mm_dd + ' 23:59:60.000', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.000'
t1 = Time(yyyy_mm_dd + ' 23:59:60.999', scale='utc')
assert t1.iso == yyyy_mm_dd + ' 23:59:60.999'
if month == 6:
yyyy_mm_dd_plus1 = '{:04d}-07-01'.format(year)
else:
yyyy_mm_dd_plus1 = '{:04d}-01-01'.format(year+1)
t1 = Time(yyyy_mm_dd + ' 23:59:61.0', scale='utc')
assert t1.iso == yyyy_mm_dd_plus1 + ' 00:00:00.000'
# Delta time gives 2 seconds here as expected
t0 = Time(yyyy_mm_dd + ' 23:59:59', scale='utc')
t1 = Time(yyyy_mm_dd_plus1 + ' 00:00:00', scale='utc')
assert allclose_sec((t1 - t0).sec, 2.0)
def test_init_from_time_objects(self):
"""Initialize from one or more Time objects"""
t1 = Time('2007:001', scale='tai')
t2 = Time(['2007-01-02', '2007-01-03'], scale='utc')
# Init from a list of Time objects without an explicit scale
t3 = Time([t1, t2])
# Test that init appropriately combines a scalar (t1) and list (t2)
        # and that scale and format are the same as the first element.
assert len(t3) == 3
assert t3.scale == t1.scale
assert t3.format == t1.format # t1 format is yday
assert np.all(t3.value == np.concatenate([[t1.yday], t2.tai.yday]))
# Init from a single Time object without a scale
t3 = Time(t1)
assert t3.isscalar
assert t3.scale == t1.scale
assert t3.format == t1.format
assert np.all(t3.value == t1.value)
# Init from a single Time object with scale specified
t3 = Time(t1, scale='utc')
assert t3.scale == 'utc'
assert np.all(t3.value == t1.utc.value)
# Init from a list of Time object with scale specified
t3 = Time([t1, t2], scale='tt')
assert t3.scale == 'tt'
assert t3.format == t1.format # yday
assert np.all(t3.value == np.concatenate([[t1.tt.yday], t2.tt.yday]))
# OK, how likely is this... but might as well test.
mjd = np.arange(50000., 50006.)
frac = np.arange(0., 0.999, 0.2)
t4 = Time(mjd[:, np.newaxis] + frac, format='mjd', scale='utc')
t5 = Time([t4[:2], t4[4:5]])
assert t5.shape == (3, 5)
# throw error when deriving local scale time
# from non local time scale
with pytest.raises(ValueError):
t6 = Time(t1, scale='local')
class TestVal2():
"""Tests related to val2"""
def test_val2_ignored(self):
"""Test that val2 is ignored for string input"""
t = Time('2001:001', 'ignored', scale='utc')
assert t.yday == '2001:001:00:00:00.000'
def test_val2(self):
"""Various tests of the val2 input"""
t = Time([0.0, 50000.0], [50000.0, 0.0], format='mjd', scale='tai')
assert t.mjd[0] == t.mjd[1]
assert t.jd[0] == t.jd[1]
def test_val_broadcasts_against_val2(self):
mjd = np.arange(50000., 50007.)
frac = np.arange(0., 0.999, 0.2)
t = Time(mjd[:, np.newaxis], frac, format='mjd', scale='utc')
assert t.shape == (7, 5)
with pytest.raises(ValueError):
Time([0.0, 50000.0], [0.0, 1.0, 2.0], format='mjd', scale='tai')
class TestSubFormat():
"""Test input and output subformat functionality"""
def test_input_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
assert np.all(t.iso == np.array(['2000-01-01 00:00:00.000',
'2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
# Heterogeneous input formats with in_subfmt='date_*'
times = ['2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
in_subfmt='date_*')
assert np.all(t.iso == np.array(['2000-01-01 01:01:00.000',
'2000-01-01 01:01:01.000',
'2000-01-01 01:01:01.123']))
def test_input_subformat_fail(self):
"""Failed format matching"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='date')
def test_bad_input_subformat(self):
"""Non-existent input subformat"""
with pytest.raises(ValueError):
Time('2000-01-01 01:01', format='iso', scale='tai',
in_subfmt='doesnt exist')
def test_output_subformat(self):
"""Input subformat selection"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01 01:01',
'2000-01-01 01:01:01', '2000-01-01 01:01:01.123']
t = Time(times, format='iso', scale='tai',
out_subfmt='date_hm')
assert np.all(t.iso == np.array(['2000-01-01 00:00',
'2000-01-01 01:01',
'2000-01-01 01:01',
'2000-01-01 01:01']))
def test_fits_format(self):
"""FITS format includes bigger years."""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-01-01', '2000-01-01T01:01:01', '2000-01-01T01:01:01.123']
t = Time(times, format='fits', scale='tai')
assert np.all(t.fits == np.array(['2000-01-01T00:00:00.000(TAI)',
'2000-01-01T01:01:01.000(TAI)',
'2000-01-01T01:01:01.123(TAI)']))
# Explicit long format for output, default scale is UTC.
t2 = Time(times, format='fits', out_subfmt='long*')
assert np.all(t2.fits == np.array(['+02000-01-01T00:00:00.000(UTC)',
'+02000-01-01T01:01:01.000(UTC)',
'+02000-01-01T01:01:01.123(UTC)']))
# Implicit long format for output, because of negative year.
times[2] = '-00594-01-01'
t3 = Time(times, format='fits', scale='tai')
assert np.all(t3.fits == np.array(['+02000-01-01T00:00:00.000(TAI)',
'+02000-01-01T01:01:01.000(TAI)',
'-00594-01-01T00:00:00.000(TAI)']))
# Implicit long format for output, because of large positive year.
times[2] = '+10594-01-01'
t4 = Time(times, format='fits', scale='tai')
assert np.all(t4.fits == np.array(['+02000-01-01T00:00:00.000(TAI)',
'+02000-01-01T01:01:01.000(TAI)',
'+10594-01-01T00:00:00.000(TAI)']))
def test_yday_format(self):
"""Year:Day_of_year format"""
# Heterogeneous input formats with in_subfmt='*' (default)
times = ['2000-12-01', '2001-12-01 01:01:01.123']
t = Time(times, format='iso', scale='tai')
t.out_subfmt = 'date_hm'
assert np.all(t.yday == np.array(['2000:336:00:00',
'2001:335:01:01']))
t.out_subfmt = '*'
assert np.all(t.yday == np.array(['2000:336:00:00:00.000',
'2001:335:01:01:01.123']))
def test_scale_input(self):
"""Test for issues related to scale input"""
# Check case where required scale is defined by the TimeFormat.
# All three should work.
t = Time(100.0, format='cxcsec', scale='utc')
assert t.scale == 'utc'
t = Time(100.0, format='unix', scale='tai')
assert t.scale == 'tai'
t = Time(100.0, format='gps', scale='utc')
assert t.scale == 'utc'
# Check that bad scale is caught when format is specified
with pytest.raises(ScaleValueError):
Time(1950.0, format='byear', scale='bad scale')
# Check that bad scale is caught when format is auto-determined
with pytest.raises(ScaleValueError):
Time('2000:001:00:00:00', scale='bad scale')
def test_fits_scale(self):
"""Test that scale gets interpreted correctly for FITS strings."""
t = Time('2000-01-02(TAI)')
assert t.scale == 'tai'
# Test deprecated scale.
t = Time('2000-01-02(IAT)')
assert t.scale == 'tai'
# Test with scale and FITS string scale
t = Time('2045-11-08T00:00:00.000(UTC)', scale='utc')
assert t.scale == 'utc'
# Test with local time scale and FITS string scale
t = Time('2045-11-08T00:00:00.000(LOCAL)')
assert t.scale == 'local'
# Check that inconsistent scales lead to errors.
with pytest.raises(ValueError):
Time('2000-01-02(TAI)', scale='utc')
with pytest.raises(ValueError):
Time(['2000-01-02(TAI)', '2001-02-03(UTC)'])
# Check that inconsistent FITS string scales lead to errors.
with pytest.raises(ValueError):
Time(['2000-01-02(TAI)', '2001-02-03(IAT)'])
# Check that inconsistent realizations lead to errors.
with pytest.raises(ValueError):
Time(['2000-01-02(ET(NIST))', '2001-02-03(ET)'])
def test_fits_scale_representation(self):
t = Time('1960-01-02T03:04:05.678(ET(NIST))')
assert t.scale == 'tt'
assert t.value == '1960-01-02T03:04:05.678(ET(NIST))'
def test_scale_default(self):
"""Test behavior when no scale is provided"""
# These first three are TimeFromEpoch and have an intrinsic time scale
t = Time(100.0, format='cxcsec')
assert t.scale == 'tt'
t = Time(100.0, format='unix')
assert t.scale == 'utc'
t = Time(100.0, format='gps')
assert t.scale == 'tai'
for date in ('J2000', '2000:001', '2000-01-01T00:00:00'):
t = Time(date)
assert t.scale == 'utc'
t = Time(2000.1, format='byear')
assert t.scale == 'utc'
def test_epoch_times(self):
"""Test time formats derived from EpochFromTime"""
t = Time(0.0, format='cxcsec', scale='tai')
assert t.tt.iso == '1998-01-01 00:00:00.000'
# Create new time object from this one and change scale, format
t2 = Time(t, scale='tt', format='iso')
assert t2.value == '1998-01-01 00:00:00.000'
        # Value taken from Chandra.Time.DateTime('2010:001:00:00:00').secs
t_cxcsec = 378691266.184
t = Time(t_cxcsec, format='cxcsec', scale='utc')
assert allclose_sec(t.value, t_cxcsec)
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.value, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
assert t.yday == '2010:001:00:00:00.000'
t = Time('2010:001:00:00:00.000', scale='utc')
assert allclose_sec(t.cxcsec, t_cxcsec)
assert allclose_sec(t.tt.cxcsec, t_cxcsec)
# Value from:
# d = datetime.datetime(2000, 1, 1)
# matplotlib.pylab.dates.date2num(d)
t = Time('2000-01-01 00:00:00', scale='utc')
assert np.allclose(t.plot_date, 730120.0, atol=1e-5, rtol=0)
# Round trip through epoch time
for scale in ('utc', 'tt'):
t = Time('2000:001', scale=scale)
t2 = Time(t.unix, scale=scale, format='unix')
assert getattr(t2, scale).iso == '2000-01-01 00:00:00.000'
# Test unix time. Values taken from http://en.wikipedia.org/wiki/Unix_time
t = Time('2013-05-20 21:18:46', scale='utc')
assert allclose_sec(t.unix, 1369084726.0)
assert allclose_sec(t.tt.unix, 1369084726.0)
# Values from issue #1118
t = Time('2004-09-16T23:59:59', scale='utc')
assert allclose_sec(t.unix, 1095379199.0)
class TestSofaErrors():
"""Test that erfa status return values are handled correctly"""
def test_bad_time(self):
iy = np.array([2000], dtype=np.intc)
im = np.array([2000], dtype=np.intc) # bad month
id = np.array([2000], dtype=np.intc) # bad day
with pytest.raises(ValueError): # bad month, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = -5000
im[0] = 2
with pytest.raises(ValueError): # bad year, fatal error
djm0, djm = erfa.cal2jd(iy, im, id)
iy[0] = 2000
with catch_warnings() as w:
djm0, djm = erfa.cal2jd(iy, im, id)
assert len(w) == 1
assert 'bad day (JD computed)' in str(w[0].message)
assert allclose_jd(djm0, [2400000.5])
assert allclose_jd(djm, [53574.])
class TestCopyReplicate():
"""Test issues related to copying and replicating data"""
def test_immutable_input(self):
"""Internals are never mutable."""
jds = np.array([2450000.5], dtype=np.double)
t = Time(jds, format='jd', scale='tai')
assert allclose_jd(t.jd, jds)
jds[0] = 2458654
assert not allclose_jd(t.jd, jds)
mjds = np.array([50000.0], dtype=np.double)
t = Time(mjds, format='mjd', scale='tai')
assert allclose_jd(t.jd, [2450000.5])
mjds[0] = 0.0
assert allclose_jd(t.jd, [2450000.5])
def test_replicate(self):
"""Test replicate method"""
t = Time(['2000:001'], format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.replicate()
assert t.yday == t2.yday
assert t.format == t2.format
assert t.scale == t2.scale
assert t.location == t2.location
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday == t2.yday
assert t.yday != t_yday # prove that it changed
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x == t2.location.x
assert t.location.x != t_loc_x # prove that it changed
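    # Taken together with test_copy below: replicate() returns an object that
    # shares the underlying jd1/jd2 and location data (a mutation through one
    # is visible through the other), while copy() makes the new object fully
    # independent of the original.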
def test_copy(self):
"""Test copy method"""
t = Time('2000:001', format='yday', scale='tai',
location=('45d', '45d'))
t_yday = t.yday
t_loc_x = t.location.x.copy()
t2 = t.copy()
assert t.yday == t2.yday
# This is not allowed publicly, but here we hack the internal time
# and location values to show that t and t2 are not sharing references.
t2._time.jd1 += 100.0
# Need to delete the cached yday attributes (only an issue because
# of the internal _time hack).
del t.cache
del t2.cache
assert t.yday != t2.yday
assert t.yday == t_yday # prove that it did not change
t2_loc_x_view = t2.location.x
t2_loc_x_view[()] = 0 # use 0 to avoid having to give units
assert t2.location.x == t2_loc_x_view
assert t.location.x != t2.location.x
        assert t.location.x == t_loc_x # prove that it did not change
def test_python_builtin_copy():
t = Time('2000:001', format='yday', scale='tai')
t2 = copy.copy(t)
t3 = copy.deepcopy(t)
assert t.jd == t2.jd
assert t.jd == t3.jd
def test_now():
"""
Tests creating a Time object with the `now` class method.
"""
now = datetime.datetime.utcnow()
t = Time.now()
assert t.format == 'datetime'
assert t.scale == 'utc'
dt = t.datetime - now # a datetime.timedelta object
    # this allows a 0.1 second margin between the `utcnow` call and the `Time`
    # initializer, which is far more generous than necessary (typical
    # differences are more like microseconds), but it seems safer in case
    # some platforms have slow clock calls.
assert dt.total_seconds() < 0.1
def test_decimalyear():
t = Time('2001:001', format='yday')
assert t.decimalyear == 2001.0
t = Time(2000.0, [0.5, 0.75], format='decimalyear')
assert np.all(t.value == [2000.5, 2000.75])
jd0 = Time('2000:001').jd
jd1 = Time('2001:001').jd
d_jd = jd1 - jd0
assert np.all(t.jd == [jd0 + 0.5 * d_jd,
jd0 + 0.75 * d_jd])
def test_fits_year0():
t = Time(1721425.5, format='jd')
assert t.fits == '0001-01-01T00:00:00.000(UTC)'
t = Time(1721425.5 - 366., format='jd')
assert t.fits == '+00000-01-01T00:00:00.000(UTC)'
t = Time(1721425.5 - 366. - 365., format='jd')
assert t.fits == '-00001-01-01T00:00:00.000(UTC)'
def test_fits_year10000():
t = Time(5373484.5, format='jd', scale='tai')
assert t.fits == '+10000-01-01T00:00:00.000(TAI)'
t = Time(5373484.5 - 365., format='jd', scale='tai')
assert t.fits == '9999-01-01T00:00:00.000(TAI)'
t = Time(5373484.5, -1./24./3600., format='jd', scale='tai')
assert t.fits == '9999-12-31T23:59:59.000(TAI)'
def test_dir():
t = Time('2000:001', format='yday', scale='tai')
assert 'utc' in dir(t)
def test_bool():
"""Any Time object should evaluate to True unless it is empty [#3520]."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert bool(t) is True
assert bool(t[0]) is True
assert bool(t[:0]) is False
def test_len_size():
"""Check length of Time objects and that scalar ones do not have one."""
t = Time(np.arange(50000, 50010), format='mjd', scale='utc')
assert len(t) == 10 and t.size == 10
t1 = Time(np.arange(50000, 50010).reshape(2, 5), format='mjd', scale='utc')
assert len(t1) == 2 and t1.size == 10
# Can have length 1 or length 0 arrays.
t2 = t[:1]
assert len(t2) == 1 and t2.size == 1
t3 = t[:0]
assert len(t3) == 0 and t3.size == 0
# But cannot get length from scalar.
t4 = t[0]
with pytest.raises(TypeError) as err:
len(t4)
# Ensure we're not just getting the old error of
# "object of type 'float' has no len()".
assert 'Time' in str(err)
def test_TimeFormat_scale():
"""guard against recurrence of #1122, where TimeFormat class looses uses
attributes (delta_ut1_utc here), preventing conversion to unix, cxc"""
t = Time('1900-01-01', scale='ut1')
t.delta_ut1_utc = 0.0
t.unix
assert t.unix == t.utc.unix
@pytest.mark.remote_data
def test_scale_conversion():
Time(Time.now().cxcsec, format='cxcsec', scale='ut1')
def test_byteorder():
"""Ensure that bigendian and little-endian both work (closes #2942)"""
mjd = np.array([53000.00, 54000.00])
big_endian = mjd.astype('>f8')
little_endian = mjd.astype('<f8')
time_mjd = Time(mjd, format='mjd')
time_big = Time(big_endian, format='mjd')
time_little = Time(little_endian, format='mjd')
assert np.all(time_big == time_mjd)
assert np.all(time_little == time_mjd)
def test_datetime_tzinfo():
"""
Test #3160 that time zone info in datetime objects is respected.
"""
class TZm6(datetime.tzinfo):
def utcoffset(self, dt):
return datetime.timedelta(hours=-6)
d = datetime.datetime(2002, 1, 2, 10, 3, 4, tzinfo=TZm6())
t = Time(d)
assert t.value == datetime.datetime(2002, 1, 2, 16, 3, 4)
def test_subfmts_regex():
"""
Test having a custom subfmts with a regular expression
"""
class TimeLongYear(TimeString):
name = 'longyear'
subfmts = (('date',
r'(?P<year>[+-]\d{5})-%m-%d', # hybrid
'{year:+06d}-{mon:02d}-{day:02d}'),)
t = Time('+02000-02-03', format='longyear')
assert t.value == '+02000-02-03'
assert t.jd == Time('2000-02-03').jd
def test_set_format_basic():
"""
Test basics of setting format attribute.
"""
for format, value in (('jd', 2451577.5),
('mjd', 51577.0),
('cxcsec', 65923264.184), # confirmed with Chandra.Time
('datetime', datetime.datetime(2000, 2, 3, 0, 0)),
('iso', '2000-02-03 00:00:00.000')):
t = Time('+02000-02-03', format='fits')
t0 = t.replicate()
t.format = format
assert t.value == value
# Internal jd1 and jd2 are preserved
assert t._time.jd1 is t0._time.jd1
assert t._time.jd2 is t0._time.jd2
def test_set_format_shares_subfmt():
"""
Set format and round trip through a format that shares out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='date_hms', precision=5)
tc = t.copy()
t.format = 'isot'
assert t.precision == 5
assert t.out_subfmt == 'date_hms'
assert t.value == '2000-02-03T00:00:00.00000'
t.format = 'fits'
assert t.value == tc.value
assert t.precision == 5
def test_set_format_does_not_share_subfmt():
"""
Set format and round trip through a format that does not share out_subfmt
"""
t = Time('+02000-02-03', format='fits', out_subfmt='longdate')
t.format = 'isot'
assert t.out_subfmt == '*' # longdate_hms not there, goes to default
assert t.value == '2000-02-03T00:00:00.000'
t.format = 'fits'
assert t.out_subfmt == '*'
assert t.value == '2000-02-03T00:00:00.000(UTC)' # date_hms
def test_replicate_value_error():
"""
Passing a bad format to replicate should raise ValueError, not KeyError.
PR #3857.
"""
t1 = Time('2007:001', scale='tai')
with pytest.raises(ValueError) as err:
t1.replicate(format='definitely_not_a_valid_format')
assert 'format must be one of' in str(err)
def test_remove_astropy_time():
"""
Make sure that 'astropy_time' format is really gone after #3857. Kind of
silly test but just to be sure.
"""
t1 = Time('2007:001', scale='tai')
assert 'astropy_time' not in t1.FORMATS
with pytest.raises(ValueError) as err:
Time(t1, format='astropy_time')
assert 'format must be one of' in str(err)
def test_isiterable():
"""
Ensure that scalar `Time` instances are not reported as iterable by the
`isiterable` utility.
Regression test for https://github.com/astropy/astropy/issues/4048
"""
t1 = Time.now()
assert not isiterable(t1)
t2 = Time(['1999-01-01 00:00:00.123456789', '2010-01-01 00:00:00'],
format='iso', scale='utc')
assert isiterable(t2)
def test_to_datetime():
tz = TimezoneInfo(utc_offset=-10*u.hour, tzname='US/Hawaii')
    # The above line produces a `datetime.tzinfo` object similar to:
# tzinfo = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
assert tz_aware_datetime.time() == datetime.time(14, 0)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
with pytest.raises(ValueError) as e:
Time('2015-06-30 23:59:60.000').to_datetime()
    assert 'does not support leap seconds' in str(e.value)
@pytest.mark.skipif('not HAS_PYTZ')
def test_to_datetime_pytz():
tz = pytz.timezone('US/Hawaii')
time = Time('2010-09-03 00:00:00')
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
assert tz_aware_datetime.time() == datetime.time(14, 0)
assert tz.tzname(time.datetime) == tz_aware_datetime.tzname()
assert time == forced_to_astropy_time
# Test non-scalar time inputs:
time = Time(['2010-09-03 00:00:00', '2005-09-03 06:00:00',
'1990-09-03 06:00:00'])
tz_aware_datetime = time.to_datetime(tz)
forced_to_astropy_time = Time(tz_aware_datetime)
for dt, tz_dt in zip(time.datetime, tz_aware_datetime):
assert tz.tzname(dt) == tz_dt.tzname()
assert np.all(time == forced_to_astropy_time)
def test_cache():
t = Time('2010-09-03 00:00:00')
t2 = Time('2010-09-03 00:00:00')
# Time starts out without a cache
assert 'cache' not in t._time.__dict__
# Access the iso format and confirm that the cached version is as expected
t.iso
assert t.cache['format']['iso'] == t2.iso
# Access the TAI scale and confirm that the cached version is as expected
t.tai
assert t.cache['scale']['tai'] == t2.tai
# New Time object after scale transform does not have a cache yet
assert 'cache' not in t.tt._time.__dict__
# Clear the cache
del t.cache
assert 'cache' not in t._time.__dict__
# Check accessing the cache creates an empty dictionary
assert not t.cache
assert 'cache' in t._time.__dict__
def test_epoch_date_jd_is_day_fraction():
"""
    Ensure that jd1 and jd2 of an epoch Time respect the (day, fraction) convention
(see #6638)
"""
t0 = Time("J2000", scale="tdb")
assert t0.jd1 == 2451545.0
assert t0.jd2 == 0.0
t1 = Time(datetime.datetime(2000, 1, 1, 12, 0, 0), scale="tdb")
assert t1.jd1 == 2451545.0
assert t1.jd2 == 0.0
def test_sum_is_equivalent():
"""
Ensure that two equal dates defined in different ways behave equally (#6638)
"""
t0 = Time("J2000", scale="tdb")
t1 = Time("2000-01-01 12:00:00", scale="tdb")
assert t0 == t1
assert (t0 + 1 * u.second) == (t1 + 1 * u.second)
def test_string_valued_columns():
# Columns have a nice shim that translates bytes to string as needed.
# Ensure Time can handle these. Use multi-d array just to be sure.
times = [[['{:04d}-{:02d}-{:02d}'.format(y, m, d) for d in range(1, 3)]
for m in range(5, 7)] for y in range(2012, 2014)]
cutf32 = Column(times)
cbytes = cutf32.astype('S')
tutf32 = Time(cutf32)
tbytes = Time(cbytes)
assert np.all(tutf32 == tbytes)
tutf32 = Time(Column(['B1950']))
tbytes = Time(Column([b'B1950']))
assert tutf32 == tbytes
    # Regression tests for arrays whose entries have unequal length. gh-6903.
times = Column([b'2012-01-01', b'2012-01-01T00:00:00'])
assert np.all(Time(times) == Time(['2012-01-01', '2012-01-01T00:00:00']))
def test_bytes_input():
tstring = '2011-01-02T03:04:05'
tbytes = b'2011-01-02T03:04:05'
assert tbytes.decode('ascii') == tstring
t0 = Time(tstring)
t1 = Time(tbytes)
assert t1 == t0
tarray = np.array(tbytes)
assert tarray.dtype.kind == 'S'
t2 = Time(tarray)
assert t2 == t0
def test_writeable_flag():
t = Time([1, 2, 3], format='cxcsec')
t[1] = 5.0
assert allclose_sec(t[1].value, 5.0)
t.writeable = False
with pytest.raises(ValueError) as err:
t[1] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err)
with pytest.raises(ValueError) as err:
t[:] = 5.0
assert 'Time object is read-only. Make a copy()' in str(err)
t.writeable = True
t[1] = 10.0
assert allclose_sec(t[1].value, 10.0)
# Scalar is not writeable
t = Time('2000:001', scale='utc')
with pytest.raises(ValueError) as err:
t[()] = '2000:002'
assert 'scalar Time object is read-only.' in str(err)
# Transformed attribute is not writeable
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = t.tt # t2 is read-only now because t.tt is cached
with pytest.raises(ValueError) as err:
t2[0] = '2005:001'
assert 'Time object is read-only. Make a copy()' in str(err)
def test_setitem_location():
loc = EarthLocation(x=[1, 2] * u.m, y=[3, 4] * u.m, z=[5, 6] * u.m)
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
# Succeeds because the right hand side makes no implication about
# location and just inherits t.location
t[0, 0] = 0
assert allclose_sec(t.value, [[0, 2], [3, 4]])
# Fails because the right hand side has location=None
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-1, format='cxcsec')
assert ('cannot set to Time with different location: '
'expected location=(1.0, 3.0, 5.0) m and '
'got location=None') in str(err)
# Succeeds because the right hand side correctly sets location
t[0, 0] = Time(-2, format='cxcsec', location=loc[0])
assert allclose_sec(t.value, [[-2, 2], [3, 4]])
# Fails because the right hand side has different location
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location=(1.0, 3.0, 5.0) m and '
'got location=(2.0, 4.0, 6.0) m') in str(err)
# Fails because the Time has None location and RHS has defined location
t = Time([[1, 2], [3, 4]], format='cxcsec')
with pytest.raises(ValueError) as err:
t[0, 0] = Time(-2, format='cxcsec', location=loc[1])
assert ('cannot set to Time with different location: '
'expected location=None and '
'got location=(2.0, 4.0, 6.0) m') in str(err)
# Broadcasting works
t = Time([[1, 2], [3, 4]], format='cxcsec', location=loc)
t[0, :] = Time([-3, -4], format='cxcsec', location=loc)
assert allclose_sec(t.value, [[-3, -4], [3, 4]])
def test_setitem_from_python_objects():
t = Time([[1, 2], [3, 4]], format='cxcsec')
assert t.cache == {}
t.iso
assert 'iso' in t.cache['format']
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:00:02.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Setting item clears cache
t[0, 1] = 100
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[3, 4]])
assert np.all(t.iso == [['1998-01-01 00:00:01.000', '1998-01-01 00:01:40.000'],
['1998-01-01 00:00:03.000', '1998-01-01 00:00:04.000']])
# Set with a float value
t.iso
t[1, :] = 200
assert t.cache == {}
assert allclose_sec(t.value, [[1, 100],
[200, 200]])
# Array of strings in yday format
t[:, 1] = ['1998:002', '1998:003']
assert allclose_sec(t.value, [[1, 86400 * 1],
[200, 86400 * 2]])
# Incompatible numeric value
t = Time(['2000:001', '2000:002'])
t[0] = '2001:001'
with pytest.raises(ValueError) as err:
t[0] = 100
assert 'cannot convert value to a compatible Time object' in str(err)
def test_setitem_from_time_objects():
"""Set from existing Time object.
"""
# Set from time object with different scale
t = Time(['2000:001', '2000:002'], scale='utc')
t2 = Time(['2000:010'], scale='tai')
t[1] = t2[0]
assert t.value[1] == t2.utc.value[0]
# Time object with different scale and format
t = Time(['2000:001', '2000:002'], scale='utc')
t2.format = 'jyear'
t[1] = t2[0]
assert t.yday[1] == t2.utc.yday[0]
def test_setitem_bad_item():
t = Time([1, 2], format='cxcsec')
with pytest.raises(IndexError):
t['asdf'] = 3
def test_setitem_deltas():
"""Setting invalidates any transform deltas"""
t = Time([1, 2], format='cxcsec')
t.delta_tdb_tt = [1, 2]
t.delta_ut1_utc = [3, 4]
t[1] = 3
assert not hasattr(t, '_delta_tdb_tt')
assert not hasattr(t, '_delta_ut1_utc')
def test_subclass():
"""Check that we can initialize subclasses with a Time instance."""
# Ref: Issue gh-#7449 and PR gh-#7453.
class _Time(Time):
pass
t1 = Time('1999-01-01T01:01:01')
t2 = _Time(t1)
assert t2.__class__ == _Time
assert t1 == t2
|
07fbaab7a6092730263a76347a14a8f29861d33edee22178ee912442df3bad06 | # The purpose of these tests is to ensure that calling ufuncs with quantities
# returns quantities with the right units, or raises exceptions.
import warnings
from collections import namedtuple
import pytest
import numpy as np
from numpy.testing import assert_allclose
from ... import units as u
from .. import quantity_helper as qh
from ...tests.helper import raises
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
testcase = namedtuple('testcase', ['f', 'q_in', 'q_out'])
testexc = namedtuple('testexc', ['f', 'q_in', 'exc', 'msg'])
testwarn = namedtuple('testwarn', ['f', 'q_in', 'wfilter'])
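# Each row type drives one of the skipped helpers below: ``testcase`` rows
# assert the units and values of the ufunc output, ``testexc`` rows assert
# that the given exception (optionally carrying a message fragment) is
# raised, and ``testwarn`` rows only check that the call passes under the
# given warnings filter.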
@pytest.mark.skip
def test_testcase(tc):
results = tc.f(*tc.q_in)
    # Be careful with the following line: it would break on a function
    # returning a single tuple (as opposed to a tuple of return values).
results = (results, ) if type(results) != tuple else results
for result, expected in zip(results, tc.q_out):
assert result.unit == expected.unit
assert_allclose(result.value, expected.value, atol=1.E-15)
@pytest.mark.skip
def test_testexc(te):
with pytest.raises(te.exc) as exc:
te.f(*te.q_in)
if te.msg is not None:
assert te.msg in exc.value.args[0]
@pytest.mark.skip
def test_testwarn(tw):
with warnings.catch_warnings():
warnings.filterwarnings(tw.wfilter)
tw.f(*tw.q_in)
class TestUfuncCoverage:
"""Test that we cover all ufunc's"""
# Ignore possible scipy ufuncs; for scipy in particular, we have support
# for some, but for others it still has to be decided whether we can
# support them or not.
@pytest.mark.skipif(HAS_SCIPY,
reason='scipy.special coverage is incomplete')
def test_coverage(self):
all_np_ufuncs = set([ufunc for ufunc in np.core.umath.__dict__.values()
if isinstance(ufunc, np.ufunc)])
all_q_ufuncs = (qh.UNSUPPORTED_UFUNCS |
set(qh.UFUNC_HELPERS.keys()))
assert all_np_ufuncs - all_q_ufuncs == set()
assert all_q_ufuncs - all_np_ufuncs == set()
class TestQuantityTrigonometricFuncs:
"""
Test trigonometric functions
"""
@pytest.mark.parametrize('tc', (
testcase(
f=np.sin,
q_in=(30. * u.degree, ),
q_out=(0.5*u.dimensionless_unscaled, )
),
testcase(
f=np.sin,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([0., 1. / np.sqrt(2.), 1.]) * u.one, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(30. * u.degree), ),
q_out=(np.radians(30.) * u.radian, )
),
testcase(
f=np.arcsin,
q_in=(np.sin(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, )
),
testcase(
f=np.cos,
q_in=(np.pi / 3. * u.radian, ),
q_out=(0.5 * u.dimensionless_unscaled, )
),
testcase(
f=np.cos,
q_in=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
q_out=(np.array([1., 1. / np.sqrt(2.), 0.]) * u.one, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arccos,
q_in=(np.cos(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian), ),
q_out=(np.array([0., np.pi / 4., np.pi / 2.]) * u.radian, ),
),
testcase(
f=np.tan,
q_in=(np.pi / 3. * u.radian, ),
q_out=(np.sqrt(3.) * u.dimensionless_unscaled, )
),
testcase(
f=np.tan,
q_in=(np.array([0., 45., 135., 180.]) * u.degree, ),
q_out=(np.array([0., 1., -1., 0.]) * u.dimensionless_unscaled, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.pi / 3. * u.radian), ),
q_out=(np.pi / 3. * u.radian, )
),
testcase(
f=np.arctan,
q_in=(np.tan(np.array([10., 30., 70., 80.]) * u.degree), ),
q_out=(np.radians(np.array([10., 30., 70., 80.]) * u.degree), )
),
testcase(
f=np.arctan2,
q_in=(np.array([10., 30., 70., 80.]) * u.m, 2.0 * u.km),
q_out=(np.arctan2(np.array([10., 30., 70., 80.]),
2000.) * u.radian, )
),
testcase(
f=np.arctan2,
q_in=((np.array([10., 80.]) * u.m / (2.0 * u.km)).to(u.one), 1.),
q_out=(np.arctan2(np.array([10., 80.]) / 2000., 1.) * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.radians,
q_in=(180. * u.degree, ),
q_out=(np.pi * u.radian, )
),
testcase(
f=np.deg2rad,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.radians,
q_in=(3. * u.radian, ),
q_out=(3. * u.radian, )
),
testcase(
f=np.rad2deg,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(60. * u.degree, ),
q_out=(60. * u.degree, )
),
testcase(
f=np.rad2deg,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
),
testcase(
f=np.degrees,
q_in=(np.pi * u.radian, ),
q_out=(180. * u.degree, )
)
))
def test_testcases(self, tc):
return test_testcase(tc)
@pytest.mark.parametrize('te', (
testexc(
f=np.deg2rad,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.radians,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.rad2deg,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.degrees,
q_in=(3. * u.m, ),
exc=TypeError,
msg=None
),
testexc(
f=np.sin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'sin' function to quantities with angle units"
),
testexc(
f=np.arcsin,
q_in=(3. * u.m, ),
exc=TypeError,
msg="Can only apply 'arcsin' function to dimensionless quantities"
),
testexc(
f=np.cos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'cos' function to quantities with angle units"
),
testexc(
f=np.arccos,
q_in=(3. * u.s, ),
exc=TypeError,
msg="Can only apply 'arccos' function to dimensionless quantities"
),
testexc(
f=np.tan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'tan' function to quantities with angle units"
),
testexc(
f=np.arctan,
q_in=(np.array([1, 2, 3]) * u.N, ),
exc=TypeError,
msg="Can only apply 'arctan' function to dimensionless quantities"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1. * u.s),
exc=u.UnitsError,
msg="compatible dimensions"
),
testexc(
f=np.arctan2,
q_in=(np.array([1, 2, 3]) * u.N, 1.),
exc=u.UnitsError,
msg="dimensionless quantities when other arg"
)
))
def test_testexcs(self, te):
return test_testexc(te)
@pytest.mark.parametrize('tw', (
testwarn(
f=np.arcsin,
q_in=(27. * u.pc / (15 * u.kpc), ),
wfilter='error'
),
))
def test_testwarns(self, tw):
return test_testwarn(tw)
class TestQuantityMathFuncs:
"""
Test other mathematical functions
"""
def test_multiply_scalar(self):
assert np.multiply(4. * u.m, 2. / u.s) == 8. * u.m / u.s
assert np.multiply(4. * u.m, 2.) == 8. * u.m
assert np.multiply(4., 2. / u.s) == 8. / u.s
def test_multiply_array(self):
assert np.all(np.multiply(np.arange(3.) * u.m, 2. / u.s) ==
np.arange(0, 6., 2.) * u.m / u.s)
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_scalar(self, function):
assert function(4. * u.m, 2. * u.s) == function(4., 2.) * u.m / u.s
assert function(4. * u.m, 2.) == function(4., 2.) * u.m
assert function(4., 2. * u.s) == function(4., 2.) / u.s
@pytest.mark.parametrize('function', (np.divide, np.true_divide))
def test_divide_array(self, function):
assert np.all(function(np.arange(3.) * u.m, 2. * u.s) ==
function(np.arange(3.), 2.) * u.m / u.s)
def test_floor_divide_remainder_and_divmod(self):
inch = u.Unit(0.0254 * u.m)
dividend = np.array([1., 2., 3.]) * u.m
divisor = np.array([3., 4., 5.]) * inch
quotient = dividend // divisor
remainder = dividend % divisor
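# worked arithmetic for the expected values (1 inch = 0.0254 m):
#   1 m // (3 in) = 1 // 0.0762 = 13, remainder 1 - 13 * 0.0762 = 0.0094 m
#   2 m // (4 in) = 2 // 0.1016 = 19, remainder 2 - 19 * 0.1016 = 0.0696 m
#   3 m // (5 in) = 3 // 0.1270 = 23, remainder 3 - 23 * 0.1270 = 0.0790 m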
assert_allclose(quotient.value, [13., 19., 23.])
assert quotient.unit == u.dimensionless_unscaled
assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
assert remainder.unit == dividend.unit
quotient2 = np.floor_divide(dividend, divisor)
remainder2 = np.remainder(dividend, divisor)
assert np.all(quotient2 == quotient)
assert np.all(remainder2 == remainder)
quotient3, remainder3 = divmod(dividend, divisor)
assert np.all(quotient3 == quotient)
assert np.all(remainder3 == remainder)
with pytest.raises(TypeError):
divmod(dividend, u.km)
with pytest.raises(TypeError):
dividend // u.km
with pytest.raises(TypeError):
dividend % u.km
quotient4, remainder4 = np.divmod(dividend, divisor)
assert np.all(quotient4 == quotient)
assert np.all(remainder4 == remainder)
with pytest.raises(TypeError):
np.divmod(dividend, u.km)
def test_sqrt_scalar(self):
assert np.sqrt(4. * u.m) == 2. * u.m ** 0.5
def test_sqrt_array(self):
assert np.all(np.sqrt(np.array([1., 4., 9.]) * u.m)
== np.array([1., 2., 3.]) * u.m ** 0.5)
def test_square_scalar(self):
assert np.square(4. * u.m) == 16. * u.m ** 2
def test_square_array(self):
assert np.all(np.square(np.array([1., 2., 3.]) * u.m)
== np.array([1., 4., 9.]) * u.m ** 2)
def test_reciprocal_scalar(self):
assert np.reciprocal(4. * u.m) == 0.25 / u.m
def test_reciprocal_array(self):
assert np.all(np.reciprocal(np.array([1., 2., 4.]) * u.m)
== np.array([1., 0.5, 0.25]) / u.m)
def test_heaviside_scalar(self):
assert np.heaviside(0. * u.m, 0.5) == 0.5 * u.dimensionless_unscaled
assert np.heaviside(0. * u.s,
25 * u.percent) == 0.25 * u.dimensionless_unscaled
assert np.heaviside(2. * u.J, 0.25) == 1. * u.dimensionless_unscaled
def test_heaviside_array(self):
values = np.array([-1., 0., 0., +1.])
halfway = np.array([0.75, 0.25, 0.75, 0.25]) * u.dimensionless_unscaled
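# np.heaviside(x, h0) returns 0 where x < 0, 1 where x > 0, and the
# matching element of h0 where x == 0; hence [0, 0.25, 0.75, 1] below.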
assert np.all(np.heaviside(values * u.m,
halfway * u.dimensionless_unscaled) ==
[0, 0.25, 0.75, +1.] * u.dimensionless_unscaled)
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_scalar(self, function):
assert function(8. * u.m**3) == 2. * u.m
@pytest.mark.parametrize('function', (np.cbrt, ))
def test_cbrt_array(self, function):
# Calculate cbrt on both sides since on Windows the cube root of 64
# does not exactly equal 4. See #4388.
values = np.array([1., 8., 64.])
assert np.all(function(values * u.m**3) ==
function(values) * u.m)
def test_power_scalar(self):
assert np.power(4. * u.m, 2.) == 16. * u.m ** 2
assert np.power(4., 200. * u.cm / u.m) == \
u.Quantity(16., u.dimensionless_unscaled)
# regression check on #1696
assert np.power(4. * u.m, 0.) == 1. * u.dimensionless_unscaled
def test_power_array(self):
assert np.all(np.power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
# float_power only introduced in numpy 1.12
@pytest.mark.skipif("not hasattr(np, 'float_power')")
def test_float_power_array(self):
assert np.all(np.float_power(np.array([1., 2., 3.]) * u.m, 3.)
== np.array([1., 8., 27.]) * u.m ** 3)
# regression check on #1696
assert np.all(np.float_power(np.arange(4.) * u.m, 0.) ==
1. * u.dimensionless_unscaled)
@raises(ValueError)
def test_power_array_array(self):
np.power(4. * u.m, [2., 4.])
@raises(ValueError)
def test_power_array_array2(self):
np.power([2., 4.] * u.m, [2., 4.])
def test_power_array_array3(self):
# Identical unit fractions are converted automatically to dimensionless
# and should be allowed as base for np.power: #4764
q = [2., 4.] * u.m / u.m
powers = [2., 4.]
res = np.power(q, powers)
assert np.all(res.value == q.value ** powers)
assert res.unit == u.dimensionless_unscaled
# The same holds for unit fractions that are scaled dimensionless.
q2 = [2., 4.] * u.m / u.cm
# Test also against different types of exponent
for cls in (list, tuple, np.array, np.ma.array, u.Quantity):
res2 = np.power(q2, cls(powers))
assert np.all(res2.value == q2.to_value(1) ** powers)
assert res2.unit == u.dimensionless_unscaled
# Though for single powers, we keep the composite unit.
res3 = q2 ** 2
assert np.all(res3.value == q2.value ** 2)
assert res3.unit == q2.unit ** 2
assert np.all(res3 == q2 ** [2, 2])
def test_power_invalid(self):
with pytest.raises(TypeError) as exc:
np.power(3., 4. * u.m)
assert "raise something to a dimensionless" in exc.value.args[0]
def test_copysign_scalar(self):
assert np.copysign(3 * u.m, 1.) == 3. * u.m
assert np.copysign(3 * u.m, 1. * u.s) == 3. * u.m
assert np.copysign(3 * u.m, -1.) == -3. * u.m
assert np.copysign(3 * u.m, -1. * u.s) == -3. * u.m
def test_copysign_array(self):
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1.) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s, -1. * u.m) ==
-np.array([1., 2., 3.]) * u.s)
assert np.all(np.copysign(np.array([1., 2., 3.]) * u.s,
np.array([-2., 2., -4.]) * u.m) ==
np.array([-1., 2., -3.]) * u.s)
q = np.copysign(np.array([1., 2., 3.]), -3 * u.m)
assert np.all(q == np.array([-1., -2., -3.]))
assert not isinstance(q, u.Quantity)
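# np.ldexp(x, n) computes x * 2**n, e.g. ldexp(4 m, 2) == 16 m below; the
# exponent must be a plain integer, as the invalid cases further down check.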
def test_ldexp_scalar(self):
assert np.ldexp(4. * u.m, 2) == 16. * u.m
def test_ldexp_array(self):
assert np.all(np.ldexp(np.array([1., 2., 3.]) * u.m, [3, 2, 1])
== np.array([8., 8., 6.]) * u.m)
def test_ldexp_invalid(self):
with pytest.raises(TypeError):
np.ldexp(3. * u.m, 4.)
with pytest.raises(TypeError):
np.ldexp(3., u.Quantity(4, u.m, dtype=int))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_scalar(self, function):
q = function(3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(0.5)
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value
== function(np.array([1. / 3., 1. / 2., 1.])))
# should also work on quantities that can be made dimensionless
q2 = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', (np.exp, np.expm1, np.exp2,
np.log, np.log2, np.log10, np.log1p))
def test_exp_invalid_units(self, function):
# Can't use exp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
def test_modf_scalar(self):
q = np.modf(9. * u.m / (600. * u.cm))
assert q == (0.5 * u.dimensionless_unscaled,
1. * u.dimensionless_unscaled)
def test_modf_array(self):
v = np.arange(10.) * u.m / (500. * u.cm)
q = np.modf(v)
n = np.modf(v.to_value(u.dimensionless_unscaled))
assert q[0].unit == u.dimensionless_unscaled
assert q[1].unit == u.dimensionless_unscaled
assert all(q[0].value == n[0])
assert all(q[1].value == n[1])
def test_frexp_scalar(self):
q = np.frexp(3. * u.m / (6. * u.m))
assert q == (np.array(0.5), np.array(0.0))
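# np.frexp decomposes x as mantissa * 2**exponent with 0.5 <= |mantissa| < 1;
# here 0.5 == 0.5 * 2**0, so the expected result is (0.5, 0).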
def test_frexp_array(self):
q = np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert all((_q0, _q1) == np.frexp(_d) for _q0, _q1, _d
in zip(q[0], q[1], [1. / 3., 1. / 2., 1.]))
def test_frexp_invalid_units(self):
# Can't use frexp() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
np.frexp(3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
# also does not work on quantities that can be made dimensionless
with pytest.raises(TypeError) as exc:
np.frexp(np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert exc.value.args[0] == ("Can only apply 'frexp' function to "
"unscaled dimensionless quantities")
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_array(self, function):
q = function(np.array([2., 3., 6.]) * u.m / (6. * u.cm), 1.)
assert q.unit == u.dimensionless_unscaled
assert_allclose(q.value,
function(np.array([100. / 3., 100. / 2., 100.]), 1.))
@pytest.mark.parametrize('function', (np.logaddexp, np.logaddexp2))
def test_dimensionless_twoarg_invalid_units(self, function):
with pytest.raises(TypeError) as exc:
function(1. * u.km / u.s, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
class TestInvariantUfuncs:
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.fabs,
np.conj, np.conjugate,
np.negative, np.spacing, np.rint,
np.floor, np.ceil, np.positive])
def test_invariant_scalar(self, ufunc):
q_i = 4.7 * u.m
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert q_o.value == ufunc(q_i.value)
@pytest.mark.parametrize(('ufunc'), [np.absolute, np.conjugate,
np.negative, np.rint,
np.floor, np.ceil])
def test_invariant_array(self, ufunc):
q_i = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_o = ufunc(q_i)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i.unit
assert np.all(q_o.value == ufunc(q_i.value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_scalar(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.km
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_array(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.us
q_o = ufunc(q_i1, q_i2)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_one_arbitrary(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
arbitrary_unit_value = np.array([0.])
q_o = ufunc(q_i1, arbitrary_unit_value)
assert isinstance(q_o, u.Quantity)
assert q_o.unit == q_i1.unit
assert_allclose(q_o.value, ufunc(q_i1.value, arbitrary_unit_value))
@pytest.mark.parametrize(('ufunc'), [np.add, np.subtract, np.hypot,
np.maximum, np.minimum, np.nextafter,
np.remainder, np.mod, np.fmod])
def test_invariant_twoarg_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestComparisonUfuncs:
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10., -5., 1.e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(q_o2 == ufunc((q_i1 / q_i2)
.to_value(u.dimensionless_unscaled), 2.))
# comparison with 0., inf, nan is OK even for dimensional quantities
for arbitrary_unit_value in (0., np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value*np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0., np.inf, np.nan]))
@pytest.mark.parametrize(('ufunc'), [np.greater, np.greater_equal,
np.less, np.less_equal,
np.not_equal, np.equal])
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError) as exc:
ufunc(q_i1, q_i2)
assert "compatible dimensions" in exc.value.args[0]
class TestInplaceUfuncs:
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace(self, value):
# without scaling
s = value * u.rad
check = s
np.sin(s, out=s)
assert check is s
assert check.unit == u.dimensionless_unscaled
# with scaling
s2 = (value * u.rad).to(u.deg)
check2 = s2
np.sin(s2, out=s2)
assert check2 is s2
assert check2.unit == u.dimensionless_unscaled
assert_allclose(s.value, s2.value)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_ufunc_inplace_2(self, value):
"""Check inplace works with non-quantity input and quantity output"""
s = value * u.m
check = s
np.absolute(value, out=s)
assert check is s
assert np.all(check.value == np.absolute(value))
assert check.unit is u.dimensionless_unscaled
np.sqrt(value, out=s)
assert check is s
assert np.all(check.value == np.sqrt(value))
assert check.unit is u.dimensionless_unscaled
np.exp(value, out=s)
assert check is s
assert np.all(check.value == np.exp(value))
assert check.unit is u.dimensionless_unscaled
np.arcsin(value/10., out=s)
assert check is s
assert np.all(check.value == np.arcsin(value/10.))
assert check.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_one_argument_two_output_ufunc_inplace(self, value):
v = 100. * value * u.cm / u.m
v_copy = v.copy()
tmp = v.copy()
check = v
np.modf(v, tmp, v)
assert check is v
assert check.unit == u.dimensionless_unscaled
v2 = v_copy.to(u.dimensionless_unscaled)
check2 = v2
np.modf(v2, tmp, v2)
assert check2 is v2
assert check2.unit == u.dimensionless_unscaled
# can also replace in last position if no scaling is needed
v3 = v_copy.to(u.dimensionless_unscaled)
check3 = v3
np.modf(v3, v3, tmp)
assert check3 is v3
assert check3.unit == u.dimensionless_unscaled
# And now, with numpy >= 1.13, one can also replace input with
# first output when scaling
v4 = v_copy.copy()
check4 = v4
np.modf(v4, v4, tmp)
assert check4 is v4
assert check4.unit == u.dimensionless_unscaled
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_1(self, value):
s = value * u.cycle
check = s
s /= 2.
assert check is s
assert np.all(check.value == value / 2.)
s /= u.s
assert check is s
assert check.unit == u.cycle / u.s
s *= 2. * u.s
assert check is s
assert np.all(check == value * u.cycle)
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_ufunc_inplace_2(self, value):
s = value * u.cycle
check = s
np.arctan2(s, s, out=s)
assert check is s
assert check.unit == u.radian
with pytest.raises(u.UnitsError):
s += 1. * u.m
assert check is s
assert check.unit == u.radian
np.arctan2(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.radian
np.add(1. * u.deg, s, out=s)
assert check is s
assert check.unit == u.deg
np.multiply(2. / u.s, s, out=s)
assert check is s
assert check.unit == u.deg / u.s
def test_two_argument_ufunc_inplace_3(self):
s = np.array([1., 2., 3.]) * u.dimensionless_unscaled
np.add(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert np.all(s.value == np.array([3., 6., 9.]))
assert s.unit is u.dimensionless_unscaled
np.arctan2(np.array([1., 2., 3.]), np.array([1., 2., 3.]) * 2., out=s)
assert_allclose(s.value, np.arctan2(1., 2.))
assert s.unit is u.radian
@pytest.mark.parametrize(('value'), [1., np.arange(10.)])
def test_two_argument_two_output_ufunc_inplace(self, value):
v = value * u.m
divisor = 70.*u.cm
v1 = v.copy()
tmp = v.copy()
check = np.divmod(v1, divisor, out=(tmp, v1))
assert check[0] is tmp and check[1] is v1
assert tmp.unit == u.dimensionless_unscaled
assert v1.unit == v.unit
v2 = v.copy()
check2 = np.divmod(v2, divisor, out=(v2, tmp))
assert check2[0] is v2 and check2[1] is tmp
assert v2.unit == u.dimensionless_unscaled
assert tmp.unit == v.unit
v3a = v.copy()
v3b = v.copy()
check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
assert check3[0] is v3a and check3[1] is v3b
assert v3a.unit == u.dimensionless_unscaled
assert v3b.unit == v.unit
def test_ufunc_inplace_non_contiguous_data(self):
# ensure inplace works also for non-contiguous data (closes #1834)
s = np.arange(10.) * u.m
s_copy = s.copy()
s2 = s[::2]
s2 += 1. * u.cm
assert np.all(s[::2] > s_copy[::2])
assert np.all(s[1::2] == s_copy[1::2])
def test_ufunc_inplace_non_standard_dtype(self):
"""Check that inplace operations check properly for casting.
First two tests that check that float32 is kept close #3976.
"""
a1 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a1 *= np.float32(10)
assert a1.unit is u.m
assert a1.dtype == np.float32
a2 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.float32)
a2 += (20.*u.km)
assert a2.unit is u.m
assert a2.dtype == np.float32
# For integer, in-place only works if no conversion is done.
a3 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
a3 += u.Quantity(10, u.m, dtype=np.int64)
assert a3.unit is u.m
assert a3.dtype == np.int32
a4 = u.Quantity([1, 2, 3, 4], u.m, dtype=np.int32)
with pytest.raises(TypeError):
a4 += u.Quantity(10, u.mm, dtype=np.int64)
class TestUfuncAt:
"""Test that 'at' method for ufuncs (calculates in-place at given indices)
For Quantities, since calculations are in-place, it makes sense only
if the result is still a quantity, and if the unit does not have to change
"""
def test_one_argument_ufunc_at(self):
q = np.arange(10.) * u.m
i = np.array([1, 2])
qv = q.value.copy()
np.negative.at(q, i)
np.negative.at(qv, i)
assert np.all(q.value == qv)
assert q.unit is u.m
# cannot change from quantity to bool array
with pytest.raises(TypeError):
np.isfinite.at(q, i)
# for selective in-place, cannot change the unit
with pytest.raises(u.UnitsError):
np.square.at(q, i)
# except if the unit does not change (i.e., dimensionless)
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.square.at(d, i)
np.square.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
d = np.arange(10.) * u.dimensionless_unscaled
dv = d.value.copy()
np.log.at(d, i)
np.log.at(dv, i)
assert np.all(d.value == dv)
assert d.unit is u.dimensionless_unscaled
# also for sine it doesn't work, even if given an angle
a = np.arange(10.) * u.radian
with pytest.raises(u.UnitsError):
np.sin.at(a, i)
# except, for consistency, if we have made radian equivalent to
# dimensionless (though hopefully it will never be needed)
av = a.value.copy()
with u.add_enabled_equivalencies(u.dimensionless_angles()):
np.sin.at(a, i)
np.sin.at(av, i)
assert_allclose(a.value, av)
# but we won't do double conversion
ad = np.arange(10.) * u.degree
with pytest.raises(u.UnitsError):
np.sin.at(ad, i)
def test_two_argument_ufunc_at(self):
s = np.arange(10.) * u.m
i = np.array([1, 2])
check = s.value.copy()
np.add.at(s, i, 1.*u.km)
np.add.at(check, i, 1000.)
assert np.all(s.value == check)
assert s.unit is u.m
with pytest.raises(u.UnitsError):
np.add.at(s, i, 1.*u.s)
# also raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.at(s, i, 1*u.s)
# but be fine if it does not
s = np.arange(10.) * u.m
check = s.value.copy()
np.multiply.at(s, i, 2.*u.dimensionless_unscaled)
np.multiply.at(check, i, 2)
assert np.all(s.value == check)
s = np.arange(10.) * u.m
np.multiply.at(s, i, 2.)
assert np.all(s.value == check)
# of course cannot change class of data either
with pytest.raises(TypeError):
np.greater.at(s, i, 1.*u.km)
class TestUfuncReduceReduceatAccumulate:
"""Test 'reduce', 'reduceat' and 'accumulate' methods for ufuncs
For Quantities, it makes sense only if the unit does not have to change
"""
def test_one_argument_ufunc_reduce_accumulate(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
i = np.array([0, 5, 1, 6])
with pytest.raises(ValueError):
np.sin.reduce(s)
with pytest.raises(ValueError):
np.sin.accumulate(s)
with pytest.raises(ValueError):
np.sin.reduceat(s, i)
def test_two_argument_ufunc_reduce_accumulate(self):
s = np.arange(10.) * u.m
i = np.array([0, 5, 1, 6])
check = s.value.copy()
s_add_reduce = np.add.reduce(s)
check_add_reduce = np.add.reduce(check)
assert s_add_reduce.value == check_add_reduce
assert s_add_reduce.unit is u.m
s_add_accumulate = np.add.accumulate(s)
check_add_accumulate = np.add.accumulate(check)
assert np.all(s_add_accumulate.value == check_add_accumulate)
assert s_add_accumulate.unit is u.m
s_add_reduceat = np.add.reduceat(s, i)
check_add_reduceat = np.add.reduceat(check, i)
assert np.all(s_add_reduceat.value == check_add_reduceat)
assert s_add_reduceat.unit is u.m
# reduce(at) or accumulate on comparisons makes no sense,
# as intermediate result is not even a Quantity
with pytest.raises(TypeError):
np.greater.reduce(s)
with pytest.raises(TypeError):
np.greater.accumulate(s)
with pytest.raises(TypeError):
np.greater.reduceat(s, i)
# raise UnitsError if unit would have to be changed
with pytest.raises(u.UnitsError):
np.multiply.reduce(s)
with pytest.raises(u.UnitsError):
np.multiply.accumulate(s)
with pytest.raises(u.UnitsError):
np.multiply.reduceat(s, i)
# but be fine if it does not
s = np.arange(10.) * u.dimensionless_unscaled
check = s.value.copy()
s_multiply_reduce = np.multiply.reduce(s)
check_multiply_reduce = np.multiply.reduce(check)
assert s_multiply_reduce.value == check_multiply_reduce
assert s_multiply_reduce.unit is u.dimensionless_unscaled
s_multiply_accumulate = np.multiply.accumulate(s)
check_multiply_accumulate = np.multiply.accumulate(check)
assert np.all(s_multiply_accumulate.value == check_multiply_accumulate)
assert s_multiply_accumulate.unit is u.dimensionless_unscaled
s_multiply_reduceat = np.multiply.reduceat(s, i)
check_multiply_reduceat = np.multiply.reduceat(check, i)
assert np.all(s_multiply_reduceat.value == check_multiply_reduceat)
assert s_multiply_reduceat.unit is u.dimensionless_unscaled
class TestUfuncOuter:
"""Test 'outer' methods for ufuncs
Just a few spot checks, since it uses the same code as the regular
ufunc call
"""
def test_one_argument_ufunc_outer(self):
# one argument cannot be used
s = np.arange(10.) * u.radian
with pytest.raises(ValueError):
np.sin.outer(s)
def test_two_argument_ufunc_outer(self):
s1 = np.arange(10.) * u.m
s2 = np.arange(2.) * u.s
check1 = s1.value
check2 = s2.value
s12_multiply_outer = np.multiply.outer(s1, s2)
check12_multiply_outer = np.multiply.outer(check1, check2)
assert np.all(s12_multiply_outer.value == check12_multiply_outer)
assert s12_multiply_outer.unit == s1.unit * s2.unit
# raise UnitsError if appropriate
with pytest.raises(u.UnitsError):
np.add.outer(s1, s2)
# but be fine if it does not
s3 = np.arange(2.) * s1.unit
check3 = s3.value
s13_add_outer = np.add.outer(s1, s3)
check13_add_outer = np.add.outer(check1, check3)
assert np.all(s13_add_outer.value == check13_add_outer)
assert s13_add_outer.unit is s1.unit
s13_greater_outer = np.greater.outer(s1, s3)
check13_greater_outer = np.greater.outer(check1, check3)
assert type(s13_greater_outer) is np.ndarray
assert np.all(s13_greater_outer == check13_greater_outer)
if HAS_SCIPY:
from scipy import special as sps
class TestScipySpecialUfuncs:
erf_like_ufuncs = (
sps.erf, sps.gamma, sps.loggamma, sps.gammasgn, sps.psi,
sps.rgamma, sps.erfc, sps.erfcx, sps.erfi, sps.wofz, sps.dawsn,
sps.entr, sps.exprel, sps.expm1, sps.log1p, sps.exp2, sps.exp10)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_scalar(self, function):
TestQuantityMathFuncs.test_exp_scalar(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_array(self, function):
TestQuantityMathFuncs.test_exp_array(None, function)
@pytest.mark.parametrize('function', erf_like_ufuncs)
def test_erf_invalid_units(self, function):
TestQuantityMathFuncs.test_exp_invalid_units(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_scalar(self, function):
TestQuantityMathFuncs.test_cbrt_scalar(None, function)
@pytest.mark.parametrize('function', (sps.cbrt, ))
def test_cbrt_array(self, function):
TestQuantityMathFuncs.test_cbrt_array(None, function)
@pytest.mark.parametrize('function', (sps.radian, ))
def test_radian(self, function):
q1 = function(180. * u.degree, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q1.value, np.pi)
assert q1.unit == u.radian
q2 = function(0. * u.degree, 30. * u.arcmin, 0. * u.arcsec)
assert_allclose(q2.value, (30. * u.arcmin).to(u.radian).value)
assert q2.unit == u.radian
q3 = function(0. * u.degree, 0. * u.arcmin, 30. * u.arcsec)
assert_allclose(q3.value, (30. * u.arcsec).to(u.radian).value)
# the following doesn't make much sense in terms of the name of the
# routine, but we check it gives the correct result.
q4 = function(3. * u.radian, 0. * u.arcmin, 0. * u.arcsec)
assert_allclose(q4.value, 3.)
assert q4.unit == u.radian
with pytest.raises(TypeError):
function(3. * u.m, 2. * u.s, 1. * u.kg)
jv_like_ufuncs = (
sps.jv, sps.jn, sps.jve, sps.yn, sps.yv, sps.yve, sps.kn, sps.kv,
sps.kve, sps.iv, sps.ive, sps.hankel1, sps.hankel1e, sps.hankel2,
sps.hankel2e)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_scalar(self, function):
q = function(2. * u.m / (2. * u.m), 3. * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert q.value == function(1.0, 0.5)
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_array(self, function):
q = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.m))
assert q.unit == u.dimensionless_unscaled
assert np.all(q.value == function(
np.ones(3),
np.array([1. / 3., 1. / 2., 1.]))
)
# should also work on quantities that can be made dimensionless
q2 = function(np.ones(3) * u.m / (1. * u.m),
np.array([2., 3., 6.]) * u.m / (6. * u.cm))
assert q2.unit == u.dimensionless_unscaled
assert_allclose(q2.value,
function(np.ones(3),
np.array([100. / 3., 100. / 2., 100.])))
@pytest.mark.parametrize('function', jv_like_ufuncs)
def test_jv_invalid_units(self, function):
# Can't use jv() with non-dimensionless quantities
with pytest.raises(TypeError) as exc:
function(1. * u.kg, 3. * u.m / u.s)
assert exc.value.args[0] == ("Can only apply '{0}' function to "
"dimensionless quantities"
.format(function.__name__))
|
9c7df46d16153dde31322f8a18b487e83f27689b5c611eb2bf3e0537f30bc6f1 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains the coordinate frames actually implemented by astropy.
Users shouldn't use this module directly, but rather import from the
`astropy.coordinates` module. While it is likely to exist for the long-term,
the existence of this package and details of its organization should be
considered an implementation detail, and are not guaranteed to hold for future
versions of astropy.
Notes
-----
The builtin frame classes are all imported automatically into this package's
namespace, so there's no need to access the sub-modules directly.
To implement a new frame in Astropy, a developer should add the frame as a new
module in this package. Any "self" transformations (i.e., those that transform
from one frame to another frame of the same class) should be included in that
module. Transformation functions connecting the new frame to other frames
should be in a separate module, which should be imported in this package's
``__init__.py`` to ensure the transformations are hooked up when this package is
imported. Placing the transformation functions in separate modules avoids
circular dependencies, because they need references to the frame classes.
(A commented sketch of what such a frame module might look like follows this
docstring.)
"""
from .baseradec import BaseRADecFrame
from .icrs import ICRS
from .fk5 import FK5
from .fk4 import FK4, FK4NoETerms
from .galactic import Galactic
from .galactocentric import Galactocentric
from .lsr import LSR, GalacticLSR
from .supergalactic import Supergalactic
from .altaz import AltAz
from .gcrs import GCRS, PrecessedGeocentric
from .cirs import CIRS
from .itrs import ITRS
from .hcrs import HCRS
from .ecliptic import (GeocentricTrueEcliptic, BarycentricTrueEcliptic,
HeliocentricTrueEcliptic, BaseEclipticFrame)
from .skyoffset import SkyOffsetFrame
# need to import transformations so that they get registered in the graph
from . import icrs_fk5_transforms
from . import fk4_fk5_transforms
from . import galactic_transforms
from . import supergalactic_transforms
from . import icrs_cirs_transforms
from . import cirs_observed_transforms
from . import intermediate_rotation_transforms
from . import ecliptic_transforms
from ..baseframe import frame_transform_graph
# we define an __all__ because otherwise the transformation modules
# get included
__all__ = ['ICRS', 'FK5', 'FK4', 'FK4NoETerms', 'Galactic', 'Galactocentric',
'Supergalactic', 'AltAz', 'GCRS', 'CIRS', 'ITRS', 'HCRS',
'PrecessedGeocentric', 'GeocentricTrueEcliptic',
'BarycentricTrueEcliptic', 'HeliocentricTrueEcliptic',
'SkyOffsetFrame', 'GalacticLSR', 'LSR',
'BaseEclipticFrame', 'BaseRADecFrame', 'make_transform_graph_docs']
def make_transform_graph_docs(transform_graph):
"""
Generates a string that can be used in other docstrings to include a
transformation graph, showing the available transforms and
coordinate systems.
Parameters
----------
transform_graph : `~.coordinates.TransformGraph`
Returns
-------
docstring : str
A string that can be added to the end of a docstring to show the
transform graph.
"""
from textwrap import dedent
coosys = [transform_graph.lookup_name(item) for
item in transform_graph.get_names()]
# currently, all of the priorities are set to 1, so we don't need to show
# them in the transform graph.
graphstr = transform_graph.to_dot_graph(addnodes=coosys,
priorities=False)
docstr = """
The diagram below shows all of the coordinate systems built into the
`~astropy.coordinates` package, their aliases (useful for converting
other coordinates to them using attribute-style access) and the
pre-defined transformations between them. The user is free to
override any of these transformations by defining new transformations
between these systems, but the pre-defined transformations should be
sufficient for typical usage.
The color of an edge in the graph (i.e. the transformations between two
frames) is set by the type of transformation; the legend box defines the
mapping from transform class name to color.
.. graphviz::
"""
docstr = dedent(docstr) + ' ' + graphstr.replace('\n', '\n ')
# colors are in dictionary at the bottom of transformations.py
from ..transformations import trans_to_color
html_list_items = []
for cls, color in trans_to_color.items():
block = u"""
<li style='list-style: none;'>
<p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;">
<b>{0}:</b>
<span style="font-size: 24px; color: {1};"><b>➝</b></span>
</p>
</li>
""".format(cls.__name__, color)
html_list_items.append(block)
graph_legend = u"""
.. raw:: html
<ul>
{}
</ul>
""".format("\n".join(html_list_items))
docstr = docstr + dedent(graph_legend)
return docstr
_transform_graph_docs = make_transform_graph_docs(frame_transform_graph)
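# A usage sketch (hypothetical, not something this module does itself): a
# module that wants the graph to appear in its rendered docs could append the
# generated string to its docstring, e.g. ``__doc__ += _transform_graph_docs``.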
|
dd4dd96cc4767e02aff7c16b92fa796f8cb737fdfde185dc9ac998f87f763384 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from ... import units as u
from .. import transformations as t
from ..builtin_frames import ICRS, FK5, FK4, FK4NoETerms, Galactic, AltAz
from .. import representation as r
from ..baseframe import frame_transform_graph
from ...tests.helper import (assert_quantity_allclose as assert_allclose,
catch_warnings)
from ...time import Time
from ...units import allclose as quantity_allclose
# Coordinates just for these tests.
class TCoo1(ICRS):
pass
class TCoo2(ICRS):
pass
class TCoo3(ICRS):
pass
def test_transform_classes():
"""
Tests the class-based/OO syntax for creating transforms
"""
tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
trans1 = t.FunctionTransform(tfun, TCoo1, TCoo2,
register_graph=frame_transform_graph)
c1 = TCoo1(ra=1*u.radian, dec=0.5*u.radian)
c2 = c1.transform_to(TCoo2)
assert_allclose(c2.ra.radian, 1)
assert_allclose(c2.dec.radian, 0.5)
def matfunc(coo, fr):
return [[1, 0, 0],
[0, coo.ra.degree, 0],
[0, 0, 1]]
trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
trans2.register(frame_transform_graph)
c3 = TCoo1(ra=1*u.deg, dec=2*u.deg)
c4 = c3.transform_to(TCoo2)
assert_allclose(c4.ra.degree, 1)
assert_allclose(c4.dec.degree, 2)
# be sure to unregister the second one - no need for trans1 because it
# already got unregistered when trans2 was created.
trans2.unregister(frame_transform_graph)
def test_transform_decos():
"""
Tests the decorator syntax for creating transforms
"""
c1 = TCoo1(ra=1*u.deg, dec=2*u.deg)
@frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2)
def trans(coo1, f):
return TCoo2(ra=coo1.ra, dec=coo1.dec * 2)
c2 = c1.transform_to(TCoo2)
assert_allclose(c2.ra.degree, 1)
assert_allclose(c2.dec.degree, 4)
c3 = TCoo1(r.CartesianRepresentation(x=1*u.pc, y=1*u.pc, z=2*u.pc))
@frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2)
def matrix():
return [[2, 0, 0],
[0, 1, 0],
[0, 0, 1]]
c4 = c3.transform_to(TCoo2)
assert_allclose(c4.cartesian.x, 2*u.pc)
assert_allclose(c4.cartesian.y, 1*u.pc)
assert_allclose(c4.cartesian.z, 2*u.pc)
def test_shortest_path():
class FakeTransform:
def __init__(self, pri):
self.priority = pri
g = t.TransformGraph()
# cheating by adding graph elements directly that are not classes - the
# graphing algorithm still works fine with integers - it just isn't a valid
# TransformGraph
# the graph is a down-going diamond with the lower-right edge slightly
# heavier, a cycle from the bottom back to the top, and a pair of nodes
# isolated from 1
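# a rough sketch of the graph built below (edge weights in parentheses):
#
#         1
#   (1) /   \ (1)
#      2     3
#   (1) \   / (2)
#         4 ---(5)---> 1   (cycle back to the top)
#
#      5 ---(1)---> 6      (pair isolated from 1)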
g._graph[1][2] = FakeTransform(1)
g._graph[1][3] = FakeTransform(1)
g._graph[2][4] = FakeTransform(1)
g._graph[3][4] = FakeTransform(2)
g._graph[4][1] = FakeTransform(5)
g._graph[5][6] = FakeTransform(1)
path, d = g.find_shortest_path(1, 2)
assert path == [1, 2]
assert d == 1
path, d = g.find_shortest_path(1, 3)
assert path == [1, 3]
assert d == 1
path, d = g.find_shortest_path(1, 4)
print('Cached paths:', g._shortestpaths)
assert path == [1, 2, 4]
assert d == 2
# unreachable
path, d = g.find_shortest_path(1, 5)
assert path is None
assert d == float('inf')
path, d = g.find_shortest_path(5, 6)
assert path == [5, 6]
assert d == 1
def test_sphere_cart():
"""
Tests the spherical <-> cartesian transform functions
"""
from ...utils import NumpyRNGContext
from .. import spherical_to_cartesian, cartesian_to_spherical
x, y, z = spherical_to_cartesian(1, 0, 0)
assert_allclose(x, 1)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(0, 1, 1)
assert_allclose(x, 0)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4. / 5.))
assert_allclose(x, 3)
assert_allclose(y, 4)
assert_allclose(z, 0)
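# (a 3-4-5 triangle: with lat=0 and lon=arcsin(4/5), x = 5*cos(lon) = 3,
#  y = 5*sin(lon) = 4 and z = 5*sin(lat) = 0)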
r, lat, lon = cartesian_to_spherical(0, 1, 0)
assert_allclose(r, 1)
assert_allclose(lat, 0 * u.deg)
assert_allclose(lon, np.pi / 2 * u.rad)
# test round-tripping
with NumpyRNGContext(13579):
x, y, z = np.random.randn(3, 5)
r, lat, lon = cartesian_to_spherical(x, y, z)
x2, y2, z2 = spherical_to_cartesian(r, lat, lon)
assert_allclose(x, x2)
assert_allclose(y, y2)
assert_allclose(z, z2)
def test_transform_path_pri():
"""
This checks that the transformation path prioritization works by
making sure the ICRS -> Gal transformation always goes through FK5
and not FK4.
"""
frame_transform_graph.invalidate_cache()
tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic)
assert tpath == [ICRS, FK5, Galactic]
assert td == 2
# but direct from FK4 to Galactic should still be possible
tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic)
assert tpath == [FK4, FK4NoETerms, Galactic]
assert td == 2
def test_obstime():
"""
Checks to make sure observation time is
accounted for at least in FK4 <-> ICRS transformations
"""
b1950 = Time('B1950', scale='utc')
j1975 = Time('J1975', scale='utc')
fk4_50 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=b1950)
fk4_75 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=j1975)
icrs_50 = fk4_50.transform_to(ICRS)
icrs_75 = fk4_75.transform_to(ICRS)
# now check that the resulting coordinates are *different* - they should be,
# because the obstime is different
assert icrs_50.ra.degree != icrs_75.ra.degree
assert icrs_50.dec.degree != icrs_75.dec.degree
# ------------------------------------------------------------------------------
# Affine transform tests and helpers:
# just acting as a namespace
class transfunc:
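# Each classmethod below returns the (matrix, offset) pair that an
# AffineTransform transfunc must produce; None in either slot means
# "no rotation matrix" or "no offset", respectively.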
rep = r.CartesianRepresentation(np.arange(3)*u.pc)
dif = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr)
rep0 = r.CartesianRepresentation(np.zeros(3)*u.pc)
@classmethod
def both(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
return M, cls.rep.with_differentials(cls.dif)
@classmethod
def just_matrix(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
return M, None
@classmethod
def no_matrix(cls, coo, fr):
return None, cls.rep.with_differentials(cls.dif)
@classmethod
def no_pos(cls, coo, fr):
return None, cls.rep0.with_differentials(cls.dif)
@classmethod
def no_vel(cls, coo, fr):
return None, cls.rep
@pytest.mark.parametrize('transfunc', [transfunc.both, transfunc.no_matrix,
transfunc.no_pos, transfunc.no_vel,
transfunc.just_matrix])
@pytest.mark.parametrize('rep', [
r.CartesianRepresentation(5, 6, 7, unit=u.pc),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr)),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr))
.represent_as(r.CylindricalRepresentation, r.CylindricalDifferential)
])
def test_affine_transform_succeed(transfunc, rep):
c = TCoo1(rep)
# compute expected output
M, offset = transfunc(c, TCoo2)
_rep = rep.to_cartesian()
diffs = dict([(k, diff.represent_as(r.CartesianDifferential, rep))
for k, diff in rep.differentials.items()])
expected_rep = _rep.with_differentials(diffs)
if M is not None:
expected_rep = expected_rep.transform(M)
expected_pos = expected_rep.without_differentials()
if offset is not None:
expected_pos = expected_pos + offset.without_differentials()
expected_vel = None
if c.data.differentials:
expected_vel = expected_rep.differentials['s']
if offset and offset.differentials:
expected_vel = (expected_vel + offset.differentials['s'])
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2)
assert quantity_allclose(c2.data.to_cartesian().xyz,
expected_pos.to_cartesian().xyz)
if expected_vel is not None:
diff = c2.data.differentials['s'].to_cartesian(base=c2.data)
assert quantity_allclose(diff.xyz, expected_vel.d_xyz)
trans.unregister(frame_transform_graph)
# these should fail
def transfunc_invalid_matrix(coo, fr):
return np.eye(4), None
# Leaving this open in case we want to add more functions to check for failures
@pytest.mark.parametrize('transfunc', [transfunc_invalid_matrix])
def test_affine_transform_fail(transfunc):
diff = r.CartesianDifferential(8, 9, 10, unit=u.pc/u.Myr)
rep = r.CartesianRepresentation(5, 6, 7, unit=u.pc, differentials=diff)
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(ValueError):
c2 = c.transform_to(TCoo2)
trans.unregister(frame_transform_graph)
def test_too_many_differentials():
dif1 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr)
dif2 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr**2)
rep = r.CartesianRepresentation(np.arange(3)*u.pc,
differentials={'s': dif1, 's2': dif2})
with pytest.raises(ValueError):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
# Check that if frame somehow gets through to transformation, multiple
# differentials are caught
c = TCoo1(rep.without_differentials())
c._data = c._data.with_differentials({'s': dif1, 's2': dif2})
with pytest.raises(ValueError):
c2 = c.transform_to(TCoo2)
trans.unregister(frame_transform_graph)
# A matrix transform of a unit spherical with differentials should work
@pytest.mark.parametrize('rep', [
r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
differentials=r.SphericalDifferential(d_lon=15*u.mas/u.yr,
d_lat=11*u.mas/u.yr,
d_distance=-110*u.km/u.s)),
r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)}),
r.SphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
distance=150*u.pc,
differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)})
])
def test_unit_spherical_with_differentials(rep):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.just_matrix, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2)
assert 's' in rep.differentials
assert isinstance(c2.data.differentials['s'],
rep.differentials['s'].__class__)
if isinstance(rep.differentials['s'], r.RadialDifferential):
assert c2.data.differentials['s'] is rep.differentials['s']
trans.unregister(frame_transform_graph)
# should fail if we have to do offsets
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(TypeError):
c.transform_to(TCoo2)
trans.unregister(frame_transform_graph)
def test_vel_transformation_obstime_err():
# TODO: replace after a final decision on PR #6280
from ..sites import get_builtin_sites
diff = r.CartesianDifferential([.1, .2, .3]*u.km/u.s)
rep = r.CartesianRepresentation([1, 2, 3]*u.au, differentials=diff)
loc = get_builtin_sites()['example_site']
aaf = AltAz(obstime='J2010', location=loc)
aaf2 = AltAz(obstime=aaf.obstime + 3*u.day, location=loc)
aaf3 = AltAz(obstime=aaf.obstime + np.arange(3)*u.day, location=loc)
aaf4 = AltAz(obstime=aaf.obstime, location=loc)
aa = aaf.realize_frame(rep)
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf2)
assert 'cannot transform' in exc.value.args[0]
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf3)
assert 'cannot transform' in exc.value.args[0]
aa.transform_to(aaf4)
aa.transform_to(ICRS())
def test_function_transform_with_differentials():
tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
ftrans = t.FunctionTransform(tfun, TCoo3, TCoo2,
register_graph=frame_transform_graph)
t3 = TCoo3(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=1*u.marcsec/u.yr,
pm_dec=1*u.marcsec/u.yr,)
with catch_warnings() as w:
t2 = t3.transform_to(TCoo2)
assert len(w) == 1
assert 'they have been dropped' in str(w[0].message)
def test_frame_override_component_with_attribute():
"""
It was previously possible to define a frame with an attribute with the
same name as a component. We don't want to allow this!
"""
from ..baseframe import BaseCoordinateFrame
from ..attributes import Attribute
class BorkedFrame(BaseCoordinateFrame):
ra = Attribute(default=150)
dec = Attribute(default=150)
def trans_func(coo1, f):
pass
trans = t.FunctionTransform(trans_func, BorkedFrame, ICRS)
with pytest.raises(ValueError) as exc:
trans.register(frame_transform_graph)
assert ('BorkedFrame' in exc.value.args[0] and
"'ra'" in exc.value.args[0] and
"'dec'" in exc.value.args[0])
|
f53b8cd258a22a5d07d74b2a03838f013afd21085c78b56c727066c022cef6e6 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the SkyCoord class. Note that there are also SkyCoord tests in
test_api_ape5.py
"""
import copy
import pytest
import numpy as np
import numpy.testing as npt
from ... import units as u
from ...tests.helper import (catch_warnings,
assert_quantity_allclose as assert_allclose)
from ..representation import REPRESENTATION_CLASSES
from ...coordinates import (ICRS, FK4, FK5, Galactic, SkyCoord, Angle,
SphericalRepresentation, CartesianRepresentation,
UnitSphericalRepresentation, AltAz,
BaseCoordinateFrame, Attribute,
frame_transform_graph, RepresentationMapping)
from ...coordinates import Latitude, EarthLocation
from ...time import Time
from ...utils import minversion, isiterable
from ...utils.compat import NUMPY_LT_1_14
from ...utils.exceptions import AstropyDeprecationWarning
from ...units import allclose as quantity_allclose
RA = 1.0 * u.deg
DEC = 2.0 * u.deg
C_ICRS = ICRS(RA, DEC)
C_FK5 = C_ICRS.transform_to(FK5)
J2001 = Time('J2001', scale='utc')
def allclose(a, b, rtol=0.0, atol=None):
if atol is None:
atol = 1.e-8 * getattr(a, 'unit', 1.)
return quantity_allclose(a, b, rtol, atol)
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
if HAS_SCIPY and minversion(scipy, '0.12.0', inclusive=False):
OLDER_SCIPY = False
else:
OLDER_SCIPY = True
def test_transform_to():
for frame in (FK5, FK5(equinox=Time('J1975.0')),
FK4, FK4(equinox=Time('J1975.0')),
SkyCoord(RA, DEC, 'fk4', equinox='J1980')):
c_frame = C_ICRS.transform_to(frame)
s_icrs = SkyCoord(RA, DEC, frame='icrs')
s_frame = s_icrs.transform_to(frame)
assert allclose(c_frame.ra, s_frame.ra)
assert allclose(c_frame.dec, s_frame.dec)
assert allclose(c_frame.distance, s_frame.distance)
# set up for parametrized test
rt_sets = []
rt_frames = [ICRS, FK4, FK5, Galactic]
for rt_frame0 in rt_frames:
for rt_frame1 in rt_frames:
for equinox0 in (None, 'J1975.0'):
for obstime0 in (None, 'J1980.0'):
for equinox1 in (None, 'J1975.0'):
for obstime1 in (None, 'J1980.0'):
rt_sets.append((rt_frame0, rt_frame1,
equinox0, equinox1,
obstime0, obstime1))
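# (the nested loops above are just a hand-rolled cartesian product; the same
# list could be built with itertools.product over the frames, equinox and
# obstime values)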
rt_args = ('frame0', 'frame1', 'equinox0', 'equinox1', 'obstime0', 'obstime1')
@pytest.mark.parametrize(rt_args, rt_sets)
def test_round_tripping(frame0, frame1, equinox0, equinox1, obstime0, obstime1):
"""
Test round tripping out and back using transform_to in every combination.
"""
attrs0 = {'equinox': equinox0, 'obstime': obstime0}
attrs1 = {'equinox': equinox1, 'obstime': obstime1}
# Remove None values
attrs0 = dict((k, v) for k, v in attrs0.items() if v is not None)
attrs1 = dict((k, v) for k, v in attrs1.items() if v is not None)
# Go out and back
sc = SkyCoord(frame0, RA, DEC, **attrs0)
# Keep only frame attributes for frame1
attrs1 = dict((attr, val) for attr, val in attrs1.items()
if attr in frame1.get_frame_attr_names())
sc2 = sc.transform_to(frame1(**attrs1))
# When coming back only keep frame0 attributes for transform_to
attrs0 = dict((attr, val) for attr, val in attrs0.items()
if attr in frame0.get_frame_attr_names())
# also, if any are None, fill in with defaults
for attrnm in frame0.get_frame_attr_names():
if attrs0.get(attrnm, None) is None:
if attrnm == 'obstime' and frame0.get_frame_attr_names()[attrnm] is None:
if 'equinox' in attrs0:
attrs0[attrnm] = attrs0['equinox']
else:
attrs0[attrnm] = frame0.get_frame_attr_names()[attrnm]
sc_rt = sc2.transform_to(frame0(**attrs0))
if frame0 is Galactic:
assert allclose(sc.l, sc_rt.l)
assert allclose(sc.b, sc_rt.b)
else:
assert allclose(sc.ra, sc_rt.ra)
assert allclose(sc.dec, sc_rt.dec)
if equinox0:
assert type(sc.equinox) is Time and sc.equinox == sc_rt.equinox
if obstime0:
assert type(sc.obstime) is Time and sc.obstime == sc_rt.obstime
def test_coord_init_string():
"""
String input coordinates in various formats.
"""
sc = SkyCoord('1d 2d')
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord('1d', '2d')
assert allclose(sc.ra, 1 * u.deg)
assert allclose(sc.dec, 2 * u.deg)
sc = SkyCoord('1°2′3″', '2°3′4″')
assert allclose(sc.ra, Angle('1°2′3″'))
assert allclose(sc.dec, Angle('2°3′4″'))
sc = SkyCoord('1°2′3″ 2°3′4″')
assert allclose(sc.ra, Angle('1°2′3″'))
assert allclose(sc.dec, Angle('2°3′4″'))
with pytest.raises(ValueError) as err:
SkyCoord('1d 2d 3d')
assert "Cannot parse first argument data" in str(err)
sc1 = SkyCoord('8 00 00 +5 00 00.0', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc1, SkyCoord)
assert allclose(sc1.ra, Angle(120 * u.deg))
assert allclose(sc1.dec, Angle(5 * u.deg))
sc11 = SkyCoord('8h00m00s+5d00m00.0s', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc11, SkyCoord)
assert allclose(sc11.ra, Angle(120 * u.deg))
assert allclose(sc11.dec, Angle(5 * u.deg))
sc2 = SkyCoord('8 00 -5 00 00.0', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc2, SkyCoord)
assert allclose(sc2.ra, Angle(120 * u.deg))
assert allclose(sc2.dec, Angle(-5 * u.deg))
sc3 = SkyCoord('8 00 -5 00.6', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc3, SkyCoord)
assert allclose(sc3.ra, Angle(120 * u.deg))
assert allclose(sc3.dec, Angle(-5.01 * u.deg))
sc4 = SkyCoord('J080000.00-050036.00', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc4, SkyCoord)
assert allclose(sc4.ra, Angle(120 * u.deg))
assert allclose(sc4.dec, Angle(-5.01 * u.deg))
sc41 = SkyCoord('J080000+050036', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc41, SkyCoord)
assert allclose(sc41.ra, Angle(120 * u.deg))
assert allclose(sc41.dec, Angle(+5.01 * u.deg))
sc5 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='icrs')
assert isinstance(sc5, SkyCoord)
assert allclose(sc5.ra, Angle(120.15 * u.deg))
assert allclose(sc5.dec, Angle(-5.01 * u.deg))
sc6 = SkyCoord('8h00.6m -5d00.6m', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc6, SkyCoord)
assert allclose(sc6.ra, Angle(120.15 * u.deg))
assert allclose(sc6.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord('8h00.6m-5d00.6m', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc61, SkyCoord)
assert allclose(sc61.ra, Angle(120.15 * u.deg))
assert allclose(sc61.dec, Angle(-5.01 * u.deg))
sc61 = SkyCoord('8h00.6-5d00.6', unit=(u.hour, u.deg), frame='fk4')
assert isinstance(sc61, SkyCoord)
assert allclose(sc61.ra, Angle(120.15 * u.deg))
assert allclose(sc61.dec, Angle(-5.01 * u.deg))
sc7 = SkyCoord("J1874221.60+122421.6", unit=u.deg)
assert isinstance(sc7, SkyCoord)
assert allclose(sc7.ra, Angle(187.706 * u.deg))
assert allclose(sc7.dec, Angle(12.406 * u.deg))
with pytest.raises(ValueError):
SkyCoord('8 00 -5 00.6', unit=(u.deg, u.deg), frame='galactic')
def test_coord_init_unit():
"""
Test variations of the unit keyword.
"""
for unit in ('deg', 'deg,deg', ' deg , deg ', u.deg, (u.deg, u.deg),
np.array(['deg', 'deg'])):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(1 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ('hourangle', 'hourangle,hourangle', ' hourangle , hourangle ',
u.hourangle, [u.hourangle, u.hourangle]):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(30 * u.deg))
for unit in ('hourangle,deg', (u.hourangle, u.deg)):
sc = SkyCoord(1, 2, unit=unit)
assert allclose(sc.ra, Angle(15 * u.deg))
assert allclose(sc.dec, Angle(2 * u.deg))
for unit in ('deg,deg,deg,deg', [u.deg, u.deg, u.deg, u.deg], None):
with pytest.raises(ValueError) as err:
SkyCoord(1, 2, unit=unit)
assert 'Unit keyword must have one to three unit values' in str(err)
for unit in ('m', (u.m, u.deg), ''):
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, unit=unit)
def test_coord_init_list():
"""
Lists of coordinate inputs (tuples of strings/Quantities and plain strings).
"""
sc = SkyCoord([('1d', '2d'),
(1 * u.deg, 2 * u.deg),
'1d 2d',
('1°', '2°'),
'1° 2°'], unit='deg')
assert allclose(sc.ra, Angle('1d'))
assert allclose(sc.dec, Angle('2d'))
with pytest.raises(ValueError) as err:
SkyCoord(['1d 2d 3d'])
assert "Cannot parse first argument data" in str(err)
with pytest.raises(ValueError) as err:
SkyCoord([('1d', '2d', '3d')])
assert "Cannot parse first argument data" in str(err)
sc = SkyCoord([1 * u.deg, 1 * u.deg], [2 * u.deg, 2 * u.deg])
assert allclose(sc.ra, Angle('1d'))
assert allclose(sc.dec, Angle('2d'))
with pytest.raises(ValueError) as err:
SkyCoord([1 * u.deg, 2 * u.deg]) # this list is taken as RA w/ missing dec
assert "One or more elements of input sequence does not have a length" in str(err)
def test_coord_init_array():
"""
Input in the form of a list of strings, a list of lists, or a numpy array.
"""
for a in (['1 2', '3 4'],
[['1', '2'], ['3', '4']],
[[1, 2], [3, 4]]):
sc = SkyCoord(a, unit='deg')
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
sc = SkyCoord(np.array(a), unit='deg')
assert allclose(sc.ra - [1, 3] * u.deg, 0 * u.deg)
assert allclose(sc.dec - [2, 4] * u.deg, 0 * u.deg)
def test_coord_init_representation():
"""
Spherical or Cartesian representation input coordinates.
"""
coord = SphericalRepresentation(lon=8 * u.deg, lat=5 * u.deg, distance=1 * u.kpc)
sc = SkyCoord(coord, 'icrs')
assert allclose(sc.ra, coord.lon)
assert allclose(sc.dec, coord.lat)
assert allclose(sc.distance, coord.distance)
with pytest.raises(ValueError) as err:
SkyCoord(coord, 'icrs', ra='1d')
assert "conflicts with keyword argument 'ra'" in str(err)
coord = CartesianRepresentation(1 * u.one, 2 * u.one, 3 * u.one)
sc = SkyCoord(coord, 'icrs')
sc_cart = sc.represent_as(CartesianRepresentation)
assert allclose(sc_cart.x, 1.0)
assert allclose(sc_cart.y, 2.0)
assert allclose(sc_cart.z, 3.0)
FRAME_DEPRECATION_WARNING = ("Passing a frame as a positional argument is now "
"deprecated, use the frame= keyword argument "
"instead.")
def test_frame_init():
"""
Different ways of providing the frame.
"""
sc = SkyCoord(RA, DEC, frame='icrs')
assert sc.frame.name == 'icrs'
sc = SkyCoord(RA, DEC, frame=ICRS)
assert sc.frame.name == 'icrs'
with catch_warnings(AstropyDeprecationWarning) as w:
sc = SkyCoord(RA, DEC, 'icrs')
assert sc.frame.name == 'icrs'
assert len(w) == 1
assert str(w[0].message) == FRAME_DEPRECATION_WARNING
with catch_warnings(AstropyDeprecationWarning) as w:
sc = SkyCoord(RA, DEC, ICRS)
assert sc.frame.name == 'icrs'
assert len(w) == 1
assert str(w[0].message) == FRAME_DEPRECATION_WARNING
with catch_warnings(AstropyDeprecationWarning) as w:
sc = SkyCoord('icrs', RA, DEC)
assert sc.frame.name == 'icrs'
assert len(w) == 1
assert str(w[0].message) == FRAME_DEPRECATION_WARNING
with catch_warnings(AstropyDeprecationWarning) as w:
sc = SkyCoord(ICRS, RA, DEC)
assert sc.frame.name == 'icrs'
assert len(w) == 1
assert str(w[0].message) == FRAME_DEPRECATION_WARNING
sc = SkyCoord(sc)
assert sc.frame.name == 'icrs'
sc = SkyCoord(C_ICRS)
assert sc.frame.name == 'icrs'
sc = SkyCoord(C_ICRS, frame='icrs')
assert sc.frame.name == 'icrs'
with pytest.raises(ValueError) as err:
SkyCoord(C_ICRS, frame='galactic')
assert 'Cannot override frame=' in str(err)
def test_attr_inheritance():
"""
When initializing from an existing coord the representation attrs like
equinox should be inherited to the SkyCoord. If there is a conflict
then raise an exception.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # Doesn't have equinox there so we get FK4 defaults
assert sc2.equinox != sc.equinox
assert sc2.obstime != sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
sc2 = SkyCoord(sc)
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
sc2 = SkyCoord(sc.frame) # sc.frame has equinox, obstime
assert sc2.equinox == sc.equinox
assert sc2.obstime == sc.obstime
assert allclose(sc2.ra, sc.ra)
assert allclose(sc2.dec, sc.dec)
assert allclose(sc2.distance, sc.distance)
def test_attr_conflicts():
"""
Check conflicts resolution between coordinate attributes and init kwargs.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox='J1999', obstime='J2001')
# OK because sc.frame doesn't have obstime
SkyCoord(sc.frame, equinox='J1999', obstime='J2100')
# Not OK if attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox='J1999', obstime='J2002')
assert "Coordinate attribute 'obstime'=" in str(err)
# Same game but with fk4 which has equinox and obstime frame attrs
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
# OK if attrs both specified but with identical values
SkyCoord(sc, equinox='J1999', obstime='J2001')
# Not OK if SkyCoord attrs don't match
with pytest.raises(ValueError) as err:
SkyCoord(sc, equinox='J1999', obstime='J2002')
assert "Coordinate attribute 'obstime'=" in str(err)
# Not OK because sc.frame has different attrs
with pytest.raises(ValueError) as err:
SkyCoord(sc.frame, equinox='J1999', obstime='J2002')
assert "Coordinate attribute 'obstime'=" in str(err)
def test_frame_attr_getattr():
"""
When accessing frame attributes like equinox, the value should come
from self.frame when that object has the relevant attribute, otherwise
from self.
"""
sc = SkyCoord(1, 2, frame='icrs', unit='deg', equinox='J1999', obstime='J2001')
assert sc.equinox == 'J1999' # Just the raw value (not validated)
assert sc.obstime == 'J2001'
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999', obstime='J2001')
assert sc.equinox == Time('J1999') # Coming from the self.frame object
assert sc.obstime == Time('J2001')
sc = SkyCoord(1, 2, frame='fk4', unit='deg', equinox='J1999')
assert sc.equinox == Time('J1999')
assert sc.obstime == Time('J1999')
def test_to_string():
"""
Basic testing of converting SkyCoord to strings. This just tests
for a single input coordinate and a 1-element list. It does not
test the underlying `Angle.to_string` method itself.
"""
coord = '1h2m3s 1d2m3s'
for wrap in (lambda x: x, lambda x: [x]):
sc = SkyCoord(wrap(coord))
assert sc.to_string() == wrap('15.5125 1.03417')
assert sc.to_string('dms') == wrap('15d30m45s 1d02m03s')
assert sc.to_string('hmsdms') == wrap('01h02m03s +01d02m03s')
with_kwargs = sc.to_string('hmsdms', precision=3, pad=True, alwayssign=True)
assert with_kwargs == wrap('+01h02m03.000s +01d02m03.000s')
def test_seps():
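# separation gives the on-sky (angular) separation; separation_3d requires
# distances on both coordinates and returns a physical 3D separation.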
sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs')
sc2 = SkyCoord(0 * u.deg, 2 * u.deg, frame='icrs')
sep = sc1.separation(sc2)
assert abs((sep - 1 * u.deg)/u.deg) < 1e-10
with pytest.raises(ValueError):
sc1.separation_3d(sc2)
sc3 = SkyCoord(1 * u.deg, 1 * u.deg, distance=1 * u.kpc, frame='icrs')
sc4 = SkyCoord(1 * u.deg, 1 * u.deg, distance=2 * u.kpc, frame='icrs')
sep3d = sc3.separation_3d(sc4)
assert sep3d == 1 * u.kpc
def test_repr():
sc1 = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs')
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc)
assert repr(sc1) == ('<SkyCoord (ICRS): (ra, dec) in deg\n'
' ({})>').format(' 0., 1.' if NUMPY_LT_1_14 else
'0., 1.')
assert repr(sc2) == ('<SkyCoord (ICRS): (ra, dec, distance) in (deg, deg, kpc)\n'
' ({})>').format(' 1., 1., 1.' if NUMPY_LT_1_14
else '1., 1., 1.')
sc3 = SkyCoord(0.25 * u.deg, [1, 2.5] * u.deg, frame='icrs')
assert repr(sc3).startswith('<SkyCoord (ICRS): (ra, dec) in deg\n')
sc_default = SkyCoord(0 * u.deg, 1 * u.deg)
assert repr(sc_default) == ('<SkyCoord (ICRS): (ra, dec) in deg\n'
' ({})>').format(' 0., 1.' if NUMPY_LT_1_14
else '0., 1.')
def test_repr_altaz():
sc2 = SkyCoord(1 * u.deg, 1 * u.deg, frame='icrs', distance=1 * u.kpc)
loc = EarthLocation(-2309223 * u.m, -3695529 * u.m, -4641767 * u.m)
time = Time('2005-03-21 00:00:00')
sc4 = sc2.transform_to(AltAz(location=loc, obstime=time))
assert repr(sc4).startswith("<SkyCoord (AltAz: obstime=2005-03-21 00:00:00.000, "
"location=(-2309223.0, -3695529.0, "
"-4641767.0) m, pressure=0.0 hPa, "
"temperature=0.0 deg_C, relative_humidity=0, "
"obswl=1.0 micron): (az, alt, distance) in "
"(deg, deg, m)\n")
def test_ops():
"""
Tests miscellaneous operations like `len`
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg, frame='icrs')
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg, frame='icrs')
sc_empty = SkyCoord([] * u.deg, [] * u.deg, frame='icrs')
assert sc.isscalar
assert not sc_arr.isscalar
assert not sc_empty.isscalar
with pytest.raises(TypeError):
len(sc)
assert len(sc_arr) == 2
assert len(sc_empty) == 0
assert bool(sc)
assert bool(sc_arr)
assert not bool(sc_empty)
assert sc_arr[0].isscalar
assert len(sc_arr[:1]) == 1
# A scalar shouldn't be indexable
with pytest.raises(TypeError):
sc[0:]
# but it should be possible to just get an item
sc_item = sc[()]
assert sc_item.shape == ()
# and to turn it into an array
sc_1d = sc[np.newaxis]
assert sc_1d.shape == (1,)
with pytest.raises(TypeError):
iter(sc)
assert not isiterable(sc)
assert isiterable(sc_arr)
assert isiterable(sc_empty)
it = iter(sc_arr)
assert next(it).dec == sc_arr[0].dec
assert next(it).dec == sc_arr[1].dec
with pytest.raises(StopIteration):
next(it)
def test_none_transform():
"""
Ensure that transforming from a SkyCoord with no frame provided works like
ICRS
"""
sc = SkyCoord(0 * u.deg, 1 * u.deg)
sc_arr = SkyCoord(0 * u.deg, [1, 2] * u.deg)
sc2 = sc.transform_to(ICRS)
assert sc.ra == sc2.ra and sc.dec == sc2.dec
sc5 = sc.transform_to('fk5')
assert sc5.ra == sc2.transform_to('fk5').ra
sc_arr2 = sc_arr.transform_to(ICRS)
sc_arr5 = sc_arr.transform_to('fk5')
npt.assert_array_equal(sc_arr5.ra, sc_arr2.transform_to('fk5').ra)
def test_position_angle():
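# By convention the position angle is measured from North through East, so a
# point due East of another lies at ~90 deg and a point due North at 0 deg.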
c1 = SkyCoord(0*u.deg, 0*u.deg)
c2 = SkyCoord(1*u.deg, 0*u.deg)
assert_allclose(c1.position_angle(c2) - 90.0 * u.deg, 0*u.deg)
c3 = SkyCoord(1*u.deg, 0.1*u.deg)
assert c1.position_angle(c3) < 90*u.deg
c4 = SkyCoord(0*u.deg, 1*u.deg)
assert_allclose(c1.position_angle(c4), 0*u.deg)
carr1 = SkyCoord(0*u.deg, [0, 1, 2]*u.deg)
carr2 = SkyCoord([-1, -2, -3]*u.deg, [0.1, 1.1, 2.1]*u.deg)
res = carr1.position_angle(carr2)
assert res.shape == (3,)
assert np.all(res < 360*u.degree)
assert np.all(res > 270*u.degree)
cicrs = SkyCoord(0*u.deg, 0*u.deg, frame='icrs')
cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5')
# because of the frame transform, it's just a *bit* more than 90 degrees
assert cicrs.position_angle(cfk5) > 90.0 * u.deg
assert cicrs.position_angle(cfk5) < 91.0 * u.deg
def test_position_angle_directly():
"""Regression check for #3800: position_angle should accept floats."""
from ..angle_utilities import position_angle
result = position_angle(10., 20., 10., 20.)
assert result.unit is u.radian
assert result.value == 0.
def test_sep_pa_equivalence():
"""Regression check for bug in #5702.
PA and separation from object 1 to 2 should be consistent with those
from 2 to 1
"""
cfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5')
cfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950')
# test with both default and explicit equinox #5722 and #3106
sep_forward = cfk5.separation(cfk5B1950)
sep_backward = cfk5B1950.separation(cfk5)
assert sep_forward != 0 and sep_backward != 0
assert_allclose(sep_forward, sep_backward)
posang_forward = cfk5.position_angle(cfk5B1950)
posang_backward = cfk5B1950.position_angle(cfk5)
assert posang_forward != 0 and posang_backward != 0
assert 179 < (posang_forward - posang_backward).wrap_at(360*u.deg).degree < 181
dcfk5 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', distance=1*u.pc)
dcfk5B1950 = SkyCoord(1*u.deg, 0*u.deg, frame='fk5', equinox='B1950',
distance=1.*u.pc)
sep3d_forward = dcfk5.separation_3d(dcfk5B1950)
sep3d_backward = dcfk5B1950.separation_3d(dcfk5)
assert sep3d_forward != 0 and sep3d_backward != 0
assert_allclose(sep3d_forward, sep3d_backward)
def test_table_to_coord():
"""
Checks "end-to-end" use of `Table` with `SkyCoord` - the `Quantity`
initializer is the intermediary that translates the table columns into
something coordinates understands.
(Regression test for #1762)
"""
from ...table import Table, Column
t = Table()
t.add_column(Column(data=[1, 2, 3], name='ra', unit=u.deg))
t.add_column(Column(data=[4, 5, 6], name='dec', unit=u.deg))
c = SkyCoord(t['ra'], t['dec'])
assert allclose(c.ra.to(u.deg), [1, 2, 3] * u.deg)
assert allclose(c.dec.to(u.deg), [4, 5, 6] * u.deg)
def assert_quantities_allclose(coord, q1s, attrs):
"""
Compare two tuples of quantities. This assumes that the values in q1 are of
order(1) and uses atol=1e-13, rtol=0. It also asserts that the units of the
two quantities are the *same*, in order to check that the representation
output has the expected units.
"""
q2s = [getattr(coord, attr) for attr in attrs]
assert len(q1s) == len(q2s)
for q1, q2 in zip(q1s, q2s):
assert q1.shape == q2.shape
assert allclose(q1, q2, rtol=0, atol=1e-13 * q1.unit)
# Sets of inputs corresponding to Galactic frame
base_unit_attr_sets = [
('spherical', u.karcsec, u.karcsec, u.kpc, Latitude, 'l', 'b', 'distance'),
('unitspherical', u.karcsec, u.karcsec, None, Latitude, 'l', 'b', None),
('physicsspherical', u.karcsec, u.karcsec, u.kpc, Angle, 'phi', 'theta', 'r'),
('cartesian', u.km, u.km, u.km, u.Quantity, 'u', 'v', 'w'),
('cylindrical', u.km, u.karcsec, u.km, Angle, 'rho', 'phi', 'z')
]
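# Build the full parametrization from the base sets: each representation is
# exercised both by name and by class, with scalar, list, and numpy array
# component values.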
units_attr_sets = []
for base_unit_attr_set in base_unit_attr_sets:
repr_name = base_unit_attr_set[0]
for representation in (repr_name, REPRESENTATION_CLASSES[repr_name]):
for c1, c2, c3 in ((1, 2, 3), ([1], [2], [3])):
for arrayify in True, False:
if arrayify:
c1 = np.array(c1)
c2 = np.array(c2)
c3 = np.array(c3)
units_attr_sets.append(base_unit_attr_set + (representation, c1, c2, c3))
units_attr_args = ('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3', 'representation', 'c1', 'c2', 'c3')
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets if x[0] != 'unitspherical'])
def test_skycoord_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3,
representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(Galactic, c1, c2, c3, unit=(unit1, unit2, unit3),
representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
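# Passing each value as 1000 * value in a unit scaled down by 1000 should
# yield the same quantity, exercising unit conversion of positional inputs.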
sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
1000*c3*u.Unit(unit3/1000), Galactic,
unit=(unit1, unit2, unit3), representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr3: c3}
sc = SkyCoord(Galactic, c1, c2, unit=(unit1, unit2, unit3),
representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr1: c1, attr2: c2, attr3: c3}
sc = SkyCoord(Galactic, unit=(unit1, unit2, unit3),
representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets
if x[0] in ('spherical', 'unitspherical')])
def test_skycoord_spherical_two_components(repr_name, unit1, unit2, unit3, cls2,
attr1, attr2, attr3, representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = SkyCoord(Galactic, c1, c2, unit=(unit1, unit2),
representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
sc = SkyCoord(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
Galactic,
unit=(unit1, unit2, unit3), representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
kwargs = {attr1: c1, attr2: c2}
sc = SkyCoord(Galactic, unit=(unit1, unit2),
representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2),
(attr1, attr2))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets if x[0] != 'unitspherical'])
def test_galactic_three_components(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3,
representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2, COMP3)
and various representations. Use weird units and Galactic frame.
"""
sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2),
1000*c3*u.Unit(unit3/1000), representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr3: c3*unit3}
sc = Galactic(c1*unit1, c2*unit2,
representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
kwargs = {attr1: c1*unit1, attr2: c2*unit2, attr3: c3*unit3}
sc = Galactic(representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2, c3*unit3),
(attr1, attr2, attr3))
@pytest.mark.parametrize(units_attr_args,
[x for x in units_attr_sets
if x[0] in ('spherical', 'unitspherical')])
def test_galactic_spherical_two_components(repr_name, unit1, unit2, unit3, cls2,
attr1, attr2, attr3, representation, c1, c2, c3):
"""
Tests positional inputs using components (COMP1, COMP2) for spherical
representations. Use weird units and Galactic frame.
"""
sc = Galactic(1000*c1*u.Unit(unit1/1000), cls2(c2, unit=unit2), representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
sc = Galactic(c1*unit1, c2*unit2, representation=representation)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
kwargs = {attr1: c1*unit1, attr2: c2*unit2}
sc = Galactic(representation=representation, **kwargs)
assert_quantities_allclose(sc, (c1*unit1, c2*unit2), (attr1, attr2))
@pytest.mark.parametrize(('repr_name', 'unit1', 'unit2', 'unit3', 'cls2', 'attr1', 'attr2', 'attr3'),
[x for x in base_unit_attr_sets if x[0] != 'unitspherical'])
def test_skycoord_coordinate_input(repr_name, unit1, unit2, unit3, cls2, attr1, attr2, attr3):
c1, c2, c3 = 1, 2, 3
sc = SkyCoord([(c1, c2, c3)], unit=(unit1, unit2, unit3), representation=repr_name,
frame='galactic')
assert_quantities_allclose(sc, ([c1]*unit1, [c2]*unit2, [c3]*unit3), (attr1, attr2, attr3))
c1, c2, c3 = 1*unit1, 2*unit2, 3*unit3
sc = SkyCoord([(c1, c2, c3)], representation=repr_name, frame='galactic')
assert_quantities_allclose(sc, ([1]*unit1, [2]*unit2, [3]*unit3), (attr1, attr2, attr3))
def test_skycoord_string_coordinate_input():
sc = SkyCoord('01 02 03 +02 03 04', unit='deg', representation='unitspherical')
assert_quantities_allclose(sc, (Angle('01:02:03', unit='deg'),
Angle('02:03:04', unit='deg')),
('ra', 'dec'))
sc = SkyCoord(['01 02 03 +02 03 04'], unit='deg', representation='unitspherical')
assert_quantities_allclose(sc, (Angle(['01:02:03'], unit='deg'),
Angle(['02:03:04'], unit='deg')),
('ra', 'dec'))
def test_units():
sc = SkyCoord(1, 2, 3, unit='m', representation='cartesian') # All get meters
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2*u.km, 3, unit='m', representation='cartesian') # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit=u.m, representation='cartesian') # All get u.m
assert sc.x.unit is u.m
assert sc.y.unit is u.m
assert sc.z.unit is u.m
sc = SkyCoord(1, 2, 3, unit='m, km, pc', representation='cartesian')
assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z'))
with pytest.raises(u.UnitsError) as err:
SkyCoord(1, 2, 3, unit=(u.m, u.m), representation='cartesian')
assert 'should have matching physical types' in str(err)
sc = SkyCoord(1, 2, 3, unit=(u.m, u.km, u.pc), representation='cartesian')
assert_quantities_allclose(sc, (1*u.m, 2*u.km, 3*u.pc), ('x', 'y', 'z'))
@pytest.mark.xfail
def test_units_known_fail():
# should fail but doesn't => corner case oddity
with pytest.raises(u.UnitsError):
SkyCoord(1, 2, 3, unit=u.deg, representation='spherical')
def test_nodata_failure():
with pytest.raises(ValueError):
SkyCoord()
@pytest.mark.parametrize(('mode', 'origin'), [('wcs', 0),
('all', 0),
('all', 1)])
def test_wcs_methods(mode, origin):
from ...wcs import WCS
from ...utils.data import get_pkg_data_contents
from ...wcs.utils import pixel_to_skycoord
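# origin=0 corresponds to 0-based (numpy-style) pixel coordinates and
# origin=1 to 1-based (FITS-style) pixel coordinates.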
header = get_pkg_data_contents('../../wcs/tests/maps/1904-66_TAN.hdr', encoding='binary')
wcs = WCS(header)
ref = SkyCoord(0.1 * u.deg, -89. * u.deg, frame='icrs')
xp, yp = ref.to_pixel(wcs, mode=mode, origin=origin)
# WCS is in FK5 so we need to transform back to ICRS
new = pixel_to_skycoord(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs')
assert_allclose(new.ra.degree, ref.ra.degree)
assert_allclose(new.dec.degree, ref.dec.degree)
# also try to round-trip with `from_pixel`
scnew = SkyCoord.from_pixel(xp, yp, wcs, mode=mode, origin=origin).transform_to('icrs')
assert_allclose(scnew.ra.degree, ref.ra.degree)
assert_allclose(scnew.dec.degree, ref.dec.degree)
# Also make sure the right type comes out
class SkyCoord2(SkyCoord):
pass
scnew2 = SkyCoord2.from_pixel(xp, yp, wcs, mode=mode, origin=origin)
assert scnew.__class__ is SkyCoord
assert scnew2.__class__ is SkyCoord2
def test_frame_attr_transform_inherit():
"""
Test that frame attributes get inherited as expected during transform.
Driven by #3106.
"""
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK5)
c2 = c.transform_to(FK4)
assert c2.equinox.value == 'B1950.000'
assert c2.obstime.value == 'B1950.000'
c2 = c.transform_to(FK4(equinox='J1975', obstime='J1980'))
assert c2.equinox.value == 'J1975.000'
assert c2.obstime.value == 'J1980.000'
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4)
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J2000.000'
assert c2.obstime is None
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, obstime='J1980')
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J2000.000'
assert c2.obstime.value == 'J1980.000'
c = SkyCoord(1 * u.deg, 2 * u.deg, frame=FK4, equinox='J1975', obstime='J1980')
c2 = c.transform_to(FK5)
assert c2.equinox.value == 'J1975.000'
assert c2.obstime.value == 'J1980.000'
c2 = c.transform_to(FK5(equinox='J1990'))
assert c2.equinox.value == 'J1990.000'
assert c2.obstime.value == 'J1980.000'
# The work-around for #5722
c = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5')
c1 = SkyCoord(1 * u.deg, 2 * u.deg, frame='fk5', equinox='B1950.000')
c2 = c1.transform_to(c)
assert not c2.is_equivalent_frame(c) # counterintuitive, but documented
assert c2.equinox.value == 'B1950.000'
c3 = c1.transform_to(c, merge_attributes=False)
assert c3.equinox.value == 'J2000.000'
assert c3.is_equivalent_frame(c)
def test_deepcopy():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
c2 = copy.copy(c1)
c3 = copy.deepcopy(c1)
c4 = SkyCoord([1, 2] * u.m, [2, 3] * u.m, [3, 4] * u.m, representation='cartesian', frame='fk5',
obstime='J1999.9', equinox='J1988.8')
c5 = copy.deepcopy(c4)
assert np.all(c5.x == c4.x) # and y and z
assert c5.frame.name == c4.frame.name
assert c5.obstime == c4.obstime
assert c5.equinox == c4.equinox
assert c5.representation == c4.representation
def test_no_copy():
c1 = SkyCoord(np.arange(10.) * u.hourangle, np.arange(20., 30.) * u.deg)
c2 = SkyCoord(c1, copy=False)
# Note: c1.ra and c2.ra will *not* share memory, as these are recalculated
# to be in "preferred" units. See discussion in #4883.
assert np.may_share_memory(c1.data.lon, c2.data.lon)
c3 = SkyCoord(c1, copy=True)
assert not np.may_share_memory(c1.data.lon, c3.data.lon)
def test_immutable():
c1 = SkyCoord(1 * u.deg, 2 * u.deg)
with pytest.raises(AttributeError):
c1.ra = 3.0
c1.foo = 42
assert c1.foo == 42
@pytest.mark.skipif(str('not HAS_SCIPY'))
@pytest.mark.skipif(str('OLDER_SCIPY'))
def test_search_around():
"""
Test the search_around_* methods
Here we don't actually test the values are right, just that the methods of
SkyCoord work. The accuracy tests are in ``test_matching.py``
"""
from ...utils import NumpyRNGContext
with NumpyRNGContext(987654321):
sc1 = SkyCoord(np.random.rand(20) * 360.*u.degree,
(np.random.rand(20) * 180. - 90.)*u.degree)
sc2 = SkyCoord(np.random.rand(100) * 360. * u.degree,
(np.random.rand(100) * 180. - 90.)*u.degree)
sc1ds = SkyCoord(ra=sc1.ra, dec=sc1.dec, distance=np.random.rand(20)*u.kpc)
sc2ds = SkyCoord(ra=sc2.ra, dec=sc2.dec, distance=np.random.rand(100)*u.kpc)
idx1_sky, idx2_sky, d2d_sky, d3d_sky = sc1.search_around_sky(sc2, 10*u.deg)
idx1_3d, idx2_3d, d2d_3d, d3d_3d = sc1ds.search_around_3d(sc2ds, 250*u.pc)
def test_init_with_frame_instance_keyword():
# Frame instance
c1 = SkyCoord(3 * u.deg, 4 * u.deg,
frame=FK5(equinox='J2010'))
assert c1.equinox == Time('J2010')
# Frame instance with data (data gets ignored)
c2 = SkyCoord(3 * u.deg, 4 * u.deg,
frame=FK5(1. * u.deg, 2 * u.deg,
equinox='J2010'))
assert c2.equinox == Time('J2010')
assert allclose(c2.ra.degree, 3)
assert allclose(c2.dec.degree, 4)
# SkyCoord instance
c3 = SkyCoord(3 * u.deg, 4 * u.deg, frame=c1)
assert c3.equinox == Time('J2010')
# Check duplicate arguments
with pytest.raises(ValueError) as exc:
c = SkyCoord(3 * u.deg, 4 * u.deg, frame=FK5(equinox='J2010'), equinox='J2001')
assert exc.value.args[0] == ("cannot specify frame attribute "
"'equinox' directly in SkyCoord "
"since a frame instance was passed in")
def test_init_with_frame_instance_positional():
# Frame instance
with pytest.raises(ValueError) as exc:
c1 = SkyCoord(3 * u.deg, 4 * u.deg, FK5(equinox='J2010'))
assert exc.value.args[0] == ("FK5 instance cannot be passed as a "
"positional argument for the frame, "
"pass it using the frame= keyword "
"instead.")
# Positional frame instance with data raises exception
with pytest.raises(ValueError) as exc:
SkyCoord(3 * u.deg, 4 * u.deg, FK5(1. * u.deg, 2 * u.deg, equinox='J2010'))
assert exc.value.args[0] == ("FK5 instance cannot be passed as a "
"positional argument for the frame, "
"pass it using the frame= keyword "
"instead.")
# Positional SkyCoord instance (for frame) raises exception
with pytest.raises(ValueError) as exc:
SkyCoord(3 * u.deg, 4 * u.deg, SkyCoord(1. * u.deg, 2 * u.deg, equinox='J2010'))
assert exc.value.args[0] == ("SkyCoord instance cannot be passed as a "
"positional argument for the frame, "
"pass it using the frame= keyword "
"instead.")
def test_guess_from_table():
from ...table import Table, Column
from ...utils import NumpyRNGContext
tab = Table()
with NumpyRNGContext(987654321):
tab.add_column(Column(data=np.random.rand(1000), unit='deg', name='RA[J2000]'))
tab.add_column(Column(data=np.random.rand(1000), unit='deg', name='DEC[J2000]'))
sc = SkyCoord.guess_from_table(tab)
npt.assert_array_equal(sc.ra.deg, tab['RA[J2000]'])
npt.assert_array_equal(sc.dec.deg, tab['DEC[J2000]'])
# try without units in the table
tab['RA[J2000]'].unit = None
tab['DEC[J2000]'].unit = None
# should fail if not given explicitly
with pytest.raises(u.UnitsError):
sc2 = SkyCoord.guess_from_table(tab)
# but should work if provided
sc2 = SkyCoord.guess_from_table(tab, unit=u.deg)
npt.assert_array_equal(sc.ra.deg, tab['RA[J2000]'])
npt.assert_array_equal(sc.dec.deg, tab['DEC[J2000]'])
# should fail if two options are available - ambiguity bad!
tab.add_column(Column(data=np.random.rand(1000), name='RA_J1900'))
with pytest.raises(ValueError) as excinfo:
sc3 = SkyCoord.guess_from_table(tab, unit=u.deg)
assert 'J1900' in excinfo.value.args[0] and 'J2000' in excinfo.value.args[0]
# should also fail if user specifies something already in the table, but
# should succeed even if the user has to give one of the components
tab.remove_column('RA_J1900')
with pytest.raises(ValueError):
sc3 = SkyCoord.guess_from_table(tab, ra=tab['RA[J2000]'], unit=u.deg)
oldra = tab['RA[J2000]']
tab.remove_column('RA[J2000]')
sc3 = SkyCoord.guess_from_table(tab, ra=oldra, unit=u.deg)
npt.assert_array_equal(sc3.ra.deg, oldra)
npt.assert_array_equal(sc3.dec.deg, tab['DEC[J2000]'])
# check a few non-ICRS/spherical systems
x, y, z = np.arange(3).reshape(3, 1) * u.pc
l, b = np.arange(2).reshape(2, 1) * u.deg
tabcart = Table([x, y, z], names=('x', 'y', 'z'))
tabgal = Table([b, l], names=('b', 'l'))
sc_cart = SkyCoord.guess_from_table(tabcart, representation='cartesian')
npt.assert_array_equal(sc_cart.x, x)
npt.assert_array_equal(sc_cart.y, y)
npt.assert_array_equal(sc_cart.z, z)
sc_gal = SkyCoord.guess_from_table(tabgal, frame='galactic')
npt.assert_array_equal(sc_gal.l, l)
npt.assert_array_equal(sc_gal.b, b)
# also try some column names that *end* with the attribute name
tabgal['b'].name = 'gal_b'
tabgal['l'].name = 'gal_l'
SkyCoord.guess_from_table(tabgal, frame='galactic')
tabgal['gal_b'].name = 'blob'
tabgal['gal_l'].name = 'central'
with pytest.raises(ValueError):
SkyCoord.guess_from_table(tabgal, frame='galactic')
def test_skycoord_list_creation():
"""
Test that SkyCoord can be created in a reasonable way with lists of SkyCoords
(regression for #2702)
"""
sc = SkyCoord(ra=[1, 2, 3]*u.deg, dec=[4, 5, 6]*u.deg)
sc0 = sc[0]
sc2 = sc[2]
scnew = SkyCoord([sc0, sc2])
assert np.all(scnew.ra == [1, 3]*u.deg)
assert np.all(scnew.dec == [4, 6]*u.deg)
# also check ranges
sc01 = sc[:2]
scnew2 = SkyCoord([sc01, sc2])
assert np.all(scnew2.ra == sc.ra)
assert np.all(scnew2.dec == sc.dec)
# now try with a mix of skycoord, frame, and repr objects
frobj = ICRS(2*u.deg, 5*u.deg)
reprobj = UnitSphericalRepresentation(3*u.deg, 6*u.deg)
scnew3 = SkyCoord([sc0, frobj, reprobj])
assert np.all(scnew3.ra == sc.ra)
assert np.all(scnew3.dec == sc.dec)
# should *fail* if different frame attributes or types are passed in
scfk5_j2000 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5')
with pytest.raises(ValueError):
SkyCoord([sc0, scfk5_j2000])
scfk5_j2010 = SkyCoord(1*u.deg, 4*u.deg, frame='fk5', equinox='J2010')
with pytest.raises(ValueError):
SkyCoord([scfk5_j2000, scfk5_j2010])
# but they should inherit if they're all consistent
scfk5_2_j2010 = SkyCoord(2*u.deg, 5*u.deg, frame='fk5', equinox='J2010')
scfk5_3_j2010 = SkyCoord(3*u.deg, 6*u.deg, frame='fk5', equinox='J2010')
scnew4 = SkyCoord([scfk5_j2010, scfk5_2_j2010, scfk5_3_j2010])
assert np.all(scnew4.ra == sc.ra)
assert np.all(scnew4.dec == sc.dec)
assert scnew4.equinox == Time('J2010')
def test_nd_skycoord_to_string():
c = SkyCoord(np.ones((2, 2)), 1, unit=('deg', 'deg'))
ts = c.to_string()
assert np.all(ts.shape == c.shape)
assert np.all(ts == u'1 1')
def test_equiv_skycoord():
sci1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
sci2 = SkyCoord(1*u.deg, 3*u.deg, frame='icrs')
assert sci1.is_equivalent_frame(sci1)
assert sci1.is_equivalent_frame(sci2)
assert sci1.is_equivalent_frame(ICRS())
assert not sci1.is_equivalent_frame(FK5())
with pytest.raises(TypeError):
sci1.is_equivalent_frame(10)
scf1 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5')
scf2 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', equinox='J2005')
# obstime is *not* an FK5 attribute, but we still want scf1 and scf3 to
# come out different because they're part of SkyCoord
scf3 = SkyCoord(1*u.deg, 2*u.deg, frame='fk5', obstime='J2005')
assert scf1.is_equivalent_frame(scf1)
assert not scf1.is_equivalent_frame(sci1)
assert scf1.is_equivalent_frame(FK5())
assert not scf1.is_equivalent_frame(scf2)
assert scf2.is_equivalent_frame(FK5(equinox='J2005'))
assert not scf3.is_equivalent_frame(scf1)
assert not scf3.is_equivalent_frame(FK5(equinox='J2005'))
def test_constellations():
# the actual test for accuracy is in test_funcs - this is just meant to make
# sure we get sensible answers
sc = SkyCoord(135*u.deg, 65*u.deg)
assert sc.get_constellation() == 'Ursa Major'
assert sc.get_constellation(short_name=True) == 'UMa'
scs = SkyCoord([135]*2*u.deg, [65]*2*u.deg)
npt.assert_equal(scs.get_constellation(), ['Ursa Major']*2)
npt.assert_equal(scs.get_constellation(short_name=True), ['UMa']*2)
@pytest.mark.remote_data
def test_constellations_with_nameresolve():
assert SkyCoord.from_name('And I').get_constellation(short_name=True) == 'And'
# you'd think "And ..." should be in Andromeda. But you'd be wrong.
assert SkyCoord.from_name('And VI').get_constellation() == 'Pegasus'
# maybe it's because And VI isn't really a galaxy?
assert SkyCoord.from_name('And XXII').get_constellation() == 'Pisces'
assert SkyCoord.from_name('And XXX').get_constellation() == 'Cassiopeia'
# ok maybe not
# ok, but at least some of the others do make sense...
assert SkyCoord.from_name('Coma Cluster').get_constellation(short_name=True) == 'Com'
assert SkyCoord.from_name('UMa II').get_constellation() == 'Ursa Major'
assert SkyCoord.from_name('Triangulum Galaxy').get_constellation() == 'Triangulum'
def test_getitem_representation():
"""
Make sure current representation survives __getitem__ even if different
from data representation.
"""
sc = SkyCoord([1, 1] * u.deg, [2, 2] * u.deg)
sc.representation = 'cartesian'
assert sc[0].representation is CartesianRepresentation
def test_spherical_offsets():
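# spherical_offsets_to returns the (longitude-like, latitude-like) angular
# offsets needed to go from self to the given coordinate.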
i00 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='icrs')
i01 = SkyCoord(0*u.arcmin, 1*u.arcmin, frame='icrs')
i10 = SkyCoord(1*u.arcmin, 0*u.arcmin, frame='icrs')
i11 = SkyCoord(1*u.arcmin, 1*u.arcmin, frame='icrs')
i22 = SkyCoord(2*u.arcmin, 2*u.arcmin, frame='icrs')
dra, ddec = i00.spherical_offsets_to(i01)
assert_allclose(dra, 0*u.arcmin)
assert_allclose(ddec, 1*u.arcmin)
dra, ddec = i00.spherical_offsets_to(i10)
assert_allclose(dra, 1*u.arcmin)
assert_allclose(ddec, 0*u.arcmin)
dra, ddec = i10.spherical_offsets_to(i01)
assert_allclose(dra, -1*u.arcmin)
assert_allclose(ddec, 1*u.arcmin)
dra, ddec = i11.spherical_offsets_to(i22)
assert_allclose(ddec, 1*u.arcmin)
assert 0*u.arcmin < dra < 1*u.arcmin
fk5 = SkyCoord(0*u.arcmin, 0*u.arcmin, frame='fk5')
with pytest.raises(ValueError):
# different frames should fail
i00.spherical_offsets_to(fk5)
i1deg = ICRS(1*u.deg, 1*u.deg)
dra, ddec = i00.spherical_offsets_to(i1deg)
assert_allclose(dra, 1*u.deg)
assert_allclose(ddec, 1*u.deg)
# make sure an abbreviated array-based version of the above also works
i00s = SkyCoord([0]*4*u.arcmin, [0]*4*u.arcmin, frame='icrs')
i01s = SkyCoord([0]*4*u.arcmin, np.arange(4)*u.arcmin, frame='icrs')
dra, ddec = i00s.spherical_offsets_to(i01s)
assert_allclose(dra, 0*u.arcmin)
assert_allclose(ddec, np.arange(4)*u.arcmin)
def test_frame_attr_changes():
"""
This tests the case where a frame is added with a new frame attribute after
a SkyCoord has been created. This is necessary because SkyCoords get the
attributes set at creation time, but the set of attributes can change as
frames are added or removed from the transform graph. This makes sure that
everything continues to work consistently.
"""
sc_before = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
assert 'fakeattr' not in dir(sc_before)
class FakeFrame(BaseCoordinateFrame):
fakeattr = Attribute()
# doesn't matter what this does as long as it just puts the frame in the
# transform graph
transset = (ICRS, FakeFrame, lambda c, f: c)
frame_transform_graph.add_transform(*transset)
try:
assert 'fakeattr' in dir(sc_before)
assert sc_before.fakeattr is None
sc_after1 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs')
assert 'fakeattr' in dir(sc_after1)
assert sc_after1.fakeattr is None
sc_after2 = SkyCoord(1*u.deg, 2*u.deg, frame='icrs', fakeattr=1)
assert sc_after2.fakeattr == 1
finally:
frame_transform_graph.remove_transform(*transset)
assert 'fakeattr' not in dir(sc_before)
assert 'fakeattr' not in dir(sc_after1)
assert 'fakeattr' not in dir(sc_after2)
def test_cache_clear_sc():
from .. import SkyCoord
i = SkyCoord(1*u.deg, 2*u.deg)
# Add an in frame units version of the rep to the cache.
repr(i)
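# The cache now holds the original representation plus the in-frame-units
# version created by repr() above, hence two entries.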
assert len(i.cache['representation']) == 2
i.cache.clear()
assert len(i.cache['representation']) == 0
def test_set_attribute_exceptions():
"""Ensure no attrbute for any frame can be set directly.
Though it is fine if the current frame does not have it."""
sc = SkyCoord(1.*u.deg, 2.*u.deg, frame='fk5')
assert hasattr(sc.frame, 'equinox')
with pytest.raises(AttributeError):
sc.equinox = 'B1950'
assert sc.relative_humidity is None
sc.relative_humidity = 0.5
assert sc.relative_humidity == 0.5
assert not hasattr(sc.frame, 'relative_humidity')
def test_extra_attributes():
"""Ensure any extra attributes are dealt with correctly.
Regression test against #5743.
"""
obstime_string = ['2017-01-01T00:00', '2017-01-01T00:10']
obstime = Time(obstime_string)
sc = SkyCoord([5, 10], [20, 30], unit=u.deg, obstime=obstime_string)
assert not hasattr(sc.frame, 'obstime')
assert type(sc.obstime) is Time
assert sc.obstime.shape == (2,)
assert np.all(sc.obstime == obstime)
# ensure equivalency still works for more than one obstime.
assert sc.is_equivalent_frame(sc)
sc_1 = sc[1]
assert sc_1.obstime == obstime[1]
# Transforming to FK4 should use sc.obstime.
sc_fk4 = sc.transform_to('fk4')
assert np.all(sc_fk4.frame.obstime == obstime)
# And transforming back should not lose it.
sc2 = sc_fk4.transform_to('icrs')
assert not hasattr(sc2.frame, 'obstime')
assert np.all(sc2.obstime == obstime)
# Ensure obstime get taken from the SkyCoord if passed in directly.
# (regression test for #5749).
sc3 = SkyCoord([0., 1.], [2., 3.], unit='deg', frame=sc)
assert np.all(sc3.obstime == obstime)
# Finally, check that we can delete such attributes.
del sc3.obstime
assert sc3.obstime is None
def test_apply_space_motion():
# use this 12 year period because it's a multiple of 4 to avoid the quirks
# of leap years while having 2 leap seconds in it
t1 = Time('2000-01-01T00:00')
t2 = Time('2012-01-01T00:00')
# Check a very simple case first:
frame = ICRS(ra=10.*u.deg, dec=0*u.deg,
distance=10.*u.pc,
pm_ra_cosdec=0.1*u.deg/u.yr,
pm_dec=0*u.mas/u.yr,
radial_velocity=0*u.km/u.s)
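# With pm_ra_cosdec = 0.1 deg/yr at dec = 0, the source moves by roughly
# 1.2 deg in RA over the 12 yr interval (and ~0.6 deg over the 6 yr case
# further below).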
# Cases that should work (just testing input for now):
c1 = SkyCoord(frame, obstime=t1, pressure=101*u.kPa)
applied1 = c1.apply_space_motion(new_obstime=t2)
applied2 = c1.apply_space_motion(dt=12*u.year)
assert isinstance(applied1.frame, c1.frame.__class__)
assert isinstance(applied2.frame, c1.frame.__class__)
assert_allclose(applied1.ra, applied2.ra)
assert_allclose(applied1.pm_ra, applied2.pm_ra)
assert_allclose(applied1.dec, applied2.dec)
assert_allclose(applied1.distance, applied2.distance)
# ensure any frame attributes that were there before get passed through
assert applied1.pressure == c1.pressure
# there were 2 leap seconds between 2000 and 2012 (at the ends of 2005 and
# 2008), so the difference in the two forms of time evolution should be
# ~2 sec
adt = np.abs(applied2.obstime - applied1.obstime)
assert 1.9*u.second < adt.to(u.second) < 2.1*u.second
c2 = SkyCoord(frame)
applied3 = c2.apply_space_motion(dt=6*u.year)
assert isinstance(applied3.frame, c1.frame.__class__)
assert applied3.obstime is None
# this should *not* be .6 deg due to space-motion on a sphere, but it
# should be fairly close
assert 0.5*u.deg < applied3.ra-c1.ra < .7*u.deg
# because the motion is along a sphere, doubling dt should only roughly
# double the offset, but the two cases should be at least this close
assert quantity_allclose(applied1.ra-c1.ra, (applied3.ra-c1.ra)*2, atol=1e-3*u.deg)
# but *not* this close
assert not quantity_allclose(applied1.ra-c1.ra, (applied3.ra-c1.ra)*2, atol=1e-4*u.deg)
with pytest.raises(ValueError):
c2.apply_space_motion(new_obstime=t2)
def test_custom_frame_skycoord():
# also regression check for the case from #7069
class BlahBleeBlopFrame(BaseCoordinateFrame):
default_representation = SphericalRepresentation
# without a differential, SkyCoord creation fails
# default_differential = SphericalDifferential
_frame_specific_representation_info = {
'spherical': [
RepresentationMapping('lon', 'lon', 'recommended'),
RepresentationMapping('lat', 'lat', 'recommended'),
RepresentationMapping('distance', 'radius', 'recommended')
]
}
SkyCoord(lat=1*u.deg, lon=2*u.deg, frame=BlahBleeBlopFrame)
def test_user_friendly_pm_error():
"""
This checks that a more user-friendly error message is raised for the user
if they pass, e.g., pm_ra instead of pm_ra_cosdec
"""
with pytest.raises(ValueError) as e:
SkyCoord(ra=150*u.deg, dec=-11*u.deg,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr)
assert 'pm_ra_cosdec' in str(e.value)
with pytest.raises(ValueError) as e:
SkyCoord(l=150*u.deg, b=-11*u.deg,
pm_l=100*u.mas/u.yr, pm_b=10*u.mas/u.yr,
frame='galactic')
assert 'pm_l_cosb' in str(e.value)
# The special error should not turn on here:
with pytest.raises(ValueError) as e:
SkyCoord(x=1*u.pc, y=2*u.pc, z=3*u.pc,
pm_ra=100*u.mas/u.yr, pm_dec=10*u.mas/u.yr,
representation_type='cartesian')
assert 'pm_ra_cosdec' not in str(e.value)
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This includes tests for the Distance class and related calculations
"""
import pytest
import numpy as np
from numpy import testing as npt
from ... import units as u
from ...units import allclose as quantity_allclose
from .. import Longitude, Latitude, Distance, CartesianRepresentation
from ..builtin_frames import ICRS, Galactic
try:
import scipy # pylint: disable=W0611
except ImportError:
HAS_SCIPY = False
else:
HAS_SCIPY = True
def test_distances():
"""
Tests functionality for Coordinate class distances and cartesian
transformations.
"""
'''
Distances can also be specified, and allow for a full 3D definition of a
coordinate.
'''
# try all the different ways to initialize a Distance
distance = Distance(12, u.parsec)
Distance(40, unit=u.au)
Distance(value=5, unit=u.kpc)
# need to provide a unit
with pytest.raises(u.UnitsError):
Distance(12)
# standard units are pre-defined
npt.assert_allclose(distance.lyr, 39.138765325702551)
npt.assert_allclose(distance.km, 370281309776063.0)
# Coordinate objects can be assigned a distance object, giving them a full
# 3D position
c = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree,
distance=Distance(12, u.parsec))
# or initialize distances via redshifts - this is actually tested in the
# function below that checks for scipy. This is kept here as an example
# c.distance = Distance(z=0.2) # uses current cosmology
# with whatever your preferred cosmology may be
# c.distance = Distance(z=0.2, cosmology=WMAP5)
# Coordinate objects can be initialized with a distance using special
# syntax
c1 = Galactic(l=158.558650*u.deg, b=-43.350066*u.deg, distance=12 * u.kpc)
# Coordinate objects can be instantiated with cartesian coordinates
# Internally they will immediately be converted to two angles + a distance
cart = CartesianRepresentation(x=2 * u.pc, y=4 * u.pc, z=8 * u.pc)
c2 = Galactic(cart)
sep12 = c1.separation_3d(c2)
# returns a *3d* distance between the c1 and c2 coordinates
# note that the result is a Distance object, not a plain Quantity
assert isinstance(sep12, Distance)
npt.assert_allclose(sep12.pc, 12005.784163916317, 10)
'''
All spherical coordinate systems with distances can be converted to
cartesian coordinates.
'''
cartrep2 = c2.cartesian
assert isinstance(cartrep2.x, u.Quantity)
npt.assert_allclose(cartrep2.x.value, 2)
npt.assert_allclose(cartrep2.y.value, 4)
npt.assert_allclose(cartrep2.z.value, 8)
# with no distance, the unit sphere is assumed when converting to cartesian
c3 = Galactic(l=158.558650*u.degree, b=-43.350066*u.degree, distance=None)
unitcart = c3.cartesian
npt.assert_allclose(((unitcart.x**2 + unitcart.y**2 +
unitcart.z**2)**0.5).value, 1.0)
# TODO: choose between these when CartesianRepresentation gets a definite
# decision on whether or not it gets __add__
#
# CartesianRepresentation objects can be added and subtracted, which are
# vector/elementwise operations; they can also be given as arguments to a
# coordinate system
# csum = ICRS(c1.cartesian + c2.cartesian)
csumrep = CartesianRepresentation(c1.cartesian.xyz + c2.cartesian.xyz)
csum = ICRS(csumrep)
npt.assert_allclose(csumrep.x.value, -8.12016610185)
npt.assert_allclose(csumrep.y.value, 3.19380597435)
npt.assert_allclose(csumrep.z.value, -8.2294483707)
npt.assert_allclose(csum.ra.degree, 158.529401774)
npt.assert_allclose(csum.dec.degree, -43.3235825777)
npt.assert_allclose(csum.distance.kpc, 11.9942200501)
@pytest.mark.skipif(str('not HAS_SCIPY'))
def test_distances_scipy():
"""
The distance-related tests that require scipy due to the cosmology
module needing scipy integration routines
"""
from ...cosmology import WMAP5
# try different ways to initialize a Distance
d4 = Distance(z=0.23) # uses default cosmology - as of writing, WMAP7
npt.assert_allclose(d4.z, 0.23, rtol=1e-8)
d5 = Distance(z=0.23, cosmology=WMAP5)
npt.assert_allclose(d5.compute_z(WMAP5), 0.23, rtol=1e-8)
d6 = Distance(z=0.23, cosmology=WMAP5, unit=u.km)
npt.assert_allclose(d6.value, 3.5417046898762366e+22)
def test_distance_change():
ra = Longitude("4:08:15.162342", unit=u.hour)
dec = Latitude("-41:08:15.162342", unit=u.degree)
c1 = ICRS(ra, dec, Distance(1, unit=u.kpc))
oldx = c1.cartesian.x.value
assert abs(oldx - 0.35284083171901953) < 1e-10
# first make sure distances are immutable
with pytest.raises(AttributeError):
c1.distance = Distance(2, unit=u.kpc)
# now x should increase as the distance increases
c2 = ICRS(ra, dec, Distance(2, unit=u.kpc))
assert c2.cartesian.x.value == oldx * 2
def test_distance_is_quantity():
"""
test that distance behaves like a proper quantity
"""
Distance(2 * u.kpc)
d = Distance([2, 3.1], u.kpc)
assert d.shape == (2,)
a = d.view(np.ndarray)
q = d.view(u.Quantity)
a[0] = 1.2
q.value[1] = 5.4
assert d[0].value == 1.2
assert d[1].value == 5.4
q = u.Quantity(d, copy=True)
q.value[1] = 0
assert q.value[1] == 0
assert d.value[1] != 0
# regression test against #2261
d = Distance([2 * u.kpc, 250. * u.pc])
assert d.unit is u.kpc
assert np.all(d.value == np.array([2., 0.25]))
def test_distmod():
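# The distance modulus is distmod = 5 * log10(d / 10 pc), so d = 10 pc
# gives distmod = 0 and distmod = 20 corresponds to 10**5 pc = 100 kpc.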
d = Distance(10, u.pc)
assert d.distmod.value == 0
d = Distance(distmod=20)
assert d.distmod.value == 20
assert d.kpc == 100
d = Distance(distmod=-1., unit=u.au)
npt.assert_allclose(d.value, 1301442.9440836983)
with pytest.raises(ValueError):
d = Distance(value=d, distmod=20)
with pytest.raises(ValueError):
d = Distance(z=.23, distmod=20)
# check the Mpc/kpc/pc behavior
assert Distance(distmod=1).unit == u.pc
assert Distance(distmod=11).unit == u.kpc
assert Distance(distmod=26).unit == u.Mpc
assert Distance(distmod=-21).unit == u.AU
# if an array, uses the mean of the log of the distances
assert Distance(distmod=[1, 11, 26]).unit == u.kpc
def test_parallax():
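# Distance from parallax is d[pc] = 1 / parallax[arcsec], so 1 arcsec
# maps to 1 pc and 1 mas to 1 kpc.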
d = Distance(parallax=1*u.arcsecond)
assert d.pc == 1.
with pytest.raises(ValueError):
d = Distance(15*u.pc, parallax=20*u.milliarcsecond)
with pytest.raises(ValueError):
d = Distance(parallax=20*u.milliarcsecond, distmod=20)
# array
plx = [1, 10, 100.]*u.mas
d = Distance(parallax=plx)
assert quantity_allclose(d.pc, [1000., 100., 10.])
assert quantity_allclose(plx, d.parallax)
def test_distance_in_coordinates():
"""
test that distances can be created from quantities and that cartesian
representations come out right
"""
ra = Longitude("4:08:15.162342", unit=u.hour)
dec = Latitude("-41:08:15.162342", unit=u.degree)
coo = ICRS(ra, dec, distance=2*u.kpc)
cart = coo.cartesian
assert isinstance(cart.xyz, u.Quantity)
def test_negative_distance():
""" Test optional kwarg allow_negative """
with pytest.raises(ValueError):
Distance([-2, 3.1], u.kpc)
with pytest.raises(ValueError):
Distance([-2, -3.1], u.kpc)
with pytest.raises(ValueError):
Distance(-2, u.kpc)
d = Distance(-2, u.kpc, allow_negative=True)
assert d.value == -2
def test_distance_comparison():
"""Ensure comparisons of distances work (#2206, #2250)"""
a = Distance(15*u.kpc)
b = Distance(15*u.kpc)
assert a == b
c = Distance(1.*u.Mpc)
assert a < c
def test_distance_to_quantity_when_not_units_of_length():
"""Any operatation that leaves units other than those of length
should turn a distance into a quantity (#2206, #2250)"""
d = Distance(15*u.kpc)
twice = 2.*d
assert isinstance(twice, Distance)
area = 4.*np.pi*d**2
assert area.unit.is_equivalent(u.m**2)
assert not isinstance(area, Distance)
assert type(area) is u.Quantity
# Licensed under a 3-clause BSD style license - see PYFITS.rst
import bz2
import gzip
import itertools
import os
import shutil
import sys
import warnings
import numpy as np
from . import compressed
from .base import _BaseHDU, _ValidHDU, _NonstandardHDU, ExtensionHDU
from .groups import GroupsHDU
from .image import PrimaryHDU, ImageHDU
from ..file import _File
from ..header import _pad_length
from ..util import (_is_int, _tmp_name, fileobj_closed, ignore_sigint,
_get_array_mmap, _free_space_check)
from ..verify import _Verify, _ErrList, VerifyError, VerifyWarning
from ....utils import indent
from ....utils.exceptions import AstropyUserWarning
from ....utils.decorators import deprecated_renamed_argument
def fitsopen(name, mode='readonly', memmap=None, save_backup=False,
cache=True, lazy_load_hdus=None, **kwargs):
"""Factory function to open a FITS file and return an `HDUList` object.
Parameters
----------
name : file path, file object, file-like object or pathlib.Path object
File to be opened.
mode : str, optional
Open mode, 'readonly' (default), 'update', 'append', 'denywrite', or
'ostream'.
If ``name`` is a file object that is already opened, ``mode`` must
match the mode the file was opened with: readonly (rb), update (rb+),
append (ab+), ostream (w), denywrite (rb).
memmap : bool, optional
Is memory mapping to be used? ``memmap=True`` by default. This value
is obtained from the configuration item
``astropy.io.fits.Conf.use_memmap``.
save_backup : bool, optional
If the file was opened in update or append mode, this ensures that a
backup of the original file is saved before any changes are flushed.
The backup has the same name as the original file with ".bak" appended.
If "file.bak" already exists then "file.bak.1" is used, and so on.
cache : bool, optional
If the file name is a URL, `~astropy.utils.data.download_file` is used
to open the file. This specifies whether or not to save the file
locally in Astropy's download cache (default: `True`).
lazy_load_hdus : bool, optional
By default `~astropy.io.fits.open` will not read all the HDUs and
headers in a FITS file immediately upon opening. This is an
optimization especially useful for large files, as FITS has no way
of determining the number and offsets of all the HDUs in a file
without scanning through the file and reading all the headers.
To disable lazy loading and read all HDUs immediately (the old
behavior) use ``lazy_load_hdus=False``. This can lead to fewer
surprises--for example with lazy loading enabled, ``len(hdul)``
can be slow, as it means the entire FITS file needs to be read in
order to determine the number of HDUs. ``lazy_load_hdus=False``
ensures that all HDUs have already been loaded after the file has
been opened.
.. versionadded:: 1.3
kwargs : dict, optional
additional optional keyword arguments, possible values are:
- **uint** : bool
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
This is enabled by default so that the pseudo-unsigned
integer convention is assumed.
Note, for backward compatibility, the kwarg **uint16** may
be used instead. The kwarg was renamed when support was
added for integers of any size.
- **ignore_missing_end** : bool
Do not issue an exception when opening a file that is
missing an ``END`` card in the last header.
- **checksum** : bool, str
If `True`, verifies that both ``DATASUM`` and
``CHECKSUM`` card values (when present in the HDU header)
match the header and data of all HDUs in the file. Updates to a
file that already has a checksum will preserve and update the
existing checksums unless this argument is given a value of
'remove', in which case the CHECKSUM and DATASUM values are not
checked, and are removed when saving changes to the file.
- **disable_image_compression** : bool
If `True`, treats compressed image HDUs like normal
binary table HDUs.
- **do_not_scale_image_data** : bool
If `True`, image data is not scaled using BSCALE/BZERO values
when read.
- **character_as_bytes** : bool
Whether to return bytes for string columns. By default this is `False`
and (unicode) strings are returned, but this does not respect memory
mapping and loads the whole column in memory when accessed.
- **ignore_blank** : bool
If `True`, the BLANK keyword is ignored if present.
- **scale_back** : bool
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data.
Returns
-------
hdulist : an `HDUList` object
`HDUList` containing all of the header data units in the
file.
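
Examples
--------
A minimal, illustrative sketch (the file name here is hypothetical):

>>> from astropy.io import fits
>>> with fits.open('observation.fits') as hdul:  # doctest: +SKIP
...     hdul.info()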
"""
from .. import conf
if memmap is None:
# distinguish between True (kwarg explicitly set)
# and None (preference for memmap in config, might be ignored)
memmap = None if conf.use_memmap else False
else:
memmap = bool(memmap)
if lazy_load_hdus is None:
lazy_load_hdus = conf.lazy_load_hdus
else:
lazy_load_hdus = bool(lazy_load_hdus)
if 'uint' not in kwargs:
kwargs['uint'] = conf.enable_uint
if not name:
raise ValueError('Empty filename: {!r}'.format(name))
return HDUList.fromfile(name, mode, memmap, save_backup, cache,
lazy_load_hdus, **kwargs)
class HDUList(list, _Verify):
"""
HDU list class. This is the top-level FITS object. When a FITS
file is opened, a `HDUList` object is returned.
"""
def __init__(self, hdus=[], file=None):
"""
Construct a `HDUList` object.
Parameters
----------
hdus : sequence of HDU objects or single HDU, optional
The HDU object(s) to comprise the `HDUList`. Should be
instances of HDU classes like `ImageHDU` or `BinTableHDU`.
file : file object, bytes, optional
The opened physical file associated with the `HDUList`
or a bytes object containing the contents of the FITS
file.
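
Examples
--------
An illustrative sketch of building a list directly from HDUs:

>>> from astropy.io import fits
>>> hdul = fits.HDUList([fits.PrimaryHDU()])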
"""
if isinstance(file, bytes):
self._data = file
self._file = None
else:
self._file = file
self._data = None
self._save_backup = False
# For internal use only--the keyword args passed to fitsopen /
# HDUList.fromfile/string when opening the file
self._open_kwargs = {}
self._in_read_next_hdu = False
# If we have read all the HDUs from the file or not
# This assumes that all HDUs have been written when we first opened the
# file; we do not currently support loading additional HDUs from a file
# while it is being streamed to. In the future that might be supported
# but for now this is only used for the purpose of lazy-loading of
# existing HDUs.
if file is None:
self._read_all = True
elif self._file is not None:
# Should never attempt to read HDUs in ostream mode
self._read_all = self._file.mode == 'ostream'
else:
self._read_all = False
if hdus is None:
hdus = []
# can take one HDU, as well as a list of HDUs, as input
if isinstance(hdus, _ValidHDU):
hdus = [hdus]
elif not isinstance(hdus, (HDUList, list)):
raise TypeError("Invalid input for HDUList.")
for idx, hdu in enumerate(hdus):
if not isinstance(hdu, _BaseHDU):
raise TypeError("Element {} in the HDUList input is "
"not an HDU.".format(idx))
super().__init__(hdus)
if file is None:
# Only do this when initializing from an existing list of HDUs
# When initializing from a file, this will be handled by the
# append method after the first HDU is read
self.update_extend()
def __len__(self):
if not self._in_read_next_hdu:
self.readall()
return super().__len__()
def __repr__(self):
# In order to correctly repr an HDUList we need to load all the
# HDUs as well
self.readall()
return super().__repr__()
def __iter__(self):
# While effectively this does the same as:
# for idx in range(len(self)):
# yield self[idx]
# the more complicated structure is here to prevent the use of len(),
# which would break the lazy loading
for idx in itertools.count():
try:
yield self[idx]
except IndexError:
break
def __getitem__(self, key):
"""
Get an HDU from the `HDUList`, indexed by number or name.
"""
# If the key is a slice we need to make sure the necessary HDUs
# have been loaded before passing the slice on to super.
if isinstance(key, slice):
max_idx = key.stop
# Check for and handle the case when no maximum was
# specified (e.g. [1:]).
if max_idx is None:
# We need all of the HDUs, so load them
# and reset the maximum to the actual length.
max_idx = len(self)
# Just in case the max_idx is negative...
max_idx = self._positive_index_of(max_idx)
number_loaded = super().__len__()
if max_idx >= number_loaded:
# We need more than we have, try loading up to and including
# max_idx. Note we do not try to be clever about skipping HDUs
# even though key.step might conceivably allow it.
for i in range(number_loaded, max_idx):
# Read until max_idx or to the end of the file, whichever
# comes first.
if not self._read_next_hdu():
break
try:
hdus = super().__getitem__(key)
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
else:
return HDUList(hdus)
        # Originally this used recursion, but hypothetically a file with a
        # very large number of HDUs could blow the stack, so use a loop
# instead
try:
return self._try_while_unread_hdus(super().__getitem__,
self._positive_index_of(key))
except IndexError as e:
# Raise a more helpful IndexError if the file was not fully read.
if self._read_all:
raise e
else:
raise IndexError('HDU not found, possibly because the index '
'is out of range, or because the file was '
'closed before all HDUs were read')
def __contains__(self, item):
"""
Returns `True` if ``HDUList.index_of(item)`` succeeds.
"""
try:
self._try_while_unread_hdus(self.index_of, item)
except KeyError:
return False
return True
def __setitem__(self, key, hdu):
"""
Set an HDU to the `HDUList`, indexed by number or name.
"""
_key = self._positive_index_of(key)
if isinstance(hdu, (slice, list)):
if _is_int(_key):
raise ValueError('An element in the HDUList must be an HDU.')
for item in hdu:
if not isinstance(item, _BaseHDU):
raise ValueError('{} is not an HDU.'.format(item))
else:
if not isinstance(hdu, _BaseHDU):
raise ValueError('{} is not an HDU.'.format(hdu))
try:
self._try_while_unread_hdus(super().__setitem__, _key, hdu)
except IndexError:
raise IndexError('Extension {} is out of bound or not found.'
.format(key))
self._resize = True
self._truncate = False
def __delitem__(self, key):
"""
Delete an HDU from the `HDUList`, indexed by number or name.
"""
if isinstance(key, slice):
end_index = len(self)
else:
key = self._positive_index_of(key)
end_index = len(self) - 1
self._try_while_unread_hdus(super().__delitem__, key)
        if key == end_index or (key == -1 and not self._resize):
self._truncate = True
else:
self._truncate = False
self._resize = True
# Support the 'with' statement
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
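    # A minimal usage sketch (``fits`` refers to ``astropy.io.fits``;
    # ``'example.fits'`` is a hypothetical file):
    #
    #     with fits.open('example.fits') as hdul:
    #         hdul.info()
    #
    # ``close()`` is then called automatically when the block exits, even
    # if an exception is raised inside it.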
@classmethod
def fromfile(cls, fileobj, mode=None, memmap=None,
save_backup=False, cache=True, lazy_load_hdus=True,
**kwargs):
"""
Creates an `HDUList` instance from a file-like object.
        This is the actual implementation of ``fitsopen()``; it generally shouldn't
be used directly. Use :func:`open` instead (and see its
documentation for details of the parameters accepted by this method).
"""
return cls._readfrom(fileobj=fileobj, mode=mode, memmap=memmap,
save_backup=save_backup, cache=cache,
lazy_load_hdus=lazy_load_hdus, **kwargs)
@classmethod
def fromstring(cls, data, **kwargs):
"""
Creates an `HDUList` instance from a string or other in-memory data
buffer containing an entire FITS file. Similar to
:meth:`HDUList.fromfile`, but does not accept the mode or memmap
arguments, as they are only relevant to reading from a file on disk.
This is useful for interfacing with other libraries such as CFITSIO,
and may also be useful for streaming applications.
Parameters
----------
data : str, buffer, memoryview, etc.
A string or other memory buffer containing an entire FITS file. It
should be noted that if that memory is read-only (such as a Python
string) the returned :class:`HDUList`'s data portions will also be
read-only.
kwargs : dict
Optional keyword arguments. See
:func:`astropy.io.fits.open` for details.
Returns
-------
hdul : HDUList
An :class:`HDUList` object representing the in-memory FITS file.
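        Examples
        --------
        A minimal sketch; ``'example.fits'`` is a hypothetical file whose raw
        bytes stand in for a buffer obtained from another library or stream:
        >>> from astropy.io import fits
        >>> with open('example.fits', 'rb') as f:  # doctest: +SKIP
        ...     data = f.read()
        >>> hdul = fits.HDUList.fromstring(data)  # doctest: +SKIP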
"""
try:
# Test that the given object supports the buffer interface by
# ensuring an ndarray can be created from it
np.ndarray((), dtype='ubyte', buffer=data)
except TypeError:
raise TypeError(
'The provided object {} does not contain an underlying '
'memory buffer. fromstring() requires an object that '
'supports the buffer interface such as bytes, buffer, '
'memoryview, ndarray, etc. This restriction is to ensure '
'that efficient access to the array/table data is possible.'
''.format(data))
return cls._readfrom(data=data, **kwargs)
def fileinfo(self, index):
"""
Returns a dictionary detailing information about the locations
of the indexed HDU within any associated file. The values are
only valid after a read or write of the associated file with
no intervening changes to the `HDUList`.
Parameters
----------
index : int
Index of HDU for which info is to be returned.
Returns
-------
fileinfo : dict or None
The dictionary details information about the locations of
the indexed HDU within an associated file. Returns `None`
when the HDU is not associated with a file.
Dictionary contents:
========== ========================================================
Key Value
========== ========================================================
file File object associated with the HDU
filename Name of associated file object
filemode Mode in which the file was opened (readonly,
update, append, denywrite, ostream)
resized Flag that when `True` indicates that the data has been
resized since the last read/write so the returned values
may not be valid.
hdrLoc Starting byte location of header in file
datLoc Starting byte location of data block in file
datSpan Data size including padding
========== ========================================================
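        Examples
        --------
        A minimal sketch (assuming ``from astropy.io import fits``;
        ``'example.fits'`` is a hypothetical file):
        >>> with fits.open('example.fits') as hdul:  # doctest: +SKIP
        ...     info = hdul.fileinfo(0)
        ...     print(info['hdrLoc'], info['datLoc'], info['datSpan'])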
"""
if self._file is not None:
output = self[index].fileinfo()
if not output:
# OK, the HDU associated with this index is not yet
# tied to the file associated with the HDUList. The only way
# to get the file object is to check each of the HDU's in the
# list until we find the one associated with the file.
                f = fm = None
for hdu in self:
info = hdu.fileinfo()
if info:
f = info['file']
fm = info['filemode']
break
output = {'file': f, 'filemode': fm, 'hdrLoc': None,
'datLoc': None, 'datSpan': None}
output['filename'] = self._file.name
output['resized'] = self._wasresized()
else:
output = None
return output
def __copy__(self):
"""
Return a shallow copy of an HDUList.
Returns
-------
copy : `HDUList`
A shallow copy of this `HDUList` object.
"""
return self[:]
# Syntactic sugar for `__copy__()` magic method
copy = __copy__
def __deepcopy__(self, memo=None):
return HDUList([hdu.copy() for hdu in self])
def pop(self, index=-1):
""" Remove an item from the list and return it.
Parameters
----------
index : int, str, tuple of (string, int), optional
An integer value of ``index`` indicates the position from which
``pop()`` removes and returns an HDU. A string value or a tuple
of ``(string, int)`` functions as a key for identifying the
            HDU to be removed and returned. If ``index`` is a tuple, it is
            of the form ``(name, ver)`` where ``ver`` is an ``EXTVER``
value that must match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous the numeric index
must be used to index the duplicate HDU.
Returns
-------
hdu : HDU object
The HDU object at position indicated by ``index`` or having name
and version specified by ``index``.
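        Examples
        --------
        A minimal sketch (``hdul`` is assumed to be an existing `HDUList`);
        remove and return the last HDU, or one identified by an
        ``(EXTNAME, EXTVER)`` pair:
        >>> last = hdul.pop()  # doctest: +SKIP
        >>> sci = hdul.pop(('SCI', 1))  # doctest: +SKIP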
"""
# Make sure that HDUs are loaded before attempting to pop
self.readall()
list_index = self.index_of(index)
        return super().pop(list_index)
def insert(self, index, hdu):
"""
Insert an HDU into the `HDUList` at the given ``index``.
Parameters
----------
index : int
Index before which to insert the new HDU.
hdu : HDU object
The HDU object to insert
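        Examples
        --------
        A minimal sketch (assuming ``from astropy.io import fits``; ``hdul``
        is an existing `HDUList`); inserting at index 0 would convert the
        current primary HDU into an extension as described above:
        >>> hdul.insert(1, fits.ImageHDU())  # doctest: +SKIP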
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError('{} is not an HDU.'.format(hdu))
num_hdus = len(self)
if index == 0 or num_hdus == 0:
if num_hdus != 0:
# We are inserting a new Primary HDU so we need to
# make the current Primary HDU into an extension HDU.
if isinstance(self[0], GroupsHDU):
raise ValueError(
"The current Primary HDU is a GroupsHDU. "
"It can't be made into an extension HDU, "
"so another HDU cannot be inserted before it.")
hdu1 = ImageHDU(self[0].data, self[0].header)
# Insert it into position 1, then delete HDU at position 0.
super().insert(1, hdu1)
super().__delitem__(0)
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().insert(0, phdu)
index = 1
else:
if isinstance(hdu, GroupsHDU):
raise ValueError('A GroupsHDU must be inserted as a '
'Primary HDU.')
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
hdu = ImageHDU(hdu.data, hdu.header)
super().insert(index, hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def append(self, hdu):
"""
Append a new HDU to the `HDUList`.
Parameters
----------
hdu : HDU object
HDU to add to the `HDUList`.
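        Examples
        --------
        A minimal sketch (assuming ``from astropy.io import fits``; ``hdul``
        is an existing `HDUList`); appending an extension to an empty list
        creates the required primary HDU automatically:
        >>> hdul.append(fits.ImageHDU())  # doctest: +SKIP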
"""
if not isinstance(hdu, _BaseHDU):
raise ValueError('HDUList can only append an HDU.')
if len(self) > 0:
if isinstance(hdu, GroupsHDU):
raise ValueError(
"Can't append a GroupsHDU to a non-empty HDUList")
if isinstance(hdu, PrimaryHDU):
# You passed a Primary HDU but we need an Extension HDU
# so create an Extension HDU from the input Primary HDU.
# TODO: This isn't necessarily sufficient to copy the HDU;
# _header_offset and friends need to be copied too.
hdu = ImageHDU(hdu.data, hdu.header)
else:
if not isinstance(hdu, (PrimaryHDU, _NonstandardHDU)):
# You passed in an Extension HDU but we need a Primary
# HDU.
# If you provided an ImageHDU then we can convert it to
# a primary HDU and use that.
if isinstance(hdu, ImageHDU):
hdu = PrimaryHDU(hdu.data, hdu.header)
else:
# You didn't provide an ImageHDU so we create a
# simple Primary HDU and append that first before
# we append the new Extension HDU.
phdu = PrimaryHDU()
super().append(phdu)
super().append(hdu)
hdu._new = True
self._resize = True
self._truncate = False
# make sure the EXTEND keyword is in primary HDU if there is extension
self.update_extend()
def index_of(self, key):
"""
Get the index of an HDU from the `HDUList`.
Parameters
----------
key : int, str or tuple of (string, int)
The key identifying the HDU. If ``key`` is a tuple, it is of the
form ``(key, ver)`` where ``ver`` is an ``EXTVER`` value that must
match the HDU being searched for.
If the key is ambiguous (e.g. there are multiple 'SCI' extensions)
the first match is returned. For a more precise match use the
``(name, ver)`` pair.
If even the ``(name, ver)`` pair is ambiguous (it shouldn't be
but it's not impossible) the numeric index must be used to index
the duplicate HDU.
Returns
-------
index : int
The index of the HDU in the `HDUList`.
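        Examples
        --------
        A minimal sketch (``hdul`` is assumed to contain an extension with
        ``EXTNAME='SCI'`` and ``EXTVER=2``):
        >>> hdul.index_of('SCI')  # doctest: +SKIP
        >>> hdul.index_of(('SCI', 2))  # doctest: +SKIP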
"""
if _is_int(key):
return key
elif isinstance(key, tuple):
_key, _ver = key
else:
_key = key
_ver = None
if not isinstance(_key, str):
raise KeyError(
'{} indices must be integers, extension names as strings, '
'or (extname, version) tuples; got {}'
''.format(self.__class__.__name__, _key))
_key = (_key.strip()).upper()
found = None
for idx, hdu in enumerate(self):
name = hdu.name
if isinstance(name, str):
name = name.strip().upper()
# 'PRIMARY' should always work as a reference to the first HDU
if ((name == _key or (_key == 'PRIMARY' and idx == 0)) and
(_ver is None or _ver == hdu.ver)):
found = idx
break
if (found is None):
raise KeyError('Extension {!r} not found.'.format(key))
else:
return found
def _positive_index_of(self, key):
"""
Same as index_of, but ensures always returning a positive index
or zero.
(Really this should be called non_negative_index_of but it felt
too long.)
This means that if the key is a negative integer, we have to
convert it to the corresponding positive index. This means
knowing the length of the HDUList, which in turn means loading
all HDUs. Therefore using negative indices on HDULists is inherently
inefficient.
"""
index = self.index_of(key)
if index >= 0:
return index
if abs(index) > len(self):
raise IndexError(
'Extension {} is out of bound or not found.'.format(index))
return len(self) + index
def readall(self):
"""
Read data of all HDUs into memory.
"""
while self._read_next_hdu():
pass
@ignore_sigint
def flush(self, output_verify='fix', verbose=False):
"""
Force a write of the `HDUList` back to the file (for append and
update modes only).
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
verbose : bool
When `True`, print verbose messages
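        Examples
        --------
        A minimal sketch (assuming ``from astropy.io import fits``;
        ``'example.fits'`` is a hypothetical file opened in update mode):
        >>> with fits.open('example.fits', mode='update') as hdul:  # doctest: +SKIP
        ...     hdul[0].header['HISTORY'] = 'edited'
        ...     hdul.flush()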
"""
if self._file.mode not in ('append', 'update', 'ostream'):
warnings.warn("Flush for '{}' mode is not supported."
.format(self._file.mode), AstropyUserWarning)
return
if self._save_backup and self._file.mode in ('append', 'update'):
filename = self._file.name
if os.path.exists(filename):
                # If the file doesn't actually exist anymore for some reason
# then there's no point in trying to make a backup
backup = filename + '.bak'
idx = 1
while os.path.exists(backup):
backup = filename + '.bak.' + str(idx)
idx += 1
warnings.warn('Saving a backup of {} to {}.'.format(
filename, backup), AstropyUserWarning)
try:
shutil.copy(filename, backup)
except OSError as exc:
raise OSError('Failed to save backup to destination {}: '
'{}'.format(filename, exc))
self.verify(option=output_verify)
if self._file.mode in ('append', 'ostream'):
for hdu in self:
if verbose:
try:
extver = str(hdu._header['extver'])
except KeyError:
extver = ''
# only append HDU's which are "new"
if hdu._new:
hdu._prewriteto(checksum=hdu._output_checksum)
with _free_space_check(self):
hdu._writeto(self._file)
if verbose:
print('append HDU', hdu.name, extver)
hdu._new = False
hdu._postwriteto()
elif self._file.mode == 'update':
self._flush_update()
def update_extend(self):
"""
Make sure that if the primary header needs the keyword ``EXTEND`` that
it has it and it is correct.
"""
if not len(self):
return
if not isinstance(self[0], PrimaryHDU):
# A PrimaryHDU will be automatically inserted at some point, but it
# might not have been added yet
return
hdr = self[0].header
def get_first_ext():
try:
return self[1]
except IndexError:
return None
if 'EXTEND' in hdr:
if not hdr['EXTEND'] and get_first_ext() is not None:
hdr['EXTEND'] = True
elif get_first_ext() is not None:
if hdr['NAXIS'] == 0:
hdr.set('EXTEND', True, after='NAXIS')
else:
n = hdr['NAXIS']
hdr.set('EXTEND', True, after='NAXIS' + str(n))
@deprecated_renamed_argument('clobber', 'overwrite', '2.0')
def writeto(self, fileobj, output_verify='exception', overwrite=False,
checksum=False):
"""
Write the `HDUList` to a new file.
Parameters
----------
fileobj : file path, file object or file-like object
File to write to. If a file object, must be opened in a
writeable mode.
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
overwrite : bool, optional
If ``True``, overwrite the output file if it exists. Raises an
``OSError`` if ``False`` and the output file exists. Default is
``False``.
.. versionchanged:: 1.3
``overwrite`` replaces the deprecated ``clobber`` argument.
checksum : bool
When `True` adds both ``DATASUM`` and ``CHECKSUM`` cards
            to the headers of all HDUs written to the file.
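        Examples
        --------
        A minimal sketch (assuming ``from astropy.io import fits``;
        ``'new.fits'`` is a hypothetical output path):
        >>> hdul = fits.HDUList([fits.PrimaryHDU()])  # doctest: +SKIP
        >>> hdul.writeto('new.fits', overwrite=True)  # doctest: +SKIP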
"""
if (len(self) == 0):
warnings.warn("There is nothing to write.", AstropyUserWarning)
return
self.verify(option=output_verify)
# make sure the EXTEND keyword is there if there is extension
self.update_extend()
# make note of whether the input file object is already open, in which
# case we should not close it after writing (that should be the job
# of the caller)
closed = isinstance(fileobj, str) or fileobj_closed(fileobj)
# writeto is only for writing a new file from scratch, so the most
# sensible mode to require is 'ostream'. This can accept an open
# file object that's open to write only, or in append/update modes
# but only if the file doesn't exist.
fileobj = _File(fileobj, mode='ostream', overwrite=overwrite)
hdulist = self.fromfile(fileobj)
try:
dirname = os.path.dirname(hdulist._file.name)
except AttributeError:
dirname = None
with _free_space_check(self, dirname=dirname):
for hdu in self:
hdu._prewriteto(checksum=checksum)
hdu._writeto(hdulist._file)
hdu._postwriteto()
hdulist.close(output_verify=output_verify, closed=closed)
def close(self, output_verify='exception', verbose=False, closed=True):
"""
Close the associated FITS file and memmap object, if any.
Parameters
----------
output_verify : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``+warn``, or ``+exception"
(e.g. ``"fix+warn"``). See :ref:`verify` for more info.
verbose : bool
When `True`, print out verbose messages.
closed : bool
When `True`, close the underlying file object.
"""
try:
if (self._file and self._file.mode in ('append', 'update')
and not self._file.closed):
self.flush(output_verify=output_verify, verbose=verbose)
finally:
if self._file and closed and hasattr(self._file, 'close'):
self._file.close()
# Give individual HDUs an opportunity to do on-close cleanup
for hdu in self:
hdu._close(closed=closed)
def info(self, output=None):
"""
Summarize the info of the HDUs in this `HDUList`.
        Note that by default this function prints its results to the console
        and does not return a value; pass ``output=False`` to get the summary
        information back as a list of tuples instead.
Parameters
----------
output : file, bool, optional
A file-like object to write the output to. If `False`, does not
output to a file and instead returns a list of tuples representing
the HDU info. Writes to ``sys.stdout`` by default.
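        Examples
        --------
        A minimal sketch (assuming ``from astropy.io import fits``;
        ``'example.fits'`` is a hypothetical file):
        >>> with fits.open('example.fits') as hdul:  # doctest: +SKIP
        ...     hdul.info()                     # prints to sys.stdout
        ...     rows = hdul.info(output=False)  # list of per-HDU tuples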
"""
if output is None:
output = sys.stdout
if self._file is None:
name = '(No file associated with this HDUList)'
else:
name = self._file.name
results = ['Filename: {}'.format(name),
'No. Name Ver Type Cards Dimensions Format']
format = '{:3d} {:10} {:3} {:11} {:5d} {} {} {}'
default = ('', '', '', 0, (), '', '')
for idx, hdu in enumerate(self):
summary = hdu._summary()
if len(summary) < len(default):
summary += default[len(summary):]
summary = (idx,) + summary
if output:
results.append(format.format(*summary))
else:
results.append(summary)
if output:
output.write('\n'.join(results))
output.write('\n')
output.flush()
else:
return results[2:]
def filename(self):
"""
Return the file name associated with the HDUList object if one exists.
Otherwise returns None.
Returns
-------
        filename : str or None
            A string containing the file name associated with the `HDUList`
            object if an association exists; otherwise `None`.
"""
if self._file is not None:
if hasattr(self._file, 'name'):
return self._file.name
return None
@classmethod
def _readfrom(cls, fileobj=None, data=None, mode=None,
memmap=None, save_backup=False, cache=True,
lazy_load_hdus=True, **kwargs):
"""
        Provides the common implementation for HDUList.fromfile and
HDUList.fromstring, both of which wrap this method, as their
implementations are largely the same.
"""
if fileobj is not None:
if not isinstance(fileobj, _File):
# instantiate a FITS file object (ffo)
fileobj = _File(fileobj, mode=mode, memmap=memmap, cache=cache)
# The Astropy mode is determined by the _File initializer if the
# supplied mode was None
mode = fileobj.mode
hdulist = cls(file=fileobj)
else:
if mode is None:
# The default mode
mode = 'readonly'
hdulist = cls(file=data)
# This method is currently only called from HDUList.fromstring and
# HDUList.fromfile. If fileobj is None then this must be the
# fromstring case; the data type of ``data`` will be checked in the
# _BaseHDU.fromstring call.
hdulist._save_backup = save_backup
hdulist._open_kwargs = kwargs
if fileobj is not None and fileobj.writeonly:
# Output stream--not interested in reading/parsing
# the HDUs--just writing to the output file
return hdulist
# Make sure at least the PRIMARY HDU can be read
read_one = hdulist._read_next_hdu()
# If we're trying to read only and no header units were found,
# raise an exception
if not read_one and mode in ('readonly', 'denywrite'):
# Close the file if necessary (issue #6168)
if hdulist._file.close_on_error:
hdulist._file.close()
raise OSError('Empty or corrupt FITS file')
if not lazy_load_hdus:
# Go ahead and load all HDUs
while hdulist._read_next_hdu():
pass
# initialize/reset attributes to be used in "update/append" mode
hdulist._resize = False
hdulist._truncate = False
return hdulist
def _try_while_unread_hdus(self, func, *args, **kwargs):
"""
Attempt an operation that accesses an HDU by index/name
that can fail if not all HDUs have been read yet. Keep
reading HDUs until the operation succeeds or there are no
more HDUs to read.
"""
while True:
try:
return func(*args, **kwargs)
except Exception:
if self._read_next_hdu():
continue
else:
raise
def _read_next_hdu(self):
"""
Lazily load a single HDU from the fileobj or data string the `HDUList`
was opened from, unless no further HDUs are found.
Returns True if a new HDU was loaded, or False otherwise.
"""
if self._read_all:
return False
saved_compression_enabled = compressed.COMPRESSION_ENABLED
fileobj, data, kwargs = self._file, self._data, self._open_kwargs
if fileobj is not None and fileobj.closed:
return False
try:
self._in_read_next_hdu = True
if ('disable_image_compression' in kwargs and
kwargs['disable_image_compression']):
compressed.COMPRESSION_ENABLED = False
            # Read the next HDU; HDUs are loaded lazily, one at a time
try:
if fileobj is not None:
try:
# Make sure we're back to the end of the last read
# HDU
if len(self) > 0:
last = self[len(self) - 1]
if last._data_offset is not None:
offset = last._data_offset + last._data_size
fileobj.seek(offset, os.SEEK_SET)
hdu = _BaseHDU.readfrom(fileobj, **kwargs)
except EOFError:
self._read_all = True
return False
except OSError:
# Close the file: see
# https://github.com/astropy/astropy/issues/6168
#
if self._file.close_on_error:
self._file.close()
if fileobj.writeonly:
self._read_all = True
return False
else:
raise
else:
if not data:
self._read_all = True
return False
hdu = _BaseHDU.fromstring(data, **kwargs)
self._data = data[hdu._data_offset + hdu._data_size:]
super().append(hdu)
if len(self) == 1:
# Check for an extension HDU and update the EXTEND
# keyword of the primary HDU accordingly
self.update_extend()
hdu._new = False
if 'checksum' in kwargs:
hdu._output_checksum = kwargs['checksum']
            # check in case there is extra space after the last HDU or a
            # corrupted HDU
except (VerifyError, ValueError) as exc:
warnings.warn(
'Error validating header for HDU #{} (note: Astropy '
'uses zero-based indexing).\n{}\n'
'There may be extra bytes after the last HDU or the '
'file is corrupted.'.format(
len(self), indent(str(exc))), VerifyWarning)
del exc
self._read_all = True
return False
finally:
compressed.COMPRESSION_ENABLED = saved_compression_enabled
self._in_read_next_hdu = False
return True
def _verify(self, option='warn'):
errs = _ErrList([], unit='HDU')
# the first (0th) element must be a primary HDU
if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)) and \
(not isinstance(self[0], _NonstandardHDU)):
err_text = "HDUList's 0th element is not a primary HDU."
fix_text = 'Fixed by inserting one as 0th HDU.'
def fix(self=self):
self.insert(0, PrimaryHDU())
err = self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix)
errs.append(err)
if len(self) > 1 and ('EXTEND' not in self[0].header or
self[0].header['EXTEND'] is not True):
err_text = ('Primary HDU does not contain an EXTEND keyword '
'equal to T even though there are extension HDUs.')
fix_text = 'Fixed by inserting or updating the EXTEND keyword.'
def fix(header=self[0].header):
naxis = header['NAXIS']
if naxis == 0:
after = 'NAXIS'
else:
after = 'NAXIS' + str(naxis)
header.set('EXTEND', value=True, after=after)
errs.append(self.run_option(option, err_text=err_text,
fix_text=fix_text, fix=fix))
# each element calls their own verify
for idx, hdu in enumerate(self):
if idx > 0 and (not isinstance(hdu, ExtensionHDU)):
err_text = ("HDUList's element {} is not an "
"extension HDU.".format(str(idx)))
err = self.run_option(option, err_text=err_text, fixable=False)
errs.append(err)
else:
result = hdu._verify(option)
if result:
errs.append(result)
return errs
def _flush_update(self):
"""Implements flushing changes to a file in update mode."""
for hdu in self:
            # Need to call _prewriteto() for each HDU first to determine if
# resizing will be necessary
hdu._prewriteto(checksum=hdu._output_checksum, inplace=True)
try:
self._wasresized()
# if the HDUList is resized, need to write out the entire contents of
# the hdulist to the file.
if self._resize or self._file.compression:
self._flush_resize()
else:
# if not resized, update in place
for hdu in self:
hdu._writeto(self._file, inplace=True)
# reset the modification attributes after updating
for hdu in self:
hdu._header._modified = False
finally:
for hdu in self:
hdu._postwriteto()
def _flush_resize(self):
"""
        Implements flushing changes in update mode when parts of one or more HDUs
need to be resized.
"""
old_name = self._file.name
old_memmap = self._file.memmap
name = _tmp_name(old_name)
if not self._file.file_like:
old_mode = os.stat(old_name).st_mode
# The underlying file is an actual file object. The HDUList is
# resized, so we need to write it to a tmp file, delete the
# original file, and rename the tmp file to the original file.
if self._file.compression == 'gzip':
new_file = gzip.GzipFile(name, mode='ab+')
elif self._file.compression == 'bzip2':
new_file = bz2.BZ2File(name, mode='w')
else:
new_file = name
with self.fromfile(new_file, mode='append') as hdulist:
for hdu in self:
hdu._writeto(hdulist._file, inplace=True, copy=True)
if sys.platform.startswith('win'):
                    # Collect a list of open mmaps to the data; this will be
# used later. See below.
mmaps = [(idx, _get_array_mmap(hdu.data), hdu.data)
for idx, hdu in enumerate(self) if hdu._has_data]
hdulist._file.close()
self._file.close()
if sys.platform.startswith('win'):
# Close all open mmaps to the data. This is only necessary on
# Windows, which will not allow a file to be renamed or deleted
# until all handles to that file have been closed.
for idx, mmap, arr in mmaps:
if mmap is not None:
mmap.close()
os.remove(self._file.name)
# reopen the renamed new file with "update" mode
os.rename(name, old_name)
os.chmod(old_name, old_mode)
if isinstance(new_file, gzip.GzipFile):
old_file = gzip.GzipFile(old_name, mode='rb+')
else:
old_file = old_name
ffo = _File(old_file, mode='update', memmap=old_memmap)
self._file = ffo
for hdu in self:
# Need to update the _file attribute and close any open mmaps
# on each HDU
if hdu._has_data and _get_array_mmap(hdu.data) is not None:
del hdu.data
hdu._file = ffo
if sys.platform.startswith('win'):
# On Windows, all the original data mmaps were closed above.
# However, it's possible that the user still has references to
# the old data which would no longer work (possibly even cause
# a segfault if they try to access it). This replaces the
# buffers used by the original arrays with the buffers of mmap
# arrays created from the new file. This seems to work, but
# it's a flaming hack and carries no guarantees that it won't
# lead to odd behavior in practice. Better to just not keep
# references to data from files that had to be resized upon
# flushing (on Windows--again, this is no problem on Linux).
for idx, mmap, arr in mmaps:
if mmap is not None:
arr.data = self[idx].data.data
del mmaps # Just to be sure
else:
# The underlying file is not a file object, it is a file like
# object. We can't write out to a file, we must update the file
# like object in place. To do this, we write out to a temporary
# file, then delete the contents in our file like object, then
# write the contents of the temporary file to the now empty file
# like object.
self.writeto(name)
hdulist = self.fromfile(name)
ffo = self._file
ffo.truncate(0)
ffo.seek(0)
for hdu in hdulist:
hdu._writeto(ffo, inplace=True, copy=True)
# Close the temporary file and delete it.
hdulist.close()
os.remove(hdulist._file.name)
# reset the resize attributes after updating
self._resize = False
self._truncate = False
for hdu in self:
hdu._header._modified = False
hdu._new = False
hdu._file = ffo
def _wasresized(self, verbose=False):
"""
Determine if any changes to the HDUList will require a file resize
when flushing the file.
        Side effect of setting the object's ``_resize`` attribute.
"""
if not self._resize:
# determine if any of the HDU is resized
for hdu in self:
# Header:
nbytes = len(str(hdu._header))
if nbytes != (hdu._data_offset - hdu._header_offset):
self._resize = True
self._truncate = False
if verbose:
print('One or more header is resized.')
break
# Data:
if not hdu._has_data:
continue
nbytes = hdu.size
nbytes = nbytes + _pad_length(nbytes)
if nbytes != hdu._data_size:
self._resize = True
self._truncate = False
if verbose:
print('One or more data area is resized.')
break
if self._truncate:
try:
self._file.truncate(hdu._data_offset + hdu._data_size)
except OSError:
self._resize = True
self._truncate = False
return self._resize
|
d9b074bbabc4299b1a0a871b0bbcf3f998bc4c86bd4d7ae30998a7db213c9e16 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# NOTE: The configuration for the package, including the name, version, and
# other information are set in the setup.cfg file. Here we mainly set up
# setup_requires and install_requires since these are determined
# programmatically.
import os
import builtins
import ah_bootstrap # noqa
from astropy_helpers.distutils_helpers import is_distutils_display_option
from astropy_helpers.setup_helpers import setup
from setuptools.config import read_configuration
# We set up the following variable because we then use this in astropy/__init__.py
# to make sure that we aren't importing astropy during the setup process (we used
# to do this)
builtins._ASTROPY_CORE_SETUP_ = True
if is_distutils_display_option():
# Avoid installing setup_requires dependencies if the user just
# queries for information
setup_requires = []
else:
setup_requires = read_configuration('setup.cfg')['options']['setup_requires']
# Make sure we have the packages needed for building astropy, but do not
# require them when installing from an sdist, as the generated C files are included.
if not os.path.exists(os.path.join(os.path.dirname(__file__), 'PKG-INFO')):
setup_requires.extend(['cython>=0.21', 'jinja2>=2.7'])
setup(setup_requires=setup_requires)
|
e14506425551af91ac54eb0b17d607671b58d42e684faa43dfcd78f7fa6234de | # Licensed under a 3-clause BSD style license - see LICENSE.rst
pytest_plugins = [
'astropy.tests.plugins.display',
]
|
c45cdb74c430295950cb963d72e651567f253a209e656e20a3d0c003212fdf38 | """
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]``. The presence of
that section, and options therein, determines the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken and by default the system-installed version
of astropy-helpers will be used (however, ``ah_bootstrap.use_astropy_helpers``
may be called manually from within the setup.py script).
This behavior can also be controlled using the ``--auto-use`` and
``--no-auto-use`` command-line flags. For clarity, an alias for
``--no-auto-use`` is ``--use-system-astropy-helpers``, and we recommend using
the latter if needed.
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
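As an illustrative sketch, a project that vendors astropy_helpers as a git
submodule in a directory named ``astropy_helpers`` (a hypothetical but common
layout) would enable the bootstrap with a stanza like::
    [ah_bootstrap]
    auto_use = True
    path = astropy_helpers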
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import io
import locale
import os
import re
import subprocess as sp
import sys
from distutils import log
from distutils.debug import DEBUG
from configparser import ConfigParser, RawConfigParser
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
# This is the minimum Python version required for astropy-helpers
__minimum_python_version__ = (3, 5)
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
UPPER_VERSION_EXCLUSIVE = None
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
('auto_use', bool), ('path', str), ('download_if_needed', bool),
('index_url', str), ('use_git', bool), ('offline', bool),
('auto_upgrade', bool)
]
# Start off by parsing the setup.cfg file
SETUP_CFG = ConfigParser()
if os.path.exists('setup.cfg'):
try:
SETUP_CFG.read('setup.cfg')
except Exception as e:
if DEBUG:
raise
log.error(
"Error reading setup.cfg: {0!r}\n{1} will not be "
"automatically bootstrapped and package installation may fail."
"\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
# We used package_name in the package template for a while instead of name
if SETUP_CFG.has_option('metadata', 'name'):
parent_package = SETUP_CFG.get('metadata', 'name')
elif SETUP_CFG.has_option('metadata', 'package_name'):
parent_package = SETUP_CFG.get('metadata', 'package_name')
else:
parent_package = None
if SETUP_CFG.has_option('options', 'python_requires'):
python_requires = SETUP_CFG.get('options', 'python_requires')
# The python_requires key has a syntax that can be parsed by SpecifierSet
# in the packaging package. However, we don't want to have to depend on that
# package, so instead we can use setuptools (which bundles packaging). We
# have to add 'python' to parse it with Requirement.
from pkg_resources import Requirement
req = Requirement.parse('python' + python_requires)
# We want the Python version as a string, which we can get from the platform module
import platform
    # strip off trailing '+' in case this is a dev install of python
python_version = platform.python_version().strip('+')
# allow pre-releases to count as 'new enough'
if not req.specifier.contains(python_version, True):
if parent_package is None:
message = "ERROR: Python {} is required by this package\n".format(req.specifier)
else:
message = "ERROR: Python {} is required by {}\n".format(req.specifier, parent_package)
sys.stderr.write(message)
sys.exit(1)
if sys.version_info < __minimum_python_version__:
if parent_package is None:
message = "ERROR: Python {} or later is required by astropy-helpers\n".format(
__minimum_python_version__)
else:
message = "ERROR: Python {} or later is required by astropy-helpers for {}\n".format(
__minimum_python_version__, parent_package)
sys.stderr.write(message)
sys.exit(1)
_str_types = (str, bytes)
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Check that setuptools 30.3 or later is present
from distutils.version import LooseVersion
try:
import setuptools
assert LooseVersion(setuptools.__version__) >= LooseVersion('30.3')
except (ImportError, AssertionError):
sys.stderr.write("ERROR: setuptools 30.3 or later is required by astropy-helpers\n")
sys.exit(1)
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_bootstrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
import typing # noqa
except ImportError:
pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
import setuptools.py31compat # noqa
except ImportError:
pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
except:
    # Ignore if this fails for *any* reason
pass
# End compatibility imports...
class _Bootstrapper(object):
"""
Bootstrapper implementation. See ``use_astropy_helpers`` for parameter
documentation.
"""
def __init__(self, path=None, index_url=None, use_git=None, offline=None,
download_if_needed=None, auto_upgrade=None):
if path is None:
path = PACKAGE_NAME
if not (isinstance(path, _str_types) or path is False):
raise TypeError('path must be a string or False')
if not isinstance(path, str):
fs_encoding = sys.getfilesystemencoding()
path = path.decode(fs_encoding) # path to unicode
self.path = path
# Set other option attributes, using defaults where necessary
self.index_url = index_url if index_url is not None else INDEX_URL
self.offline = offline if offline is not None else OFFLINE
# If offline=True, override download and auto-upgrade
if self.offline:
download_if_needed = False
auto_upgrade = False
self.download = (download_if_needed
if download_if_needed is not None
else DOWNLOAD_IF_NEEDED)
self.auto_upgrade = (auto_upgrade
if auto_upgrade is not None else AUTO_UPGRADE)
# If this is a release then the .git directory will not exist so we
# should not use git.
git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
if use_git is None and not git_dir_exists:
use_git = False
self.use_git = use_git if use_git is not None else USE_GIT
# Declared as False by default--later we check if astropy-helpers can be
# upgraded from PyPI, but only if not using a source distribution (as in
# the case of import from a git submodule)
self.is_submodule = False
@classmethod
def main(cls, argv=None):
if argv is None:
argv = sys.argv
config = cls.parse_config()
config.update(cls.parse_command_line(argv))
auto_use = config.pop('auto_use', False)
bootstrapper = cls(**config)
if auto_use:
# Run the bootstrapper, otherwise the setup.py is using the old
# use_astropy_helpers() interface, in which case it will run the
# bootstrapper manually after reconfiguring it.
bootstrapper.run()
return bootstrapper
@classmethod
def parse_config(cls):
if not SETUP_CFG.has_section('ah_bootstrap'):
return {}
config = {}
for option, type_ in CFG_OPTIONS:
if not SETUP_CFG.has_option('ah_bootstrap', option):
continue
if type_ is bool:
value = SETUP_CFG.getboolean('ah_bootstrap', option)
else:
value = SETUP_CFG.get('ah_bootstrap', option)
config[option] = value
return config
@classmethod
def parse_command_line(cls, argv=None):
if argv is None:
argv = sys.argv
config = {}
# For now we just pop recognized ah_bootstrap options out of the
# arg list. This is imperfect; in the unlikely case that a setup.py
# custom command or even custom Distribution class defines an argument
# of the same name then we will break that. However there's a catch22
# here that we can't just do full argument parsing right here, because
# we don't yet know *how* to parse all possible command-line arguments.
if '--no-git' in argv:
config['use_git'] = False
argv.remove('--no-git')
if '--offline' in argv:
config['offline'] = True
argv.remove('--offline')
if '--auto-use' in argv:
config['auto_use'] = True
argv.remove('--auto-use')
if '--no-auto-use' in argv:
config['auto_use'] = False
argv.remove('--no-auto-use')
if '--use-system-astropy-helpers' in argv:
config['auto_use'] = False
argv.remove('--use-system-astropy-helpers')
return config
def run(self):
strategies = ['local_directory', 'local_file', 'index']
dist = None
# First, remove any previously imported versions of astropy_helpers;
# this is necessary for nested installs where one package's installer
# is installing another package via setuptools.sandbox.run_setup, as in
# the case of setup_requires
for key in list(sys.modules):
try:
if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
del sys.modules[key]
except AttributeError:
# Sometimes mysterious non-string things can turn up in
# sys.modules
continue
# Check to see if the path is a submodule
self.is_submodule = self._check_submodule()
for strategy in strategies:
method = getattr(self, 'get_{0}_dist'.format(strategy))
dist = method()
if dist is not None:
break
else:
raise _AHBootstrapSystemExit(
"No source found for the {0!r} package; {0} must be "
"available and importable as a prerequisite to building "
"or installing this package.".format(PACKAGE_NAME))
# This is a bit hacky, but if astropy_helpers was loaded from a
# directory/submodule its Distribution object gets a "precedence" of
# "DEVELOP_DIST". However, in other cases it gets a precedence of
# "EGG_DIST". However, when activing the distribution it will only be
# placed early on sys.path if it is treated as an EGG_DIST, so always
# do that
dist = dist.clone(precedence=pkg_resources.EGG_DIST)
# Otherwise we found a version of astropy-helpers, so we're done
        # Just activate the found distribution on sys.path--if we did a
# download this usually happens automatically but it doesn't hurt to
# do it again
# Note: Adding the dist to the global working set also activates it
# (makes it importable on sys.path) by default.
try:
pkg_resources.working_set.add(dist, replace=True)
except TypeError:
# Some (much) older versions of setuptools do not have the
# replace=True option here. These versions are old enough that all
# bets may be off anyways, but it's easy enough to work around just
# in case...
if dist.key in pkg_resources.working_set.by_key:
del pkg_resources.working_set.by_key[dist.key]
pkg_resources.working_set.add(dist)
@property
def config(self):
"""
A `dict` containing the options this `_Bootstrapper` was configured
with.
"""
return dict((optname, getattr(self, optname))
for optname, _ in CFG_OPTIONS if hasattr(self, optname))
def get_local_directory_dist(self):
"""
Handle importing a vendored package from a subdirectory of the source
distribution.
"""
if not os.path.isdir(self.path):
return
log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
'submodule' if self.is_submodule else 'directory',
self.path))
dist = self._directory_import()
if dist is None:
log.warn(
'The requested path {0!r} for importing {1} does not '
'exist, or does not contain a copy of the {1} '
'package.'.format(self.path, PACKAGE_NAME))
elif self.auto_upgrade and not self.is_submodule:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_local_file_dist(self):
"""
Handle importing from a source archive; this also uses setup_requires
but points easy_install directly to the source archive.
"""
if not os.path.isfile(self.path):
return
log.info('Attempting to unpack and import astropy_helpers from '
'{0!r}'.format(self.path))
try:
dist = self._do_download(find_links=[self.path])
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to import {0} from the specified archive {1!r}: '
'{2}'.format(PACKAGE_NAME, self.path, str(e)))
dist = None
if dist is not None and self.auto_upgrade:
# A version of astropy-helpers was found on the available path, but
# check to see if a bugfix release is available on PyPI
upgrade = self._do_upgrade(dist)
if upgrade is not None:
dist = upgrade
return dist
def get_index_dist(self):
if not self.download:
log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
return None
log.warn(
"Downloading {0!r}; run setup.py with the --offline option to "
"force offline installation.".format(DIST_NAME))
try:
dist = self._do_download()
except Exception as e:
if DEBUG:
raise
log.warn(
'Failed to download and/or install {0!r} from {1!r}:\n'
'{2}'.format(DIST_NAME, self.index_url, str(e)))
dist = None
# No need to run auto-upgrade here since we've already presumably
# gotten the most up-to-date version from the package index
return dist
def _directory_import(self):
"""
Import astropy_helpers from the given path, which will be added to
sys.path.
        Returns the resulting ``Distribution`` object if the import
        succeeded, or `None` otherwise.
"""
        # Returns a Distribution object on success, or None on failure (in
        # which case the caller may fall back to other strategies)
path = os.path.abspath(self.path)
        # Use an empty WorkingSet rather than the main
# pkg_resources.working_set, since on older versions of setuptools this
# will invoke a VersionConflict when trying to install an upgrade
ws = pkg_resources.WorkingSet([])
ws.add_entry(path)
dist = ws.by_key.get(DIST_NAME)
if dist is None:
# We didn't find an egg-info/dist-info in the given path, but if a
# setup.py exists we can generate it
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
# We use subprocess instead of run_setup from setuptools to
# avoid segmentation faults - see the following for more details:
# https://github.com/cython/cython/issues/2104
sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path)
for dist in pkg_resources.find_distributions(path, True):
# There should be only one...
return dist
return dist
def _do_download(self, version='', find_links=None):
if find_links:
allow_hosts = ''
index_url = None
else:
allow_hosts = None
index_url = self.index_url
# Annoyingly, setuptools will not handle other arguments to
# Distribution (such as options) before handling setup_requires, so it
# is not straightforward to programmatically augment the arguments which
# are passed to easy_install
class _Distribution(Distribution):
def get_option_dict(self, command_name):
opts = Distribution.get_option_dict(self, command_name)
if command_name == 'easy_install':
if find_links is not None:
opts['find_links'] = ('setup script', find_links)
if index_url is not None:
opts['index_url'] = ('setup script', index_url)
if allow_hosts is not None:
opts['allow_hosts'] = ('setup script', allow_hosts)
return opts
if version:
req = '{0}=={1}'.format(DIST_NAME, version)
else:
if UPPER_VERSION_EXCLUSIVE is None:
req = DIST_NAME
else:
req = '{0}<{1}'.format(DIST_NAME, UPPER_VERSION_EXCLUSIVE)
attrs = {'setup_requires': [req]}
# NOTE: we need to parse the config file (e.g. setup.cfg) to make sure
# it honours the options set in the [easy_install] section, and we need
# to explicitly fetch the requirement eggs as setup_requires does not
# get honored in recent versions of setuptools:
# https://github.com/pypa/setuptools/issues/1273
try:
context = _verbose if DEBUG else _silence
with context():
dist = _Distribution(attrs=attrs)
try:
dist.parse_config_files(ignore_option_errors=True)
dist.fetch_build_eggs(req)
except TypeError:
# On older versions of setuptools, ignore_option_errors
# doesn't exist, and the above two lines are not needed
# so we can just continue
pass
# If the setup_requires succeeded it will have added the new dist to
# the main working_set
return pkg_resources.working_set.by_key.get(DIST_NAME)
except Exception as e:
if DEBUG:
raise
msg = 'Error retrieving {0} from {1}:\n{2}'
if find_links:
source = find_links[0]
elif index_url != INDEX_URL:
source = index_url
else:
source = 'PyPI'
raise Exception(msg.format(DIST_NAME, source, repr(e)))
def _do_upgrade(self, dist):
        # Build up a requirement for a higher bugfix release but lower than
        # the next minor release (so API compatibility is guaranteed)
next_version = _next_version(dist.parsed_version)
req = pkg_resources.Requirement.parse(
'{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
package_index = PackageIndex(index_url=self.index_url)
upgrade = package_index.obtain(req)
if upgrade is not None:
return self._do_download(version=upgrade.version)
def _check_submodule(self):
"""
Check if the given path is a git submodule.
See the docstrings for ``_check_submodule_using_git`` and
``_check_submodule_no_git`` for further details.
"""
if (self.path is None or
(os.path.exists(self.path) and not os.path.isdir(self.path))):
return False
if self.use_git:
return self._check_submodule_using_git()
else:
return self._check_submodule_no_git()
def _check_submodule_using_git(self):
"""
Check if the given path is a git submodule. If so, attempt to initialize
and/or update the submodule if needed.
This function makes calls to the ``git`` command in subprocesses. The
        ``_check_submodule_no_git`` function uses pure Python to check if the given
path looks like a git submodule, but it cannot perform updates.
"""
cmd = ['git', 'submodule', 'status', '--', self.path]
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except _CommandNotFound:
# The git command simply wasn't found; this is most likely the
# case on user systems that don't have git and are simply
# trying to install the package from PyPI or a source
# distribution. Silently ignore this case and simply don't try
# to use submodules
return False
stderr = stderr.strip()
if returncode != 0 and stderr:
# Unfortunately the return code alone cannot be relied on, as
# earlier versions of git returned 0 even if the requested submodule
# does not exist
# This is a warning that occurs in perl (from running git submodule)
# which only occurs with a malformatted locale setting which can
# happen sometimes on OSX. See again
# https://github.com/astropy/astropy/issues/2749
perl_warning = ('perl: warning: Falling back to the standard locale '
'("C").')
if not stderr.strip().endswith(perl_warning):
# Some other unknown error condition occurred
log.warn('git submodule command failed '
'unexpectedly:\n{0}'.format(stderr))
return False
# Output of `git submodule status` is as follows:
#
        # 1. Status indicator: '-' if the submodule is uninitialized, '+' if
# submodule is initialized but is not at the commit currently indicated
# in .gitmodules (and thus needs to be updated), or 'U' if the
# submodule is in an unstable state (i.e. has merge conflicts)
#
# 2. SHA-1 hash of the current commit of the submodule (we don't really
# need this information but it's useful for checking that the output is
# correct)
#
# 3. The output of `git describe` for the submodule's current commit
# hash (this includes for example what branches the commit is on) but
# only if the submodule is initialized. We ignore this information for
# now
_git_submodule_status_re = re.compile(
            r'^(?P<status>[+\-U ])(?P<commit>[0-9a-f]{40}) '  # '-' escaped so it is not a range
r'(?P<submodule>\S+)( .*)?$')
# The stdout should only contain one line--the status of the
# requested submodule
m = _git_submodule_status_re.match(stdout)
if m:
# Yes, the path *is* a git submodule
self._update_submodule(m.group('submodule'), m.group('status'))
return True
else:
log.warn(
'Unexpected output from `git submodule status`:\n{0}\n'
'Will attempt import from {1!r} regardless.'.format(
stdout, self.path))
return False
def _check_submodule_no_git(self):
"""
Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
to determine if the supplied path is a git submodule, and does not exec any
subprocesses.
This can only determine if a path is a submodule--it does not perform
updates, etc. This function may need to be updated if the format of the
.gitmodules file is changed between git versions.
"""
gitmodules_path = os.path.abspath('.gitmodules')
if not os.path.isfile(gitmodules_path):
return False
# This is a minimal reader for gitconfig-style files. It handles a few of
# the quirks that make gitconfig files incompatible with ConfigParser-style
# files, but does not support the full gitconfig syntax (just enough
# needed to read a .gitmodules file).
gitmodules_fileobj = io.StringIO()
# Must use io.open for cross-Python-compatible behavior wrt unicode
with io.open(gitmodules_path) as f:
for line in f:
# gitconfig files are more flexible with leading whitespace; just
# go ahead and remove it
line = line.lstrip()
# comments can start with either # or ;
                if line and line[0] in ('#', ';'):
continue
gitmodules_fileobj.write(line)
gitmodules_fileobj.seek(0)
cfg = RawConfigParser()
try:
            cfg.read_file(gitmodules_fileobj)
except Exception as exc:
log.warn('Malformatted .gitmodules file: {0}\n'
'{1} cannot be assumed to be a git submodule.'.format(
exc, self.path))
return False
for section in cfg.sections():
if not cfg.has_option(section, 'path'):
continue
submodule_path = cfg.get(section, 'path').rstrip(os.sep)
if submodule_path == self.path.rstrip(os.sep):
return True
return False
def _update_submodule(self, submodule, status):
if status == ' ':
# The submodule is up to date; no action necessary
return
elif status == '-':
if self.offline:
raise _AHBootstrapSystemExit(
"Cannot initialize the {0} submodule in --offline mode; "
"this requires being able to clone the submodule from an "
"online repository.".format(submodule))
cmd = ['update', '--init']
action = 'Initializing'
elif status == '+':
cmd = ['update']
action = 'Updating'
if self.offline:
cmd.append('--no-fetch')
elif status == 'U':
raise _AHBootstrapSystemExit(
'Error: Submodule {0} contains unresolved merge conflicts. '
'Please complete or abandon any changes in the submodule so that '
'it is in a usable state, then try again.'.format(submodule))
else:
log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
'attempt to use the submodule as-is, but try to ensure '
'that the submodule is in a clean state and contains no '
'conflicts or errors.\n{2}'.format(status, submodule,
_err_help_msg))
return
err_msg = None
cmd = ['git', 'submodule'] + cmd + ['--', submodule]
log.warn('{0} {1} submodule with: `{2}`'.format(
action, submodule, ' '.join(cmd)))
try:
log.info('Running `{0}`; use the --no-git option to disable git '
'commands'.format(' '.join(cmd)))
returncode, stdout, stderr = run_cmd(cmd)
except OSError as e:
err_msg = str(e)
else:
if returncode != 0:
err_msg = stderr
if err_msg is not None:
log.warn('An unexpected error occurred updating the git submodule '
'{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
_err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
"""
Run a command in a subprocess, given as a list of command-line
arguments.
Returns a ``(returncode, stdout, stderr)`` tuple.
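    A minimal usage sketch (assumes ``git`` is available on the ``PATH``)::
        returncode, stdout, stderr = run_cmd(['git', '--version'])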
"""
try:
p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
# XXX: May block if either stdout or stderr fill their buffers;
# however for the commands this is currently used for that is
# unlikely (they should have very brief output)
stdout, stderr = p.communicate()
except OSError as e:
if DEBUG:
raise
if e.errno == errno.ENOENT:
msg = 'Command not found: `{0}`'.format(' '.join(cmd))
raise _CommandNotFound(msg, cmd)
else:
raise _AHBootstrapSystemExit(
'An unexpected error occurred when running the '
'`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
    # Can fail if the default locale is not configured properly. See
# https://github.com/astropy/astropy/issues/2749. For the purposes under
# consideration 'latin1' is an acceptable fallback.
try:
stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
except ValueError:
# Due to an OSX oddity locale.getdefaultlocale() can also crash
# depending on the user's locale/language settings. See:
# http://bugs.python.org/issue18378
stdio_encoding = 'latin1'
# Unlikely to fail at this point but even then let's be flexible
if not isinstance(stdout, str):
stdout = stdout.decode(stdio_encoding, 'replace')
if not isinstance(stderr, str):
stderr = stderr.decode(stdio_encoding, 'replace')
return (p.returncode, stdout, stderr)
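# A minimal usage sketch for run_cmd (illustrative; the command and the
# handling of a failure here are just examples):
#
#     returncode, stdout, stderr = run_cmd(['git', '--version'])
#     if returncode != 0:
#         raise _AHBootstrapSystemExit(stderr)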
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
    Examples
    --------
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
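
    A version with fewer than three release segments is padded with zeros
    before the minor version is incremented:

    >>> _next_version(pkg_resources.parse_version('1.2'))
    '1.3.0'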
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _verbose():
    """A no-op context manager used when output should not be silenced."""
    yield
@contextlib.contextmanager
def _silence():
"""A context manager that silences sys.stdout and sys.stderr."""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = _DummyFile()
sys.stderr = _DummyFile()
exception_occurred = False
try:
yield
except:
exception_occurred = True
# Go ahead and clean up so that exception handling can work normally
sys.stdout = old_stdout
sys.stderr = old_stderr
raise
if not exception_occurred:
sys.stdout = old_stdout
sys.stderr = old_stderr
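# A minimal usage sketch for _silence (``chatty_setup_step`` is hypothetical):
#
#     with _silence():
#         chatty_setup_step()  # anything written to stdout/stderr is discarded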
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
class _AHBootstrapSystemExit(SystemExit):
def __init__(self, *args):
if not args:
msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
else:
msg = args[0]
msg += '\n' + _err_help_msg
super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
"""
Ensure that the `astropy_helpers` module is available and is importable.
This supports automatic submodule initialization if astropy_helpers is
included in a project as a git submodule, or will download it from PyPI if
necessary.
Parameters
----------
path : str or None, optional
A filesystem path relative to the root of the project's source code
that should be added to `sys.path` so that `astropy_helpers` can be
imported from that path.
If the path is a git submodule it will automatically be initialized
and/or updated.
The path may also be to a ``.tar.gz`` archive of the astropy_helpers
source distribution. In this case the archive is automatically
unpacked and made temporarily available on `sys.path` as a ``.egg``
archive.
If `None` skip straight to downloading.
download_if_needed : bool, optional
If the provided filesystem path is not found an attempt will be made to
download astropy_helpers from PyPI. It will then be made temporarily
available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools). If the ``--offline`` option
is given at the command line the value of this argument is overridden
to `False`.
index_url : str, optional
If provided, use a different URL for the Python package index than the
main PyPI server.
use_git : bool, optional
If `False` no git commands will be used--this effectively disables
support for git submodules. If the ``--no-git`` option is given at the
command line the value of this argument is overridden to `False`.
auto_upgrade : bool, optional
By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
releases to astropy-helpers on PyPI and use the patched version over
any bundled versions. Setting this to `False` will disable that
functionality. If the ``--offline`` option is given at the command line
the value of this argument is overridden to `False`.
    offline : bool, optional
        If `True`, disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule. Defaults to `False`.
"""
global BOOTSTRAPPER
config = BOOTSTRAPPER.config
config.update(**kwargs)
# Create a new bootstrapper with the updated configuration and run it
BOOTSTRAPPER = _Bootstrapper(**config)
BOOTSTRAPPER.run()
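# A typical (hypothetical) invocation from a project's setup.py, assuming
# astropy_helpers is vendored as a git submodule at ./astropy_helpers:
#
#     import ah_bootstrap
#     ah_bootstrap.use_astropy_helpers(path='astropy_helpers',
#                                      download_if_needed=True)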
|
9865434d1425084f297d2c6555ab9fc7714caa4fcaf6f39beb6a824d1b76c945 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astropy is a package intended to contain core functionality and some
common tools needed for performing astronomy and astrophysics research with
Python. It also provides an index for other astronomy packages and tools for
managing them.
"""
# Prior to Astropy 3.2, astropy was imported during setup.py commands. If we are
# in setup mode, then astropy-helpers defines an _ASTROPY_SETUP_ variable, which
# we previously used to conditionally import C extensions, for example. However, the
# behavior of importing the package during the setup process is not good
# practice and we therefore now explicitly prevent the package from being
# imported in that case to prevent any regressions. We use _ASTROPY_CORE_SETUP_
# (defined in setup.py) rather than _ASTROPY_SETUP_ since the latter is also
# set up for affiliated packages, and those need to be able to import the
# (installed) core package during e.g. python setup.py test.
try:
_ASTROPY_CORE_SETUP_
except NameError:
pass
else:
raise RuntimeError("The astropy package cannot be imported during setup")
import sys
import os
from warnings import warn
__minimum_python_version__ = '3.5'
__minimum_numpy_version__ = '1.13.0'
# ASDF is an optional dependency, but this is the minimum version that is
# compatible with Astropy when it is installed.
__minimum_asdf_version__ = '2.3.0'
class UnsupportedPythonError(Exception):
pass
# This is the same check as the one at the top of setup.py
if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):
raise UnsupportedPythonError("Astropy does not support Python < {}".format(__minimum_python_version__))
def _is_astropy_source(path=None):
"""
Returns whether the source for this module is directly in an astropy
source distribution or checkout.
"""
    # If this __init__.py file is in ./astropy/ then the import is within a
    # source dir.  .astropy-root is a file distributed with the source, but
    # that should not be installed
if path is None:
path = os.path.join(os.path.dirname(__file__), os.pardir)
elif os.path.isfile(path):
path = os.path.dirname(path)
source_dir = os.path.abspath(path)
return os.path.exists(os.path.join(source_dir, '.astropy-root'))
def _is_astropy_setup():
"""
Returns whether we are currently being imported in the context of running
Astropy's setup.py.
"""
main_mod = sys.modules.get('__main__')
if not main_mod:
return False
return (getattr(main_mod, '__file__', False) and
os.path.basename(main_mod.__file__).rstrip('co') == 'setup.py' and
_is_astropy_source(main_mod.__file__))
try:
from .version import version as __version__
except ImportError:
# TODO: Issue a warning using the logging framework
__version__ = ''
try:
from .version import githash as __githash__
except ImportError:
# TODO: Issue a warning using the logging framework
__githash__ = ''
# The location of the online documentation for astropy
# This location will normally point to the current released version of astropy
if 'dev' in __version__:
online_docs_root = 'http://docs.astropy.org/en/latest/'
else:
online_docs_root = 'http://docs.astropy.org/en/{0}/'.format(__version__)
def _check_numpy():
"""
    Check that Numpy is installed and that it meets the minimum version we
    require.
"""
# Note: We could have used distutils.version for this comparison,
# but it seems like overkill to import distutils at runtime.
requirement_met = False
try:
import numpy
except ImportError:
pass
else:
from .utils import minversion
requirement_met = minversion(numpy, __minimum_numpy_version__)
if not requirement_met:
msg = ("Numpy version {0} or later must be installed to use "
"Astropy".format(__minimum_numpy_version__))
raise ImportError(msg)
return numpy
_check_numpy()
from . import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
'When True, use Unicode characters when outputting values, and '
'displaying widgets at the console.')
use_color = _config.ConfigItem(
sys.platform != 'win32',
'When True, use ANSI color escape sequences when writing to the console.',
aliases=['astropy.utils.console.USE_COLOR', 'astropy.logger.USE_COLOR'])
max_lines = _config.ConfigItem(
None,
description='Maximum number of lines in the display of pretty-printed '
'objects. If not provided, try to determine automatically from the '
'terminal size. Negative numbers mean no limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_lines'])
max_width = _config.ConfigItem(
None,
description='Maximum number of characters per line in the display of '
'pretty-printed objects. If not provided, try to determine '
'automatically from the terminal size. Negative numbers mean no '
'limit.',
cfgtype='integer(default=None)',
aliases=['astropy.table.pprint.max_width'])
conf = Conf()
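# A minimal usage sketch (illustrative): configuration items can be read as
# attributes of ``conf`` and changed temporarily with ``set_temp``:
#
#     from astropy import conf
#     print(conf.use_color)
#     with conf.set_temp('max_lines', 10):
#         ...  # pretty-printed output is limited to 10 lines here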
# Create the test() function
from .tests.runner import TestRunner
test = TestRunner.make_test_runner_in(__path__[0])
# if we are *not* in setup mode, import the logger and possibly populate the
# configuration file with the defaults
def _initialize_astropy():
from . import config
def _rollback_import(message):
log.error(message)
# Now disable exception logging to avoid an annoying error in the
# exception logger before we raise the import error:
_teardown_log()
# Roll back any astropy sub-modules that have been imported thus
# far
for key in list(sys.modules):
if key.startswith('astropy.'):
del sys.modules[key]
raise ImportError('astropy')
try:
from .utils import _compiler
except ImportError:
if _is_astropy_source():
log.warning('You appear to be trying to import astropy from '
'within a source checkout without building the '
'extension modules first. Attempting to (re)build '
'extension modules:')
try:
_rebuild_extensions()
except BaseException as exc:
_rollback_import(
'An error occurred while attempting to rebuild the '
'extension modules. Please try manually running '
'`./setup.py develop` or `./setup.py build_ext '
'--inplace` to see what the issue was. Extension '
'modules must be successfully compiled and importable '
'in order to import astropy.')
# Reraise the Exception only in case it wasn't an Exception,
# for example if a "SystemExit" or "KeyboardInterrupt" was
# invoked.
if not isinstance(exc, Exception):
raise
else:
# Outright broken installation; don't be nice.
raise
    # add these here so we only need to clean up the namespace at the end
config_dir = os.path.dirname(__file__)
try:
config.configuration.update_default_config(__package__, config_dir)
except config.configuration.ConfigurationDefaultMissingError as e:
wmsg = (e.args[0] + " Cannot install default profile. If you are "
"importing from source, this is expected.")
warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
def _rebuild_extensions():
global __version__
global __githash__
import subprocess
import time
from .utils.console import Spinner
devnull = open(os.devnull, 'w')
old_cwd = os.getcwd()
os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
try:
sp = subprocess.Popen([sys.executable, 'setup.py', 'build_ext',
'--inplace'], stdout=devnull,
stderr=devnull)
with Spinner('Rebuilding extension modules') as spinner:
while sp.poll() is None:
next(spinner)
time.sleep(0.05)
finally:
os.chdir(old_cwd)
devnull.close()
if sp.returncode != 0:
raise OSError('Running setup.py build_ext --inplace failed '
'with error code {0}: try rerunning this command '
'manually to check what the error was.'.format(
sp.returncode))
# Try re-loading module-level globals from the astropy.version module,
# which may not have existed before this function ran
try:
from .version import version as __version__
except ImportError:
pass
try:
from .version import githash as __githash__
except ImportError:
pass
# Set the bibtex entry to the article referenced in CITATION.
def _get_bibtex():
citation_file = os.path.join(os.path.dirname(__file__), 'CITATION')
with open(citation_file, 'r') as citation:
refs = citation.read().split('@ARTICLE')[1:]
if len(refs) == 0: return ''
bibtexreference = "@ARTICLE{0}".format(refs[0])
return bibtexreference
__citation__ = __bibtex__ = _get_bibtex()
import logging
# Use the root logger as a dummy log before initializing Astropy's logger
log = logging.getLogger()
from .logger import _init_log, _teardown_log
log = _init_log()
_initialize_astropy()
from .utils.misc import find_api_page
def online_help(query):
"""
Search the online Astropy documentation for the given query.
Opens the results in the default web browser. Requires an active
Internet connection.
Parameters
----------
query : str
The search query.
"""
from urllib.parse import urlencode
import webbrowser
version = __version__
if 'dev' in version:
version = 'latest'
else:
version = 'v' + version
url = 'http://docs.astropy.org/en/{0}/search.html?{1}'.format(
version, urlencode({'q': query}))
webbrowser.open(url)
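# Example usage (a sketch; requires a web browser and an internet connection):
#
#     >>> import astropy
#     >>> astropy.online_help('time')  # doctest: +SKIP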
__dir_inc__ = ['__version__', '__githash__', '__minimum_numpy_version__',
'__bibtex__', 'test', 'log', 'find_api_page', 'online_help',
'online_docs_root', 'conf']
from types import ModuleType as __module_type__
# Clean up top-level namespace--delete everything that isn't in __dir_inc__
# or is a magic attribute, and that isn't a submodule of this package
for varname in dir():
if not ((varname.startswith('__') and varname.endswith('__')) or
varname in __dir_inc__ or
(varname[0] != '_' and
isinstance(locals()[varname], __module_type__) and
locals()[varname].__name__.startswith(__name__ + '.'))):
        # The last clause in the above disjunction deserves explanation:
# When using relative imports like ``from .. import config``, the
# ``config`` variable is automatically created in the namespace of
# whatever module ``..`` resolves to (in this case astropy). This
# happens a few times just in the module setup above. This allows
# the cleanup to keep any public submodules of the astropy package
del locals()[varname]
del varname, __module_type__
|
6a5915e27350c7ca954cdc6874c8e312eee72c66a149ca3714bbb29bbbf99922 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This file contains pytest configuration settings that are astropy-specific
(i.e. those that would not necessarily be shared by affiliated packages
making use of astropy's test runner).
"""
import builtins
from importlib.util import find_spec
import astropy
from astropy.tests.plugins.display import PYTEST_HEADER_MODULES
from astropy.tests.helper import enable_deprecations_as_exceptions
try:
import matplotlib
except ImportError:
HAS_MATPLOTLIB = False
else:
HAS_MATPLOTLIB = True
if find_spec('asdf') is not None:
    from asdf import __version__ as asdf_version
    # Compare parsed versions rather than raw strings; plain string
    # comparison would misorder versions such as '2.10.0' and '2.3.0'
    from distutils.version import LooseVersion
    if (LooseVersion(asdf_version) >=
            LooseVersion(astropy.__minimum_asdf_version__)):
        pytest_plugins = ['asdf.tests.schema_tester']
        PYTEST_HEADER_MODULES['Asdf'] = 'asdf'
enable_deprecations_as_exceptions(
include_astropy_deprecations=False,
# This is a workaround for the OpenSSL deprecation warning that comes from
# the `requests` module. It only appears when both asdf and sphinx are
# installed. This can be removed once pyopenssl 1.7.20+ is released.
modules_to_ignore_on_import=['requests'])
if HAS_MATPLOTLIB:
matplotlib.use('Agg')
matplotlibrc_cache = {}
def pytest_configure(config):
builtins._pytest_running = True
# do not assign to matplotlibrc_cache in function scope
if HAS_MATPLOTLIB:
matplotlibrc_cache.update(matplotlib.rcParams)
matplotlib.rcdefaults()
def pytest_unconfigure(config):
builtins._pytest_running = False
# do not assign to matplotlibrc_cache in function scope
if HAS_MATPLOTLIB:
matplotlib.rcParams.update(matplotlibrc_cache)
matplotlibrc_cache.clear()
PYTEST_HEADER_MODULES['Cython'] = 'cython'
PYTEST_HEADER_MODULES['Scikit-image'] = 'skimage'
|
ff67c10efd90ec239e3c9e298ce6b08bd485783c61743fccde71458d1fa4ec19 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import glob
def get_package_data():
# Find all files in data/ sub-directories since this is a standard location
# for data files. We then need to adjust the paths to be relative to here
# (otherwise glob will be evaluated relative to setup.py)
data_files = glob.glob('**/data/**/*', recursive=True)
data_files = [os.path.relpath(x, os.path.dirname(__file__)) for x in data_files]
# Glob doesn't recognize hidden files
data_files.append('utils/tests/data/.hidden_file.txt')
return {'astropy': ['astropy.cfg', 'CITATION'] + data_files}
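# For reference, a (hypothetical) return value of get_package_data() looks
# like:
#
#     {'astropy': ['astropy.cfg', 'CITATION',
#                  'utils/tests/data/.hidden_file.txt', ...]}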
|
ebd49c7d81d339b50f75d3a5b1e7534d532fffab2aaad30138f4dde8f9a0556b | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_docs" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
from datetime import datetime
import os
import sys
import astropy
try:
from sphinx_astropy.conf.v1 import * # noqa
except ImportError:
print('ERROR: the documentation requires the sphinx-astropy package to be installed')
sys.exit(1)
plot_rcparams = {}
plot_rcparams['figure.figsize'] = (6, 6)
plot_rcparams['savefig.facecolor'] = 'none'
plot_rcparams['savefig.bbox'] = 'tight'
plot_rcparams['axes.labelsize'] = 'large'
plot_rcparams['figure.subplot.hspace'] = 0.5
plot_apply_rcparams = True
plot_html_show_source_link = False
plot_formats = ['png', 'svg', 'pdf']
# Don't use the default - which includes a numpy and matplotlib import
plot_pre_code = ""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.1'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
check_sphinx_version("1.2.1")
# The intersphinx_mapping in astropy_helpers.sphinx.conf refers to astropy for
# the benefit of affiliated packages who want to refer to objects in the
# astropy core. However, we don't want to cyclically reference astropy in its
# own build so we remove it here.
del intersphinx_mapping['astropy']
# add any custom intersphinx for astropy
intersphinx_mapping['pytest'] = ('https://docs.pytest.org/en/stable/', None)
intersphinx_mapping['ipython'] = ('https://ipython.readthedocs.io/en/stable/', None)
intersphinx_mapping['pandas'] = ('http://pandas.pydata.org/pandas-docs/stable/', None)
intersphinx_mapping['sphinx_automodapi'] = ('https://sphinx-automodapi.readthedocs.io/en/stable/', None)
intersphinx_mapping['packagetemplate'] = ('http://docs.astropy.org/projects/package-template/en/latest/', None)
intersphinx_mapping['h5py'] = ('http://docs.h5py.org/en/stable/', None)
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
exclude_patterns.append('_pkgtemplate.rst')
exclude_patterns.append('**/*.inc.rst') # .inc.rst mean *include* files, don't have sphinx process them
# Add any paths that contain templates here, relative to this directory.
if 'templates_path' not in locals(): # in case parent conf.py defines it
templates_path = []
templates_path.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
.. |minimum_python_version| replace:: {0.__minimum_python_version__}
.. |minimum_numpy_version| replace:: {0.__minimum_numpy_version__}
.. Astropy
.. _Astropy: http://astropy.org
.. _`Astropy mailing list`: https://mail.python.org/mailman/listinfo/astropy
.. _`astropy-dev mailing list`: http://groups.google.com/group/astropy-dev
""".format(astropy)
# -- Project information ------------------------------------------------------
project = u'Astropy'
author = u'The Astropy Developers'
copyright = u'2011–{0}, '.format(datetime.utcnow().year) + author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = astropy.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = astropy.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
#
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. The
# theme has options for controlling the text of the logo in the upper
# left corner. This is how you would specify the options in order to
# override the theme defaults (The following options *are* the
# defaults, so we do not actually need to set them here.)
#html_theme_options = {
# 'logotext1': 'astro', # white, semi-bold
# 'logotext2': 'py', # orange, light
# 'logotext3': ':docs' # white, light
# }
# A different theme can be used, or other parts of this theme can be
# modified, by overriding some of the variables set in the global
# configuration. The variables set in the global configuration are
# listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
'to_be_indexed': ['stable', 'latest']
}
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
latex_logo = '_static/astropy_logo.pdf'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# Setting this URL is required by sphinx-astropy
github_issues_url = 'https://github.com/astropy/astropy/issues/'
# Enable nitpicky mode - which ensures that all references in the docs
# resolve.
nitpicky = True
nitpick_ignore = []
with open('nitpick-exceptions') as f:
    for line in f:
        if line.strip() == "" or line.startswith("#"):
            continue
        dtype, target = line.split(None, 1)
        target = target.strip()
        nitpick_ignore.append((dtype, target))
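# For reference, each non-comment line of ``nitpick-exceptions`` is expected
# to hold a reference type and a target, e.g. (hypothetical entries):
#
#     py:class numpy.ma.core.MaskedArray
#     py:obj astropy.units.Quantity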
# -- Options for the Sphinx gallery -------------------------------------------
try:
import sphinx_gallery
extensions += ["sphinx_gallery.gen_gallery"]
sphinx_gallery_conf = {
'backreferences_dir': 'generated/modules', # path to store the module using example template
'filename_pattern': '^((?!skip_).)*$', # execute all examples except those that start with "skip_"
'examples_dirs': '..{}examples'.format(os.sep), # path to the examples scripts
'gallery_dirs': 'generated/examples', # path to save gallery generated examples
'reference_url': {
'astropy': None,
'matplotlib': 'http://matplotlib.org/',
'numpy': 'http://docs.scipy.org/doc/numpy/',
},
'abort_on_example_error': True
}
except ImportError:
def setup(app):
msg = ('The sphinx_gallery extension is not installed, so the '
'gallery will not be built. You will probably see '
'additional warnings about undefined references due '
'to this.')
try:
app.warn(msg)
except AttributeError:
# Sphinx 1.6+
from sphinx.util import logging
logger = logging.getLogger(__name__)
logger.warning(msg)
# -- Options for linkcheck output -------------------------------------------
linkcheck_retry = 5
linkcheck_ignore = ['https://journals.aas.org/manuscript-preparation/',
r'https://github\.com/astropy/astropy/(?:issues|pull)/\d+']
linkcheck_timeout = 180
linkcheck_anchors = False
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = ['robots.txt']
|
76ccb979de8392d41954608296fc6533b2c672ffffe7ae02f12acb50c50aee43 | # -*- coding: utf-8 -*-
"""
================================================================
Convert a radial velocity to the Galactic Standard of Rest (GSR)
================================================================
Radial or line-of-sight velocities of sources are often reported in a
Heliocentric or Solar-system barycentric reference frame. A common
transformation incorporates the projection of the Sun's motion along the
line-of-sight to the target, hence transforming it to a Galactic rest frame
instead (sometimes referred to as the Galactic Standard of Rest, GSR). This
transformation depends on the assumptions about the orientation of the Galactic
frame relative to the bary- or Heliocentric frame. It also depends on the
assumed solar velocity vector. Here we'll demonstrate how to perform this
transformation using a sky position and barycentric radial-velocity.
-------------------
*By: Adrian Price-Whelan*
*License: BSD*
-------------------
"""
################################################################################
# Import the required Astropy packages:
import astropy.units as u
import astropy.coordinates as coord
################################################################################
# For this example, let's work with the coordinates and barycentric radial
# velocity of the star HD 155967, as obtained from
# `Simbad <http://simbad.harvard.edu/simbad/>`_:
icrs = coord.ICRS(ra=258.58356362*u.deg, dec=14.55255619*u.deg,
radial_velocity=-16.1*u.km/u.s)
################################################################################
# We next need to decide on the velocity of the Sun in the assumed GSR frame.
# We'll use the same velocity vector as used in the
# `~astropy.coordinates.Galactocentric` frame, and convert it to a
# `~astropy.coordinates.CartesianRepresentation` object using the
# ``.to_cartesian()`` method of the
# `~astropy.coordinates.CartesianDifferential` object ``galcen_v_sun``:
v_sun = coord.Galactocentric.galcen_v_sun.to_cartesian()
################################################################################
# We now need to get a unit vector in the assumed Galactic frame from the sky
# position in the ICRS frame above. We'll use this unit vector to project the
# solar velocity onto the line-of-sight:
gal = icrs.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
################################################################################
# Now we project the solar velocity using this unit vector:
v_proj = v_sun.dot(unit_vector)
################################################################################
# Finally, we add the projection of the solar velocity to the radial velocity
# to get a GSR radial velocity:
rv_gsr = icrs.radial_velocity + v_proj
print(rv_gsr)
################################################################################
# We could wrap this in a function so we can control the solar velocity and
# re-use the above code:
def rv_to_gsr(c, v_sun=None):
"""Transform a barycentric radial velocity to the Galactic Standard of Rest
(GSR).
    The input radial velocity must be carried by the input coordinate frame
    instance (here it is read from the frame's ``radial_velocity`` attribute).
Parameters
----------
c : `~astropy.coordinates.BaseCoordinateFrame` subclass instance
        The radial velocity, associated with a sky coordinate, to be
transformed.
    v_sun : `~astropy.units.Quantity`, optional
The 3D velocity of the solar system barycenter in the GSR frame.
Defaults to the same solar motion as in the
`~astropy.coordinates.Galactocentric` frame.
Returns
-------
v_gsr : `~astropy.units.Quantity`
The input radial velocity transformed to a GSR frame.
"""
if v_sun is None:
v_sun = coord.Galactocentric.galcen_v_sun.to_cartesian()
gal = c.transform_to(coord.Galactic)
cart_data = gal.data.to_cartesian()
unit_vector = cart_data / cart_data.norm()
v_proj = v_sun.dot(unit_vector)
return c.radial_velocity + v_proj
rv_gsr = rv_to_gsr(icrs)
print(rv_gsr)
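################################################################################
# As a sketch only (the solar-motion numbers below are illustrative, not a
# recommendation), a custom solar velocity can be supplied as a Cartesian
# representation:

v_sun_custom = coord.CartesianRepresentation([11.1, 244., 7.25] * u.km/u.s)
print(rv_to_gsr(icrs, v_sun=v_sun_custom))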
|
019e420395fbf330f840b0aa2490b39e51da8c36a1b01a06c6f648fd9ed4f031 | # -*- coding: utf-8 -*-
"""
==========================================================
Create a new coordinate class (for the Sagittarius stream)
==========================================================
This document describes in detail how to subclass and define a custom spherical
coordinate frame, as discussed in :ref:`astropy-coordinates-design` and the
docstring for `~astropy.coordinates.BaseCoordinateFrame`. In this example, we
will define a coordinate system defined by the plane of orbit of the Sagittarius
Dwarf Galaxy (hereafter Sgr; as defined in Majewski et al. 2003). The Sgr
coordinate system is often referred to in terms of two angular coordinates,
:math:`\Lambda,B`.
To do this, we need to define a subclass of
`~astropy.coordinates.BaseCoordinateFrame` that knows the names and units of the
coordinate system angles in each of the supported representations. In this case
we support `~astropy.coordinates.SphericalRepresentation` with "Lambda" and
"Beta". Then we have to define the transformation from this coordinate system to
some other built-in system. Here we will use Galactic coordinates, represented
by the `~astropy.coordinates.Galactic` class.
See Also
--------
* The `gala package <http://gala.adrian.pw/>`_, which defines a number of
Astropy coordinate frames for stellar stream coordinate systems.
* Majewski et al. 2003, "A Two Micron All Sky Survey View of the Sagittarius
Dwarf Galaxy. I. Morphology of the Sagittarius Core and Tidal Arms",
https://arxiv.org/abs/astro-ph/0304198
* Law & Majewski 2010, "The Sagittarius Dwarf Galaxy: A Model for Evolution in a
Triaxial Milky Way Halo", https://arxiv.org/abs/1003.1132
* David Law's Sgr info page http://www.stsci.edu/~dlaw/Sgr/
-------------------
*By: Adrian Price-Whelan, Erik Tollerud*
*License: BSD*
-------------------
"""
##############################################################################
# Set up numpy, matplotlib, and use a nicer set of plot parameters:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import astropy_mpl_style
plt.style.use(astropy_mpl_style)
##############################################################################
# Import the packages necessary for coordinates
from astropy.coordinates import frame_transform_graph
from astropy.coordinates.matrix_utilities import rotation_matrix, matrix_product, matrix_transpose
import astropy.coordinates as coord
import astropy.units as u
##############################################################################
# The first step is to create a new class, which we'll call
# ``Sagittarius`` and make it a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`:
class Sagittarius(coord.BaseCoordinateFrame):
"""
A Heliocentric spherical coordinate system defined by the orbit
of the Sagittarius dwarf galaxy, as described in
http://adsabs.harvard.edu/abs/2003ApJ...599.1082M
and further explained in
http://www.stsci.edu/~dlaw/Sgr/.
Parameters
----------
representation : `BaseRepresentation` or None
A representation object or None to have no data (or use the other keywords)
Lambda : `Angle`, optional, must be keyword
The longitude-like angle corresponding to Sagittarius' orbit.
Beta : `Angle`, optional, must be keyword
The latitude-like angle corresponding to Sagittarius' orbit.
distance : `Quantity`, optional, must be keyword
The Distance for this object along the line-of-sight.
pm_Lambda_cosBeta : :class:`~astropy.units.Quantity`, optional, must be keyword
The proper motion along the stream in ``Lambda`` (including the
``cos(Beta)`` factor) for this object (``pm_Beta`` must also be given).
    pm_Beta : :class:`~astropy.units.Quantity`, optional, must be keyword
        The proper motion in ``Beta`` for this object (``pm_Lambda_cosBeta``
        must also be given).
radial_velocity : :class:`~astropy.units.Quantity`, optional, must be keyword
The radial velocity of this object.
"""
default_representation = coord.SphericalRepresentation
default_differential = coord.SphericalCosLatDifferential
frame_specific_representation_info = {
coord.SphericalRepresentation: [
coord.RepresentationMapping('lon', 'Lambda'),
coord.RepresentationMapping('lat', 'Beta'),
coord.RepresentationMapping('distance', 'distance')]
}
##############################################################################
# Breaking this down line-by-line, we define the class as a subclass of
# `~astropy.coordinates.BaseCoordinateFrame`. Then we include a descriptive
# docstring. The final lines are class-level attributes that specify the
# default representation for the data, default differential for the velocity
# information, and mappings from the attribute names used by representation
# objects to the names that are to be used by the ``Sagittarius`` frame. In this
# case we override the names in the spherical representations but don't do
# anything with other representations like cartesian or cylindrical.
#
# Next we have to define the transformation from this coordinate system to some
# other built-in coordinate system; we will use Galactic coordinates. We can do
# this by defining functions that return transformation matrices, or by simply
# defining a function that accepts a coordinate and returns a new coordinate in
# the new system. Because the transformation to the Sagittarius coordinate
# system is just a spherical rotation from Galactic coordinates, we'll just
# define a function that returns this matrix. We'll start by constructing the
# transformation matrix using pre-determined Euler angles and the
# ``rotation_matrix`` helper function:
SGR_PHI = (180 + 3.75) * u.degree # Euler angles (from Law & Majewski 2010)
SGR_THETA = (90 - 13.46) * u.degree
SGR_PSI = (180 + 14.111534) * u.degree
# Generate the rotation matrix using the x-convention (see Goldstein)
D = rotation_matrix(SGR_PHI, "z")
C = rotation_matrix(SGR_THETA, "x")
B = rotation_matrix(SGR_PSI, "z")
A = np.diag([1.,1.,-1.])
SGR_MATRIX = matrix_product(A, B, C, D)
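##############################################################################
# As a quick optional sanity check, the combined matrix should be orthogonal,
# i.e. its transpose should equal its inverse:

assert np.allclose(SGR_MATRIX @ SGR_MATRIX.T, np.eye(3))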
##############################################################################
# Since we already constructed the transformation (rotation) matrix above, and
# the inverse of a rotation matrix is just its transpose, the required
# transformation functions are very simple:
@frame_transform_graph.transform(coord.StaticMatrixTransform, coord.Galactic, Sagittarius)
def galactic_to_sgr():
""" Compute the transformation matrix from Galactic spherical to
heliocentric Sgr coordinates.
"""
return SGR_MATRIX
##############################################################################
# The decorator ``@frame_transform_graph.transform(coord.StaticMatrixTransform,
# coord.Galactic, Sagittarius)`` registers this function on the
# ``frame_transform_graph`` as a coordinate transformation. Inside the function,
# we simply return the previously defined rotation matrix.
#
# We then register the inverse transformation by using the transpose of the
# rotation matrix (which is faster to compute than the inverse):
@frame_transform_graph.transform(coord.StaticMatrixTransform, Sagittarius, coord.Galactic)
def sgr_to_galactic():
""" Compute the transformation matrix from heliocentric Sgr coordinates to
spherical Galactic.
"""
return matrix_transpose(SGR_MATRIX)
##############################################################################
# Now that we've registered these transformations between ``Sagittarius`` and
# `~astropy.coordinates.Galactic`, we can transform between *any* coordinate
# system and ``Sagittarius`` (as long as the other system has a path to
# transform to `~astropy.coordinates.Galactic`). For example, to transform from
# ICRS coordinates to ``Sagittarius``, we would do:
icrs = coord.ICRS(280.161732*u.degree, 11.91934*u.degree)
sgr = icrs.transform_to(Sagittarius)
print(sgr)
##############################################################################
# Or, to transform from the ``Sagittarius`` frame to ICRS coordinates (in this
# case, a line along the ``Sagittarius`` x-y plane):
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
##############################################################################
# As an example, we'll now plot the points in both coordinate systems:
fig, axes = plt.subplots(2, 1, figsize=(8, 10),
subplot_kw={'projection': 'aitoff'})
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.wrap_at(180*u.deg).radian, sgr.Beta.radian,
linestyle='none', marker='.')
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.wrap_at(180*u.deg).radian, icrs.dec.radian,
linestyle='none', marker='.')
plt.show()
##############################################################################
# This particular transformation is just a spherical rotation, which is a
# special case of an Affine transformation with no vector offset. The
# transformation of velocity components is therefore natively supported as
# well:
sgr = Sagittarius(Lambda=np.linspace(0, 2*np.pi, 128)*u.radian,
Beta=np.zeros(128)*u.radian,
pm_Lambda_cosBeta=np.random.uniform(-5, 5, 128)*u.mas/u.yr,
pm_Beta=np.zeros(128)*u.mas/u.yr)
icrs = sgr.transform_to(coord.ICRS)
print(icrs)
fig, axes = plt.subplots(3, 1, figsize=(8, 10), sharex=True)
axes[0].set_title("Sagittarius")
axes[0].plot(sgr.Lambda.degree,
sgr.pm_Lambda_cosBeta.value,
linestyle='none', marker='.')
axes[0].set_xlabel(r"$\Lambda$ [deg]")
axes[0].set_ylabel(r"$\mu_\Lambda \, \cos B$ [{0}]"
.format(sgr.pm_Lambda_cosBeta.unit.to_string('latex_inline')))
axes[1].set_title("ICRS")
axes[1].plot(icrs.ra.degree, icrs.pm_ra_cosdec.value,
linestyle='none', marker='.')
axes[1].set_ylabel(r"$\mu_\alpha \, \cos\delta$ [{0}]"
.format(icrs.pm_ra_cosdec.unit.to_string('latex_inline')))
axes[2].set_title("ICRS")
axes[2].plot(icrs.ra.degree, icrs.pm_dec.value,
linestyle='none', marker='.')
axes[2].set_xlabel("RA [deg]")
axes[2].set_ylabel(r"$\mu_\delta$ [{0}]"
.format(icrs.pm_dec.unit.to_string('latex_inline')))
plt.show()
|
41467f9a1ba7bfa2fe8df1f554f512fa51bcf63ace7facfeeb8ca61b35fd821f | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import numpy as np
from astropy.utils import isiterable
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import AstropyUserWarning
try:
import bottleneck # pylint: disable=W0611
HAS_BOTTLENECK = True
from astropy.units import Quantity
except ImportError:
HAS_BOTTLENECK = False
__all__ = ['SigmaClip', 'sigma_clip', 'sigma_clipped_stats']
def _move_tuple_axes_first(array, axis):
"""
    Bottleneck can only take an integer axis, not a tuple, so this function
    takes all the axes to be operated on and combines them into the
    first dimension of the array so that we can then use axis=0.
"""
# Figure out how many axes we are operating over
naxis = len(axis)
# Add remaining axes to the axis tuple
axis += tuple(i for i in range(array.ndim) if i not in axis)
# The new position of each axis is just in order
destination = tuple(range(array.ndim))
# Reorder the array so that the axes being operated on are at the beginning
array_new = np.moveaxis(array, axis, destination)
# Collapse the dimensions being operated on into a single dimension so that
# we can then use axis=0 with the bottleneck functions
array_new = array_new.reshape((-1,) + array_new.shape[naxis:])
return array_new
def _nanmean(array, axis=None):
"""Bottleneck nanmean function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanmean(array, axis=axis))
else:
return bottleneck.nanmean(array, axis=axis)
def _nanmedian(array, axis=None):
"""Bottleneck nanmedian function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanmedian(array, axis=axis))
else:
return bottleneck.nanmedian(array, axis=axis)
def _nanstd(array, axis=None, ddof=0):
"""Bottleneck nanstd function that handle tuple axis."""
if isinstance(axis, tuple):
array = _move_tuple_axes_first(array, axis=axis)
axis = 0
if isinstance(array, Quantity):
return array.__array_wrap__(bottleneck.nanstd(array, axis=axis,
ddof=ddof))
else:
return bottleneck.nanstd(array, axis=axis, ddof=ddof)
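# A short sketch of how the helpers above behave (assuming bottleneck is
# installed): collapsing axes (0, 2) of a (2, 3, 4) array leaves shape (3,):
#
#     >>> a = np.arange(24.).reshape(2, 3, 4)
#     >>> _nanmean(a, axis=(0, 2)).shape  # doctest: +SKIP
#     (3,)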
class SigmaClip:
"""
Class to perform sigma clipping.
    The data will be iterated over, each iteration rejecting values that
    lie more than a specified number of standard deviations below or
    above a center value.
Clipped (rejected) pixels are those where::
data < cenfunc(data [,axis=int]) - (sigma_lower * stdfunc(data [,axis=int]))
data > cenfunc(data [,axis=int]) + (sigma_upper * stdfunc(data [,axis=int]))
Invalid data values (i.e. NaN or inf) are automatically clipped.
For a functional interface to sigma clipping, see
:func:`sigma_clip`.
.. note::
`scipy.stats.sigmaclip
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
provides a subset of the functionality in this class. Also, its
input data cannot be a masked array and it does not handle data
that contains invalid values (i.e. NaN or inf). Also note that
it uses the mean as the centering function.
If your data is a `~numpy.ndarray` with no invalid values and
you want to use the mean as the centering function with
``axis=None`` and iterate to convergence, then
`scipy.stats.sigmaclip` is ~25-30% faster than the equivalent
settings here (``s = SigmaClip(cenfunc='mean', maxiters=None);
s(data, axis=None)``).
Parameters
----------
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is
3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or `None`, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute the
center value for the clipping. If set to ``'median'`` or
``'mean'`` then having the optional `bottleneck`_ package
installed will result in the best performance. If using a
callable function/object and the ``axis`` keyword is used, then
        it must be a callable that can ignore NaNs (e.g. `numpy.nanmean`)
and has an ``axis`` keyword to return an array with axis
dimension(s) removed. The default is ``'median'``.
.. _bottleneck: https://github.com/kwgoodman/bottleneck
stdfunc : {'std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If set to ``'std'``
then having the optional `bottleneck`_ package installed will
result in the best performance. If using a callable
function/object and the ``axis`` keyword is used, then it must
        be a callable that can ignore NaNs (e.g. `numpy.nanstd`) and has
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
See Also
--------
sigma_clip, sigma_clipped_stats
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=2, maxiters=5)
>>> filtered_data = sigclip(randvar)
This example clips all points that are more than 3 sigma relative to
the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and modifies the data in-place::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> sigclip = SigmaClip(sigma=3, maxiters=None, cenfunc='mean')
>>> filtered_data = sigclip(randvar, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import SigmaClip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> sigclip = SigmaClip(sigma=2.3)
>>> filtered_data = sigclip(data, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
@deprecated_renamed_argument('iters', 'maxiters', '3.1')
def __init__(self, sigma=3., sigma_lower=None, sigma_upper=None,
maxiters=5, cenfunc='median', stdfunc='std'):
self.sigma = sigma
self.sigma_lower = sigma_lower or sigma
self.sigma_upper = sigma_upper or sigma
self.maxiters = maxiters or np.inf
self.cenfunc = self._parse_cenfunc(cenfunc)
self.stdfunc = self._parse_stdfunc(stdfunc)
def __repr__(self):
return ('SigmaClip(sigma={0}, sigma_lower={1}, sigma_upper={2}, '
'maxiters={3}, cenfunc={4}, stdfunc={5})'
.format(self.sigma, self.sigma_lower, self.sigma_upper,
self.maxiters, self.cenfunc, self.stdfunc))
def __str__(self):
lines = ['<' + self.__class__.__name__ + '>']
attrs = ['sigma', 'sigma_lower', 'sigma_upper', 'maxiters', 'cenfunc',
'stdfunc']
for attr in attrs:
lines.append(' {0}: {1}'.format(attr, getattr(self, attr)))
return '\n'.join(lines)
def _parse_cenfunc(self, cenfunc):
if isinstance(cenfunc, str):
if cenfunc == 'median':
if HAS_BOTTLENECK:
cenfunc = _nanmedian
else:
cenfunc = np.nanmedian # pragma: no cover
elif cenfunc == 'mean':
if HAS_BOTTLENECK:
cenfunc = _nanmean
else:
cenfunc = np.nanmean # pragma: no cover
else:
raise ValueError('{} is an invalid cenfunc.'.format(cenfunc))
return cenfunc
def _parse_stdfunc(self, stdfunc):
if isinstance(stdfunc, str):
if stdfunc != 'std':
raise ValueError('{} is an invalid stdfunc.'.format(stdfunc))
if HAS_BOTTLENECK:
stdfunc = _nanstd
else:
stdfunc = np.nanstd # pragma: no cover
return stdfunc
def _compute_bounds(self, data, axis=None):
# ignore RuntimeWarning if the array (or along an axis) has only
# NaNs
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            # the center value is stored in _max_value temporarily; the
            # sigma_upper offset below shifts it to the actual upper bound
            self._max_value = self.cenfunc(data, axis=axis)
            std = self.stdfunc(data, axis=axis)
            self._min_value = self._max_value - (std * self.sigma_lower)
            self._max_value += std * self.sigma_upper
def _sigmaclip_noaxis(self, data, masked=True, return_bounds=False,
copy=True):
"""
Sigma clip the data when ``axis`` is None.
In this simple case, we remove clipped elements from the
flattened array during each iteration.
"""
filtered_data = data.ravel()
# remove masked values and convert to ndarray
if isinstance(filtered_data, np.ma.MaskedArray):
filtered_data = filtered_data.data[~filtered_data.mask]
# remove invalid values
good_mask = np.isfinite(filtered_data)
if np.any(~good_mask):
filtered_data = filtered_data[good_mask]
warnings.warn('Input data contains invalid values (NaNs or '
'infs), which were automatically clipped.',
AstropyUserWarning)
nchanged = 1
iteration = 0
while nchanged != 0 and (iteration < self.maxiters):
iteration += 1
size = filtered_data.size
self._compute_bounds(filtered_data, axis=None)
filtered_data = filtered_data[(filtered_data >= self._min_value) &
(filtered_data <= self._max_value)]
nchanged = size - filtered_data.size
self._niterations = iteration
if masked:
# return a masked array and optional bounds
filtered_data = np.ma.masked_invalid(data, copy=copy)
# update the mask in place, ignoring RuntimeWarnings for
# comparisons with NaN data values
with np.errstate(invalid='ignore'):
filtered_data.mask |= np.logical_or(data < self._min_value,
data > self._max_value)
if return_bounds:
return filtered_data, self._min_value, self._max_value
else:
return filtered_data
def _sigmaclip_withaxis(self, data, axis=None, masked=True,
return_bounds=False, copy=True):
"""
Sigma clip the data when ``axis`` is specified.
In this case, we replace clipped values with NaNs as placeholder
values.
"""
# float array type is needed to insert nans into the array
filtered_data = data.astype(float) # also makes a copy
# remove invalid values
bad_mask = ~np.isfinite(filtered_data)
if np.any(bad_mask):
filtered_data[bad_mask] = np.nan
warnings.warn('Input data contains invalid values (NaNs or '
'infs), which were automatically clipped.',
AstropyUserWarning)
# remove masked values and convert to plain ndarray
if isinstance(filtered_data, np.ma.MaskedArray):
filtered_data = np.ma.masked_invalid(filtered_data).astype(float)
filtered_data = filtered_data.filled(np.nan)
# convert negative axis/axes
if not isiterable(axis):
axis = (axis,)
axis = tuple(filtered_data.ndim + n if n < 0 else n for n in axis)
# define the shape of min/max arrays so that they can be broadcast
# with the data
mshape = tuple(1 if dim in axis else size
for dim, size in enumerate(filtered_data.shape))
nchanged = 1
iteration = 0
while nchanged != 0 and (iteration < self.maxiters):
iteration += 1
n_nan = np.count_nonzero(np.isnan(filtered_data))
self._compute_bounds(filtered_data, axis=axis)
if not np.isscalar(self._min_value):
self._min_value = self._min_value.reshape(mshape)
self._max_value = self._max_value.reshape(mshape)
with np.errstate(invalid='ignore'):
filtered_data[(filtered_data < self._min_value) |
(filtered_data > self._max_value)] = np.nan
nchanged = n_nan - np.count_nonzero(np.isnan(filtered_data))
self._niterations = iteration
if masked:
# create an output masked array
if copy:
filtered_data = np.ma.masked_invalid(filtered_data)
else:
# ignore RuntimeWarnings for comparisons with NaN data values
with np.errstate(invalid='ignore'):
out = np.ma.masked_invalid(data, copy=False)
filtered_data = np.ma.masked_where(np.logical_or(
out < self._min_value, out > self._max_value),
out, copy=False)
if return_bounds:
return filtered_data, self._min_value, self._max_value
else:
return filtered_data
def __call__(self, data, axis=None, masked=True, return_bounds=False,
copy=True):
"""
Perform sigma clipping on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
axis : `None` or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed
to the ``cenfunc`` and ``stdfunc``. The default is `None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where
the mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` and the minimum and maximum clipping
thresholds are returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are
also returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If
`False` and ``masked=True``, then the returned masked array
data will contain the same array as the input ``data`` (if
``data`` is a `~numpy.ndarray` or `~numpy.ma.MaskedArray`).
The default is `True`.
Returns
-------
result : flexible
If ``masked=True``, then a `~numpy.ma.MaskedArray` is
returned, where the mask is `True` for clipped values. If
``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the (masked)
array above, the minimum and maximum clipping bounds are
returned.
If ``masked=False`` and ``axis=None``, then the output array
is a flattened 1D `~numpy.ndarray` where the clipped values
have been removed. If ``return_bounds=True`` then the
returned minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the
output `~numpy.ndarray` will have the same shape as the
input ``data`` and contain ``np.nan`` where values were
clipped. If ``return_bounds=True`` then the returned
            minimum and maximum clipping thresholds will be
`~numpy.ndarray`\\s.
"""
data = np.asanyarray(data)
if data.size == 0:
return data
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
return data
# These two cases are treated separately because when
# ``axis=None`` we can simply remove clipped values from the
# array. This is not possible when ``axis`` is specified, so
# instead we replace clipped values with NaNs as a placeholder
# value.
if axis is None:
return self._sigmaclip_noaxis(data, masked=masked,
return_bounds=return_bounds,
copy=copy)
else:
return self._sigmaclip_withaxis(data, axis=axis, masked=masked,
return_bounds=return_bounds,
copy=copy)
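# A short usage sketch for the class above (illustrative; ``randn`` output
# varies from run to run):
#
#     >>> from numpy.random import randn
#     >>> sigclip = SigmaClip(sigma=2.5, maxiters=3)
#     >>> clipped, lo, hi = sigclip(randn(1000), return_bounds=True)
#     >>> int(clipped.mask.sum())  # number of clipped points  # doctest: +SKIP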
@deprecated_renamed_argument('iters', 'maxiters', '3.1')
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, maxiters=5,
cenfunc='median', stdfunc='std', axis=None, masked=True,
return_bounds=False, copy=True):
"""
Perform sigma-clipping on the provided data.
    The data will be iterated over, each iteration rejecting values that
    lie more than a specified number of standard deviations below or
    above a center value.
Clipped (rejected) pixels are those where::
data < cenfunc(data [,axis=int]) - (sigma_lower * stdfunc(data [,axis=int]))
data > cenfunc(data [,axis=int]) + (sigma_upper * stdfunc(data [,axis=int]))
Invalid data values (i.e. NaN or inf) are automatically clipped.
For an object-oriented interface to sigma clipping, see
:class:`SigmaClip`.
.. note::
`scipy.stats.sigmaclip
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
provides a subset of the functionality in this class. Also, its
input data cannot be a masked array and it does not handle data
that contains invalid values (i.e. NaN or inf). Also note that
it uses the mean as the centering function.
If your data is a `~numpy.ndarray` with no invalid values and
you want to use the mean as the centering function with
``axis=None`` and iterate to convergence, then
`scipy.stats.sigmaclip` is ~25-30% faster than the equivalent
settings here (``sigma_clip(data, cenfunc='mean', maxiters=None,
axis=None)``).
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
The data to be sigma clipped.
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is
3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or `None`, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute the
center value for the clipping. If set to ``'median'`` or
``'mean'`` then having the optional `bottleneck`_ package
installed will result in the best performance. If using a
callable function/object and the ``axis`` keyword is used, then
        it must be a callable that can ignore NaNs (e.g. `numpy.nanmean`)
and has an ``axis`` keyword to return an array with axis
dimension(s) removed. The default is ``'median'``.
.. _bottleneck: https://github.com/kwgoodman/bottleneck
stdfunc : {'std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If set to ``'std'``
then having the optional `bottleneck`_ package installed will
result in the best performance. If using a callable
function/object and the ``axis`` keyword is used, then it must
        be a callable that can ignore NaNs (e.g. `numpy.nanstd`) and has
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
axis : `None` or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed to the
``cenfunc`` and ``stdfunc``. The default is `None`.
masked : bool, optional
If `True`, then a `~numpy.ma.MaskedArray` is returned, where the
mask is `True` for clipped values. If `False`, then a
`~numpy.ndarray` and the minimum and maximum clipping thresholds
are returned. The default is `True`.
return_bounds : bool, optional
If `True`, then the minimum and maximum clipping bounds are also
returned.
copy : bool, optional
If `True`, then the ``data`` array will be copied. If `False`
and ``masked=True``, then the returned masked array data will
contain the same array as the input ``data`` (if ``data`` is a
`~numpy.ndarray` or `~numpy.ma.MaskedArray`). The default is
`True`.
Returns
-------
result : flexible
If ``masked=True``, then a `~numpy.ma.MaskedArray` is returned,
where the mask is `True` for clipped values. If
``masked=False``, then a `~numpy.ndarray` is returned.
If ``return_bounds=True``, then in addition to the (masked)
array above, the minimum and maximum clipping bounds are
returned.
If ``masked=False`` and ``axis=None``, then the output array is
a flattened 1D `~numpy.ndarray` where the clipped values have
been removed. If ``return_bounds=True`` then the returned
minimum and maximum thresholds are scalars.
If ``masked=False`` and ``axis`` is specified, then the output
`~numpy.ndarray` will have the same shape as the input ``data``
and contain ``np.nan`` where values were clipped. If
``return_bounds=True`` then the returned minimum and maximum
        clipping thresholds will be `~numpy.ndarray`\\s.
See Also
--------
SigmaClip, sigma_clipped_stats
Examples
--------
This example uses a data array of random variates from a Gaussian
distribution. We clip all points that are more than 2 sample
standard deviations from the median. The result is a masked array,
where the mask is `True` for clipped data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=2, maxiters=5)
This example clips all points that are more than 3 sigma relative to
the sample *mean*, clips until convergence, returns an unmasked
`~numpy.ndarray`, and does not copy the data::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import randn
>>> from numpy import mean
>>> randvar = randn(10000)
>>> filtered_data = sigma_clip(randvar, sigma=3, maxiters=None,
... cenfunc=mean, masked=False, copy=False)
This example sigma clips along one axis::
>>> from astropy.stats import sigma_clip
>>> from numpy.random import normal
>>> from numpy import arange, diag, ones
>>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
>>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
Note that along the other axis, no points would be clipped, as the
standard deviation is higher.
"""
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, maxiters=maxiters,
cenfunc=cenfunc, stdfunc=stdfunc)
return sigclip(data, axis=axis, masked=masked,
return_bounds=return_bounds, copy=copy)
@deprecated_renamed_argument('iters', 'maxiters', '3.1')
def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0,
sigma_lower=None, sigma_upper=None, maxiters=5,
cenfunc='median', stdfunc='std', std_ddof=0,
axis=None):
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array-like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is
3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or `None`, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute the
center value for the clipping. If set to ``'median'`` or
``'mean'`` then having the optional `bottleneck`_ package
installed will result in the best performance. If using a
callable function/object and the ``axis`` keyword is used, then
        it must be a callable that can ignore NaNs (e.g. `numpy.nanmean`)
and has an ``axis`` keyword to return an array with axis
dimension(s) removed. The default is ``'median'``.
.. _bottleneck: https://github.com/kwgoodman/bottleneck
stdfunc : {'std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If set to ``'std'``
then having the optional `bottleneck`_ package installed will
result in the best performance. If using a callable
function/object and the ``axis`` keyword is used, then it must
        be a callable that can ignore NaNs (e.g. `numpy.nanstd`) and has
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : `None` or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed
to the ``cenfunc`` and ``stdfunc``. The default is `None`.
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
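    Examples
    --------
    A minimal sketch of typical usage (random data, so the exact
    values vary; intentionally not doctest-checked):
    >>> import numpy as np
    >>> from astropy.stats import sigma_clipped_stats
    >>> data = np.random.normal(0., 1., 1000)
    >>> data[:50] += 100.  # inject strong outliers
    >>> mean, median, std = sigma_clipped_stats(data, sigma=3.0)  # doctest: +SKIP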
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, maxiters=maxiters,
cenfunc=cenfunc, stdfunc=stdfunc)
data_clipped = sigclip(data, axis=axis, masked=False, return_bounds=False,
copy=False)
if HAS_BOTTLENECK:
mean = _nanmean(data_clipped, axis=axis)
median = _nanmedian(data_clipped, axis=axis)
std = _nanstd(data_clipped, ddof=std_ddof, axis=axis)
else: # pragma: no cover
mean = np.nanmean(data_clipped, axis=axis)
median = np.nanmedian(data_clipped, axis=axis)
std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis)
return mean, median, std
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Bayesian Blocks for Time Series Analysis
========================================
Dynamic programming algorithm for solving a piecewise-constant model for
various datasets. This is based on the algorithm presented in Scargle
et al 2012 [1]_. This code was ported from the astroML project [2]_.
Applications include:
- finding an optimal histogram with adaptive bin widths
- finding optimal segmentation of time series data
- detecting inflection points in the rate of event data
The primary interface to these routines is the :func:`bayesian_blocks`
function. This module provides fitness functions suitable for three types
of data:
- Irregularly-spaced event data via the :class:`Events` class
- Regularly-spaced event data via the :class:`RegularEvents` class
- Irregularly-spaced point measurements via the :class:`PointMeasures` class
For more fine-tuned control over the fitness functions used, it is possible
to define custom :class:`FitnessFunc` classes directly and use them with
the :func:`bayesian_blocks` routine.
One common application of the Bayesian Blocks algorithm is the determination
of optimal adaptive-width histogram bins. This uses the same fitness function
as for irregularly-spaced time series events. The easiest interface for
creating Bayesian Blocks histograms is the :func:`astropy.stats.histogram`
function.
References
----------
.. [1] http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
.. [2] http://astroML.org/ https://github.com/astroML/astroML/
"""
import warnings
import numpy as np
from inspect import signature
from astropy.utils.exceptions import AstropyUserWarning
# TODO: implement other fitness functions from appendix B of Scargle 2012
__all__ = ['FitnessFunc', 'Events', 'RegularEvents', 'PointMeasures',
'bayesian_blocks']
def bayesian_blocks(t, x=None, sigma=None,
fitness='events', **kwargs):
r"""Compute optimal segmentation of data with Scargle's Bayesian Blocks
This is a flexible implementation of the Bayesian Blocks algorithm
described in Scargle 2012 [1]_.
Parameters
----------
t : array_like
data times (one dimensional, length N)
x : array_like (optional)
data values
sigma : array_like or float (optional)
data errors
fitness : str or object
the fitness function to use for the model.
If a string, the following options are supported:
- 'events' : binned or unbinned event data. Arguments are ``gamma``,
which gives the slope of the prior on the number of bins, or
``ncp_prior``, which is :math:`-\ln({\tt gamma})`.
- 'regular_events' : non-overlapping events measured at multiples of a
fundamental tick rate, ``dt``, which must be specified as an
additional argument. Extra arguments are ``p0``, which gives the
false alarm probability to compute the prior, or ``gamma``, which
gives the slope of the prior on the number of bins, or ``ncp_prior``,
which is :math:`-\ln({\tt gamma})`.
- 'measures' : fitness for a measured sequence with Gaussian errors.
Extra arguments are ``p0``, which gives the false alarm probability
to compute the prior, or ``gamma``, which gives the slope of the
prior on the number of bins, or ``ncp_prior``, which is
:math:`-\ln({\tt gamma})`.
In all three cases, if more than one of ``p0``, ``gamma``, and
``ncp_prior`` is chosen, ``ncp_prior`` takes precedence over ``gamma``
which takes precedence over ``p0``.
Alternatively, the fitness parameter can be an instance of
:class:`FitnessFunc` or a subclass thereof.
**kwargs :
any additional keyword arguments will be passed to the specified
:class:`FitnessFunc` derived class.
Returns
-------
edges : ndarray
array containing the (N+1) edges defining the N bins
Examples
--------
Event data:
>>> t = np.random.normal(size=100)
>>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
Event data with repeats:
>>> t = np.random.normal(size=100)
>>> t[80:] = t[:20]
>>> edges = bayesian_blocks(t, fitness='events', p0=0.01)
Regular event data:
>>> dt = 0.05
>>> t = dt * np.arange(1000)
>>> x = np.zeros(len(t))
>>> x[np.random.randint(0, len(t), len(t) // 10)] = 1
>>> edges = bayesian_blocks(t, x, fitness='regular_events', dt=dt)
Measured point data with errors:
>>> t = 100 * np.random.random(100)
>>> x = np.exp(-0.5 * (t - 50) ** 2)
>>> sigma = 0.1
>>> x_obs = np.random.normal(x, sigma)
>>> edges = bayesian_blocks(t, x_obs, sigma, fitness='measures')
References
----------
.. [1] Scargle, J et al. (2012)
http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
See Also
--------
astropy.stats.histogram : compute a histogram using bayesian blocks
"""
FITNESS_DICT = {'events': Events,
'regular_events': RegularEvents,
'measures': PointMeasures}
fitness = FITNESS_DICT.get(fitness, fitness)
if type(fitness) is type and issubclass(fitness, FitnessFunc):
fitfunc = fitness(**kwargs)
elif isinstance(fitness, FitnessFunc):
fitfunc = fitness
else:
raise ValueError("fitness parameter not understood")
return fitfunc.fit(t, x, sigma)
class FitnessFunc:
"""Base class for bayesian blocks fitness functions
Derived classes should overload the following method:
``fitness(self, **kwargs)``:
Compute the fitness given a set of named arguments.
Arguments accepted by fitness must be among ``[T_k, N_k, a_k, b_k, c_k]``
(See [1]_ for details on the meaning of these parameters).
Additionally, other methods may be overloaded as well:
``__init__(self, **kwargs)``:
Initialize the fitness function with any parameters beyond the normal
``p0`` and ``gamma``.
``validate_input(self, t, x, sigma)``:
Enable specific checks of the input data (``t``, ``x``, ``sigma``)
to be performed prior to the fit.
``compute_ncp_prior(self, N)``: If ``ncp_prior`` is not defined explicitly,
this function is called in order to define it before fitting. This may be
calculated from ``gamma``, ``p0``, or whatever method you choose.
``p0_prior(self, N)``:
Specify the form of the prior given the false-alarm probability ``p0``
(See [1]_ for details).
For examples of implemented fitness functions, see :class:`Events`,
:class:`RegularEvents`, and :class:`PointMeasures`.
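    As an illustrative sketch (not part of the library), a custom
    fitness function with the same functional form as :class:`Events`
    could look like::
        class MyEvents(FitnessFunc):
            def fitness(self, N_k, T_k):
                # eq. 19 of Scargle 2012, shown here only to
                # illustrate the required signature and cumulant names
                return N_k * (np.log(N_k) - np.log(T_k))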
References
----------
.. [1] Scargle, J et al. (2012)
http://adsabs.harvard.edu/abs/2012arXiv1207.5578S
"""
def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
self.p0 = p0
self.gamma = gamma
self.ncp_prior = ncp_prior
def validate_input(self, t, x=None, sigma=None):
"""Validate inputs to the model.
Parameters
----------
t : array_like
times of observations
x : array_like (optional)
values observed at each time
sigma : float or array_like (optional)
errors in values x
Returns
-------
t, x, sigma : array_like, float or None
validated and perhaps modified versions of inputs
"""
# validate array input
t = np.asarray(t, dtype=float)
if x is not None:
x = np.asarray(x)
if sigma is not None:
sigma = np.asarray(sigma)
# find unique values of t
t = np.array(t)
if t.ndim != 1:
raise ValueError("t must be a one-dimensional array")
unq_t, unq_ind, unq_inv = np.unique(t, return_index=True,
return_inverse=True)
# if x is not specified, x will be counts at each time
if x is None:
if sigma is not None:
raise ValueError("If sigma is specified, x must be specified")
else:
sigma = 1
if len(unq_t) == len(t):
x = np.ones_like(t)
else:
x = np.bincount(unq_inv)
t = unq_t
# if x is specified, then we need to simultaneously sort t and x
else:
# TODO: allow broadcasted x?
x = np.asarray(x)
if x.shape not in [(), (1,), (t.size,)]:
raise ValueError("x does not match shape of t")
x += np.zeros_like(t)
if len(unq_t) != len(t):
raise ValueError("Repeated values in t not supported when "
"x is specified")
t = unq_t
x = x[unq_ind]
# verify the given sigma value
if sigma is None:
sigma = 1
else:
sigma = np.asarray(sigma)
if sigma.shape not in [(), (1,), (t.size,)]:
raise ValueError('sigma does not match the shape of x')
return t, x, sigma
def fitness(self, **kwargs):
raise NotImplementedError()
def p0_prior(self, N):
"""
        Empirical prior, parametrized by the false alarm probability
        ``p0``. See eq. 21 in Scargle (2012).
Note that there was an error in this equation in the original Scargle
paper (the "log" was missing). The following corrected form is taken
from https://arxiv.org/abs/1304.2818
"""
return 4 - np.log(73.53 * self.p0 * (N ** -0.478))
# the fitness_args property will return the list of arguments accepted by
# the method fitness(). This allows more efficient computation below.
@property
def _fitness_args(self):
return signature(self.fitness).parameters.keys()
def compute_ncp_prior(self, N):
"""
If ``ncp_prior`` is not explicitly defined, compute it from ``gamma``
or ``p0``.
"""
if self.gamma is not None:
return -np.log(self.gamma)
elif self.p0 is not None:
return self.p0_prior(N)
else:
raise ValueError("``ncp_prior`` cannot be computed as neither "
"``gamma`` nor ``p0`` is defined.")
def fit(self, t, x=None, sigma=None):
"""Fit the Bayesian Blocks model given the specified fitness function.
Parameters
----------
t : array_like
data times (one dimensional, length N)
x : array_like (optional)
data values
sigma : array_like or float (optional)
data errors
Returns
-------
edges : ndarray
array containing the (M+1) edges defining the M optimal bins
"""
t, x, sigma = self.validate_input(t, x, sigma)
# compute values needed for computation, below
if 'a_k' in self._fitness_args:
ak_raw = np.ones_like(x) / sigma ** 2
if 'b_k' in self._fitness_args:
bk_raw = x / sigma ** 2
if 'c_k' in self._fitness_args:
ck_raw = x * x / sigma ** 2
# create length-(N + 1) array of cell edges
edges = np.concatenate([t[:1],
0.5 * (t[1:] + t[:-1]),
t[-1:]])
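        # block_length[k] is the distance from cell edge k to the final
        # time; differences of this array give the block widths T_k below.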
block_length = t[-1] - edges
# arrays to store the best configuration
N = len(t)
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
# Compute ncp_prior if not defined
if self.ncp_prior is None:
ncp_prior = self.compute_ncp_prior(N)
else:
ncp_prior = self.ncp_prior
# ----------------------------------------------------------------
# Start with first data cell; add one cell at each iteration
# ----------------------------------------------------------------
for R in range(N):
# Compute fit_vec : fitness of putative last block (end at R)
kwds = {}
# T_k: width/duration of each block
if 'T_k' in self._fitness_args:
kwds['T_k'] = block_length[:R + 1] - block_length[R + 1]
# N_k: number of elements in each block
if 'N_k' in self._fitness_args:
kwds['N_k'] = np.cumsum(x[:R + 1][::-1])[::-1]
# a_k: eq. 31
if 'a_k' in self._fitness_args:
kwds['a_k'] = 0.5 * np.cumsum(ak_raw[:R + 1][::-1])[::-1]
# b_k: eq. 32
if 'b_k' in self._fitness_args:
kwds['b_k'] = - np.cumsum(bk_raw[:R + 1][::-1])[::-1]
# c_k: eq. 33
if 'c_k' in self._fitness_args:
kwds['c_k'] = 0.5 * np.cumsum(ck_raw[:R + 1][::-1])[::-1]
# evaluate fitness function
fit_vec = self.fitness(**kwds)
A_R = fit_vec - ncp_prior
A_R[1:] += best[:R]
i_max = np.argmax(A_R)
last[R] = i_max
best[R] = A_R[i_max]
# ----------------------------------------------------------------
# Now find changepoints by iteratively peeling off the last block
# ----------------------------------------------------------------
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
return edges[change_points]
class Events(FitnessFunc):
r"""Bayesian blocks fitness for binned or unbinned events
Parameters
----------
p0 : float (optional)
False alarm probability, used to compute the prior on
:math:`N_{\rm blocks}` (see eq. 21 of Scargle 2012). For the Events
type data, ``p0`` does not seem to be an accurate representation of the
actual false alarm probability. If you are using this fitness function
for a triggering type condition, it is recommended that you run
statistical trials on signal-free noise to determine an appropriate
value of ``gamma`` or ``ncp_prior`` to use for a desired false alarm
        rate (see the illustrative sketch at the end of this docstring).
gamma : float (optional)
If specified, then use this gamma to compute the general prior form,
:math:`p \sim {\tt gamma}^{N_{\rm blocks}}`. If gamma is specified, p0
is ignored.
ncp_prior : float (optional)
If specified, use the value of ``ncp_prior`` to compute the prior as
above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt
gamma})`.
        If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are ignored.
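    As an illustrative sketch (not a library recipe), such a
    calibration could run the algorithm on many realizations of
    signal-free noise; ``candidate_ncp_values`` and ``n_trials``
    below are hypothetical names::
        for ncp in candidate_ncp_values:
            n_false = 0
            for _ in range(n_trials):
                t = np.sort(np.random.random(100))  # signal-free noise
                edges = bayesian_blocks(t, fitness=Events(ncp_prior=ncp))
                if len(edges) > 2:  # more than one block: false detection
                    n_false += 1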
"""
def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
if p0 is not None and gamma is None and ncp_prior is None:
warnings.warn('p0 does not seem to accurately represent the false '
'positive rate for event data. It is highly '
'recommended that you run random trials on signal-'
'free noise to calibrate ncp_prior to achieve a '
'desired false positive rate.', AstropyUserWarning)
super().__init__(p0, gamma, ncp_prior)
def fitness(self, N_k, T_k):
# eq. 19 from Scargle 2012
return N_k * (np.log(N_k) - np.log(T_k))
def validate_input(self, t, x, sigma):
t, x, sigma = super().validate_input(t, x, sigma)
if x is not None and np.any(x % 1 > 0):
raise ValueError("x must be integer counts for fitness='events'")
return t, x, sigma
class RegularEvents(FitnessFunc):
r"""Bayesian blocks fitness for regular events
This is for data which has a fundamental "tick" length, so that all
measured values are multiples of this tick length. In each tick, there
are either zero or one counts.
Parameters
----------
dt : float
tick rate for data
p0 : float (optional)
False alarm probability, used to compute the prior on :math:`N_{\rm
blocks}` (see eq. 21 of Scargle 2012). If gamma is specified, p0 is
ignored.
ncp_prior : float (optional)
If specified, use the value of ``ncp_prior`` to compute the prior as
above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt
gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are
ignored.
"""
def __init__(self, dt, p0=0.05, gamma=None, ncp_prior=None):
self.dt = dt
super().__init__(p0, gamma, ncp_prior)
def validate_input(self, t, x, sigma):
t, x, sigma = super().validate_input(t, x, sigma)
if not np.all((x == 0) | (x == 1)):
raise ValueError("Regular events must have only 0 and 1 in x")
return t, x, sigma
def fitness(self, T_k, N_k):
# Eq. 75 of Scargle 2012
M_k = T_k / self.dt
N_over_M = N_k / M_k
eps = 1E-8
if np.any(N_over_M > 1 + eps):
warnings.warn('regular events: N/M > 1. '
'Is the time step correct?', AstropyUserWarning)
one_m_NM = 1 - N_over_M
N_over_M[N_over_M <= 0] = 1
one_m_NM[one_m_NM <= 0] = 1
return N_k * np.log(N_over_M) + (M_k - N_k) * np.log(one_m_NM)
class PointMeasures(FitnessFunc):
r"""Bayesian blocks fitness for point measures
Parameters
----------
p0 : float (optional)
False alarm probability, used to compute the prior on :math:`N_{\rm
blocks}` (see eq. 21 of Scargle 2012). If gamma is specified, p0 is
ignored.
ncp_prior : float (optional)
If specified, use the value of ``ncp_prior`` to compute the prior as
above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt
gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are
ignored.
"""
def __init__(self, p0=0.05, gamma=None, ncp_prior=None):
super().__init__(p0, gamma, ncp_prior)
def fitness(self, a_k, b_k):
# eq. 41 from Scargle 2012
return (b_k * b_k) / (4 * a_k)
def validate_input(self, t, x, sigma):
if x is None:
raise ValueError("x must be specified for point measures")
return super().validate_input(t, x, sigma)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements functions and classes for spatial statistics.
"""
import numpy as np
import math
class RipleysKEstimator:
"""
Estimators for Ripley's K function for two-dimensional spatial data.
See [1]_, [2]_, [3]_, [4]_, [5]_ for detailed mathematical and
practical aspects of those estimators.
Parameters
----------
area : float
        Area of study from which the points were observed.
x_max, y_max : float, float, optional
Maximum rectangular coordinates of the area of study.
        Required if ``mode == 'translation'`` or ``mode == 'ohser'``.
x_min, y_min : float, float, optional
Minimum rectangular coordinates of the area of study.
        Required if ``mode == 'var-width'`` or ``mode == 'ohser'``.
Examples
--------
>>> import numpy as np
>>> from matplotlib import pyplot as plt # doctest: +SKIP
>>> from astropy.stats import RipleysKEstimator
>>> z = np.random.uniform(low=5, high=10, size=(100, 2))
>>> Kest = RipleysKEstimator(area=25, x_max=10, y_max=10,
... x_min=5, y_min=5)
>>> r = np.linspace(0, 2.5, 100)
>>> plt.plot(r, Kest.poisson(r)) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='none')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='translation')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='ohser')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='var-width')) # doctest: +SKIP
>>> plt.plot(r, Kest(data=z, radii=r, mode='ripley')) # doctest: +SKIP
References
----------
.. [1] Peebles, P.J.E. *The large scale structure of the universe*.
<http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1980lssu.book.....P&db_key=AST>
.. [2] Spatial descriptive statistics.
<https://en.wikipedia.org/wiki/Spatial_descriptive_statistics>
.. [3] Package spatstat.
<https://cran.r-project.org/web/packages/spatstat/spatstat.pdf>
.. [4] Cressie, N.A.C. (1991). Statistics for Spatial Data,
Wiley, New York.
.. [5] Stoyan, D., Stoyan, H. (1992). Fractals, Random Shapes and
Point Fields, Akademie Verlag GmbH, Chichester.
"""
def __init__(self, area, x_max=None, y_max=None, x_min=None, y_min=None):
self.area = area
self.x_max = x_max
self.y_max = y_max
self.x_min = x_min
self.y_min = y_min
@property
def area(self):
return self._area
@area.setter
def area(self, value):
if isinstance(value, (float, int)) and value > 0:
self._area = value
else:
raise ValueError('area is expected to be a positive number. '
'Got {}.'.format(value))
@property
def y_max(self):
return self._y_max
@y_max.setter
def y_max(self, value):
if value is None or isinstance(value, (float, int)):
self._y_max = value
else:
raise ValueError('y_max is expected to be a real number '
'or None. Got {}.'.format(value))
@property
def x_max(self):
return self._x_max
@x_max.setter
def x_max(self, value):
if value is None or isinstance(value, (float, int)):
self._x_max = value
else:
raise ValueError('x_max is expected to be a real number '
'or None. Got {}.'.format(value))
@property
def y_min(self):
return self._y_min
@y_min.setter
def y_min(self, value):
if value is None or isinstance(value, (float, int)):
self._y_min = value
else:
raise ValueError('y_min is expected to be a real number. '
'Got {}.'.format(value))
@property
def x_min(self):
return self._x_min
@x_min.setter
def x_min(self, value):
if value is None or isinstance(value, (float, int)):
self._x_min = value
else:
raise ValueError('x_min is expected to be a real number. '
'Got {}.'.format(value))
def __call__(self, data, radii, mode='none'):
return self.evaluate(data=data, radii=radii, mode=mode)
def _pairwise_diffs(self, data):
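        # Absolute (dx, dy) differences for every unique point pair,
        # filled block by block: iteration i writes the differences
        # between point i and all later points.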
npts = len(data)
diff = np.zeros(shape=(npts * (npts - 1) // 2, 2), dtype=np.double)
k = 0
for i in range(npts - 1):
size = npts - i - 1
diff[k:k + size] = abs(data[i] - data[i+1:])
k += size
return diff
def poisson(self, radii):
"""
Evaluates the Ripley K function for the homogeneous Poisson process,
        also known as complete spatial randomness (CSR).
Parameters
----------
radii : 1D array
Set of distances in which Ripley's K function will be evaluated.
Returns
-------
output : 1D array
Ripley's K function evaluated at ``radii``.
"""
return np.pi * radii * radii
def Lfunction(self, data, radii, mode='none'):
"""
Evaluates the L function at ``radii``. For parameter description
see ``evaluate`` method.
"""
return np.sqrt(self.evaluate(data, radii, mode=mode) / np.pi)
def Hfunction(self, data, radii, mode='none'):
"""
Evaluates the H function at ``radii``. For parameter description
see ``evaluate`` method.
"""
return self.Lfunction(data, radii, mode=mode) - radii
def evaluate(self, data, radii, mode='none'):
"""
Evaluates the Ripley K estimator for a given set of values ``radii``.
Parameters
----------
data : 2D array
            Set of observed points as an n by 2 array, which will be used to
estimate Ripley's K function.
radii : 1D array
Set of distances in which Ripley's K estimator will be evaluated.
            It is common to consider max(radii) < (area/2)**0.5.
mode : str
Keyword which indicates the method for edge effects correction.
Available methods are 'none', 'translation', 'ohser', 'var-width',
and 'ripley'.
* 'none'
this method does not take into account any edge effects
whatsoever.
* 'translation'
computes the intersection of rectangular areas centered at
the given points provided the upper bounds of the
dimensions of the rectangular area of study. It assumes that
all the points lie in a bounded rectangular region satisfying
x_min < x_i < x_max; y_min < y_i < y_max. A detailed
description of this method can be found on ref [4].
* 'ohser'
this method uses the isotropized set covariance function of
the window of study as a weight to correct for
edge-effects. A detailed description of this method can be
found on ref [4].
* 'var-width'
this method considers the distance of each observed point to
the nearest boundary of the study window as a factor to
account for edge-effects. See [3] for a brief description of
this method.
* 'ripley'
this method is known as Ripley's edge-corrected estimator.
The weight for edge-correction is a function of the
proportions of circumferences centered at each data point
which crosses another data point of interest. See [3] for
a detailed description of this method.
Returns
-------
ripley : 1D array
Ripley's K function estimator evaluated at ``radii``.
"""
data = np.asarray(data)
if not data.shape[1] == 2:
raise ValueError('data must be an n by 2 array, where n is the '
'number of observed points.')
npts = len(data)
ripley = np.zeros(len(radii))
if mode == 'none':
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
for r in range(len(radii)):
ripley[r] = (distances < radii[r]).sum()
ripley = self.area * 2. * ripley / (npts * (npts - 1))
# eq. 15.11 Stoyan book page 283
elif mode == 'translation':
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
intersec_area = (((self.x_max - self.x_min) - diff[:, 0]) *
((self.y_max - self.y_min) - diff[:, 1]))
for r in range(len(radii)):
dist_indicator = distances < radii[r]
ripley[r] = ((1 / intersec_area) * dist_indicator).sum()
ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
# Stoyan book page 123 and eq 15.13
elif mode == 'ohser':
diff = self._pairwise_diffs(data)
distances = np.hypot(diff[:, 0], diff[:, 1])
a = self.area
b = max((self.y_max - self.y_min) / (self.x_max - self.x_min),
(self.x_max - self.x_min) / (self.y_max - self.y_min))
x = distances / math.sqrt(a / b)
u = np.sqrt((x * x - 1) * (x > 1))
v = np.sqrt((x * x - b ** 2) * (x < math.sqrt(b ** 2 + 1)) * (x > b))
c1 = np.pi - 2 * x * (1 + 1 / b) + x * x / b
c2 = 2 * np.arcsin((1 / x) * (x > 1)) - 1 / b - 2 * (x - u)
c3 = (2 * np.arcsin(((b - u * v) / (x * x))
* (x > b) * (x < math.sqrt(b ** 2 + 1)))
+ 2 * u + 2 * v / b - b - (1 + x * x) / b)
cov_func = ((a / np.pi) * (c1 * (x >= 0) * (x <= 1)
+ c2 * (x > 1) * (x <= b)
+ c3 * (b < x) * (x < math.sqrt(b ** 2 + 1))))
for r in range(len(radii)):
dist_indicator = distances < radii[r]
ripley[r] = ((1 / cov_func) * dist_indicator).sum()
ripley = (self.area**2 / (npts * (npts - 1))) * 2 * ripley
# Cressie book eq 8.2.20 page 616
elif mode == 'var-width':
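            # lt_dist is each point's distance to the nearest window
            # boundary; a pair is counted only when the radius fits
            # entirely inside the window around point i.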
lt_dist = np.minimum(np.minimum(self.x_max - data[:, 0], self.y_max - data[:, 1]),
np.minimum(data[:, 0] - self.x_min, data[:, 1] - self.y_min))
for r in range(len(radii)):
for i in range(npts):
for j in range(npts):
if i != j:
diff = abs(data[i] - data[j])
dist = math.sqrt((diff * diff).sum())
if dist < radii[r] < lt_dist[i]:
ripley[r] = ripley[r] + 1
lt_dist_sum = (lt_dist > radii[r]).sum()
if not lt_dist_sum == 0:
ripley[r] = ripley[r] / lt_dist_sum
ripley = self.area * ripley / npts
# Cressie book eq 8.4.22 page 640
elif mode == 'ripley':
hor_dist = np.zeros(shape=(npts * (npts - 1)) // 2,
dtype=np.double)
ver_dist = np.zeros(shape=(npts * (npts - 1)) // 2,
dtype=np.double)
for k in range(npts - 1):
min_hor_dist = min(self.x_max - data[k][0],
data[k][0] - self.x_min)
min_ver_dist = min(self.y_max - data[k][1],
data[k][1] - self.y_min)
start = (k * (2 * (npts - 1) - (k - 1))) // 2
end = ((k + 1) * (2 * (npts - 1) - k)) // 2
hor_dist[start: end] = min_hor_dist * np.ones(npts - 1 - k)
ver_dist[start: end] = min_ver_dist * np.ones(npts - 1 - k)
diff = self._pairwise_diffs(data)
dist = np.hypot(diff[:, 0], diff[:, 1])
dist_ind = dist <= np.hypot(hor_dist, ver_dist)
w1 = (1 - (np.arccos(np.minimum(ver_dist, dist) / dist) +
np.arccos(np.minimum(hor_dist, dist) / dist)) / np.pi)
w2 = (3 / 4 - 0.5 * (np.arccos(ver_dist / dist * ~dist_ind) +
np.arccos(hor_dist / dist * ~dist_ind)) / np.pi)
weight = dist_ind * w1 + ~dist_ind * w2
for r in range(len(radii)):
ripley[r] = ((dist < radii[r]) / weight).sum()
ripley = self.area * 2. * ripley / (npts * (npts - 1))
else:
raise ValueError('mode {} is not implemented.'.format(mode))
return ripley
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple statistical algorithms that are
straightforwardly implemented as a single python function (or family of
functions).
This module should generally not be used directly. Everything in
`__all__` is imported into `astropy.stats`, and hence that package
should be used for access.
"""
import math
import itertools
import numpy as np
from warnings import warn
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils import isiterable
from . import _stats
__all__ = ['gaussian_fwhm_to_sigma', 'gaussian_sigma_to_fwhm',
'binom_conf_interval', 'binned_binom_proportion',
'poisson_conf_interval', 'median_absolute_deviation', 'mad_std',
'signal_to_noise_oir_ccd', 'bootstrap', 'kuiper', 'kuiper_two',
'kuiper_false_positive_probability', 'cdf_from_intervals',
'interval_overlap_length', 'histogram_intervals', 'fold_intervals']
__doctest_skip__ = ['binned_binom_proportion']
__doctest_requires__ = {'binom_conf_interval': ['scipy.special'],
'poisson_conf_interval': ['scipy.special',
'scipy.optimize',
'scipy.integrate']}
gaussian_sigma_to_fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0))
"""
Factor with which to multiply Gaussian 1-sigma standard deviation to
convert it to full width at half maximum (FWHM).
"""
gaussian_fwhm_to_sigma = 1. / gaussian_sigma_to_fwhm
"""
Factor with which to multiply Gaussian full width at half maximum (FWHM)
to convert it to 1-sigma standard deviation.
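For example, a measured FWHM of 4.7 corresponds to
``sigma = 4.7 * gaussian_fwhm_to_sigma`` (approximately 2.0).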
"""
# TODO Note scipy dependency
def binom_conf_interval(k, n, conf=0.68269, interval='wilson'):
r"""Binomial proportion confidence interval given k successes,
n trials.
Parameters
----------
k : int or numpy.ndarray
Number of successes (0 <= ``k`` <= ``n``).
n : int or numpy.ndarray
Number of trials (``n`` > 0). If both ``k`` and ``n`` are arrays,
they must have the same shape.
conf : float in [0, 1], optional
Desired probability content of interval. Default is 0.68269,
corresponding to 1 sigma in a 1-dimensional Gaussian distribution.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used for confidence interval. See notes for details. The
``'wilson'`` and ``'jeffreys'`` intervals generally give similar
results, while 'flat' is somewhat different, especially for small
values of ``n``. ``'wilson'`` should be somewhat faster than
``'flat'`` or ``'jeffreys'``. The 'wald' interval is generally not
recommended. It is provided for comparison purposes. Default is
``'wilson'``.
Returns
-------
conf_interval : numpy.ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``k``, ``n``.
Notes
-----
In situations where a probability of success is not known, it can
be estimated from a number of trials (N) and number of
observed successes (k). For example, this is done in Monte
Carlo experiments designed to estimate a detection efficiency. It
is simple to take the sample proportion of successes (k/N)
as a reasonable best estimate of the true probability
:math:`\epsilon`. However, deriving an accurate confidence
interval on :math:`\epsilon` is non-trivial. There are several
formulas for this interval (see [1]_). Four intervals are implemented
here:
**1. The Wilson Interval.** This interval, attributed to Wilson [2]_,
is given by
.. math::
        CI_{\rm Wilson} = \frac{k + \kappa^2/2}{N + \kappa^2}
        \pm \frac{\kappa N^{1/2}}{N + \kappa^2}
        \left(\hat{\epsilon}(1 - \hat{\epsilon}) + \frac{\kappa^2}{4N}\right)^{1/2}
where :math:`\hat{\epsilon} = k / N` and :math:`\kappa` is the
number of standard deviations corresponding to the desired
confidence interval for a *normal* distribution (for example,
1.0 for a confidence interval of 68.269%). For a
confidence interval of 100(1 - :math:`\alpha`)%,
.. math::
\kappa = \Phi^{-1}(1-\alpha/2) = \sqrt{2}{\rm erf}^{-1}(1-\alpha).
**2. The Jeffreys Interval.** This interval is derived by applying
Bayes' theorem to the binomial distribution with the
noninformative Jeffreys prior [3]_, [4]_. The noninformative Jeffreys
prior is the Beta distribution, Beta(1/2, 1/2), which has the density
function
.. math::
f(\epsilon) = \pi^{-1} \epsilon^{-1/2}(1-\epsilon)^{-1/2}.
The justification for this prior is that it is invariant under
reparameterizations of the binomial proportion.
The posterior density function is also a Beta distribution: Beta(k
+ 1/2, N - k + 1/2). The interval is then chosen so that it is
*equal-tailed*: Each tail (outside the interval) contains
:math:`\alpha`/2 of the posterior probability, and the interval
itself contains 1 - :math:`\alpha`. This interval must be
calculated numerically. Additionally, when k = 0 the lower limit
is set to 0 and when k = N the upper limit is set to 1, so that in
these cases, there is only one tail containing :math:`\alpha`/2
and the interval itself contains 1 - :math:`\alpha`/2 rather than
the nominal 1 - :math:`\alpha`.
**3. A Flat prior.** This is similar to the Jeffreys interval,
but uses a flat (uniform) prior on the binomial proportion
over the range 0 to 1 rather than the reparametrization-invariant
Jeffreys prior. The posterior density function is a Beta distribution:
Beta(k + 1, N - k + 1). The same comments about the nature of the
interval (equal-tailed, etc.) also apply to this option.
**4. The Wald Interval.** This interval is given by
.. math::
CI_{\rm Wald} = \hat{\epsilon} \pm
\kappa \sqrt{\frac{\hat{\epsilon}(1-\hat{\epsilon})}{N}}
    The Wald interval gives acceptable results in some limiting
    cases, particularly when N is very large and the true proportion
    :math:`\epsilon` is not "too close" to 0 or 1. However, as the
    latter condition cannot be verified when estimating
    :math:`\epsilon`, this is not very helpful. Its use is not
    recommended, but it is
provided here for comparison purposes due to its prevalence in
everyday practical statistics.
References
----------
.. [1] Brown, Lawrence D.; Cai, T. Tony; DasGupta, Anirban (2001).
"Interval Estimation for a Binomial Proportion". Statistical
Science 16 (2): 101-133. doi:10.1214/ss/1009213286
.. [2] Wilson, E. B. (1927). "Probable inference, the law of
succession, and statistical inference". Journal of the American
Statistical Association 22: 209-212.
.. [3] Jeffreys, Harold (1946). "An Invariant Form for the Prior
Probability in Estimation Problems". Proc. R. Soc. Lond.. A 24 186
(1007): 453-461. doi:10.1098/rspa.1946.0056
.. [4] Jeffreys, Harold (1998). Theory of Probability. Oxford
University Press, 3rd edition. ISBN 978-0198503682
Examples
--------
Integer inputs return an array with shape (2,):
>>> binom_conf_interval(4, 5, interval='wilson')
array([ 0.57921724, 0.92078259])
Arrays of arbitrary dimension are supported. The Wilson and Jeffreys
intervals give similar results, even for small k, N:
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='wilson')
array([[ 0. , 0.07921741, 0.21597328, 0.83333304],
[ 0.16666696, 0.42078276, 0.61736012, 1. ]])
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='jeffreys')
array([[ 0. , 0.0842525 , 0.21789949, 0.82788246],
[ 0.17211754, 0.42218001, 0.61753691, 1. ]])
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='flat')
array([[ 0. , 0.12139799, 0.24309021, 0.73577037],
[ 0.26422963, 0.45401727, 0.61535699, 1. ]])
In contrast, the Wald interval gives poor results for small k, N.
For k = 0 or k = N, the interval always has zero length.
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='wald')
array([[ 0. , 0.02111437, 0.18091075, 1. ],
[ 0. , 0.37888563, 0.61908925, 1. ]])
For confidence intervals approaching 1, the Wald interval for
0 < k < N can give intervals that extend outside [0, 1]:
>>> binom_conf_interval([0, 1, 2, 5], 5, interval='wald', conf=0.99)
array([[ 0. , -0.26077835, -0.16433593, 1. ],
[ 0. , 0.66077835, 0.96433593, 1. ]])
"""
if conf < 0. or conf > 1.:
raise ValueError('conf must be between 0. and 1.')
alpha = 1. - conf
k = np.asarray(k).astype(int)
n = np.asarray(n).astype(int)
if (n <= 0).any():
raise ValueError('n must be positive')
if (k < 0).any() or (k > n).any():
        raise ValueError('k must be in {0, 1, ..., n}')
if interval == 'wilson' or interval == 'wald':
from scipy.special import erfinv
kappa = np.sqrt(2.) * min(erfinv(conf), 1.e10) # Avoid overflows.
k = k.astype(float)
n = n.astype(float)
p = k / n
if interval == 'wilson':
midpoint = (k + kappa ** 2 / 2.) / (n + kappa ** 2)
halflength = (kappa * np.sqrt(n)) / (n + kappa ** 2) * \
np.sqrt(p * (1 - p) + kappa ** 2 / (4 * n))
conf_interval = np.array([midpoint - halflength,
midpoint + halflength])
# Correct intervals out of range due to floating point errors.
conf_interval[conf_interval < 0.] = 0.
conf_interval[conf_interval > 1.] = 1.
else:
midpoint = p
halflength = kappa * np.sqrt(p * (1. - p) / n)
conf_interval = np.array([midpoint - halflength,
midpoint + halflength])
elif interval == 'jeffreys' or interval == 'flat':
from scipy.special import betaincinv
if interval == 'jeffreys':
lowerbound = betaincinv(k + 0.5, n - k + 0.5, 0.5 * alpha)
upperbound = betaincinv(k + 0.5, n - k + 0.5, 1. - 0.5 * alpha)
else:
lowerbound = betaincinv(k + 1, n - k + 1, 0.5 * alpha)
upperbound = betaincinv(k + 1, n - k + 1, 1. - 0.5 * alpha)
# Set lower or upper bound to k/n when k/n = 0 or 1
# We have to treat the special case of k/n being scalars,
# which is an ugly kludge
if lowerbound.ndim == 0:
if k == 0:
lowerbound = 0.
elif k == n:
upperbound = 1.
else:
lowerbound[k == 0] = 0
upperbound[k == n] = 1
conf_interval = np.array([lowerbound, upperbound])
else:
raise ValueError('Unrecognized interval: {0:s}'.format(interval))
return conf_interval
# TODO Note scipy dependency (needed in binom_conf_interval)
def binned_binom_proportion(x, success, bins=10, range=None, conf=0.68269,
interval='wilson'):
"""Binomial proportion and confidence interval in bins of a continuous
variable ``x``.
Given a set of datapoint pairs where the ``x`` values are
continuously distributed and the ``success`` values are binomial
("success / failure" or "true / false"), place the pairs into
bins according to ``x`` value and calculate the binomial proportion
(fraction of successes) and confidence interval in each bin.
Parameters
----------
x : list_like
Values.
success : list_like (bool)
Success (`True`) or failure (`False`) corresponding to each value
in ``x``. Must be same length as ``x``.
bins : int or sequence of scalars, optional
If bins is an int, it defines the number of equal-width bins
in the given range (10, by default). If bins is a sequence, it
defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths (in this case, 'range' is ignored).
range : (float, float), optional
The lower and upper range of the bins. If `None` (default),
the range is set to ``(x.min(), x.max())``. Values outside the
range are ignored.
conf : float in [0, 1], optional
Desired probability content in the confidence
interval ``(p - perr[0], p + perr[1])`` in each bin. Default is
0.68269.
interval : {'wilson', 'jeffreys', 'flat', 'wald'}, optional
Formula used to calculate confidence interval on the
binomial proportion in each bin. See `binom_conf_interval` for
definition of the intervals. The 'wilson', 'jeffreys',
and 'flat' intervals generally give similar results. 'wilson'
should be somewhat faster, while 'jeffreys' and 'flat' are
marginally superior, but differ in the assumed prior.
The 'wald' interval is generally not recommended.
It is provided for comparison purposes. Default is 'wilson'.
Returns
-------
bin_ctr : numpy.ndarray
Central value of bins. Bins without any entries are not returned.
bin_halfwidth : numpy.ndarray
Half-width of each bin such that ``bin_ctr - bin_halfwidth`` and
        ``bin_ctr + bin_halfwidth`` give the left and right side of each bin,
respectively.
p : numpy.ndarray
Efficiency in each bin.
perr : numpy.ndarray
2-d array of shape (2, len(p)) representing the upper and lower
uncertainty on p in each bin.
See Also
--------
binom_conf_interval : Function used to estimate confidence interval in
each bin.
Examples
--------
Suppose we wish to estimate the efficiency of a survey in
detecting astronomical sources as a function of magnitude (i.e.,
the probability of detecting a source given its magnitude). In a
realistic case, we might prepare a large number of sources with
randomly selected magnitudes, inject them into simulated images,
and then record which were detected at the end of the reduction
pipeline. As a toy example, we generate 100 data points with
randomly selected magnitudes between 20 and 30 and "observe" them
with a known detection function (here, the error function, with
50% detection probability at magnitude 25):
>>> from scipy.special import erf
>>> from scipy.stats.distributions import binom
>>> def true_efficiency(x):
... return 0.5 - 0.5 * erf((x - 25.) / 2.)
>>> mag = 20. + 10. * np.random.rand(100)
>>> detected = binom.rvs(1, true_efficiency(mag))
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20)
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('Detection efficiency vs magnitude')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
The above example uses the Wilson confidence interval to calculate
the uncertainty ``perr`` in each bin (see the definition of various
confidence intervals in `binom_conf_interval`). A commonly used
alternative is the Wald interval. However, the Wald interval can
give nonsensical uncertainties when the efficiency is near 0 or 1,
and is therefore **not** recommended. As an illustration, the
following example shows the same data as above but uses the Wald
interval rather than the Wilson interval to calculate ``perr``:
>>> bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
... interval='wald')
>>> plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
... label='estimate')
.. plot::
import numpy as np
from scipy.special import erf
from scipy.stats.distributions import binom
import matplotlib.pyplot as plt
from astropy.stats import binned_binom_proportion
def true_efficiency(x):
return 0.5 - 0.5 * erf((x - 25.) / 2.)
np.random.seed(400)
mag = 20. + 10. * np.random.rand(100)
np.random.seed(600)
detected = binom.rvs(1, true_efficiency(mag))
bins, binshw, p, perr = binned_binom_proportion(mag, detected, bins=20,
interval='wald')
plt.errorbar(bins, p, xerr=binshw, yerr=perr, ls='none', marker='o',
label='estimate')
X = np.linspace(20., 30., 1000)
plt.plot(X, true_efficiency(X), label='true efficiency')
plt.ylim(0., 1.)
plt.title('The Wald interval can give nonsensical uncertainties')
plt.xlabel('Magnitude')
plt.ylabel('Detection efficiency')
plt.legend()
plt.show()
"""
x = np.ravel(x)
success = np.ravel(success).astype(bool)
if x.shape != success.shape:
raise ValueError('sizes of x and success must match')
# Put values into a histogram (`n`). Put "successful" values
# into a second histogram (`k`) with identical binning.
n, bin_edges = np.histogram(x, bins=bins, range=range)
k, bin_edges = np.histogram(x[success], bins=bin_edges)
bin_ctr = (bin_edges[:-1] + bin_edges[1:]) / 2.
bin_halfwidth = bin_ctr - bin_edges[:-1]
# Remove bins with zero entries.
valid = n > 0
bin_ctr = bin_ctr[valid]
bin_halfwidth = bin_halfwidth[valid]
n = n[valid]
k = k[valid]
p = k / n
bounds = binom_conf_interval(k, n, conf=conf, interval=interval)
perr = np.abs(bounds - p)
return bin_ctr, bin_halfwidth, p, perr
def _check_poisson_conf_inputs(sigma, background, conflevel, name):
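    # Shared guard: these interval modes support only the default
    # sigma=1, background=0, and conflevel=None.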
if sigma != 1:
raise ValueError("Only sigma=1 supported for interval {0}"
.format(name))
if background != 0:
raise ValueError("background not supported for interval {0}"
.format(name))
if conflevel is not None:
raise ValueError("conflevel not supported for interval {0}"
.format(name))
def poisson_conf_interval(n, interval='root-n', sigma=1, background=0,
conflevel=None):
r"""Poisson parameter confidence interval given observed counts
Parameters
----------
n : int or numpy.ndarray
Number of counts (0 <= ``n``).
interval : {'root-n','root-n-0','pearson','sherpagehrels','frequentist-confidence', 'kraft-burrows-nousek'}, optional
Formula used for confidence interval. See notes for details.
Default is ``'root-n'``.
sigma : float, optional
Number of sigma for confidence interval; only supported for
the 'frequentist-confidence' mode.
background : float, optional
Number of counts expected from the background; only supported for
the 'kraft-burrows-nousek' mode. This number is assumed to be determined
from a large region so that the uncertainty on its value is negligible.
conflevel : float, optional
Confidence level between 0 and 1; only supported for the
'kraft-burrows-nousek' mode.
Returns
-------
conf_interval : numpy.ndarray
``conf_interval[0]`` and ``conf_interval[1]`` correspond to the lower
and upper limits, respectively, for each element in ``n``.
Notes
-----
The "right" confidence interval to use for Poisson data is a
matter of debate. The CDF working group [recommends][pois_eb]
using root-n throughout, largely in the interest of
comprehensibility, but discusses other possibilities. The ATLAS
group also [discusses][ErrorBars] several possibilities but
concludes that no single representation is suitable for all cases.
The suggestion has also been [floated][ac12] that error bars should be
attached to theoretical predictions instead of observed data,
which this function will not help with (but it's easy; then you
really should use the square root of the theoretical prediction).
The intervals implemented here are:
**1. 'root-n'** This is a very widely used standard rule derived
from the maximum-likelihood estimator for the mean of the Poisson
process. While it produces questionable results for small n and
outright wrong results for n=0, it is standard enough that people are
(supposedly) used to interpreting these wonky values. The interval is
.. math::
CI = (n-\sqrt{n}, n+\sqrt{n})
**2. 'root-n-0'** This is identical to the above except that where
n is zero the interval returned is (0,1).
**3. 'pearson'** This is an only-slightly-more-complicated rule
based on Pearson's chi-squared rule (as [explained][pois_eb] by
the CDF working group). It also has the nice feature that
if your theory curve touches an endpoint of the interval, then your
data point is indeed one sigma away. The interval is
.. math::
CI = (n+0.5-\sqrt{n+0.25}, n+0.5+\sqrt{n+0.25})
**4. 'sherpagehrels'** This rule is used by default in the fitting
package 'sherpa'. The [documentation][sherpa_gehrels] claims it is
based on a numerical approximation published in
[Gehrels 1986][gehrels86] but it does not actually appear there.
It is symmetrical, and while the upper limits
are within about 1% of those given by 'frequentist-confidence', the
lower limits can be badly wrong. The interval is
.. math::
CI = (n-1-\sqrt{n+0.75}, n+1+\sqrt{n+0.75})
**5. 'frequentist-confidence'** These are frequentist central
confidence intervals:
.. math::
CI = (0.5 F_{\chi^2}^{-1}(\alpha;2n),
0.5 F_{\chi^2}^{-1}(1-\alpha;2(n+1)))
where :math:`F_{\chi^2}^{-1}` is the quantile of the chi-square
distribution with the indicated number of degrees of freedom and
:math:`\alpha` is the one-tailed probability of the normal
distribution (at the point given by the parameter 'sigma'). See
[Maxwell 2011][maxw11] for further details.
**6. 'kraft-burrows-nousek'** This is a Bayesian approach which allows
for the presence of a known background :math:`B` in the source signal
:math:`N`.
For a given confidence level :math:`CL` the confidence interval
:math:`[S_\mathrm{min}, S_\mathrm{max}]` is given by:
.. math::
CL = \int^{S_\mathrm{max}}_{S_\mathrm{min}} f_{N,B}(S)dS
where the function :math:`f_{N,B}` is:
.. math::
f_{N,B}(S) = C \frac{e^{-(S+B)}(S+B)^N}{N!}
and the normalization constant :math:`C`:
.. math::
C = \left[ \int_0^\infty \frac{e^{-(S+B)}(S+B)^N}{N!} dS \right] ^{-1}
= \left( \sum^N_{n=0} \frac{e^{-B}B^n}{n!} \right)^{-1}
See [KraftBurrowsNousek][kbn1991] for further details.
These formulas implement a positive, uniform prior.
[KraftBurrowsNousek][kbn1991] discuss this choice in more detail and show
that the problem is relatively insensitive to the choice of prior.
    This function has an optional dependency: either scipy or
`mpmath <http://mpmath.org/>`_ need to be available. (Scipy only works for
N < 100).
Examples
--------
>>> poisson_conf_interval(np.arange(10), interval='root-n').T
array([[ 0. , 0. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='root-n-0').T
array([[ 0. , 1. ],
[ 0. , 2. ],
[ 0.58578644, 3.41421356],
[ 1.26794919, 4.73205081],
[ 2. , 6. ],
[ 2.76393202, 7.23606798],
[ 3.55051026, 8.44948974],
[ 4.35424869, 9.64575131],
[ 5.17157288, 10.82842712],
[ 6. , 12. ]])
>>> poisson_conf_interval(np.arange(10), interval='pearson').T
array([[ 0. , 1. ],
[ 0.38196601, 2.61803399],
[ 1. , 4. ],
[ 1.69722436, 5.30277564],
[ 2.43844719, 6.56155281],
[ 3.20871215, 7.79128785],
[ 4. , 9. ],
[ 4.8074176 , 10.1925824 ],
[ 5.62771868, 11.37228132],
[ 6.45861873, 12.54138127]])
>>> poisson_conf_interval(np.arange(10),
... interval='frequentist-confidence').T
array([[ 0. , 1.84102165],
[ 0.17275378, 3.29952656],
[ 0.70818544, 4.63785962],
[ 1.36729531, 5.91818583],
[ 2.08566081, 7.16275317],
[ 2.84030886, 8.38247265],
[ 3.62006862, 9.58364155],
[ 4.41852954, 10.77028072],
[ 5.23161394, 11.94514152],
[ 6.05653896, 13.11020414]])
>>> poisson_conf_interval(7,
... interval='frequentist-confidence').T
array([ 4.41852954, 10.77028072])
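The symmetric 'sherpagehrels' interval for the same count follows
directly from the formula above (an illustrative value):
>>> poisson_conf_interval(7, interval='sherpagehrels').T # doctest: +FLOAT_CMP
array([ 3.21611782, 10.78388218])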
>>> poisson_conf_interval(10, background=1.5, conflevel=0.95,
...                       interval='kraft-burrows-nousek').T # doctest: +FLOAT_CMP
array([[ 3.47894005, 16.113329533]])
[pois_eb]: http://www-cdf.fnal.gov/physics/statistics/notes/pois_eb.txt
[ErrorBars]: http://www.pp.rhul.ac.uk/~cowan/atlas/ErrorBars.pdf
[ac12]: http://adsabs.harvard.edu/abs/2012EPJP..127...24A
[maxw11]: http://adsabs.harvard.edu/abs/2011arXiv1102.0822M
[gehrels86]: http://adsabs.harvard.edu/abs/1986ApJ...303..336G
[sherpa_gehrels]: http://cxc.harvard.edu/sherpa4.4/statistics/#chigehrels
[kbn1991]: http://adsabs.harvard.edu/abs/1991ApJ...374..344K
"""
if not np.isscalar(n):
n = np.asanyarray(n)
if interval == 'root-n':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n - np.sqrt(n),
n + np.sqrt(n)])
elif interval == 'root-n-0':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n - np.sqrt(n),
n + np.sqrt(n)])
if np.isscalar(n):
if n == 0:
conf_interval[1] = 1
else:
conf_interval[1, n == 0] = 1
elif interval == 'pearson':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n + 0.5 - np.sqrt(n + 0.25),
n + 0.5 + np.sqrt(n + 0.25)])
elif interval == 'sherpagehrels':
_check_poisson_conf_inputs(sigma, background, conflevel, interval)
conf_interval = np.array([n - 1 - np.sqrt(n + 0.75),
n + 1 + np.sqrt(n + 0.75)])
elif interval == 'frequentist-confidence':
_check_poisson_conf_inputs(1., background, conflevel, interval)
import scipy.stats
alpha = scipy.stats.norm.sf(sigma)
conf_interval = np.array([0.5 * scipy.stats.chi2(2 * n).ppf(alpha),
0.5 * scipy.stats.chi2(2 * n + 2).isf(alpha)])
if np.isscalar(n):
if n == 0:
conf_interval[0] = 0
else:
conf_interval[0, n == 0] = 0
elif interval == 'kraft-burrows-nousek':
if conflevel is None:
raise ValueError('Set conflevel for method {0}. (sigma is '
'ignored.)'.format(interval))
conflevel = np.asanyarray(conflevel)
if np.any(conflevel <= 0) or np.any(conflevel >= 1):
raise ValueError('Conflevel must be a number between 0 and 1.')
background = np.asanyarray(background)
if np.any(background < 0):
raise ValueError('Background must be >= 0.')
conf_interval = np.vectorize(_kraft_burrows_nousek,
cache=True)(n, background, conflevel)
conf_interval = np.vstack(conf_interval)
else:
raise ValueError("Invalid method for Poisson confidence intervals: "
"{}".format(interval))
return conf_interval
@deprecated_renamed_argument('a', 'data', '2.0')
def median_absolute_deviation(data, axis=None, func=None, ignore_nan=False):
"""
Calculate the median absolute deviation (MAD).
The MAD is defined as ``median(abs(data - median(data)))``.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis along which the MADs are computed. The default (`None`) is
to compute the MAD of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version
is >1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad : float or `~numpy.ndarray`
The median absolute deviation of the input array. If ``axis``
is `None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
Generate random variates from a Gaussian distribution and return the
median absolute deviation for that distribution::
>>> import numpy as np
>>> from astropy.stats import median_absolute_deviation
>>> rand = np.random.RandomState(12345)
>>> mad = median_absolute_deviation(rand.randn(1000))
>>> print(mad) # doctest: +FLOAT_CMP
0.65244241428454486
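NaN values can be ignored (a small illustrative case; the single NaN is
simply dropped from both median computations):
>>> mad = median_absolute_deviation([1., 2., 3., np.nan], ignore_nan=True)
>>> print(mad)
1.0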
See Also
--------
mad_std
"""
if func is None:
# Check if the array has a mask and if so use np.ma.median
# See https://github.com/numpy/numpy/issues/7330 why using np.ma.median
# for normal arrays should not be done (summary: np.ma.median always
# returns an masked array even if the result should be scalar). (#4658)
if isinstance(data, np.ma.MaskedArray):
is_masked = True
func = np.ma.median
if ignore_nan:
data = np.ma.masked_invalid(data)
elif ignore_nan:
is_masked = False
func = np.nanmedian
else:
is_masked = False
func = np.median
else:
is_masked = None
data = np.asanyarray(data)
# np.nanmedian has `keepdims`, which is a good option if we're not allowing
# user-passed functions here
data_median = func(data, axis=axis)
# broadcast the median array before subtraction
if axis is not None:
if isiterable(axis):
for ax in sorted(list(axis)):
data_median = np.expand_dims(data_median, axis=ax)
else:
data_median = np.expand_dims(data_median, axis=axis)
result = func(np.abs(data - data_median), axis=axis, overwrite_input=True)
if axis is None and np.ma.isMaskedArray(result):
# return scalar version
result = result.item()
elif np.ma.isMaskedArray(result) and not is_masked:
# if the input array was not a masked array, we don't want to return a
# masked array
result = result.filled(fill_value=np.nan)
return result
def mad_std(data, axis=None, func=None, ignore_nan=False):
r"""
Calculate a robust standard deviation using the `median absolute
deviation (MAD)
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_.
The standard deviation estimator is given by:
.. math::
\sigma \approx \frac{\textrm{MAD}}{\Phi^{-1}(3/4)}
\approx 1.4826 \ \textrm{MAD}
where :math:`\Phi^{-1}(P)` is the normal inverse cumulative
distribution function evaluated at probability :math:`P = 3/4`.
Parameters
----------
data : array-like
Data array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis along which the robust standard deviations are computed.
The default (`None`) is to compute the robust standard deviation
of the flattened array.
func : callable, optional
The function used to compute the median. Defaults to `numpy.ma.median`
for masked arrays, otherwise to `numpy.median`.
ignore_nan : bool
Ignore NaN values (treat them as if they are not in the array) when
computing the median. This will use `numpy.ma.median` if ``axis`` is
specified, or `numpy.nanmedian` if ``axis=None`` and numpy's version is
>1.10 because nanmedian is slightly faster in this case.
Returns
-------
mad_std : float or `~numpy.ndarray`
The robust standard deviation of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
Examples
--------
>>> import numpy as np
>>> from astropy.stats import mad_std
>>> rand = np.random.RandomState(12345)
>>> madstd = mad_std(rand.normal(5, 2, (100, 100)))
>>> print(madstd) # doctest: +FLOAT_CMP
2.0232764659422626
See Also
--------
biweight_midvariance, biweight_midcovariance, median_absolute_deviation
"""
# NOTE: 1. / scipy.stats.norm.ppf(0.75) = 1.482602218505602
MAD = median_absolute_deviation(
data, axis=axis, func=func, ignore_nan=ignore_nan)
return MAD * 1.482602218505602
def signal_to_noise_oir_ccd(t, source_eps, sky_eps, dark_eps, rd, npix,
gain=1.0):
"""Computes the signal to noise ratio for source being observed in the
optical/IR using a CCD.
Parameters
----------
t : float or numpy.ndarray
CCD integration time in seconds
source_eps : float
Number of electrons (photons) or DN per second in the aperture from the
source. Note that this should already have been scaled by the filter
transmission and the quantum efficiency of the CCD. If the input is in
DN, then be sure to set the gain to the proper value for the CCD.
If the input is in electrons per second, then keep the gain as its
default of 1.0.
sky_eps : float
Number of electrons (photons) or DN per second per pixel from the sky
background. Should already be scaled by filter transmission and QE.
This must be in the same units as source_eps for the calculation to
make sense.
dark_eps : float
Number of thermal electrons per second per pixel. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
rd : float
Read noise of the CCD in electrons. If this is given in
DN or ADU, then multiply by the gain to get the value in electrons.
npix : float
Size of the aperture in pixels
gain : float, optional
Gain of the CCD. In units of electrons per DN.
Returns
-------
SNR : float or numpy.ndarray
Signal to noise ratio calculated from the inputs
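Examples
--------
A rough illustrative estimate; all detector and source values below are
arbitrary:
>>> from astropy.stats import signal_to_noise_oir_ccd
>>> snr = signal_to_noise_oir_ccd(t=1000., source_eps=1., sky_eps=0.2,
...                               dark_eps=0.01, rd=5., npix=10)
>>> print('{0:.2f}'.format(snr))
17.28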
"""
signal = t * source_eps * gain
noise = np.sqrt(t * (source_eps * gain + npix *
(sky_eps * gain + dark_eps)) + npix * rd ** 2)
return signal / noise
def bootstrap(data, bootnum=100, samples=None, bootfunc=None):
"""Performs bootstrap resampling on numpy arrays.
Bootstrap resampling is used to understand confidence intervals of sample
estimates. This function returns versions of the dataset resampled with
replacement ("case bootstrapping"). These can all be run through a function
or statistic to produce a distribution of values which can then be used to
find the confidence intervals.
Parameters
----------
data : numpy.ndarray
N-D array. The bootstrap resampling will be performed on the first
index, so the first index should access the relevant information
to be bootstrapped.
bootnum : int, optional
Number of bootstrap resamples
samples : int, optional
Number of samples in each resample. The default `None` sets samples to
the number of datapoints
bootfunc : function, optional
Function to reduce the resampled data. Each bootstrap resample will
be put through this function and the results returned. If `None`, the
bootstrapped data will be returned
Returns
-------
boot : numpy.ndarray
If bootfunc is None, then each row is a bootstrap resample of the data.
If bootfunc is specified, then the columns will correspond to the
outputs of bootfunc.
Examples
--------
Obtain a twice resampled array:
>>> from astropy.stats import bootstrap
>>> import numpy as np
>>> from astropy.utils import NumpyRNGContext
>>> bootarr = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2)
...
>>> bootresult # doctest: +FLOAT_CMP
array([[6., 9., 0., 6., 1., 1., 2., 8., 7., 0.],
[3., 5., 6., 3., 5., 3., 5., 8., 8., 0.]])
>>> bootresult.shape
(2, 10)
Obtain a statistic on the array
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 2, bootfunc=np.mean)
...
>>> bootresult # doctest: +FLOAT_CMP
array([4. , 4.6])
Obtain a statistic with two outputs on the array
>>> test_statistic = lambda x: (np.sum(x), np.mean(x))
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=test_statistic)
>>> bootresult # doctest: +FLOAT_CMP
array([[40. , 4. ],
[46. , 4.6],
[35. , 3.5]])
>>> bootresult.shape
(3, 2)
Obtain a statistic with two outputs on the array, keeping only the first
output
>>> bootfunc = lambda x: test_statistic(x)[0]
>>> with NumpyRNGContext(1):
... bootresult = bootstrap(bootarr, 3, bootfunc=bootfunc)
...
>>> bootresult # doctest: +FLOAT_CMP
array([40., 46., 35.])
>>> bootresult.shape
(3,)
"""
if samples is None:
samples = data.shape[0]
# make sure the input is sane
if samples < 1 or bootnum < 1:
raise ValueError("neither 'samples' nor 'bootnum' can be less than 1.")
if bootfunc is None:
resultdims = (bootnum,) + (samples,) + data.shape[1:]
else:
# test number of outputs from bootfunc, avoid single outputs which are
# array-like
try:
resultdims = (bootnum, len(bootfunc(data)))
except TypeError:
resultdims = (bootnum,)
# create empty boot array
boot = np.empty(resultdims)
for i in range(bootnum):
bootarr = np.random.randint(low=0, high=data.shape[0], size=samples)
if bootfunc is None:
boot[i] = data[bootarr]
else:
boot[i] = bootfunc(data[bootarr])
return boot
def _scipy_kraft_burrows_nousek(N, B, CL):
'''Upper limit on a Poisson count rate
The implementation is based on Kraft, Burrows and Nousek
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int
Total observed count number
B : float
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float
Confidence level (number between 0 and 1)
Returns
-------
S_min, S_max : float
Lower and upper limits on the source count.
Notes
-----
Requires `scipy`. This implementation will cause Overflow Errors for about
N > 100 (the exact limit depends on details of how scipy was compiled).
See `~astropy.stats.mpmath_poisson_upper_limit` for an implementation that
is slower, but can deal with arbitrarily high numbers since it is based on
the `mpmath <http://mpmath.org/>`_ library.
'''
from scipy.optimize import brentq
from scipy.integrate import quad
from math import exp
def eqn8(N, B):
n = np.arange(N + 1, dtype=np.float64)
# Create an array containing the factorials. scipy.special.factorial
# requires SciPy 0.14 (#5064) therefore this is calculated by using
# numpy.cumprod. This could be replaced by factorial again as soon as
# older SciPy are not supported anymore but the cumprod alternative
# might also be a bit faster.
factorial_n = np.ones(n.shape, dtype=np.float64)
np.cumprod(n[1:], out=factorial_n[1:])
return 1. / (exp(-B) * np.sum(np.power(B, n) / factorial_n))
# The parameters of eqn8 do not vary between calls so we can calculate
# the result once and reuse it. The same is true for the factorial of N.
# eqn7 is called hundreds of times so "caching" these values yields a
# significant speedup (factor 10).
eqn8_res = eqn8(N, B)
factorial_N = float(math.factorial(N))
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
return quad(eqn7, S_min, S_max, args=(N, B), limit=500)
def find_s_min(S_max, N, B):
'''
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching S_min such that eqn7(S_min) =
eqn7(S_max).
'''
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.
else:
return brentq(lambda x: eqn7(x, N, B) - y_S_max, 0, N - B)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out[0] - CL
S_max = brentq(func, N - B, 100)
S_min = find_s_min(S_max, N, B)
return S_min, S_max
def _mpmath_kraft_burrows_nousek(N, B, CL):
'''Upper limit on a Poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int
Total observed count number
B : float
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float
Confidence level (number between 0 and 1)
Returns
-------
S_min, S_max : float
Lower and upper limits on the source count.
Notes
-----
Requires the `mpmath <http://mpmath.org/>`_ library. See
`~astropy.stats.scipy_poisson_upper_limit` for an implementation
that is based on scipy and evaluates faster, but runs only to about
N = 100.
'''
from mpmath import mpf, factorial, findroot, fsum, power, exp, quad
N = mpf(N)
B = mpf(B)
CL = mpf(CL)
def eqn8(N, B):
sumterms = [power(B, n) / factorial(n) for n in range(int(N) + 1)]
return 1. / (exp(-B) * fsum(sumterms))
eqn8_res = eqn8(N, B)
factorial_N = factorial(N)
def eqn7(S, N, B):
SpB = S + B
return eqn8_res * (exp(-SpB) * SpB**N / factorial_N)
def eqn9_left(S_min, S_max, N, B):
def eqn7NB(S):
return eqn7(S, N, B)
return quad(eqn7NB, [S_min, S_max])
def find_s_min(S_max, N, B):
'''
Kraft, Burrows and Nousek suggest integrating from N-B in both
directions at once, so that S_min and S_max move similarly (see
the article for details). Here, this is implemented differently:
treat S_max as the optimization parameter in func and then
calculate the matching S_min such that eqn7(S_min) =
eqn7(S_max).
'''
y_S_max = eqn7(S_max, N, B)
if eqn7(0, N, B) >= y_S_max:
return 0.
else:
def eqn7ysmax(x):
return eqn7(x, N, B) - y_S_max
return findroot(eqn7ysmax, (N - B) / 2.)
def func(s):
s_min = find_s_min(s, N, B)
out = eqn9_left(s_min, s, N, B)
return out - CL
S_max = findroot(func, N - B, tol=1e-4)
S_min = find_s_min(S_max, N, B)
return float(S_min), float(S_max)
def _kraft_burrows_nousek(N, B, CL):
'''Upper limit on a Poisson count rate
The implementation is based on Kraft, Burrows and Nousek in
`ApJ 374, 344 (1991) <http://adsabs.harvard.edu/abs/1991ApJ...374..344K>`_.
The XMM-Newton upper limit server uses the same formalism.
Parameters
----------
N : int
Total observed count number
B : float
Background count rate (assumed to be known with negligible error
from a large background area).
CL : float
Confidence level (number between 0 and 1)
Returns
-------
S_min, S_max : float
Lower and upper limits on the source count.
Notes
-----
This function has an optional dependency: either `scipy` or `mpmath
<http://mpmath.org/>`_ needs to be available (scipy only works for
N < 100).
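Examples
--------
A minimal illustrative call (the input values here are arbitrary):
>>> S_min, S_max = _kraft_burrows_nousek(5, 2.5, .99)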
'''
try:
import scipy
HAS_SCIPY = True
except ImportError:
HAS_SCIPY = False
try:
import mpmath
HAS_MPMATH = True
except ImportError:
HAS_MPMATH = False
if HAS_SCIPY and N <= 100:
try:
return _scipy_kraft_burrows_nousek(N, B, CL)
except OverflowError:
if not HAS_MPMATH:
raise ValueError('Need mpmath package for input numbers this '
'large.')
if HAS_MPMATH:
return _mpmath_kraft_burrows_nousek(N, B, CL)
raise ImportError('Either scipy or mpmath are required.')
def kuiper_false_positive_probability(D, N):
"""Compute the false positive probability for the Kuiper statistic.
Uses the set of four formulas described in Paltani 2004; they report
the resulting function never underestimates the false positive
probability but can be a bit high in the N=40..50 range.
(They quote a factor 1.5 at the 1e-7 level.)
Parameters
----------
D : float
The Kuiper test score.
N : float
The effective sample size.
Returns
-------
fpp : float
The probability of a score this large arising from the null hypothesis.
Notes
-----
Eq 7 of Paltani 2004 appears to incorrectly quote the original formula
(Stephens 1965). This function implements the original formula, as it
produces a result closer to Monte Carlo simulations.
References
----------
.. [1] Paltani, S., "Searching for periods in X-ray observations using
Kuiper's test. Application to the ROSAT PSPC archive",
Astronomy and Astrophysics, v.420, p.789-790, 2004.
.. [2] Stephens, M. A., "The goodness-of-fit statistic VN: distribution
and significance points", Biometrika, v.52, p.309, 1965.
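Examples
--------
An illustrative check in the small-``D`` regime (``D < 2/N``), where the
closed form 1 - N!(D - 1/N)^(N-1) applies:
>>> print('{0:.3f}'.format(kuiper_false_positive_probability(0.3, 5)))
0.988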
"""
try:
from scipy.special import factorial, comb
except ImportError:
# Retained for backwards compatibility with older versions of scipy
# (factorial appears to have moved here in 0.14)
from scipy.misc import factorial, comb
if D < 0. or D > 2.:
raise ValueError("Must have 0<=D<=2 by definition of the Kuiper test")
if D < 2. / N:
return 1. - factorial(N) * (D - 1. / N)**(N - 1)
elif D < 3. / N:
k = -(N * D - 1.) / 2.
r = np.sqrt(k**2 - (N * D - 2.)**2 / 2.)
a, b = -k + r, -k - r
return 1 - (factorial(N - 1) * (b**(N - 1) * (1 - a) - a**(N - 1) * (1 - b))
/ N**(N - 2) / (b - a))
elif (D > 0.5 and N % 2 == 0) or (D > (N - 1.) / (2. * N) and N % 2 == 1):
# NOTE: the upper limit of this sum is taken from Stephens 1965
t = np.arange(np.floor(N * (1 - D)) + 1)
y = D + t / N
Tt = y**(t - 3) * (y**3 * N
- y**2 * t * (3 - 2 / N)
+ y * t * (t - 1) * (3 - 2 / N) / N
- t * (t - 1) * (t - 2) / N**2)
term = Tt * comb(N, t) * (1 - D - t / N)**(N - t - 1)
return term.sum()
else:
z = D * np.sqrt(N)
# When m*z>18.82 (sqrt(-log(finfo(double))/2)), exp(-2m**2z**2)
# underflows. Cutting off just before avoids triggering a (pointless)
# underflow warning if `under="warn"`.
ms = np.arange(1, 18.82 / z)
S1 = (2 * (4 * ms**2 * z**2 - 1) * np.exp(-2 * ms**2 * z**2)).sum()
S2 = (ms**2 * (4 * ms**2 * z**2 - 3) * np.exp(-2 * ms**2 * z**2)).sum()
return S1 - 8 * D / 3 * S2
def kuiper(data, cdf=lambda x: x, args=()):
"""Compute the Kuiper statistic.
Use the Kuiper statistic version of the Kolmogorov-Smirnov test to
find the probability that a sample like ``data`` was drawn from the
distribution whose CDF is given as ``cdf``.
.. warning::
This will not work correctly for distributions that are actually
discrete (Poisson, for example).
Parameters
----------
data : array-like
The data values.
cdf : callable
A callable to evaluate the CDF of the distribution being tested
against. Will be called with a vector of all values at once.
The default is a uniform distribution.
args : list-like, optional
Additional arguments to be supplied to cdf.
Returns
-------
D : float
The raw statistic.
fpp : float
The probability of a D this large arising with a sample drawn from
the distribution whose CDF is cdf.
Notes
-----
The Kuiper statistic resembles the Kolmogorov-Smirnov test in that
it is nonparametric and invariant under reparameterizations of the data.
The Kuiper statistic, in addition, is equally sensitive throughout
the domain, and it is also invariant under cyclic permutations (making
it particularly appropriate for analyzing circular data).
Returns (D, fpp), where D is the Kuiper D number and fpp is the
probability that a value as large as D would occur if the data were
drawn from cdf.
.. warning::
The fpp is calculated only approximately, and it can be
as much as 1.5 times the true value.
Stephens 1970 claims this is more effective than the KS at detecting
changes in the variance of a distribution; the KS is (he claims) more
sensitive at detecting changes in the mean.
If cdf was obtained from data by fitting, then fpp is not correct and
it will be necessary to do Monte Carlo simulations to interpret D.
D should normally be independent of the shape of CDF.
References
----------
.. [1] Stephens, M. A., "Use of the Kolmogorov-Smirnov, Cramer-Von Mises
and Related Statistics Without Extensive Tables", Journal of the
Royal Statistical Society. Series B (Methodological), Vol. 32,
No. 1. (1970), pp. 115-122.
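Examples
--------
Data actually drawn from the distribution under test (here the default
uniform CDF) should give a non-significant fpp; the exact values depend
on the random seed:
>>> rand = np.random.RandomState(12345)
>>> D, fpp = kuiper(rand.uniform(size=100))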
"""
data = np.sort(data)
cdfv = cdf(data, *args)
N = len(data)
D = (np.amax(cdfv - np.arange(N) / float(N)) +
np.amax((np.arange(N) + 1) / float(N) - cdfv))
return D, kuiper_false_positive_probability(D, N)
def kuiper_two(data1, data2):
"""Compute the Kuiper statistic to compare two samples.
Parameters
----------
data1 : array-like
The first set of data values.
data2 : array-like
The second set of data values.
Returns
-------
D : float
The raw test statistic.
fpp : float
The probability of obtaining two samples this different from
the same distribution.
.. warning::
The fpp is quite approximate, especially for small samples.
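Examples
--------
Compare two samples drawn from the same distribution (the exact values
depend on the random seed):
>>> rand = np.random.RandomState(12345)
>>> D, fpp = kuiper_two(rand.uniform(size=100), rand.uniform(size=100))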
"""
data1 = np.sort(data1)
data2 = np.sort(data2)
n1, = data1.shape
n2, = data2.shape
common_type = np.find_common_type([], [data1.dtype, data2.dtype])
if not (np.issubdtype(common_type, np.number)
and not np.issubdtype(common_type, np.complexfloating)):
raise ValueError('kuiper_two only accepts real inputs')
# nans, if any, are at the end after sorting.
if np.isnan(data1[-1]) or np.isnan(data2[-1]):
raise ValueError('kuiper_two only accepts non-nan inputs')
D = _stats.ks_2samp(np.asarray(data1, common_type),
np.asarray(data2, common_type))
Ne = len(data1) * len(data2) / float(len(data1) + len(data2))
return D, kuiper_false_positive_probability(D, Ne)
def fold_intervals(intervals):
"""Fold the weighted intervals to the interval (0,1).
Convert a list of intervals (ai, bi, wi) to a list of non-overlapping
intervals covering (0,1). Each output interval has a weight equal
to the sum of the wis of all the intervals that include it. All intervals
are interpreted modulo 1, and weights are accumulated counting
multiplicity. This is appropriate, for example, if you have one or more
blocks of observation and you want to determine how much observation
time was spent on different parts of a system's orbit (the blocks
should be converted to units of the orbital period first).
Parameters
----------
intervals : list of three-element tuples (ai,bi,wi)
The intervals to fold; ai and bi are the limits of the interval, and
wi is the weight to apply to the interval.
Returns
-------
breaks : array of floats of length N
The endpoints of a set of intervals covering [0,1]; breaks[0]=0 and
breaks[-1] = 1
weights : array of floats of length N-1
The ith element is the total weight on the interval
(breaks[i], breaks[i+1]): the sum, over all input intervals, of
the number of times that subinterval is covered multiplied by the
interval's weight.
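Examples
--------
Fold a single unit-weight block that wraps past 1; the subinterval
(0.2, 0.4) is covered twice:
>>> breaks, weights = fold_intervals([(0.2, 1.4, 1.)])
>>> breaks # doctest: +FLOAT_CMP
array([0. , 0.2, 0.4, 1. ])
>>> weights # doctest: +FLOAT_CMP
array([1., 2., 1.])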
"""
r = []
breaks = set()
tot = 0
for (a, b, wt) in intervals:
tot += (np.ceil(b) - np.floor(a)) * wt
fa = a % 1
breaks.add(fa)
r.append((0, fa, -wt))
fb = b % 1
breaks.add(fb)
r.append((fb, 1, -wt))
breaks.add(0.)
breaks.add(1.)
breaks = sorted(breaks)
breaks_map = dict([(f, i) for (i, f) in enumerate(breaks)])
totals = np.zeros(len(breaks) - 1)
totals += tot
for (a, b, wt) in r:
totals[breaks_map[a]:breaks_map[b]] += wt
return np.array(breaks), totals
def cdf_from_intervals(breaks, totals):
"""Construct a callable piecewise-linear CDF from a pair of arrays.
Take a pair of arrays in the format returned by fold_intervals and
make a callable cumulative distribution function on the interval
(0,1).
Parameters
----------
breaks : array of floats of length N
The boundaries of successive intervals.
totals : array of floats of length N-1
The weight for each interval.
Returns
-------
f : callable
A cumulative distribution function corresponding to the
piecewise-constant probability distribution given by ``breaks`` and
``totals``.
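Examples
--------
A piecewise-constant density with three times as much weight on
(0.5, 1) as on (0, 0.5) (illustrative values):
>>> cdf = cdf_from_intervals(np.array([0., 0.5, 1.]), np.array([1., 3.]))
>>> float(cdf(0.5))
0.25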
"""
if breaks[0] != 0 or breaks[-1] != 1:
raise ValueError("Intervals must be restricted to [0,1]")
if np.any(np.diff(breaks) <= 0):
raise ValueError("Breaks must be strictly increasing")
if np.any(totals < 0):
raise ValueError(
"Total weights in each subinterval must be nonnegative")
if np.all(totals == 0):
raise ValueError("At least one interval must have positive exposure")
b = breaks.copy()
c = np.concatenate(((0,), np.cumsum(totals * np.diff(b))))
c /= c[-1]
return lambda x: np.interp(x, b, c, 0, 1)
def interval_overlap_length(i1, i2):
"""Compute the length of overlap of two intervals.
Parameters
----------
i1, i2 : pairs of two floats
The two intervals.
Returns
-------
l : float
The length of the overlap between the two intervals.
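Examples
--------
The intervals (0, 0.5) and (0.25, 1) overlap on (0.25, 0.5):
>>> interval_overlap_length((0., 0.5), (0.25, 1.))
0.25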
"""
(a, b) = i1
(c, d) = i2
if a < c:
if b < c:
return 0.
elif b < d:
return b - c
else:
return d - c
elif a < d:
if b < d:
return b - a
else:
return d - a
else:
return 0
def histogram_intervals(n, breaks, totals):
"""Histogram of a piecewise-constant weight function.
This function takes a piecewise-constant weight function and
computes the average weight in each histogram bin.
Parameters
----------
n : int
The number of bins
breaks : array of floats of length N
Endpoints of the intervals in the PDF
totals : array of floats of length N-1
Probability densities in each bin
Returns
-------
h : array of floats
The average weight for each bin
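Examples
--------
Average a piecewise-constant weight function (weight 2 on (0, 0.25),
weight 1 on (0.25, 1)) over two equal bins (illustrative values):
>>> histogram_intervals(2, np.array([0., 0.25, 1.]), np.array([2., 1.])) # doctest: +FLOAT_CMP
array([1.5, 1. ])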
"""
h = np.zeros(n)
start = breaks[0]
for i in range(len(totals)):
end = breaks[i + 1]
for j in range(n):
ol = interval_overlap_length((float(j) / n,
float(j + 1) / n), (start, end))
h[j] += ol / (1. / n) * totals[i]
start = end
return h
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains simple functions for model selection.
"""
import numpy as np
__all__ = ['bayesian_info_criterion', 'bayesian_info_criterion_lsq',
'akaike_info_criterion', 'akaike_info_criterion_lsq']
__doctest_requires__ = {'bayesian_info_criterion_lsq': ['scipy'],
'akaike_info_criterion_lsq': ['scipy']}
def bayesian_info_criterion(log_likelihood, n_params, n_samples):
r""" Computes the Bayesian Information Criterion (BIC) given the log of the
likelihood function evaluated at the estimated (or analytically derived)
parameters, the number of parameters, and the number of samples.
The BIC is usually applied to decide whether increasing the number of free
parameters (hence, increasing the model complexity) yields significantly
better fittings. The decision is in favor of the model with the lowest
BIC.
BIC is given as
.. math::
\mathrm{BIC} = k \ln(n) - 2L,
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters, and :math:`L` is the log likelihood function of the model
evaluated at the maximum likelihood estimate (i.e., the parameters for
which L is maximized).
When comparing two models define
:math:`\Delta \mathrm{BIC} = \mathrm{BIC}_h - \mathrm{BIC}_l`, in which
:math:`\mathrm{BIC}_h` is the higher BIC, and :math:`\mathrm{BIC}_l` is
the lower BIC. The larger :math:`\Delta \mathrm{BIC}`, the stronger
the evidence against the model with the higher BIC.
The general rule of thumb is:
:math:`0 < \Delta\mathrm{BIC} \leq 2`: weak evidence that model low is
better
:math:`2 < \Delta\mathrm{BIC} \leq 6`: moderate evidence that model low is
better
:math:`6 < \Delta\mathrm{BIC} \leq 10`: strong evidence that model low is
better
:math:`\Delta\mathrm{BIC} > 10`: very strong evidence that model low is
better
For a detailed explanation, see [1]_ - [5]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Bayesian Information Criterion.
Examples
--------
The following example was originally presented in [1]_. Consider a
Gaussian model (mu, sigma) and a t-Student model (mu, sigma, delta).
In addition, assume that the t model has presented a higher likelihood.
The question that the BIC is proposed to answer is: "Is the increase in
likelihood due to the larger number of parameters?"
>>> from astropy.stats.info_theory import bayesian_info_criterion
>>> lnL_g = -176.4
>>> lnL_t = -173.0
>>> n_params_g = 2
>>> n_params_t = 3
>>> n_samples = 100
>>> bic_g = bayesian_info_criterion(lnL_g, n_params_g, n_samples)
>>> bic_t = bayesian_info_criterion(lnL_t, n_params_t, n_samples)
>>> bic_g - bic_t # doctest: +FLOAT_CMP
2.1948298140119391
Therefore, there is moderate evidence that the increase in likelihood
for the t-Student model is due to its larger number of parameters.
References
----------
.. [1] Richards, D. Maximum Likelihood Estimation and the Bayesian
Information Criterion.
<https://hea-www.harvard.edu/astrostat/Stat310_0910/dr_20100323_mle.pdf>
.. [2] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [3] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [4] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [5] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
return n_params*np.log(n_samples) - 2.0*log_likelihood
def bayesian_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Bayesian Information Criterion (BIC) assuming that the
observations come from a Gaussian distribution.
In this case, BIC is given as
.. math::
\mathrm{BIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + k\ln(n)
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic. See [1]_ and [2]_.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
bic : float
Examples
--------
Consider the simple 1-D fitting example presented in the Astropy
modeling webpage [3]_. There, two models (Box and Gaussian) were fitted to
a source flux using the least squares statistic. However, the fits
themselves do not tell us much about which model better represents this
hypothetical source. Therefore, we apply the BIC in order to decide
in favor of a model.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import bayesian_info_criterion_lsq
>>> # Generate fake data
>>> np.random.seed(0)
>>> x = np.linspace(-5., 5., 200)
>>> y = 3 * np.exp(-0.5 * (x - 1.3)**2 / 0.8**2)
>>> y += np.random.normal(0., 0.2, x.shape)
>>> # Fit the data using a Box model.
>>> # Bounds are not really needed but included here to demonstrate usage.
>>> t_init = models.Trapezoid1D(amplitude=1., x_0=0., width=1., slope=0.5,
... bounds={"x_0": (-5., 5.)})
>>> fit_t = fitting.LevMarLSQFitter()
>>> t = fit_t(t_init, x, y)
>>> # Fit the data using a Gaussian
>>> g_init = models.Gaussian1D(amplitude=1., mean=0, stddev=1.)
>>> fit_g = fitting.LevMarLSQFitter()
>>> g = fit_g(g_init, x, y)
>>> # Compute the sums of squared residuals
>>> ssr_t = np.sum((t(x) - y)*(t(x) - y))
>>> ssr_g = np.sum((g(x) - y)*(g(x) - y))
>>> # Compute the bics
>>> bic_t = bayesian_info_criterion_lsq(ssr_t, 4, x.shape[0])
>>> bic_g = bayesian_info_criterion_lsq(ssr_g, 3, x.shape[0])
>>> bic_t - bic_g # doctest: +FLOAT_CMP
30.644474706065466
Hence, there is very strong evidence that the Gaussian model gives a
significantly better representation of the data than the Box model.
This is, obviously, expected since the true model is Gaussian.
References
----------
.. [1] Wikipedia. Bayesian Information Criterion.
<https://en.wikipedia.org/wiki/Bayesian_information_criterion>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [3] Astropy Models and Fitting
<http://docs.astropy.org/en/stable/modeling>
"""
return bayesian_info_criterion(-0.5 * n_samples * np.log(ssr / n_samples),
n_params, n_samples)
def akaike_info_criterion(log_likelihood, n_params, n_samples):
r"""
Computes the Akaike Information Criterion (AIC).
Like the Bayesian Information Criterion, the AIC is a measure of
relative fitting quality which is used for fitting evaluation and model
selection. The decision is in favor of the model with the lowest AIC.
AIC is given as
.. math::
\mathrm{AIC} = 2(k - L)
in which :math:`k` is the number of free parameters and :math:`L` is
the log likelihood function of the model evaluated at the maximum
likelihood estimate (i.e., the parameters for which L is maximized);
:math:`n`, the sample size, enters only the small-sample correction below.
If the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = 2(k - L) + \dfrac{2k(k+1)}{n - k - 1}
Rule of thumb [1]_:
:math:`\Delta\mathrm{AIC}_i = \mathrm{AIC}_i - \mathrm{AIC}_{min}`
:math:`\Delta\mathrm{AIC}_i < 2`: substantial support for model i
:math:`3 < \Delta\mathrm{AIC}_i < 7`: considerably less support for model i
:math:`\Delta\mathrm{AIC}_i > 10`: essentially no support for model i
in which :math:`\mathrm{AIC}_{min}` stands for the lower AIC among the
models which are being compared.
For detailed explanations see [1]_-[6]_.
Parameters
----------
log_likelihood : float
Logarithm of the likelihood function of the model evaluated at the
point of maxima (with respect to the parameter space).
n_params : int
Number of free parameters of the model, i.e., dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
The following example was originally presented in [2]_. Basically, two
models are compared: one with six parameters (model 1) and another
with five parameters (model 2). Despite the fact that model 2 has a
lower AIC, we could decide in favor of model 1 since the difference (in
AIC) between them is only about 1.0.
>>> n_samples = 121
>>> lnL1 = -3.54
>>> n1_params = 6
>>> lnL2 = -4.17
>>> n2_params = 5
>>> aic1 = akaike_info_criterion(lnL1, n1_params, n_samples)
>>> aic2 = akaike_info_criterion(lnL2, n2_params, n_samples)
>>> aic1 - aic2 # doctest: +FLOAT_CMP
0.9551029748283746
Therefore, we can still strongly support model 1, even though it has
more free parameters.
References
----------
.. [1] Cavanaugh, J. E. Model Selection Lecture II: The Akaike
Information Criterion.
<http://machinelearning102.pbworks.com/w/file/fetch/47699383/ms_lec_2_ho.pdf>
.. [2] Mazerolle, M. J. Making sense out of Akaike's Information
Criterion (AIC): its use and interpretation in model selection and
inference from ecological data.
<http://theses.ulaval.ca/archimede/fichiers/21842/apa.html>
.. [3] Wikipedia. Akaike Information Criterion.
<https://en.wikipedia.org/wiki/Akaike_information_criterion>
.. [4] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
.. [5] Liddle, A. R. Information Criteria for Astrophysical Model
Selection. 2008. <https://arxiv.org/pdf/astro-ph/0701113v2.pdf>
.. [6] Liddle, A. R. How many cosmological parameters? 2008.
<https://arxiv.org/pdf/astro-ph/0401198v3.pdf>
"""
# Correction in case of small number of observations
if n_samples/float(n_params) >= 40.0:
aic = 2.0 * (n_params - log_likelihood)
else:
aic = (2.0 * (n_params - log_likelihood) +
2.0 * n_params * (n_params + 1.0) /
(n_samples - n_params - 1.0))
return aic
def akaike_info_criterion_lsq(ssr, n_params, n_samples):
r"""
Computes the Akaike Information Criterion assuming that the observations
are Gaussian distributed.
In this case, AIC is given as
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k
If the sample size is not "large enough", a correction is
applied, i.e.
.. math::
\mathrm{AIC} = n\ln\left(\dfrac{\mathrm{SSR}}{n}\right) + 2k +
\dfrac{2k(k+1)}{n-k-1}
in which :math:`n` is the sample size, :math:`k` is the number of free
parameters and :math:`\mathrm{SSR}` stands for the sum of squared residuals
between model and data.
This is applicable, for instance, when the parameters of a model are
estimated using the least squares statistic.
Parameters
----------
ssr : float
Sum of squared residuals (SSR) between model and data.
n_params : int
Number of free parameters of the model, i.e., the dimension of the
parameter space.
n_samples : int
Number of observations.
Returns
-------
aic : float
Akaike Information Criterion.
Examples
--------
This example is based on the Astropy Modeling webpage, Compound models
section.
>>> import numpy as np
>>> from astropy.modeling import models, fitting
>>> from astropy.stats.info_theory import akaike_info_criterion_lsq
>>> np.random.seed(42)
>>> # Generate fake data
>>> g1 = models.Gaussian1D(.1, 0, 0.2)
>>> g2 = models.Gaussian1D(.1, 0.3, 0.2)
>>> g3 = models.Gaussian1D(2.5, 0.5, 0.1)
>>> x = np.linspace(-1, 1, 200)
>>> y = g1(x) + g2(x) + g3(x) + np.random.normal(0., 0.2, x.shape)
>>> # Fit with three Gaussians
>>> g3_init = (models.Gaussian1D(.1, 0, 0.1)
... + models.Gaussian1D(.1, 0.2, 0.15)
... + models.Gaussian1D(2., .4, 0.1))
>>> fitter = fitting.LevMarLSQFitter()
>>> g3_fit = fitter(g3_init, x, y)
>>> # Fit with two Gaussians
>>> g2_init = (models.Gaussian1D(.1, 0, 0.1) +
... models.Gaussian1D(2, 0.5, 0.1))
>>> g2_fit = fitter(g2_init, x, y)
>>> # Fit with only one Gaussian
>>> g1_init = models.Gaussian1D(amplitude=2., mean=0.3, stddev=.5)
>>> g1_fit = fitter(g1_init, x, y)
>>> # Compute the sums of squared residuals
>>> ssr_g3 = np.sum((g3_fit(x) - y)**2.0)
>>> ssr_g2 = np.sum((g2_fit(x) - y)**2.0)
>>> ssr_g1 = np.sum((g1_fit(x) - y)**2.0)
>>> akaike_info_criterion_lsq(ssr_g3, 9, x.shape[0]) # doctest: +FLOAT_CMP
-660.41075962620482
>>> akaike_info_criterion_lsq(ssr_g2, 6, x.shape[0]) # doctest: +FLOAT_CMP
-662.83834510232043
>>> akaike_info_criterion_lsq(ssr_g1, 3, x.shape[0]) # doctest: +FLOAT_CMP
-647.47312032659499
Hence, from the AIC values, we would prefer the model g2_fit. However,
the model g3_fit still has considerable support, since its AIC is higher
by only about 2.4. We should reject the model g1_fit.
References
----------
.. [1] Hu, S. Akaike Information Criterion.
<http://www4.ncsu.edu/~shu3/Presentation/AIC.pdf>
.. [2] Origin Lab. Comparing Two Fitting Functions.
<http://www.originlab.com/doc/Origin-Help/PostFit-CompareFitFunc>
"""
return akaike_info_criterion(-0.5 * n_samples * np.log(ssr / n_samples),
n_params, n_samples)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains functions for computing robust statistics using
Tukey's biweight function.
"""
import numpy as np
from .funcs import median_absolute_deviation
from astropy.utils.decorators import deprecated_renamed_argument
__all__ = ['biweight_location', 'biweight_scale', 'biweight_midvariance',
'biweight_midcovariance', 'biweight_midcorrelation']
@deprecated_renamed_argument('a', 'data', '2.0')
def biweight_location(data, c=6.0, M=None, axis=None):
r"""
Compute the biweight location.
The biweight location is a robust statistic for determining the
central location of a distribution. It is given by:
.. math::
\zeta_{biloc} = M + \frac{\Sigma_{|u_i|<1} \ (x_i - M) (1 - u_i^2)^2}
{\Sigma_{|u_i|<1} \ (1 - u_i^2)^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input initial location guess) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight location tuning constant ``c`` is typically 6.0 (the
default).
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
c : float, optional
Tuning constant for the biweight estimator (default = 6.0).
M : float or array-like, optional
Initial guess for the location. If ``M`` is a scalar value,
then its value will be used for the entire array (or along each
``axis``, if specified). If ``M`` is an array, then it must be
an array containing the initial location estimate along each
``axis`` of the input array. If `None` (default), then the
median of the input array will be used (or along each ``axis``,
if specified).
axis : int, optional
The axis along which the biweight locations are computed. If
`None` (default), then the biweight location of the flattened
input array will be computed.
Returns
-------
biweight_location : float or `~numpy.ndarray`
The biweight location of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (http://adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwloc.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight location of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_location
>>> rand = np.random.RandomState(12345)
>>> biloc = biweight_location(rand.randn(1000))
>>> print(biloc) # doctest: +FLOAT_CMP
-0.0175741540445
"""
data = np.asanyarray(data).astype(np.float64)
if M is None:
M = np.median(data, axis=axis)
if axis is not None:
M = np.expand_dims(M, axis=axis)
# set up the differences
d = data - M
# set up the weighting
mad = median_absolute_deviation(data, axis=axis)
if axis is None and mad == 0.:
return M # return median if data is a constant array
if axis is not None:
mad = np.expand_dims(mad, axis=axis)
const_mask = (mad == 0.)
mad[const_mask] = 1. # prevent divide by zero
u = d / (c * mad)
# now remove the outlier points
mask = (np.abs(u) >= 1)
u = (1 - u ** 2) ** 2
u[mask] = 0
# if the data are constant along the input axis, d will be zero, and
# thus the median value will be returned along that axis
return M.squeeze() + (d * u).sum(axis=axis) / u.sum(axis=axis)
def biweight_scale(data, c=9.0, M=None, axis=None, modify_sample_size=False):
r"""
Compute the biweight scale.
The biweight scale is a robust statistic for determining the
standard deviation of a distribution. It is the square root of the
`biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_.
It is given by:
.. math::
\zeta_{biscl} = \sqrt{n} \ \frac{\sqrt{\Sigma_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4}} {|(\Sigma_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))|}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
For the standard definition of biweight scale, :math:`n` is the
total number of points in the array (or along the input ``axis``, if
specified). That definition is used if ``modify_sample_size`` is
`False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \Sigma_{|u_i| < 1} \ 1
which results in a value closer to the true standard deviation for
small sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : int, optional
The axis along which the biweight scales are computed. If
`None` (default), then the biweight scale of the flattened input
array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
scale. If `True`, then the sample size is reduced to correct
for any rejected values (i.e. the sample size used includes only
the non-rejected values), which results in a value closer to the
true standard deviation for small sample sizes or for a large
number of rejected values.
Returns
-------
biweight_scale : float or `~numpy.ndarray`
The biweight scale of the input data. If ``axis`` is `None`
then a scalar will be returned, otherwise a `~numpy.ndarray`
will be returned.
See Also
--------
biweight_midvariance, biweight_midcovariance, biweight_location, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (http://adsabs.harvard.edu/abs/1990AJ....100...32B)
.. [2] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwscale.htm
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight scale of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_scale
>>> rand = np.random.RandomState(12345)
>>> biscl = biweight_scale(rand.randn(1000))
>>> print(biscl) # doctest: +FLOAT_CMP
0.986726249291
"""
return np.sqrt(
biweight_midvariance(data, c=c, M=M, axis=axis,
modify_sample_size=modify_sample_size))
@deprecated_renamed_argument('a', 'data', '2.0')
def biweight_midvariance(data, c=9.0, M=None, axis=None,
modify_sample_size=False):
r"""
Compute the biweight midvariance.
The biweight midvariance is a robust statistic for determining the
variance of a distribution. Its square root is a robust estimator
of scale (i.e. standard deviation). It is given by:
.. math::
\zeta_{bivar} = n \ \frac{\Sigma_{|u_i| < 1} \
(x_i - M)^2 (1 - u_i^2)^4} {(\Sigma_{|u_i| < 1} \
(1 - u_i^2) (1 - 5u_i^2))^2}
where :math:`x` is the input data, :math:`M` is the sample median
(or the input location) and :math:`u_i` is given by:
.. math::
u_{i} = \frac{(x_i - M)}{c * MAD}
where :math:`c` is the tuning constant and :math:`MAD` is the
`median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_. The
biweight midvariance tuning constant ``c`` is typically 9.0 (the
default).
For the standard definition of `biweight midvariance
<https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance>`_,
:math:`n` is the total number of points in the array (or along the
input ``axis``, if specified). That definition is used if
``modify_sample_size`` is `False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of points for which :math:`|u_i| < 1` (i.e. the total number
of non-rejected values), i.e.
.. math::
n = \Sigma_{|u_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : array-like
Input array or object that can be converted to an array.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then it must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified).
axis : int, optional
The axis along which the biweight midvariances are computed. If
`None` (default), then the biweight midvariance of the flattened
input array will be computed.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midvariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true variance for small sample sizes or for a
large number of rejected values.
Returns
-------
biweight_midvariance : float or `~numpy.ndarray`
The biweight midvariance of the input data. If ``axis`` is
`None` then a scalar will be returned, otherwise a
`~numpy.ndarray` will be returned.
See Also
--------
biweight_midcovariance, biweight_midcorrelation, astropy.stats.mad_std, astropy.stats.median_absolute_deviation
References
----------
.. [1] https://en.wikipedia.org/wiki/Robust_measures_of_scale#The_biweight_midvariance
.. [2] Beers, Flynn, and Gebhardt (1990; AJ 100, 32) (http://adsabs.harvard.edu/abs/1990AJ....100...32B)
Examples
--------
Generate random variates from a Gaussian distribution and return the
biweight midvariance of the distribution:
>>> import numpy as np
>>> from astropy.stats import biweight_midvariance
>>> rand = np.random.RandomState(12345)
>>> bivar = biweight_midvariance(rand.randn(1000))
>>> print(bivar) # doctest: +FLOAT_CMP
0.97362869104
"""
data = np.asanyarray(data).astype(np.float64)
if M is None:
M = np.median(data, axis=axis)
if axis is not None:
M = np.expand_dims(M, axis=axis)
# set up the differences
d = data - M
# set up the weighting
mad = median_absolute_deviation(data, axis=axis)
if axis is None and mad == 0.:
return 0. # return zero if data is a constant array
if axis is not None:
mad = np.expand_dims(mad, axis=axis)
const_mask = (mad == 0.)
mad[const_mask] = 1. # prevent divide by zero
u = d / (c * mad)
# now remove the outlier points
mask = np.abs(u) < 1
u = u ** 2
if modify_sample_size:
n = mask.sum(axis=axis)
else:
if axis is None:
n = data.size
else:
n = data.shape[axis]
f1 = d * d * (1. - u)**4
f1[~mask] = 0.
f1 = f1.sum(axis=axis)
f2 = (1. - u) * (1. - 5.*u)
f2[~mask] = 0.
f2 = np.abs(f2.sum(axis=axis))**2
return n * f1 / f2
@deprecated_renamed_argument('a', 'data', '2.0')
def biweight_midcovariance(data, c=9.0, M=None, modify_sample_size=False):
r"""
Compute the biweight midcovariance between pairs of multiple
variables.
The biweight midcovariance is a robust and resistant estimator of
the covariance between two variables.
This function computes the biweight midcovariance between all pairs
of the input variables (rows) in the input data. The output array
will have a shape of (N_variables, N_variables). The diagonal
elements will be the biweight midvariances of each input variable
(see :func:`biweight_midvariance`). The off-diagonal elements will
be the biweight midcovariances between each pair of input variables.
For example, if the input array ``data`` contains three variables
(rows) ``x``, ``y``, and ``z``, the output `~numpy.ndarray`
midcovariance matrix will be:
.. math::
\begin{pmatrix}
\zeta_{xx} & \zeta_{xy} & \zeta_{xz} \\
\zeta_{yx} & \zeta_{yy} & \zeta_{yz} \\
\zeta_{zx} & \zeta_{zy} & \zeta_{zz}
\end{pmatrix}
where :math:`\zeta_{xx}`, :math:`\zeta_{yy}`, and :math:`\zeta_{zz}`
are the biweight midvariances of each variable. The biweight
midcovariance between :math:`x` and :math:`y` is :math:`\zeta_{xy}`
(:math:`= \zeta_{yx}`). The biweight midcovariance between
:math:`x` and :math:`z` is :math:`\zeta_{xz}` (:math:`=
\zeta_{zx}`). The biweight midcovariance between :math:`y` and
:math:`z` is :math:`\zeta_{yz}` (:math:`= \zeta_{zy}`).
The biweight midcovariance between two variables :math:`x` and
:math:`y` is given by:
.. math::
\zeta_{xy} = n \ \frac{\Sigma_{|u_i| < 1, \ |v_i| < 1} \
(x_i - M_x) (1 - u_i^2)^2 (y_i - M_y) (1 - v_i^2)^2}
{(\Sigma_{|u_i| < 1} \ (1 - u_i^2) (1 - 5u_i^2))
(\Sigma_{|v_i| < 1} \ (1 - v_i^2) (1 - 5v_i^2))}
where :math:`M_x` and :math:`M_y` are the medians (or the input
locations) of the two variables and :math:`u_i` and :math:`v_i` are
given by:
.. math::
u_{i} = \frac{(x_i - M_x)}{c * MAD_x}
v_{i} = \frac{(y_i - M_y)}{c * MAD_y}
where :math:`c` is the biweight tuning constant and :math:`MAD_x`
and :math:`MAD_y` are the `median absolute deviation
<https://en.wikipedia.org/wiki/Median_absolute_deviation>`_ of the
:math:`x` and :math:`y` variables. The biweight midvariance tuning
constant ``c`` is typically 9.0 (the default).
For the standard definition of biweight midcovariance :math:`n` is
the total number of observations of each variable. That definition
is used if ``modify_sample_size`` is `False`, which is the default.
However, if ``modify_sample_size = True``, then :math:`n` is the
number of observations for which :math:`|u_i| < 1` and :math:`|v_i|
< 1`, i.e.
.. math::
n = \Sigma_{|u_i| < 1, \ |v_i| < 1} \ 1
which results in a value closer to the true variance for small
sample sizes or for a large number of rejected values.
Parameters
----------
data : 2D or 1D array-like
Input data either as a 2D or 1D array. For a 2D array, it
should have a shape (N_variables, N_observations). A 1D array
may be input for observations of a single variable, in which
case the biweight midvariance will be calculated (no
covariance). Each row of ``data`` represents a variable, and
each column a single observation of all those variables (same as
the `numpy.cov` convention).
c : float, optional
Tuning constant for the biweight estimator (default = 9.0).
M : float or 1D array-like, optional
The location estimate of each variable, either as a scalar or
array. If ``M`` is an array, then it must be a 1D array
containing the location estimate of each row (i.e. ``data.shape[0]``
elements). If ``M`` is a scalar value, then its value will be
used for each variable (row). If `None` (default), then the
median of each variable (row) will be used.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of observations of each variable, which follows the
standard definition of biweight midcovariance. If `True`, then
the sample size is reduced to correct for any rejected values
(see formula above), which results in a value closer to the true
covariance for small sample sizes or for a large number of
rejected values.
Returns
-------
biweight_midcovariance : `~numpy.ndarray`
A 2D array representing the biweight midcovariances between each
pair of the variables (rows) in the input array. The output
array will have a shape of (N_variables, N_variables). The
diagonal elements will be the biweight midvariances of each
input variable. The off-diagonal elements will be the biweight
midcovariances between each pair of input variables.
See Also
--------
biweight_midvariance, biweight_midcorrelation, biweight_scale, biweight_location
References
----------
.. [1] http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/biwmidc.htm
Examples
--------
Compute the biweight midcovariance between two random variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcovariance
>>> # Generate two random variables x and y
>>> rng = np.random.RandomState(1)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> # Calculate the biweight midcovariances between x and y
>>> bicov = biweight_midcovariance([x, y])
>>> print(bicov) # doctest: +FLOAT_CMP
[[ 0.82483155 -0.18961219]
[-0.18961219 9.80265764]]
>>> # Print standard deviation estimates
>>> print(np.sqrt(bicov.diagonal())) # doctest: +FLOAT_CMP
[ 0.90820237 3.13091961]
"""
data = np.asanyarray(data).astype(np.float64)
# ensure data is 2D
if data.ndim == 1:
data = data[np.newaxis, :]
if data.ndim != 2:
raise ValueError('The input array must be 2D or 1D.')
# estimate location if not given
if M is None:
M = np.median(data, axis=1)
M = np.asanyarray(M)
if M.ndim > 1:
raise ValueError('M must be a scalar or 1D array.')
# set up the differences
d = (data.T - M).T
# set up the weighting
mad = median_absolute_deviation(data, axis=1)
const_mask = (mad == 0.)
mad[const_mask] = 1. # prevent divide by zero
u = (d.T / (c * mad)).T
# now remove the outlier points
mask = np.abs(u) < 1
u = u ** 2
if modify_sample_size:
maskf = mask.astype(float)
n = np.inner(maskf, maskf)
else:
n = data[0].size
usub1 = (1. - u)
usub5 = (1. - 5. * u)
usub1[~mask] = 0.
numerator = d * usub1 ** 2
denominator = (usub1 * usub5).sum(axis=1)[:, np.newaxis]
numerator_matrix = np.dot(numerator, numerator.T)
denominator_matrix = np.dot(denominator, denominator.T)
return n * (numerator_matrix / denominator_matrix)
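# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original module): compare the
# classical covariance with the biweight midcovariance on contaminated
# data, and show the effect of ``modify_sample_size``.  The helper name
# ``_demo_biweight_midcovariance`` is hypothetical.
def _demo_biweight_midcovariance():
    rng = np.random.RandomState(42)
    x = rng.normal(0, 1, 50)
    y = rng.normal(0, 3, 50)
    x[0] = 100.0  # a single gross outlier
    # np.cov is dominated by the outlier; the biweight estimate is not.
    print(np.cov(x, y))
    print(biweight_midcovariance([x, y]))
    # For small samples, correcting n for rejected points gives a value
    # closer to the true covariance.
    print(biweight_midcovariance([x, y], modify_sample_size=True))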
def biweight_midcorrelation(x, y, c=9.0, M=None, modify_sample_size=False):
r"""
Compute the biweight midcorrelation between two variables.
The `biweight midcorrelation
<https://en.wikipedia.org/wiki/Biweight_midcorrelation>`_ is a
measure of similarity between samples. It is given by:
.. math::
r_{bicorr} = \frac{\zeta_{xy}}{\sqrt{\zeta_{xx} \ \zeta_{yy}}}
where :math:`\zeta_{xx}` is the biweight midvariance of :math:`x`,
:math:`\zeta_{yy}` is the biweight midvariance of :math:`y`, and
:math:`\zeta_{xy}` is the biweight midcovariance of :math:`x` and
:math:`y`.
Parameters
----------
x, y : 1D array-like
Input arrays for the two variables. ``x`` and ``y`` must be 1D
arrays and have the same number of elements.
c : float, optional
Tuning constant for the biweight estimator (default = 9.0). See
`biweight_midcovariance` for more details.
M : float or array-like, optional
The location estimate. If ``M`` is a scalar value, then its
value will be used for the entire array (or along each ``axis``,
if specified). If ``M`` is an array, then its must be an array
containing the location estimate along each ``axis`` of the
input array. If `None` (default), then the median of the input
array will be used (or along each ``axis``, if specified). See
`biweight_midcovariance` for more details.
modify_sample_size : bool, optional
If `False` (default), then the sample size used is the total
number of elements in the array (or along the input ``axis``, if
specified), which follows the standard definition of biweight
midcovariance. If `True`, then the sample size is reduced to
correct for any rejected values (i.e. the sample size used
includes only the non-rejected values), which results in a value
closer to the true midcovariance for small sample sizes or for a
large number of rejected values. See `biweight_midcovariance`
for more details.
Returns
-------
biweight_midcorrelation : float
The biweight midcorrelation between ``x`` and ``y``.
See Also
--------
biweight_scale, biweight_midvariance, biweight_midcovariance, biweight_location
References
----------
.. [1] https://en.wikipedia.org/wiki/Biweight_midcorrelation
Examples
--------
Calculate the biweight midcorrelation between two variables:
>>> import numpy as np
>>> from astropy.stats import biweight_midcorrelation
>>> rng = np.random.RandomState(12345)
>>> x = rng.normal(0, 1, 200)
>>> y = rng.normal(0, 3, 200)
>>> # Introduce an obvious outlier
>>> x[0] = 30.0
>>> bicorr = biweight_midcorrelation(x, y)
>>> print(bicorr) # doctest: +FLOAT_CMP
-0.0495780713907
"""
x = np.asanyarray(x)
y = np.asanyarray(y)
if x.ndim != 1:
raise ValueError('x must be a 1D array.')
if y.ndim != 1:
raise ValueError('y must be a 1D array.')
if x.shape != y.shape:
raise ValueError('x and y must have the same shape.')
bicorr = biweight_midcovariance([x, y], c=c, M=M,
modify_sample_size=modify_sample_size)
return bicorr[0, 1] / (np.sqrt(bicorr[0, 0] * bicorr[1, 1]))
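# Illustrative sketch (not part of the original module): verify that the
# biweight midcorrelation is the midcovariance normalized by the two
# midvariances.  ``_demo_biweight_midcorrelation`` is a hypothetical name.
def _demo_biweight_midcorrelation():
    rng = np.random.RandomState(0)
    x = rng.normal(0, 1, 100)
    y = 2.0 * x + rng.normal(0, 1, 100)
    cov = biweight_midcovariance([x, y])
    r_manual = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
    assert np.isclose(r_manual, biweight_midcorrelation(x, y))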
|
76a2b6331991af366e4eb1c04a416f54e32e2dc0bee53ad152a10cf23f66d269 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Methods for selecting the bin width of histograms
Ported from the astroML project: http://astroML.org/
"""
import numpy as np
from . import bayesian_blocks
__all__ = ['histogram', 'scott_bin_width', 'freedman_bin_width',
'knuth_bin_width', 'calculate_bin_edges']
def calculate_bin_edges(a, bins=10, range=None, weights=None):
"""
Calculate histogram bin edges like `numpy.histogram_bin_edges`.
Parameters
----------
a : array_like
Input data. The bin edges are calculated over the flattened array.
bins : int or list or str (optional)
If ``bins`` is an int, it is the number of bins. If it is a list
it is taken to be the bin edges. If it is a string, it must be one
of 'blocks', 'knuth', 'scott' or 'freedman'. See
`~astropy.stats.histogram` for a description of each method.
range : tuple or None (optional)
The minimum and maximum range for the histogram. If not specified,
it will be (a.min(), a.max()). However, if bins is a list it is
returned unmodified regardless of the range argument.
weights : array_like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges, though they may be used in the future as new methods are added.
"""
# if range is specified, we need to truncate the data for
# the bin-finding routines
if range is not None:
a = a[(a >= range[0]) & (a <= range[1])]
# if bins is a string, first compute bin edges with the desired heuristic
if isinstance(bins, str):
a = np.asarray(a).ravel()
# TODO: if weights is specified, we need to modify things.
# e.g. we could use point measures fitness for Bayesian blocks
if weights is not None:
raise NotImplementedError("weights are not yet supported "
"for the enhanced histogram")
if bins == 'blocks':
bins = bayesian_blocks(a)
elif bins == 'knuth':
da, bins = knuth_bin_width(a, True)
elif bins == 'scott':
da, bins = scott_bin_width(a, True)
elif bins == 'freedman':
da, bins = freedman_bin_width(a, True)
else:
raise ValueError("unrecognized bin code: '{}'".format(bins))
if range:
# Check that the upper and lower edges are what was requested.
# The current implementation of the bin width estimators does not
# guarantee this, it only ensures that data outside the range is
# excluded from calculation of the bin widths.
if bins[0] != range[0]:
bins[0] = range[0]
if bins[-1] != range[1]:
bins[-1] = range[1]
elif np.ndim(bins) == 0:
# Number of bins was given
try:
# This works for numpy 1.15 or later
bins = np.histogram_bin_edges(a, bins,
range=range,
weights=weights)
except AttributeError:
            # In this case only (integer number of bins, older version
            # of numpy), calculate the bin edges manually.
if range is not None:
lower, upper = range
else:
lower = a.min()
upper = a.max()
bins = np.linspace(lower, upper, bins + 1, endpoint=True)
return bins
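# Illustrative sketch (not part of the original module): bin edges from
# the Freedman-Diaconis heuristic, with ``range`` clamping the outer
# edges.  ``_demo_calculate_bin_edges`` is a hypothetical helper name.
def _demo_calculate_bin_edges():
    rng = np.random.RandomState(1)
    a = rng.normal(size=1000)
    edges = calculate_bin_edges(a, bins='freedman', range=(-3, 3))
    # The first and last edges are forced to the requested range.
    assert edges[0] == -3 and edges[-1] == 3
    return edges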
def histogram(a, bins=10, range=None, weights=None, **kwargs):
"""Enhanced histogram function, providing adaptive binnings
This is a histogram function that enables the use of more sophisticated
    algorithms for determining bins.  Aside from the ``bins`` argument
    allowing a string that specifies how bins are computed, the
    parameters are the same as ``numpy.histogram()``.
Parameters
----------
a : array_like
array of data to be histogrammed
bins : int or list or str (optional)
If bins is a string, then it must be one of:
- 'blocks' : use bayesian blocks for dynamic bin widths
- 'knuth' : use Knuth's rule to determine bins
- 'scott' : use Scott's rule to determine bins
- 'freedman' : use the Freedman-Diaconis rule to determine bins
range : tuple or None (optional)
the minimum and maximum range for the histogram. If not specified,
it will be (x.min(), x.max())
weights : array_like, optional
An array the same shape as ``a``. If given, the histogram accumulates
the value of the weight corresponding to ``a`` instead of returning the
count of values. This argument does not affect determination of bin
edges.
    Other keyword arguments are described in `numpy.histogram`.
Returns
-------
hist : array
The values of the histogram. See ``density`` and ``weights`` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
numpy.histogram
"""
bins = calculate_bin_edges(a, bins=bins, range=range, weights=weights)
# Now we call numpy's histogram with the resulting bin edges
return np.histogram(a, bins=bins, range=range, weights=weights, **kwargs)
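# Illustrative sketch (not part of the original module): the enhanced
# histogram reduces to numpy.histogram once the adaptive bin edges are
# computed.  ``_demo_histogram`` is a hypothetical helper name.
def _demo_histogram():
    rng = np.random.RandomState(2)
    a = np.concatenate([rng.normal(-2, 0.5, 500), rng.normal(3, 1.0, 500)])
    # Scott's rule picks a bin width from the data; a fixed count of
    # 10 bins ignores the sample size and spread.
    hist_scott, edges_scott = histogram(a, bins='scott')
    hist_fixed, edges_fixed = histogram(a, bins=10)
    return (hist_scott, edges_scott), (hist_fixed, edges_fixed)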
def scott_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using Scott's rule
Scott's rule is a normal reference rule: it minimizes the integrated
mean squared error in the bin approximation under the assumption that the
data is approximately Gaussian.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using Scott's rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{3.5\sigma}{n^{1/3}}
where :math:`\sigma` is the standard deviation of the data, and
:math:`n` is the number of data points [1]_.
References
----------
    .. [1] Scott, David W. (1979). "On optimal and data-based histograms".
       Biometrika 66 (3): 605-610
See Also
--------
knuth_bin_width
freedman_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
sigma = np.std(data)
dx = 3.5 * sigma / (n ** (1 / 3))
if return_bins:
Nbins = np.ceil((data.max() - data.min()) / dx)
Nbins = max(1, Nbins)
bins = data.min() + dx * np.arange(Nbins + 1)
return dx, bins
else:
return dx
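# Illustrative sketch (not part of the original module): Scott's width
# computed directly from the formula matches the function output.
# ``_demo_scott_bin_width`` is a hypothetical helper name.
def _demo_scott_bin_width():
    rng = np.random.RandomState(3)
    data = rng.normal(size=1000)
    dx = scott_bin_width(data)
    expected = 3.5 * np.std(data) / (data.size ** (1 / 3))
    assert np.isclose(dx, expected)
    return dx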
def freedman_bin_width(data, return_bins=False):
r"""Return the optimal histogram bin width using the Freedman-Diaconis rule
The Freedman-Diaconis rule is a normal reference rule like Scott's
rule, but uses rank-based statistics for results which are more robust
to deviations from a normal distribution.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
Returns
-------
width : float
optimal bin width using the Freedman-Diaconis rule
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal bin width is
.. math::
\Delta_b = \frac{2(q_{75} - q_{25})}{n^{1/3}}
    where :math:`q_{N}` is the :math:`N`\ th percentile of the data, and
    :math:`n` is the number of data points [1]_.
References
----------
.. [1] D. Freedman & P. Diaconis (1981)
"On the histogram as a density estimator: L2 theory".
Probability Theory and Related Fields 57 (4): 453-476
See Also
--------
knuth_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
data = np.asarray(data)
if data.ndim != 1:
raise ValueError("data should be one-dimensional")
n = data.size
if n < 4:
raise ValueError("data should have more than three entries")
v25, v75 = np.percentile(data, [25, 75])
dx = 2 * (v75 - v25) / (n ** (1 / 3))
if return_bins:
dmin, dmax = data.min(), data.max()
Nbins = max(1, np.ceil((dmax - dmin) / dx))
try:
bins = dmin + dx * np.arange(Nbins + 1)
except ValueError as e:
if 'Maximum allowed size exceeded' in str(e):
raise ValueError(
'The inter-quartile range of the data is too small: '
'failed to construct histogram with {} bins. '
'Please use another bin method, such as '
'bins="scott"'.format(Nbins + 1))
else: # Something else # pragma: no cover
raise
return dx, bins
else:
return dx
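# Illustrative sketch (not part of the original module): the
# Freedman-Diaconis width is twice the interquartile range divided by
# the cube root of the sample size.  ``_demo_freedman_bin_width`` is a
# hypothetical helper name.
def _demo_freedman_bin_width():
    rng = np.random.RandomState(4)
    data = rng.normal(size=1000)
    dx = freedman_bin_width(data)
    q25, q75 = np.percentile(data, [25, 75])
    assert np.isclose(dx, 2 * (q75 - q25) / (data.size ** (1 / 3)))
    return dx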
def knuth_bin_width(data, return_bins=False, quiet=True):
r"""Return the optimal histogram bin width using Knuth's rule.
Knuth's rule is a fixed-width, Bayesian approach to determining
the optimal bin width of a histogram.
Parameters
----------
data : array-like, ndim=1
observed (one-dimensional) data
return_bins : bool (optional)
if True, then return the bin edges
quiet : bool (optional)
if True (default) then suppress stdout output from scipy.optimize
Returns
-------
dx : float
optimal bin width. Bins are measured starting at the first data point.
bins : ndarray
bin edges: returned if ``return_bins`` is True
Notes
-----
The optimal number of bins is the value M which maximizes the function
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`
[1]_.
References
----------
.. [1] Knuth, K.H. "Optimal Data-Based Binning for Histograms".
arXiv:0605197, 2006
See Also
--------
freedman_bin_width
scott_bin_width
bayesian_blocks
histogram
"""
# import here because of optional scipy dependency
from scipy import optimize
knuthF = _KnuthF(data)
dx0, bins0 = freedman_bin_width(data, True)
M = optimize.fmin(knuthF, len(bins0), disp=not quiet)[0]
bins = knuthF.bins(M)
dx = bins[1] - bins[0]
if return_bins:
return dx, bins
else:
return dx
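# Illustrative sketch (not part of the original module): Knuth's rule
# requires the optional scipy dependency, so the demo guards the import.
# ``_demo_knuth_bin_width`` is a hypothetical helper name.
def _demo_knuth_bin_width():
    try:
        import scipy  # noqa: F401
    except ImportError:
        return None  # scipy is an optional dependency
    rng = np.random.RandomState(5)
    data = rng.normal(size=1000)
    dx, bins = knuth_bin_width(data, return_bins=True)
    # The bins span the data range exactly, starting at the first point.
    assert np.isclose(bins[0], data.min()) and np.isclose(bins[-1], data.max())
    return dx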
class _KnuthF:
r"""Class which implements the function minimized by knuth_bin_width
Parameters
----------
data : array-like, one dimension
data to be histogrammed
Notes
-----
the function F is given by
.. math::
F(M|x,I) = n\log(M) + \log\Gamma(\frac{M}{2})
- M\log\Gamma(\frac{1}{2})
- \log\Gamma(\frac{2n+M}{2})
+ \sum_{k=1}^M \log\Gamma(n_k + \frac{1}{2})
where :math:`\Gamma` is the Gamma function, :math:`n` is the number of
data points, :math:`n_k` is the number of measurements in bin :math:`k`.
See Also
--------
knuth_bin_width
"""
def __init__(self, data):
self.data = np.array(data, copy=True)
if self.data.ndim != 1:
raise ValueError("data should be 1-dimensional")
self.data.sort()
self.n = self.data.size
# import here rather than globally: scipy is an optional dependency.
# Note that scipy is imported in the function which calls this,
# so there shouldn't be any issue importing here.
from scipy import special
# create a reference to gammaln to use in self.eval()
self.gammaln = special.gammaln
def bins(self, M):
"""Return the bin edges given a width dx"""
return np.linspace(self.data[0], self.data[-1], int(M) + 1)
def __call__(self, M):
return self.eval(M)
def eval(self, M):
"""Evaluate the Knuth function
Parameters
----------
        M : int
            Number of bins
Returns
-------
F : float
evaluation of the negative Knuth likelihood function:
smaller values indicate a better fit.
"""
M = int(M)
if M <= 0:
return np.inf
bins = self.bins(M)
nk, bins = np.histogram(self.data, bins)
return -(self.n * np.log(M) +
self.gammaln(0.5 * M) -
M * self.gammaln(0.5) -
self.gammaln(self.n + 0.5 * M) +
np.sum(self.gammaln(nk + 0.5)))
|
ef9c70afb62e372946297cef23fa45edafc7d6858990e198d1a92aea37dcdd2d | """
Table property for providing information about table.
"""
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import sys
import os
from contextlib import contextmanager
from inspect import isclass
import numpy as np
from astropy.utils.data_info import DataInfo
__all__ = ['table_info', 'TableInfo', 'serialize_method_as']
def table_info(tbl, option='attributes', out=''):
"""
    Write summary information about the table's columns to the ``out``
    filehandle.
By default this prints to standard output via sys.stdout.
The ``option`` argument specifies what type of information
to include. This can be a string, a function, or a list of
strings or functions. Built-in options are:
- ``attributes``: basic column meta data like ``dtype`` or ``format``
    - ``stats``: basic statistics: mean, standard deviation, minimum,
      and maximum
If a function is specified then that function will be called with the
column as its single argument. The function must return an OrderedDict
containing the information attributes.
If a list is provided then the information attributes will be
appended for each of the options, in order.
Examples
--------
>>> from astropy.table.table_helpers import simple_table
>>> t = simple_table(size=2, kinds='if')
>>> t['a'].unit = 'm'
>>> t.info()
<Table length=2>
name dtype unit
---- ------- ----
a int64 m
b float64
>>> t.info('stats')
<Table length=2>
name mean std min max
---- ---- --- --- ---
a 1.5 0.5 1 2
b 1.5 0.5 1.0 2.0
Parameters
----------
option : str, function, list of (str or function)
Info option, defaults to 'attributes'.
out : file-like object, None
Output destination, default is sys.stdout. If None then a
Table with information attributes is returned
Returns
-------
info : `~astropy.table.Table` if out==None else None
"""
from .table import Table
if out == '':
out = sys.stdout
descr_vals = [tbl.__class__.__name__]
if tbl.masked:
descr_vals.append('masked=True')
descr_vals.append('length={0}'.format(len(tbl)))
outlines = ['<' + ' '.join(descr_vals) + '>']
cols = tbl.columns.values()
if tbl.colnames:
infos = []
for col in cols:
infos.append(col.info(option, out=None))
info = Table(infos, names=list(infos[0]))
else:
info = Table()
if out is None:
return info
# Since info is going to a filehandle for viewing then remove uninteresting
# columns.
if 'class' in info.colnames:
# Remove 'class' info column if all table columns are the same class
# and they are the default column class for that table.
uniq_types = set(type(col) for col in cols)
if len(uniq_types) == 1 and isinstance(cols[0], tbl.ColumnClass):
del info['class']
if 'n_bad' in info.colnames and np.all(info['n_bad'] == 0):
del info['n_bad']
# Standard attributes has 'length' but this is typically redundant
if 'length' in info.colnames and np.all(info['length'] == len(tbl)):
del info['length']
for name in info.colnames:
if info[name].dtype.kind in 'SU' and np.all(info[name] == ''):
del info[name]
if tbl.colnames:
outlines.extend(info.pformat(max_width=-1, max_lines=-1, show_unit=False))
else:
outlines.append('<No columns>')
out.writelines(outline + os.linesep for outline in outlines)
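# Illustrative sketch (not part of the original module): with ``out=None``
# the summary is returned as a Table rather than written to stdout.
# ``_demo_table_info`` is a hypothetical helper name.
def _demo_table_info():
    from .table import Table
    t = Table({'a': [1, 2], 'b': [1.0, 2.0]})
    info_tbl = table_info(t, option='attributes', out=None)
    # The returned Table has one row per column of ``t``.
    return info_tbl['name', 'dtype']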
class TableInfo(DataInfo):
_parent = None
def __call__(self, option='attributes', out=''):
return table_info(self._parent, option, out)
__call__.__doc__ = table_info.__doc__
@contextmanager
def serialize_method_as(tbl, serialize_method):
"""Context manager to temporarily override individual
column info.serialize_method dict values. The serialize_method
attribute is an optional dict which might look like ``{'fits':
'jd1_jd2', 'ecsv': 'formatted_value', ..}``.
    ``serialize_method`` is a str or dict.  If a str, then it is the
    ``serialize_method`` that will be used for all formats.
If dict then the key values can be either:
- Column name. This has higher precedence than the second option of
matching class.
- Class (matches any column which is an instance of the class)
This context manager is expected to be used only within ``Table.write``.
    It could have been a private method on Table, but we prefer not to
    add clutter to that class.
Parameters
----------
tbl : Table object
Input table
serialize_method : dict, str
Dict with key values of column names or types, or str
Returns
-------
None (context manager)
"""
def get_override_sm(col):
"""
Determine if the ``serialize_method`` str or dict specifies an
override of column presets for ``col``. Returns the matching
serialize_method value or ``None``.
"""
# If a string then all columns match
if isinstance(serialize_method, str):
return serialize_method
# If column name then return that serialize_method
if col.info.name in serialize_method:
return serialize_method[col.info.name]
# Otherwise look for subclass matches
for key in serialize_method:
if isclass(key) and isinstance(col, key):
return serialize_method[key]
return None
# Setup for the context block. Set individual column.info.serialize_method
# values as appropriate and keep a backup copy. If ``serialize_method``
# is None or empty then don't do anything.
if serialize_method:
# Original serialize_method dict, keyed by column name. This only
# gets set if there is an override.
original_sms = {}
# Go through every column and if it has a serialize_method info
# attribute then potentially update it for the duration of the write.
for col in tbl.itercols():
if hasattr(col.info, 'serialize_method'):
override_sm = get_override_sm(col)
if override_sm:
# Make a reference copy of the column serialize_method
# dict which maps format (e.g. 'fits') to the
# appropriate method (e.g. 'data_mask').
original_sms[col.info.name] = col.info.serialize_method
# Set serialize method for *every* available format. This is
# brute force, but at this point the format ('fits', 'ecsv', etc)
# is not actually known (this gets determined by the write function
# in registry.py). Note this creates a new temporary dict object
# so that the restored version is the same original object.
col.info.serialize_method = {fmt: override_sm
for fmt in col.info.serialize_method}
# Finally yield for the context block
try:
yield
finally:
# Teardown (restore) for the context block. Be sure to do this even
# if an exception occurred.
if serialize_method:
for name, original_sm in original_sms.items():
tbl[name].info.serialize_method = original_sm
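# Illustrative sketch (not part of the original module): temporarily force
# every column to serialize as 'data_mask' while writing, restoring the
# per-column settings afterwards.  ``_demo_serialize_method_as`` is a
# hypothetical helper name and ``filename`` an arbitrary output path.
def _demo_serialize_method_as(tbl, filename):
    with serialize_method_as(tbl, 'data_mask'):
        tbl.write(filename, format='fits', overwrite=True)
    # Outside the context, the original serialize_method dicts (if any)
    # are back in place on each column.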
|
0918b18005fa476cdb6a3b52c458fb64126c4840a042494455e61832df317b10 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The Index class can use several implementations as its
engine. Any implementation should implement the following:
__init__(data, row_index) : initialize index based on key/row list pairs
add(key, row) -> None : add (key, row) to existing data
remove(key, data=None) -> boolean : remove data from self[key], or all of
self[key] if data is None
shift_left(row) -> None : decrement row numbers after row
shift_right(row) -> None : increase row numbers >= row
find(key) -> list : list of rows corresponding to key
range(lower, upper, bounds) -> list : rows in self[k] where k is between
lower and upper (<= or < based on bounds)
sort() -> None : make row order align with key order
sorted_data() -> list of rows in sorted order (by key)
replace_rows(row_map) -> None : replace row numbers based on slice
items() -> list of tuples of the form (key, data)
Notes
-----
When a Table is initialized from another Table, indices are
(deep) copied and their columns are set to the columns of the new Table.
Column creation:
Column(c) -> deep copy of indices
c[[1, 2]] -> deep copy and reordering of indices
c[1:2] -> reference
array.view(Column) -> no indices
"""
from copy import deepcopy
import numpy as np
from .bst import MinValue, MaxValue
from .sorted_array import SortedArray
from astropy.time import Time
class QueryError(ValueError):
'''
Indicates that a given index cannot handle the supplied query.
'''
pass
class Index:
'''
The Index class makes it possible to maintain indices
on columns of a Table, so that column values can be queried
quickly and efficiently. Column values are stored in lexicographic
sorted order, which allows for binary searching in O(log n).
Parameters
----------
columns : list or None
List of columns on which to create an index. If None,
create an empty index for purposes of deep copying.
engine : type, instance, or None
Indexing engine class to use (from among SortedArray, BST,
FastBST, FastRBT, and SCEngine) or actual engine instance.
If the supplied argument is None (by default), use SortedArray.
unique : bool (defaults to False)
Whether the values of the index must be unique
'''
def __new__(cls, *args, **kwargs):
self = super().__new__(cls)
# If (and only if) unpickling for protocol >= 2, then args and kwargs
# are both empty. The class __init__ requires at least the `columns`
# arg. In this case return a bare `Index` object which is then morphed
# by the unpickling magic into the correct SlicedIndex object.
if not args and not kwargs:
return self
self.__init__(*args, **kwargs)
return SlicedIndex(self, slice(0, 0, None), original=True)
def __init__(self, columns, engine=None, unique=False):
from .table import Table, Column
if engine is not None and not isinstance(engine, type):
# create from data
self.engine = engine.__class__
self.data = engine
self.columns = columns
return
# by default, use SortedArray
self.engine = engine or SortedArray
if columns is None: # this creates a special exception for deep copying
columns = []
data = []
row_index = []
elif len(columns) == 0:
raise ValueError("Cannot create index without at least one column")
elif len(columns) == 1:
col = columns[0]
row_index = Column(col.argsort())
data = Table([col[row_index]])
else:
num_rows = len(columns[0])
# replace Time columns with approximate form and remainder
new_columns = []
for col in columns:
if isinstance(col, Time):
new_columns.append(col.jd)
remainder = col - col.__class__(col.jd, format='jd')
new_columns.append(remainder.jd)
else:
new_columns.append(col)
# sort the table lexicographically and keep row numbers
table = Table(columns + [np.arange(num_rows)], copy_indices=False)
sort_columns = new_columns[::-1]
try:
lines = table[np.lexsort(sort_columns)]
except TypeError: # arbitrary mixins might not work with lexsort
lines = table[table.argsort()]
data = lines[lines.colnames[:-1]]
row_index = lines[lines.colnames[-1]]
self.data = self.engine(data, row_index, unique=unique)
self.columns = columns
def __len__(self):
'''
Number of rows in index.
'''
return len(self.columns[0])
def replace_col(self, prev_col, new_col):
'''
Replace an indexed column with an updated reference.
Parameters
----------
prev_col : Column
Column reference to replace
new_col : Column
New column reference
'''
self.columns[self.col_position(prev_col.info.name)] = new_col
def reload(self):
'''
Recreate the index based on data in self.columns.
'''
self.__init__(self.columns, engine=self.engine)
def col_position(self, col_name):
'''
Return the position of col_name in self.columns.
Parameters
----------
col_name : str
Name of column to look up
'''
for i, c in enumerate(self.columns):
if c.info.name == col_name:
return i
raise ValueError("Column does not belong to index: {0}".format(col_name))
def insert_row(self, pos, vals, columns):
'''
Insert a new row from the given values.
Parameters
----------
pos : int
Position at which to insert row
vals : list or tuple
List of values to insert into a new row
columns : list
Table column references
'''
key = [None] * len(self.columns)
for i, col in enumerate(columns):
try:
key[i] = vals[self.col_position(col.info.name)]
except ValueError: # not a member of index
continue
num_rows = len(self.columns[0])
if pos < num_rows:
# shift all rows >= pos to the right
self.data.shift_right(pos)
self.data.add(tuple(key), pos)
def get_row_specifier(self, row_specifier):
'''
Return an iterable corresponding to the
input row specifier.
Parameters
----------
row_specifier : int, list, ndarray, or slice
'''
if isinstance(row_specifier, (int, np.integer)):
# single row
return (row_specifier,)
elif isinstance(row_specifier, (list, np.ndarray)):
return row_specifier
elif isinstance(row_specifier, slice):
col_len = len(self.columns[0])
return range(*row_specifier.indices(col_len))
raise ValueError("Expected int, array of ints, or slice but "
"got {0} in remove_rows".format(row_specifier))
def remove_rows(self, row_specifier):
'''
Remove the given rows from the index.
Parameters
----------
row_specifier : int, list, ndarray, or slice
Indicates which row(s) to remove
'''
rows = []
# To maintain the correct row order, we loop twice,
# deleting rows first and then reordering the remaining rows
for row in self.get_row_specifier(row_specifier):
self.remove_row(row, reorder=False)
rows.append(row)
# second pass - row order is reversed to maintain
# correct row numbers
for row in reversed(sorted(rows)):
self.data.shift_left(row)
def remove_row(self, row, reorder=True):
'''
Remove the given row from the index.
Parameters
----------
row : int
Position of row to remove
reorder : bool
Whether to reorder indices after removal
'''
# for removal, form a key consisting of column values in this row
if not self.data.remove(tuple([col[row] for col in self.columns]), row):
raise ValueError("Could not remove row {0} from index".format(row))
# decrement the row number of all later rows
if reorder:
self.data.shift_left(row)
def find(self, key):
'''
Return the row values corresponding to key, in sorted order.
Parameters
----------
key : tuple
Values to search for in each column
'''
return self.data.find(key)
def same_prefix(self, key):
'''
Return rows whose keys contain the supplied key as a prefix.
Parameters
----------
key : tuple
Prefix for which to search
'''
return self.same_prefix_range(key, key, (True, True))
def same_prefix_range(self, lower, upper, bounds=(True, True)):
'''
Return rows whose keys have a prefix in the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
n = len(lower)
ncols = len(self.columns)
a = MinValue() if bounds[0] else MaxValue()
b = MaxValue() if bounds[1] else MinValue()
# [x, y] search corresponds to [(x, min), (y, max)]
        # (x, y) search corresponds to ((x, max), (y, min))
lower = lower + tuple((ncols - n) * [a])
upper = upper + tuple((ncols - n) * [b])
return self.data.range(lower, upper, bounds)
def range(self, lower, upper, bounds=(True, True)):
'''
Return rows within the given range.
Parameters
----------
lower : tuple
Lower prefix bound
upper : tuple
Upper prefix bound
bounds : tuple (x, y) of bools
Indicates whether the search should be inclusive or
exclusive with respect to the endpoints. The first
argument x corresponds to an inclusive lower bound,
and the second argument y to an inclusive upper bound.
'''
return self.data.range(lower, upper, bounds)
def replace(self, row, col_name, val):
'''
Replace the value of a column at a given position.
Parameters
----------
row : int
Row number to modify
col_name : str
Name of the Column to modify
val : col.info.dtype
Value to insert at specified row of col
'''
self.remove_row(row, reorder=False)
key = [c[row] for c in self.columns]
key[self.col_position(col_name)] = val
self.data.add(tuple(key), row)
def replace_rows(self, col_slice):
'''
Modify rows in this index to agree with the specified
slice. For example, given an index
{'5': 1, '2': 0, '3': 2} on a column ['2', '5', '3'],
an input col_slice of [2, 0] will result in the relabeling
{'3': 0, '2': 1} on the sliced column ['3', '2'].
Parameters
----------
col_slice : list
Indices to slice
'''
row_map = dict((row, i) for i, row in enumerate(col_slice))
self.data.replace_rows(row_map)
def sort(self):
'''
Make row numbers follow the same sort order as the keys
of the index.
'''
self.data.sort()
def sorted_data(self):
'''
Returns a list of rows in sorted order based on keys;
essentially acts as an argsort() on columns.
'''
return self.data.sorted_data()
def __getitem__(self, item):
'''
Returns a sliced version of this index.
Parameters
----------
item : slice
Input slice
Returns
-------
SlicedIndex
A sliced reference to this index.
'''
return SlicedIndex(self, item)
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self)
def __deepcopy__(self, memo):
'''
Return a deep copy of this index.
Notes
-----
The default deep copy must be overridden to perform
a shallow copy of the index columns, avoiding infinite recursion.
Parameters
----------
memo : dict
'''
# Bypass Index.__new__ to create an actual Index, not a SlicedIndex.
index = super().__new__(self.__class__)
index.__init__(None, engine=self.engine)
index.data = deepcopy(self.data, memo)
index.columns = self.columns[:] # new list, same columns
memo[id(self)] = index
return index
class SlicedIndex:
'''
This class provides a wrapper around an actual Index object
to make index slicing function correctly. Since numpy expects
array slices to provide an actual data view, a SlicedIndex should
retrieve data directly from the original index and then adapt
it to the sliced coordinate system as appropriate.
Parameters
----------
index : Index
The original Index reference
index_slice : slice
The slice to which this SlicedIndex corresponds
original : bool
Whether this SlicedIndex represents the original index itself.
For the most part this is similar to index[:] but certain
copying operations are avoided, and the slice retains the
length of the actual index despite modification.
'''
def __init__(self, index, index_slice, original=False):
self.index = index
self.original = original
self._frozen = False
if isinstance(index_slice, tuple):
self.start, self._stop, self.step = index_slice
else: # index_slice is an actual slice
num_rows = len(index.columns[0])
self.start, self._stop, self.step = index_slice.indices(num_rows)
@property
def length(self):
return 1 + (self.stop - self.start - 1) // self.step
@property
def stop(self):
'''
The stopping position of the slice, or the end of the
index if this is an original slice.
'''
return len(self.index) if self.original else self._stop
def __getitem__(self, item):
'''
Returns another slice of this Index slice.
Parameters
----------
item : slice
Index slice
'''
if self.length <= 0:
# empty slice
return SlicedIndex(self.index, slice(1, 0))
start, stop, step = item.indices(self.length)
new_start = self.orig_coords(start)
new_stop = self.orig_coords(stop)
new_step = self.step * step
return SlicedIndex(self.index, (new_start, new_stop, new_step))
def sliced_coords(self, rows):
'''
Convert the input rows to the sliced coordinate system.
Parameters
----------
rows : list
Rows in the original coordinate system
Returns
-------
sliced_rows : list
Rows in the sliced coordinate system
'''
if self.original:
return rows
else:
rows = np.array(rows)
row0 = rows - self.start
if self.step != 1:
correct_mod = np.mod(row0, self.step) == 0
row0 = row0[correct_mod]
if self.step > 0:
ok = (row0 >= 0) & (row0 < self.stop - self.start)
else:
ok = (row0 <= 0) & (row0 > self.stop - self.start)
return row0[ok] // self.step
def orig_coords(self, row):
'''
Convert the input row from sliced coordinates back
to original coordinates.
Parameters
----------
row : int
Row in the sliced coordinate system
Returns
-------
orig_row : int
Row in the original coordinate system
'''
return row if self.original else self.start + row * self.step
def find(self, key):
return self.sliced_coords(self.index.find(key))
def where(self, col_map):
return self.sliced_coords(self.index.where(col_map))
def range(self, lower, upper):
return self.sliced_coords(self.index.range(lower, upper))
def same_prefix(self, key):
return self.sliced_coords(self.index.same_prefix(key))
def sorted_data(self):
return self.sliced_coords(self.index.sorted_data())
def replace(self, row, col, val):
if not self._frozen:
self.index.replace(self.orig_coords(row), col, val)
def copy(self):
if not self.original:
# replace self.index with a new object reference
self.index = deepcopy(self.index)
return self.index
def insert_row(self, pos, vals, columns):
if not self._frozen:
self.copy().insert_row(self.orig_coords(pos), vals,
columns)
def get_row_specifier(self, row_specifier):
return [self.orig_coords(x) for x in
self.index.get_row_specifier(row_specifier)]
def remove_rows(self, row_specifier):
if not self._frozen:
self.copy().remove_rows(row_specifier)
def replace_rows(self, col_slice):
if not self._frozen:
self.index.replace_rows([self.orig_coords(x) for x in col_slice])
def sort(self):
if not self._frozen:
self.copy().sort()
def __repr__(self):
if self.original:
return repr(self.index)
return 'Index slice {0} of\n{1}'.format(
(self.start, self.stop, self.step), self.index)
def __str__(self):
return repr(self)
def replace_col(self, prev_col, new_col):
self.index.replace_col(prev_col, new_col)
def reload(self):
self.index.reload()
def col_position(self, col_name):
return self.index.col_position(col_name)
def get_slice(self, col_slice, item):
'''
Return a newly created index from the given slice.
Parameters
----------
col_slice : Column object
Already existing slice of a single column
item : list or ndarray
Slice for retrieval
'''
from .table import Table
if len(self.columns) == 1:
return Index([col_slice], engine=self.data.__class__)
t = Table(self.columns, copy_indices=False)
with t.index_mode('discard_on_copy'):
new_cols = t[item].columns.values()
return Index(new_cols, engine=self.data.__class__)
@property
def columns(self):
return self.index.columns
@property
def data(self):
return self.index.data
def get_index(table, table_copy=None, names=None):
"""
    Return a table index corresponding to the given subset of columns, or
    None if no such index exists.  The subset may be supplied either as a
    Table containing a subset of the columns of ``table`` (``table_copy``)
    or as a list or tuple of column names (``names``); exactly one of the
    two must be given.
Parameters
----------
table : `Table`
Input table
table_copy : `Table`, optional
Subset of the columns in the ``table`` argument
names : list, tuple, optional
Subset of column names in the ``table`` argument
Returns
-------
Index of columns or None
"""
if names is not None and table_copy is not None:
raise ValueError('one and only one argument from "table_copy" or'
' "names" is required')
if names is None and table_copy is None:
raise ValueError('one and only one argument from "table_copy" or'
' "names" is required')
if names is not None:
names = set(names)
else:
names = set(table_copy.colnames)
if not names <= set(table.colnames):
raise ValueError('{} is not a subset of table columns'.format(names))
for name in names:
for index in table[name].info.indices:
if set([col.info.name for col in index.columns]) == names:
return index
return None
def get_index_by_names(table, names):
'''
Returns an index in ``table`` corresponding to the ``names`` columns or None
if no such index exists.
Parameters
----------
table : `Table`
Input table
    names : tuple, list
Column names
'''
names = list(names)
for index in table.indices:
index_names = [col.info.name for col in index.columns]
if index_names == names:
return index
else:
return None
class _IndexModeContext:
'''
A context manager that allows for special indexing modes, which
are intended to improve performance. Currently the allowed modes
are "freeze", in which indices are not modified upon column modification,
"copy_on_getitem", in which indices are copied upon column slicing,
and "discard_on_copy", in which indices are discarded upon table
copying/slicing.
'''
_col_subclasses = {}
def __init__(self, table, mode):
'''
Parameters
----------
table : Table
The table to which the mode should be applied
mode : str
Either 'freeze', 'copy_on_getitem', or 'discard_on_copy'.
In 'discard_on_copy' mode,
indices are not copied whenever columns or tables are copied.
In 'freeze' mode, indices are not modified whenever columns are
modified; at the exit of the context, indices refresh themselves
based on column values. This mode is intended for scenarios in
which one intends to make many additions or modifications on an
indexed column.
In 'copy_on_getitem' mode, indices are copied when taking column
slices as well as table slices, so col[i0:i1] will preserve
indices.
'''
self.table = table
self.mode = mode
# Used by copy_on_getitem
self._orig_classes = []
if mode not in ('freeze', 'discard_on_copy', 'copy_on_getitem'):
raise ValueError("Expected a mode of either 'freeze', "
"'discard_on_copy', or 'copy_on_getitem', got "
"'{0}'".format(mode))
def __enter__(self):
if self.mode == 'discard_on_copy':
self.table._copy_indices = False
elif self.mode == 'copy_on_getitem':
for col in self.table.columns.values():
self._orig_classes.append(col.__class__)
col.__class__ = self._get_copy_on_getitem_shim(col.__class__)
else:
for index in self.table.indices:
index._frozen = True
def __exit__(self, exc_type, exc_value, traceback):
if self.mode == 'discard_on_copy':
self.table._copy_indices = True
elif self.mode == 'copy_on_getitem':
for col in reversed(self.table.columns.values()):
col.__class__ = self._orig_classes.pop()
else:
for index in self.table.indices:
index._frozen = False
index.reload()
def _get_copy_on_getitem_shim(self, cls):
"""
This creates a subclass of the column's class which overrides that
class's ``__getitem__``, such that when returning a slice of the
column, the relevant indices are also copied over to the slice.
Ideally, rather than shimming in a new ``__class__`` we would be able
to just flip a flag that is checked by the base class's
``__getitem__``. Unfortunately, since the flag needs to be a Python
variable, this slows down ``__getitem__`` too much in the more common
case where a copy of the indices is not needed. See the docstring for
``astropy.table._column_mixins`` for more information on that.
"""
if cls in self._col_subclasses:
return self._col_subclasses[cls]
def __getitem__(self, item):
value = cls.__getitem__(self, item)
if type(value) is type(self):
value = self.info.slice_indices(value, item, len(self))
return value
clsname = '_{0}WithIndexCopy'.format(cls.__name__)
new_cls = type(str(clsname), (cls,), {'__getitem__': __getitem__})
self._col_subclasses[cls] = new_cls
return new_cls
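# Illustrative sketch (not part of the original module): 'freeze' mode
# (normally reached via ``Table.index_mode``) defers index maintenance
# during bulk modification; indices are rebuilt on exit.  The helper
# name ``_demo_index_mode`` is hypothetical and assumes ``tbl`` already
# has an index on column 'a'.
def _demo_index_mode(tbl):
    with _IndexModeContext(tbl, 'freeze'):
        tbl['a'][:] = tbl['a'][::-1]  # many modifications, no reindexing
    # On exit each index calls reload() and is consistent again.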
class TableIndices(list):
'''
A special list of table indices allowing
for retrieval by column name(s).
Parameters
----------
lst : list
List of indices
'''
def __init__(self, lst):
super().__init__(lst)
def __getitem__(self, item):
'''
Retrieve an item from the list of indices.
Parameters
----------
item : int, str, tuple, or list
Position in list or name(s) of indexed column(s)
'''
if isinstance(item, str):
item = [item]
if isinstance(item, (list, tuple)):
item = list(item)
for index in self:
try:
for name in item:
index.col_position(name)
if len(index.columns) == len(item):
return index
except ValueError:
pass
# index search failed
raise IndexError("No index found for {0}".format(item))
return super().__getitem__(item)
class TableLoc:
"""
A pseudo-list of Table rows allowing for retrieval
of rows by indexed column values.
Parameters
----------
table : Table
Indexed table to use
"""
def __init__(self, table):
self.table = table
self.indices = table.indices
if len(self.indices) == 0:
raise ValueError("Cannot create TableLoc object with no indices")
def _get_rows(self, item):
"""
        Retrieve Table row indices by value slice.
"""
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
if len(index.columns) > 1:
raise ValueError("Cannot use .loc on multi-column indices")
if isinstance(item, slice):
# None signifies no upper/lower bound
start = MinValue() if item.start is None else item.start
stop = MaxValue() if item.stop is None else item.stop
rows = index.range((start,), (stop,))
else:
if not isinstance(item, (list, np.ndarray)): # single element
item = [item]
# item should be a list or ndarray of values
rows = []
for key in item:
p = index.find((key,))
if len(p) == 0:
raise KeyError('No matches found for key {0}'.format(key))
else:
rows.extend(p)
return rows
def __getitem__(self, item):
"""
Retrieve Table rows by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(item))
elif len(rows) == 1: # single row
return self.table[rows[0]]
return self.table[rows]
def __setitem__(self, key, value):
"""
        Assign Table rows by value slice.
Parameters
----------
key : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
        value : object
            New values of the row elements.  Can be a list of
            tuples/lists to update multiple rows.
"""
rows = self._get_rows(key)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(key))
elif len(rows) == 1: # single row
self.table[rows[0]] = value
else: # multiple rows
if len(rows) == len(value):
for row, val in zip(rows, value):
self.table[row] = val
else:
raise ValueError('Right side should contain {0} values'.format(len(rows)))
class TableLocIndices(TableLoc):
def __getitem__(self, item):
"""
        Retrieve Table rows' indices by value slice.
Parameters
----------
item : column element, list, ndarray, slice or tuple
Can be a value of the table primary index, a list/ndarray
of such values, or a value slice (both endpoints are included).
If a tuple is provided, the first element must be
an index to use instead of the primary key, and the
second element must be as above.
"""
rows = self._get_rows(item)
if len(rows) == 0: # no matches found
raise KeyError('No matches found for key {0}'.format(item))
elif len(rows) == 1: # single row
return rows[0]
return rows
class TableILoc(TableLoc):
'''
A variant of TableLoc allowing for row retrieval by
indexed order rather than data values.
Parameters
----------
table : Table
Indexed table to use
'''
def __init__(self, table):
super().__init__(table)
def __getitem__(self, item):
if isinstance(item, tuple):
key, item = item
else:
key = self.table.primary_key
index = self.indices[key]
rows = index.sorted_data()[item]
table_slice = self.table[rows]
if len(table_slice) == 0: # no matches found
raise IndexError('Invalid index for iloc: {0}'.format(item))
return table_slice
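# Illustrative sketch (not part of the original module): value-based and
# position-based row retrieval through a table index.  The helper name
# ``_demo_table_loc`` is hypothetical.
def _demo_table_loc():
    from .table import Table
    t = Table({'name': ['c', 'a', 'b'], 'val': [3, 1, 2]})
    t.add_index('name')
    row_b = t.loc['b']           # retrieval by indexed value
    first_sorted = t.iloc[0]     # first row in index ('name') order
    rows_ac = t.loc[['a', 'c']]  # multiple keys return a sub-table
    return row_b, first_sorted, rows_ac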
|
9bb698f01d1a7179cc8308f65d7c470b35ebe6c6ef835e02b76267f80d5ce319 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from os.path import abspath, dirname, join
from .table import Table
from astropy.io import registry as io_registry
from astropy import config as _config
from astropy import extern
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.table.jsviewer`.
"""
jquery_url = _config.ConfigItem(
'https://code.jquery.com/jquery-3.1.1.min.js',
'The URL to the jquery library.')
datatables_url = _config.ConfigItem(
'https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
'The URL to the jquery datatables library.')
css_urls = _config.ConfigItem(
['https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css'],
'The URLs to the css file(s) to include.', cfgtype='list')
conf = Conf()
EXTERN_JS_DIR = abspath(join(dirname(extern.__file__), 'jquery', 'data', 'js'))
EXTERN_CSS_DIR = abspath(join(dirname(extern.__file__), 'jquery', 'data', 'css'))
_SORTING_SCRIPT_PART_1 = """
var astropy_sort_num = function(a, b) {{
var a_num = parseFloat(a);
var b_num = parseFloat(b);
if (isNaN(a_num) && isNaN(b_num))
return ((a < b) ? -1 : ((a > b) ? 1 : 0));
else if (!isNaN(a_num) && !isNaN(b_num))
return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0));
else
return isNaN(a_num) ? -1 : 1;
}}
"""
_SORTING_SCRIPT_PART_2 = """
jQuery.extend( jQuery.fn.dataTableExt.oSort, {{
"optionalnum-asc": astropy_sort_num,
"optionalnum-desc": function (a,b) {{ return -astropy_sort_num(a, b); }}
}});
"""
IPYNB_JS_SCRIPT = """
<script>
%(sorting_script1)s
require.config({{paths: {{
datatables: '{datatables_url}'
}}}});
require(["datatables"], function(){{
console.log("$('#{tid}').dataTable()");
%(sorting_script2)s
$('#{tid}').dataTable({{
order: [],
pageLength: {display_length},
lengthMenu: {display_length_menu},
pagingType: "full_numbers",
columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}]
}});
}});
</script>
""" % dict(sorting_script1=_SORTING_SCRIPT_PART_1,
sorting_script2=_SORTING_SCRIPT_PART_2)
HTML_JS_SCRIPT = _SORTING_SCRIPT_PART_1 + _SORTING_SCRIPT_PART_2 + """
$(document).ready(function() {{
$('#{tid}').dataTable({{
order: [],
pageLength: {display_length},
lengthMenu: {display_length_menu},
pagingType: "full_numbers",
columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}]
}});
}} );
"""
# Default CSS for the JSViewer writer
DEFAULT_CSS = """\
body {font-family: sans-serif;}
table.dataTable {width: auto !important; margin: 0 !important;}
.dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em}
"""
# Default CSS used when rendering a table in the IPython notebook
DEFAULT_CSS_NB = """\
table.dataTable {clear: both; width: auto !important; margin: 0 !important;}
.dataTables_info, .dataTables_length, .dataTables_filter, .dataTables_paginate{
display: inline-block; margin-right: 1em; }
.paginate_button { margin-right: 5px; }
"""
class JSViewer:
"""Provides an interactive HTML export of a Table.
This class provides an interface to the `DataTables
<https://datatables.net/>`_ library, which allow to visualize interactively
an HTML table. It is used by the `~astropy.table.Table.show_in_browser`
method.
Parameters
----------
use_local_files : bool, optional
Use local files or a CDN for JavaScript libraries. Default False.
display_length : int, optional
        Number of rows to show.  Defaults to 50.
"""
def __init__(self, use_local_files=False, display_length=50):
self._use_local_files = use_local_files
self.display_length_menu = [[10, 25, 50, 100, 500, 1000, -1],
[10, 25, 50, 100, 500, 1000, "All"]]
self.display_length = display_length
for L in self.display_length_menu:
if display_length not in L:
L.insert(0, display_length)
@property
def jquery_urls(self):
if self._use_local_files:
return ['file://' + join(EXTERN_JS_DIR, 'jquery-3.1.1.min.js'),
'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min.js')]
else:
return [conf.jquery_url, conf.datatables_url]
@property
def css_urls(self):
if self._use_local_files:
return ['file://' + join(EXTERN_CSS_DIR,
'jquery.dataTables.css')]
else:
return conf.css_urls
def _jstable_file(self):
if self._use_local_files:
return 'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min')
else:
return conf.datatables_url[:-3]
def ipynb(self, table_id, css=None, sort_columns='[]'):
html = '<style>{0}</style>'.format(css if css is not None
else DEFAULT_CSS_NB)
html += IPYNB_JS_SCRIPT.format(
display_length=self.display_length,
display_length_menu=self.display_length_menu,
datatables_url=self._jstable_file(),
tid=table_id, sort_columns=sort_columns)
return html
def html_js(self, table_id='table0', sort_columns='[]'):
return HTML_JS_SCRIPT.format(
display_length=self.display_length,
display_length_menu=self.display_length_menu,
tid=table_id, sort_columns=sort_columns).strip()
def write_table_jsviewer(table, filename, table_id=None, max_lines=5000,
table_class="display compact", jskwargs=None,
css=DEFAULT_CSS, htmldict=None):
if table_id is None:
table_id = 'table{id}'.format(id=id(table))
jskwargs = jskwargs or {}
jsv = JSViewer(**jskwargs)
sortable_columns = [i for i, col in enumerate(table.columns.values())
if col.dtype.kind in 'iufc']
html_options = {
'table_id': table_id,
'table_class': table_class,
'css': css,
'cssfiles': jsv.css_urls,
'jsfiles': jsv.jquery_urls,
'js': jsv.html_js(table_id=table_id, sort_columns=sortable_columns)
}
if htmldict:
html_options.update(htmldict)
if max_lines < len(table):
table = table[:max_lines]
table.write(filename, format='html', htmldict=html_options)
io_registry.register_writer('jsviewer', Table, write_table_jsviewer)
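# Illustrative sketch (not part of the original module): once the writer
# is registered, an interactive HTML page can be written directly from
# ``Table.write``.  ``_demo_write_jsviewer`` is a hypothetical helper
# name and 'my_table.html' an arbitrary output path.
def _demo_write_jsviewer():
    t = Table({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
    t.write('my_table.html', format='jsviewer',
            jskwargs={'use_local_files': False, 'display_length': 25})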
|
f75f3589f18427c6e099d3e64a315c451d9995ae7b9fe4d74f784da6bc5fe599 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy.io import registry
from .info import serialize_method_as
__doctest_skip__ = ['TableRead', 'TableWrite']
class TableRead(registry.UnifiedReadWrite):
"""Read and parse a data table and return as a Table.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily reading a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table.read('table.dat', format='ascii')
>>> events = Table.read('events.fits', format='fits')
    Get help on the available readers for ``Table`` using the ``help()`` method::
>>> Table.read.help() # Get help reading Table and list supported formats
>>> Table.read.help('fits') # Get detailed help on Table FITS reader
>>> Table.read.list_formats() # Print list of available formats
See also: http://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data reader. If supplied the
first argument is typically the input filename.
format : str
File format specifier.
**kwargs : dict, optional
Keyword arguments passed through to data reader.
Returns
-------
out : `Table`
Table corresponding to file contents
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, 'read')
def __call__(self, *args, **kwargs):
cls = self._cls
out = registry.read(cls, *args, **kwargs)
# For some readers (e.g., ascii.ecsv), the returned `out` class is not
# guaranteed to be the same as the desired output `cls`. If so,
# try coercing to desired class without copying (io.registry.read
# would normally do a copy). The normal case here is swapping
# Table <=> QTable.
if cls is not out.__class__:
try:
out = cls(out, copy=False)
except Exception:
raise TypeError('could not convert reader output to {0} '
'class.'.format(cls.__name__))
return out
class TableWrite(registry.UnifiedReadWrite):
"""
Write this Table object out in the specified format.
This function provides the Table interface to the astropy unified I/O
layer. This allows easily writing a file in many supported data formats
using syntax such as::
>>> from astropy.table import Table
>>> dat = Table([[1, 2], [3, 4]], names=('a', 'b'))
>>> dat.write('table.dat', format='ascii')
    Get help on the available writers for ``Table`` using the ``help()`` method::
>>> Table.write.help() # Get help writing Table and list supported formats
>>> Table.write.help('fits') # Get detailed help on Table FITS writer
>>> Table.write.list_formats() # Print list of available formats
The ``serialize_method`` argument is explained in the section on
`Table serialization methods
<http://docs.astropy.org/en/latest/io/unified.html#table-serialization-methods>`_.
See also: http://docs.astropy.org/en/stable/io/unified.html
Parameters
----------
*args : tuple, optional
Positional arguments passed through to data writer. If supplied the
first argument is the output filename.
format : str
File format specifier.
serialize_method : str, dict, optional
Serialization method specifier for columns.
**kwargs : dict, optional
Keyword arguments passed through to data writer.
Notes
-----
"""
def __init__(self, instance, cls):
super().__init__(instance, cls, 'write')
def __call__(self, *args, **kwargs):
serialize_method = kwargs.pop('serialize_method', None)
instance = self._instance
with serialize_method_as(instance, serialize_method):
registry.write(instance, *args, **kwargs)
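# Illustrative sketch (not part of the original module): a round trip
# through the unified I/O layer.  ``_demo_unified_io`` is a hypothetical
# helper name and 'example.ecsv' an arbitrary path.
def _demo_unified_io():
    from .table import Table
    t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
    t.write('example.ecsv', format='ascii.ecsv', overwrite=True)
    t2 = Table.read('example.ecsv', format='ascii.ecsv')
    assert (t2['a'] == t['a']).all()
    return t2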
|